2009-11-09 Zoltan Varga <vargaz@gmail.com>
[mono-project.git] / mono / mini / mini-arm.c
blob35216abc54542fdacef3d8335d05cda557ff783b
1 /*
2 * mini-arm.c: ARM backend for the Mono code generator
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 */
10 #include "mini.h"
11 #include <string.h>
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
17 #include "mini-arm.h"
18 #include "cpu-arm.h"
19 #include "trace.h"
20 #include "ir-emit.h"
21 #ifdef ARM_FPU_FPA
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
25 #endif
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
29 #endif
/* TLS offsets for the LMF / LMF-addr slots; -1 until discovered */
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;

/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
static CRITICAL_SECTION mini_arch_mutex;

/* CPU feature flags filled in by mono_arch_cpu_optimizazions () */
static int v5_supported = 0;
static int v7_supported = 0;
static int thumb_supported = 0;

/*
 * The code generated for sequence points reads from this location, which is
 * made read-only when single stepping is enabled.
 */
static gpointer ss_trigger_page;

/* Enabled breakpoints read from this trigger page */
static gpointer bp_trigger_page;

/* Structure used by the sequence points in AOTed code */
typedef struct {
	gpointer ss_trigger_page;
	gpointer bp_trigger_page;
	guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
} SeqPointInfo;

/*
 * TODO:
 * floating point support: on ARM it is a mess, there are at least 3
 * different setups, each of which binary incompat with the other.
 * 1) FPA: old and ugly, but unfortunately what current distros use
 *    the double binary format has the two words swapped. 8 double registers.
 *    Implemented usually by kernel emulation.
 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
 *    ugly swapped double format (I guess a softfloat-vfp exists, too, though).
 * 3) VFP: the new and actually sensible and useful FP support. Implemented
 *    in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
 *
 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
 */
int mono_exc_esp_offset = 0;

/* Ranges of immediates that fit the various ARM addressing-mode encodings */
#define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
#define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)

/* Mask/value pair matching an "ldr pc, [...]" instruction word */
#define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
#define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
#define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)

/* Exact instruction words for "add lr, pc, #4" and "mov lr, pc", used to
 * recognize call sequences in mono_arch_get_vcall_slot () */
#define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
#define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
#define DEBUG_IMT 0
/*
 * mono_arch_regname:
 *
 *   Return the symbolic name of the ARM integer register REG
 * (0-15), or "unknown" for anything out of range.
 */
const char*
mono_arch_regname (int reg)
{
	static const char *names [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};

	if (reg < 0 || reg >= 16)
		return "unknown";
	return names [reg];
}
/*
 * mono_arch_fregname:
 *
 *   Return the symbolic name of the ARM floating point register REG
 * (0-31), or "unknown" for anything out of range.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *names [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};

	if (reg < 0 || reg >= 32)
		return "unknown";
	return names [reg];
}
118 static guint8*
119 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
121 int imm8, rot_amount;
122 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
123 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
124 return code;
126 g_assert (dreg != sreg);
127 code = mono_arm_emit_load_imm (code, dreg, imm);
128 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
129 return code;
/*
 * emit_memcpy:
 *
 *   Emit code copying SIZE bytes from [sreg + soffset] to [dreg + doffset].
 * SIZE is expected to be a multiple of 4 (asserted at the end for the
 * word-copy paths). Large copies use a counted loop, small copies are
 * unrolled; r0-r3 and lr are used as scratch registers.
 */
static guint8*
emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
{
	/* we can use r0-r3, since this is called only for incoming args on the stack */
	if (size > sizeof (gpointer) * 4) {
		guint8 *start_loop;
		/* r0 = source address, r1 = destination address, r2 = byte count */
		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
		start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
		/* copy one word per iteration, counting r2 down to zero */
		ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
		ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
		ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
		ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
		ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
		ARM_B_COND (code, ARMCOND_NE, 0);
		/* patch the branch just emitted to jump back to the loop head */
		arm_patch (code - 4, start_loop);
		return code;
	}
	if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
			arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
		/* both offsets stay within the 12 bit immediate range: unrolled copy */
		while (size >= 4) {
			ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
			ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
			doffset += 4;
			soffset += 4;
			size -= 4;
		}
	} else if (size) {
		/* offsets too large for the immediate form: compute base addresses first */
		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
		doffset = soffset = 0;
		while (size >= 4) {
			ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
			ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
			doffset += 4;
			soffset += 4;
			size -= 4;
		}
	}
	g_assert (size == 0);
	return code;
}
175 static guint8*
176 emit_call_reg (guint8 *code, int reg)
178 if (v5_supported) {
179 ARM_BLX_REG (code, reg);
180 } else {
181 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
182 if (thumb_supported)
183 ARM_BX (code, reg);
184 else
185 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
187 return code;
190 static guint8*
191 emit_call_seq (MonoCompile *cfg, guint8 *code)
193 if (cfg->method->dynamic) {
194 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
195 ARM_B (code, 0);
196 *(gpointer*)code = NULL;
197 code += 4;
198 code = emit_call_reg (code, ARMREG_IP);
199 } else {
200 ARM_BL (code, 0);
202 return code;
/*
 * emit_move_return_value:
 *
 *   After a call instruction INS, emit code moving the native return value
 * into INS->dreg. Only float calls need work here: with FPA the result is
 * already in f0, with VFP the integer result registers (r0/r1) have to be
 * transferred into the destination VFP register.
 */
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
	switch (ins->opcode) {
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
#ifdef ARM_FPU_FPA
		if (ins->dreg != ARM_FPA_F0)
			ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
#elif defined(ARM_FPU_VFP)
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
			/* single precision: move r0 into the VFP reg, then widen to double */
			ARM_FMSR (code, ins->dreg, ARMREG_R0);
			ARM_CVTS (code, ins->dreg, ins->dreg);
		} else {
			/* double precision: combine the r0/r1 pair into the VFP reg */
			ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
		}
#endif
		break;
	}

	return code;
}
/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k, frame_size = 0;
	guint32 size, align, pad;
	int offset = 8;

	/* a struct return adds a hidden pointer argument */
	if (MONO_TYPE_ISSTRUCT (csig->ret)) {
		frame_size += sizeof (gpointer);
		offset += 4;
	}

	arg_info [0].offset = offset;

	if (csig->hasthis) {
		frame_size += sizeof (gpointer);
		offset += 4;
	}

	arg_info [0].size = frame_size;

	for (k = 0; k < param_count; k++) {
		size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);

		/* ignore alignment for now */
		align = 1;

		/* with align forced to 1 this pad is always 0; kept so re-enabling
		 * alignment only requires removing the line above */
		frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
		arg_info [k].pad = pad;
		frame_size += size;
		arg_info [k + 1].pad = 0;
		arg_info [k + 1].size = size;
		offset += pad;
		arg_info [k + 1].offset = offset;
		offset += size;
	}

	/* round the whole frame up to the ABI frame alignment */
	align = MONO_ARCH_FRAME_ALIGNMENT;
	frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
	arg_info [k].pad = pad;

	return frame_size;
}
284 static gpointer
285 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
287 char *o = NULL;
288 int reg, offset = 0;
289 reg = (ldr >> 16 ) & 0xf;
290 offset = ldr & 0xfff;
291 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
292 offset = -offset;
293 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
294 o = (gpointer)regs [reg];
296 *displacement = offset;
297 return o;
/*
 * mono_arch_get_vcall_slot:
 *
 *   Given CODE_PTR, the return address of a call, identify whether the call
 * went through a vtable/IMT slot and, if so, return the base address and
 * displacement of that slot by decoding the "ldr pc" instruction of the
 * call sequence. Returns NULL for direct calls.
 */
gpointer
mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
{
	guint32* code = (guint32*)code_ptr;

	/* Locate the address of the method-specific trampoline. The call using
	   the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
	   looks something like this:

		ldr rA, rX, #offset
		mov lr, pc
		mov pc, rA
	   or better:
		mov lr, pc
		ldr pc, rX, #offset

	   The call sequence could be also:
		ldr ip, pc, 0
		b skip
		function pointer literal
		skip:
		mov lr, pc
		mov pc, ip
	   Note that on ARM5+ we can use one instruction instead of the last two.
	   Therefore, we need to locate the 'ldr rA' instruction to know which
	   register was used to hold the method addrs.
	 */

	/* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
	--code;

	/* Three possible code sequences can happen here:
	 * interface call:
	 *
	 * add lr, [pc + #4]
	 * ldr pc, [rX - #offset]
	 * .word IMT value
	 *
	 * virtual call:
	 *
	 * mov lr, pc
	 * ldr pc, [rX - #offset]
	 *
	 * direct branch with bl:
	 *
	 * bl #offset
	 *
	 * direct branch with mov:
	 *
	 * mv pc, rX
	 *
	 * We only need to identify interface and virtual calls, the others can be ignored.
	 */
	if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
		return decode_vcall_slot_from_ldr (code [-1], regs, displacement);

	if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
		return decode_vcall_slot_from_ldr (code [0], regs, displacement);

	return NULL;
}
363 #define MAX_ARCH_DELEGATE_PARAMS 3
365 static gpointer
366 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
368 guint8 *code, *start;
370 if (has_target) {
371 start = code = mono_global_codeman_reserve (12);
373 /* Replace the this argument with the target */
374 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
375 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
376 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
378 g_assert ((code - start) <= 12);
380 mono_arch_flush_icache (start, 12);
381 } else {
382 int size, i;
384 size = 8 + param_count * 4;
385 start = code = mono_global_codeman_reserve (size);
387 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
388 /* slide down the arguments */
389 for (i = 0; i < param_count; ++i) {
390 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
392 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
394 g_assert ((code - start) <= size);
396 mono_arch_flush_icache (start, size);
399 if (code_size)
400 *code_size = code - start;
402 return start;
406 * mono_arch_get_delegate_invoke_impls:
408 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
409 * trampolines.
411 GSList*
412 mono_arch_get_delegate_invoke_impls (void)
414 GSList *res = NULL;
415 guint8 *code;
416 guint32 code_len;
417 int i;
419 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
420 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
422 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
423 code = get_delegate_invoke_impl (FALSE, i, &code_len);
424 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
427 return res;
/*
 * mono_arch_get_delegate_invoke_impl:
 *
 *   Return (and cache) the invoke-impl trampoline matching SIG. The
 * has-target case uses a single cached entry, the no-target case caches one
 * trampoline per parameter count. The caches are guarded by the arch mutex;
 * in AOT-only mode the precompiled named trampolines are looked up instead
 * of generating code. Returns NULL when the signature is unsupported.
 */
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	guint8 *code, *start;

	/* FIXME: Support more cases */
	if (MONO_TYPE_ISSTRUCT (sig->ret))
		return NULL;

	if (has_target) {
		static guint8* cached = NULL;
		mono_mini_arch_lock ();
		if (cached) {
			mono_mini_arch_unlock ();
			return cached;
		}

		if (mono_aot_only)
			start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
		else
			start = get_delegate_invoke_impl (TRUE, 0, NULL);
		cached = start;
		mono_mini_arch_unlock ();
		return cached;
	} else {
		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
		int i;

		/* only register-sized arguments can be slid down by the trampoline */
		if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
			return NULL;
		for (i = 0; i < sig->param_count; ++i)
			if (!mono_is_regsize_var (sig->params [i]))
				return NULL;

		mono_mini_arch_lock ();
		code = cache [sig->param_count];
		if (code) {
			mono_mini_arch_unlock ();
			return code;
		}

		if (mono_aot_only) {
			char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
			start = mono_aot_get_named_code (name);
			g_free (name);
		} else {
			start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
		}
		cache [sig->param_count] = start;
		mono_mini_arch_unlock ();
		return start;
	}

	/* not reached: both branches above return */
	return NULL;
}
486 gpointer
487 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
489 /* FIXME: handle returning a struct */
490 if (MONO_TYPE_ISSTRUCT (sig->ret))
491 return (gpointer)regs [ARMREG_R1];
492 return (gpointer)regs [ARMREG_R0];
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
	/* Nothing to do here: CPU feature detection happens in
	 * mono_arch_cpu_optimizazions () */
}
/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
	InitializeCriticalSection (&mini_arch_mutex);

	/* map the single-step and breakpoint trigger pages; the breakpoint
	 * page is made inaccessible so reads from it fault */
	ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
	bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
	mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
}
/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
	/* NOTE(review): mini_arch_mutex (initialized in mono_arch_init) is never
	 * deleted and the trigger pages are not unmapped — presumably acceptable
	 * for process-lifetime state, but worth confirming. */
}
/*
 * This function returns the optimizations supported on this cpu.
 * (The misspelled name is the established cross-backend API name.)
 * As a side effect it detects the v5/v7/thumb CPU features, on Linux by
 * parsing /proc/cpuinfo.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
	guint32 opts = 0;
#if __APPLE__
	thumb_supported = TRUE;
	v5_supported = TRUE;
#else
	char buf [512];
	char *line;
	FILE *file = fopen ("/proc/cpuinfo", "r");
	if (file) {
		while ((line = fgets (buf, 512, file))) {
			if (strncmp (line, "Processor", 9) == 0) {
				/* e.g. "Processor : ARM926EJ-S rev 5 (v5l)" */
				char *ver = strstr (line, "(v");
				if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
					v5_supported = TRUE;
				if (ver && (ver [2] == '7'))
					v7_supported = TRUE;
				continue;
			}
			if (strncmp (line, "Features", 8) == 0) {
				char *th = strstr (line, "thumb");
				if (th) {
					thumb_supported = TRUE;
					/* stop early once both flags are known */
					if (v5_supported)
						break;
				}
				continue;
			}
		}
		fclose (file);
		/*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
	}
#endif

	/* no arm-specific optimizations yet */
	*exclude_mask = 0;
	return opts;
}
568 static gboolean
569 is_regsize_var (MonoType *t) {
570 if (t->byref)
571 return TRUE;
572 t = mini_type_get_underlying_type (NULL, t);
573 switch (t->type) {
574 case MONO_TYPE_I4:
575 case MONO_TYPE_U4:
576 case MONO_TYPE_I:
577 case MONO_TYPE_U:
578 case MONO_TYPE_PTR:
579 case MONO_TYPE_FNPTR:
580 return TRUE;
581 case MONO_TYPE_OBJECT:
582 case MONO_TYPE_STRING:
583 case MONO_TYPE_CLASS:
584 case MONO_TYPE_SZARRAY:
585 case MONO_TYPE_ARRAY:
586 return TRUE;
587 case MONO_TYPE_GENERICINST:
588 if (!mono_type_generic_inst_is_valuetype (t))
589 return TRUE;
590 return FALSE;
591 case MONO_TYPE_VALUETYPE:
592 return FALSE;
594 return FALSE;
597 GList *
598 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
600 GList *vars = NULL;
601 int i;
603 for (i = 0; i < cfg->num_varinfo; i++) {
604 MonoInst *ins = cfg->varinfo [i];
605 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
607 /* unused vars */
608 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
609 continue;
611 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
612 continue;
614 /* we can only allocate 32 bit values */
615 if (is_regsize_var (ins->inst_vtype)) {
616 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
617 g_assert (i == vmv->idx);
618 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
622 return vars;
625 #define USE_EXTRA_TEMPS 0
627 GList *
628 mono_arch_get_global_int_regs (MonoCompile *cfg)
630 GList *regs = NULL;
633 * FIXME: Interface calls might go through a static rgctx trampoline which
634 * sets V5, but it doesn't save it, so we need to save it ourselves, and
635 * avoid using it.
637 if (cfg->flags & MONO_CFG_HAS_CALLS)
638 cfg->uses_rgctx_reg = TRUE;
640 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
641 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
642 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
643 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
644 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
645 /* V5 is reserved for passing the vtable/rgctx/IMT method */
646 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
647 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
648 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
650 return regs;
654 * mono_arch_regalloc_cost:
656 * Return the cost, in number of memory references, of the action of
657 * allocating the variable VMV into a register during global register
658 * allocation.
660 guint32
661 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
663 /* FIXME: */
664 return 2;
667 #ifndef __GNUC_PREREQ
668 #define __GNUC_PREREQ(maj, min) (0)
669 #endif
671 void
672 mono_arch_flush_icache (guint8 *code, gint size)
674 #if __APPLE__
675 sys_icache_invalidate (code, size);
676 #elif __GNUC_PREREQ(4, 1)
677 __clear_cache (code, code + size);
678 #elif defined(PLATFORM_ANDROID)
679 const int syscall = 0xf0002;
680 __asm __volatile (
681 "mov r0, %0\n"
682 "mov r1, %1\n"
683 "mov r7, %2\n"
684 "mov r2, #0x0\n"
685 "svc 0x00000000\n"
687 : "r" (code), "r" (code + size), "r" (syscall)
688 : "r0", "r1", "r7", "r2"
690 #else
691 __asm __volatile ("mov r0, %0\n"
692 "mov r1, %1\n"
693 "mov r2, %2\n"
694 "swi 0x9f0002 @ sys_cacheflush"
695 : /* no outputs */
696 : "r" (code), "r" (code + size), "r" (0)
697 : "r0", "r1", "r3" );
698 #endif
/* Where/how a single argument or return value is passed; stored in
 * ArgInfo.regtype below. */
enum {
	RegTypeNone,
	RegTypeGeneral,
	RegTypeIRegPair,
	RegTypeBase,
	RegTypeBaseGen,
	RegTypeFP,
	RegTypeStructByVal,
	RegTypeStructByAddr
};

/* Placement of one argument, computed by get_call_info () */
typedef struct {
	gint32 offset;
	guint16 vtsize; /* in param area */
	guint8 reg;
	guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
	guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
} ArgInfo;

/* Full calling-convention layout of a signature; allocated with a
 * trailing args array of cinfo->nargs entries */
typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 struct_ret;
	gboolean vtype_retaddr;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;

#define DEBUG(a)

#ifndef __GNUC__
/*#define __alignof__(a) sizeof(a)*/
#define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
#endif

/* r0-r3 are used for argument passing */
#define PARAM_REGS 4
/*
 * add_general:
 *
 *   Assign the next location for an argument: a core register (r0-r3) or a
 * caller stack slot, recording the decision in AINFO. SIMPLE selects
 * word-sized arguments; otherwise the value occupies two words and may be
 * split between r3 and the stack, or aligned to an even register pair,
 * depending on the EABI rules. GR (next free core register) and STACK_SIZE
 * (running stack offset) are updated in place.
 */
static void inline
add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
{
	if (simple) {
		if (*gr > ARMREG_R3) {
			/* registers exhausted: pass on the caller's stack */
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->regtype = RegTypeBase;
			*stack_size += 4;
		} else {
			ainfo->regtype = RegTypeGeneral;
			ainfo->reg = *gr;
		}
	} else {
#if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
		int i8_align = 4;
#else
		int i8_align = __alignof__ (gint64);
#endif

#if __ARM_EABI__
		gboolean split = i8_align == 4;
#else
		gboolean split = TRUE;
#endif

		if (*gr == ARMREG_R3 && split) {
			/* first word in r3 and the second on the stack */
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->regtype = RegTypeBaseGen;
			*stack_size += 4;
		} else if (*gr >= ARMREG_R3) {
#ifdef __ARM_EABI__
			/* darwin aligns longs to 4 byte only */
			if (i8_align == 8) {
				*stack_size += 7;
				*stack_size &= ~7;
			}
#endif
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->regtype = RegTypeBase;
			*stack_size += 8;
		} else {
#ifdef __ARM_EABI__
			/* EABI: 64 bit values start at an even register */
			if (i8_align == 8 && ((*gr) & 1))
				(*gr) ++;
#endif
			ainfo->regtype = RegTypeIRegPair;
			ainfo->reg = *gr;
		}
		/* second increment for the extra word of a pair */
		(*gr) ++;
	}
	(*gr) ++;
}
/*
 * get_call_info:
 *
 *   Compute the calling-convention layout for SIG: where each argument and
 * the return value live (registers and/or caller stack), plus the total
 * stack usage. IS_PINVOKE selects native instead of managed struct sizing.
 * The returned CallInfo is allocated with g_malloc0; the caller frees it.
 */
static CallInfo*
get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
{
	guint i, gr;
	int n = sig->hasthis + sig->param_count;
	MonoType *simpletype;
	guint32 stack_size = 0;
	CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);

	cinfo->nargs = n;
	gr = ARMREG_R0;

	/* FIXME: handle returning a struct */
	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* a hidden return-buffer pointer takes the first slot */
		add_general (&gr, &stack_size, &cinfo->ret, TRUE);
		cinfo->struct_ret = ARMREG_R0;
		cinfo->vtype_retaddr = TRUE;
	}

	/* n is reused from here on as the index into cinfo->args */
	n = 0;
	if (sig->hasthis) {
		add_general (&gr, &stack_size, cinfo->args + n, TRUE);
		n++;
	}
	DEBUG(printf("params: %d\n", sig->param_count));
	for (i = 0; i < sig->param_count; ++i) {
		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Prevent implicit arguments and sig_cookie from
			   being passed in registers */
			gr = ARMREG_R3 + 1;
			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
		}
		DEBUG(printf("param %d: ", i));
		if (sig->params [i]->byref) {
			DEBUG(printf("byref\n"));
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
			n++;
			continue;
		}
		simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
		switch (simpletype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			cinfo->args [n].size = 1;
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
			n++;
			break;
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			cinfo->args [n].size = 2;
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
			n++;
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			cinfo->args [n].size = 4;
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
			n++;
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_R4:
			/* R4 is passed like a pointer-sized integer here */
			cinfo->args [n].size = sizeof (gpointer);
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
			n++;
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
				cinfo->args [n].size = sizeof (gpointer);
				add_general (&gr, &stack_size, cinfo->args + n, TRUE);
				n++;
				break;
			}
			/* Fall through */
		case MONO_TYPE_TYPEDBYREF:
		case MONO_TYPE_VALUETYPE: {
			gint size;
			int align_size;
			int nwords;
			guint32 align;

			if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
				size = sizeof (MonoTypedRef);
				align = sizeof (gpointer);
			} else {
				MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
				if (is_pinvoke)
					size = mono_class_native_size (klass, &align);
				else
					size = mono_class_value_size (klass, &align);
			}
			DEBUG(printf ("load %d bytes struct\n",
				      mono_class_native_size (sig->params [i]->data.klass, NULL)));
			/* round the struct up to whole machine words */
			align_size = size;
			nwords = 0;
			align_size += (sizeof (gpointer) - 1);
			align_size &= ~(sizeof (gpointer) - 1);
			nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
			cinfo->args [n].regtype = RegTypeStructByVal;
			/* FIXME: align stack_size if needed */
#ifdef __ARM_EABI__
			if (align >= 8 && (gr & 1))
				gr ++;
#endif
			if (gr > ARMREG_R3) {
				/* no registers left: everything goes to the stack */
				cinfo->args [n].size = 0;
				cinfo->args [n].vtsize = nwords;
			} else {
				/* as many words as fit go into r_gr..r3, the rest spills */
				int rest = ARMREG_R3 - gr + 1;
				int n_in_regs = rest >= nwords? nwords: rest;

				cinfo->args [n].size = n_in_regs;
				cinfo->args [n].vtsize = nwords - n_in_regs;
				cinfo->args [n].reg = gr;
				gr += n_in_regs;
				nwords -= n_in_regs;
			}
			cinfo->args [n].offset = stack_size;
			/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
			stack_size += nwords * sizeof (gpointer);
			n++;
			break;
		}
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
		case MONO_TYPE_R8:
			cinfo->args [n].size = 8;
			add_general (&gr, &stack_size, cinfo->args + n, FALSE);
			n++;
			break;
		default:
			g_error ("Can't trampoline 0x%x", sig->params [i]->type);
		}
	}

	{
		/* classify the return value */
		simpletype = mini_type_get_underlying_type (NULL, sig->ret);
		switch (simpletype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_STRING:
			cinfo->ret.regtype = RegTypeGeneral;
			cinfo->ret.reg = ARMREG_R0;
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			cinfo->ret.regtype = RegTypeIRegPair;
			cinfo->ret.reg = ARMREG_R0;
			break;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			cinfo->ret.regtype = RegTypeFP;
			cinfo->ret.reg = ARMREG_R0;
			/* FIXME: cinfo->ret.reg = ???;
			cinfo->ret.regtype = RegTypeFP;*/
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
				cinfo->ret.regtype = RegTypeGeneral;
				cinfo->ret.reg = ARMREG_R0;
				break;
			}
			cinfo->ret.regtype = RegTypeStructByAddr;
			break;
		case MONO_TYPE_VALUETYPE:
			cinfo->ret.regtype = RegTypeStructByAddr;
			break;
		case MONO_TYPE_TYPEDBYREF:
			cinfo->ret.regtype = RegTypeStructByAddr;
			break;
		case MONO_TYPE_VOID:
			break;
		default:
			g_error ("Can't handle as return value 0x%x", sig->ret->type);
		}
	}

	/* align stack size to 8 */
	DEBUG (printf ("      stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
	stack_size = (stack_size + 7) & ~7;

	cinfo->stack_usage = stack_size;
	return cinfo;
}
/*
 * Set var information according to the calling convention. arm version.
 * The locals var stuff should most likely be split in another method.
 *
 * Assigns a frame offset (OP_REGOFFSET) or register (OP_REGVAR) to the
 * return value, every local and every incoming argument, and computes the
 * final frame size into cfg->stack_offset.
 */
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *inst;
	int i, offset, size, align, curinst;
	int frame_reg = ARMREG_FP;
	guint32 ualign;

	/* FIXME: this will change when we use FP as gcc does */
	cfg->flags |= MONO_CFG_HAS_SPILLUP;

	/* allow room for the vararg method args: void* and long/double */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);

	header = mono_method_get_header (cfg->method);

	/*
	 * We use the frame register also for any method that has
	 * exception clauses. This way, when the handlers are called,
	 * the code will reference local variables using the frame reg instead of
	 * the stack pointer: if we had to restore the stack pointer, we'd
	 * corrupt the method frames that are already on the stack (since
	 * filters get called before stack unwinding happens) when the filter
	 * code would call any method (this also applies to finally etc.).
	 */
	if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
		frame_reg = ARMREG_FP;
	cfg->frame_reg = frame_reg;
	if (frame_reg != ARMREG_SP) {
		cfg->used_int_regs |= 1 << frame_reg;
	}

	if (cfg->compile_aot || cfg->uses_rgctx_reg)
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		cfg->used_int_regs |= (1 << ARMREG_V5);

	sig = mono_method_signature (cfg->method);

	offset = 0;
	curinst = 0;
	if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* FIXME: handle long and FP values */
		switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
		case MONO_TYPE_VOID:
			break;
		default:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = ARMREG_R0;
			break;
		}
	}
	/* local vars are at a positive offset from the stack pointer */
	/*
	 * also note that if the function uses alloca, we use FP
	 * to point at the local variables.
	 */
	offset = 0; /* linkage area */
	/* align the offset to 16 bytes: not sure this is needed here */
	//offset += 8 - 1;
	//offset &= ~(8 - 1);

	/* add parameter area size for called functions */
	offset += cfg->param_area;
	offset += 8 - 1;
	offset &= ~(8 - 1);
	if (cfg->flags & MONO_CFG_HAS_FPOUT)
		offset += 8;

	/* allow room to save the return value */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		offset += 8;

	/* the MonoLMF structure is stored just below the stack pointer */

	if (sig->call_convention == MONO_CALL_VARARG) {
		cfg->sig_cookie = 0;
	}

	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* the hidden return-buffer pointer gets its own slot */
		inst = cfg->vret_addr;
		offset += sizeof(gpointer) - 1;
		offset &= ~(sizeof(gpointer) - 1);
		inst->inst_offset = offset;
		inst->opcode = OP_REGOFFSET;
		inst->inst_basereg = frame_reg;
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr =");
			mono_print_ins (cfg->vret_addr);
		}
		offset += sizeof(gpointer);
		if (sig->call_convention == MONO_CALL_VARARG)
			cfg->sig_cookie += sizeof (gpointer);
	}

	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		inst = cfg->varinfo [i];
		if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structure */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
			align = ualign;
		}
		else
			size = mono_type_size (inst->inst_vtype, &align);

		/* FIXME: if a structure is misaligned, our memcpy doesn't work,
		 * since it loads/stores misaligned words, which don't do the right thing.
		 */
		if (align < 4 && size >= 4)
			align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		inst->inst_offset = offset;
		inst->opcode = OP_REGOFFSET;
		inst->inst_basereg = frame_reg;
		offset += size;
		//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
	}

	curinst = 0;
	if (sig->hasthis) {
		inst = cfg->args [curinst];
		if (inst->opcode != OP_REGVAR) {
			inst->opcode = OP_REGOFFSET;
			inst->inst_basereg = frame_reg;
			offset += sizeof (gpointer) - 1;
			offset &= ~(sizeof (gpointer) - 1);
			inst->inst_offset = offset;
			offset += sizeof (gpointer);
			if (sig->call_convention == MONO_CALL_VARARG)
				cfg->sig_cookie += sizeof (gpointer);
		}
		curinst++;
	}

	for (i = 0; i < sig->param_count; ++i) {
		inst = cfg->args [curinst];
		if (inst->opcode != OP_REGVAR) {
			inst->opcode = OP_REGOFFSET;
			inst->inst_basereg = frame_reg;
			size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
			align = ualign;
			/* FIXME: if a structure is misaligned, our memcpy doesn't work,
			 * since it loads/stores misaligned words, which don't do the right thing.
			 */
			if (align < 4 && size >= 4)
				align = 4;
			offset += align - 1;
			offset &= ~(align - 1);
			inst->inst_offset = offset;
			offset += size;
			if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
				cfg->sig_cookie += size;
		}
		curinst++;
	}

	/* align the offset to 8 bytes */
	offset += 8 - 1;
	offset &= ~(8 - 1);

	/* change sign? */
	cfg->stack_offset = offset;
}
/*
 * mono_arch_create_vars:
 *
 *   Create the arch specific MonoInst variables needed by the method:
 *   an OP_ARG variable (cfg->vret_addr) when the signature returns a
 *   struct, and two volatile OP_LOCAL variables used by the sequence
 *   point implementation when compiling AOT code.
 */
1182 void
1183 mono_arch_create_vars (MonoCompile *cfg)
1185 MonoMethodSignature *sig;
1187 sig = mono_method_signature (cfg->method);
/* Struct returns go through a hidden return-address argument. */
1189 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1190 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1191 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1192 printf ("vret_addr = ");
1193 mono_print_ins (cfg->vret_addr);
/* In AOT mode the trigger pages are reached through a SeqPointInfo
 * structure (see the typedef at the top of this file); cache its
 * address in a volatile local so each seq point can load it cheaply. */
1197 if (cfg->gen_seq_points && cfg->compile_aot) {
1198 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1199 ins->flags |= MONO_INST_VOLATILE;
1200 cfg->arch.seq_point_info_var = ins;
1202 /* Allocate a separate variable for this to save 1 load per seq point */
1203 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1204 ins->flags |= MONO_INST_VOLATILE;
1205 cfg->arch.ss_trigger_page_var = ins;
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR which moves the arguments of CALL into the registers /
 *   stack slots dictated by get_call_info (). Each argument is handled
 *   according to its ArgInfo->regtype; 64 bit values and doubles occupy
 *   register pairs, and on hard-float builds FP values are bounced
 *   through the bottom of the param area to move them between FP and
 *   core registers. A struct return address is passed via call->vret_var.
 */
1209 void
1210 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1212 MonoInst *in, *ins;
1213 MonoMethodSignature *sig;
1214 int i, n;
1215 CallInfo *cinfo;
1217 sig = call->signature;
1218 n = sig->param_count + sig->hasthis;
1220 cinfo = get_call_info (sig, sig->pinvoke);
1222 for (i = 0; i < n; ++i) {
1223 ArgInfo *ainfo = cinfo->args + i;
1224 MonoType *t;
1226 if (i >= sig->hasthis)
1227 t = sig->params [i - sig->hasthis];
1228 else
/* The implicit 'this' argument is pointer sized. */
1229 t = &mono_defaults.int_class->byval_arg;
1230 t = mini_type_get_underlying_type (NULL, t);
1232 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1233 /* FIXME: */
1234 NOT_IMPLEMENTED;
1237 in = call->args [i];
1239 switch (ainfo->regtype) {
1240 case RegTypeGeneral:
1241 case RegTypeIRegPair:
/* I8/U8 values live in a vreg pair (dreg+1 = low, dreg+2 = high). */
1242 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1243 MONO_INST_NEW (cfg, ins, OP_MOVE);
1244 ins->dreg = mono_alloc_ireg (cfg);
1245 ins->sreg1 = in->dreg + 1;
1246 MONO_ADD_INS (cfg->cbb, ins);
1247 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1249 MONO_INST_NEW (cfg, ins, OP_MOVE);
1250 ins->dreg = mono_alloc_ireg (cfg);
1251 ins->sreg1 = in->dreg + 2;
1252 MONO_ADD_INS (cfg->cbb, ins);
1253 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1254 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1255 #ifndef MONO_ARCH_SOFT_FLOAT
1256 int creg;
1257 #endif
1259 if (ainfo->size == 4) {
1260 #ifdef MONO_ARCH_SOFT_FLOAT
1261 /* mono_emit_call_args () have already done the r8->r4 conversion */
1262 /* The converted value is in an int vreg */
1263 MONO_INST_NEW (cfg, ins, OP_MOVE);
1264 ins->dreg = mono_alloc_ireg (cfg);
1265 ins->sreg1 = in->dreg;
1266 MONO_ADD_INS (cfg->cbb, ins);
1267 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1268 #else
/* Hard float: store the FP value below the param area, then reload
 * it into a core register for the calling convention. */
1269 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1270 creg = mono_alloc_ireg (cfg);
1271 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1272 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1273 #endif
1274 } else {
1275 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float: split the double into its two 32 bit halves. */
1276 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1277 ins->dreg = mono_alloc_ireg (cfg);
1278 ins->sreg1 = in->dreg;
1279 MONO_ADD_INS (cfg->cbb, ins);
1280 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1282 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1283 ins->dreg = mono_alloc_ireg (cfg);
1284 ins->sreg1 = in->dreg;
1285 MONO_ADD_INS (cfg->cbb, ins);
1286 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1287 #else
1288 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1289 creg = mono_alloc_ireg (cfg);
1290 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1291 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1292 creg = mono_alloc_ireg (cfg);
1293 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1294 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1295 #endif
1297 cfg->flags |= MONO_CFG_HAS_FPOUT;
1298 } else {
/* Plain pointer-sized argument: a single move into the arg register. */
1299 MONO_INST_NEW (cfg, ins, OP_MOVE);
1300 ins->dreg = mono_alloc_ireg (cfg);
1301 ins->sreg1 = in->dreg;
1302 MONO_ADD_INS (cfg->cbb, ins);
1304 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1306 break;
1307 case RegTypeStructByAddr:
1308 NOT_IMPLEMENTED;
1309 #if 0
1310 /* FIXME: where si the data allocated? */
1311 arg->backend.reg3 = ainfo->reg;
1312 call->used_iregs |= 1 << ainfo->reg;
1313 g_assert_not_reached ();
1314 #endif
1315 break;
1316 case RegTypeStructByVal:
/* Defer to mono_arch_emit_outarg_vt (); pass it a private copy of the
 * ArgInfo through inst_p1 (ainfo lives in cinfo, freed below). */
1317 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1318 ins->opcode = OP_OUTARG_VT;
1319 ins->sreg1 = in->dreg;
1320 ins->klass = in->klass;
1321 ins->inst_p0 = call;
1322 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1323 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1324 MONO_ADD_INS (cfg->cbb, ins);
1325 break;
1326 case RegTypeBase:
/* Argument goes on the stack at sp+offset. */
1327 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1328 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1329 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1330 if (t->type == MONO_TYPE_R8) {
1331 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1332 } else {
1333 #ifdef MONO_ARCH_SOFT_FLOAT
1334 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1335 #else
1336 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1337 #endif
1339 } else {
1340 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1342 break;
1343 case RegTypeBaseGen:
/* 64 bit value split between the last core register (r3) and the
 * first stack slot. */
1344 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1345 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1346 MONO_INST_NEW (cfg, ins, OP_MOVE);
1347 ins->dreg = mono_alloc_ireg (cfg);
1348 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1349 MONO_ADD_INS (cfg->cbb, ins);
1350 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1351 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1352 int creg;
1354 #ifdef MONO_ARCH_SOFT_FLOAT
1355 g_assert_not_reached ();
1356 #endif
1358 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1359 creg = mono_alloc_ireg (cfg);
1360 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1361 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1362 creg = mono_alloc_ireg (cfg);
1363 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1364 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1365 cfg->flags |= MONO_CFG_HAS_FPOUT;
1366 } else {
1367 g_assert_not_reached ();
1369 break;
1370 case RegTypeFP: {
1371 /* FIXME: */
1372 NOT_IMPLEMENTED;
1373 #if 0
1374 arg->backend.reg3 = ainfo->reg;
1375 /* FP args are passed in int regs */
1376 call->used_iregs |= 1 << ainfo->reg;
1377 if (ainfo->size == 8) {
1378 arg->opcode = OP_OUTARG_R8;
1379 call->used_iregs |= 1 << (ainfo->reg + 1);
1380 } else {
1381 arg->opcode = OP_OUTARG_R4;
1383 #endif
1384 cfg->flags |= MONO_CFG_HAS_FPOUT;
1385 break;
1387 default:
1388 g_assert_not_reached ();
/* Pass the address of the return value buffer for struct returns. */
1392 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1393 MonoInst *vtarg;
1395 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1396 vtarg->sreg1 = call->vret_var->dreg;
1397 vtarg->dreg = mono_alloc_preg (cfg);
1398 MONO_ADD_INS (cfg->cbb, vtarg);
1400 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1403 call->stack_usage = cinfo->stack_usage;
1405 g_free (cinfo);
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Emit the IR passing the valuetype SRC as an argument of the call in
 *   ins->inst_p0, using the ArgInfo stashed in ins->inst_p1 by
 *   mono_arch_emit_call (). The first ainfo->size pointer-sized words are
 *   loaded into consecutive argument registers starting at ainfo->reg; any
 *   overflow (ovf_size words) is memcpy'd to the stack at sp+doffset.
 */
1408 void
1409 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1411 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1412 ArgInfo *ainfo = ins->inst_p1;
1413 int ovf_size = ainfo->vtsize;
1414 int doffset = ainfo->offset;
1415 int i, soffset, dreg;
1417 soffset = 0;
/* Register portion: one pointer-sized load per argument register. */
1418 for (i = 0; i < ainfo->size; ++i) {
1419 dreg = mono_alloc_ireg (cfg);
1420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1421 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1422 soffset += sizeof (gpointer);
1424 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Stack portion: copy whatever did not fit in registers. */
1425 if (ovf_size != 0)
1426 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 *
 *   Emit the IR moving VAL into the method's return location (cfg->ret).
 *   I8/U8 pairs use OP_SETLRET; floating point returns depend on the FP
 *   configuration (soft float, VFP or FPA), selected at compile time by
 *   the #ifdef chain below. Everything else falls through to a plain move.
 */
1429 void
1430 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1432 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1434 if (!ret->byref) {
1435 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1436 MonoInst *ins;
/* 64 bit return: both halves of the vreg pair (dreg+1, dreg+2). */
1438 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1439 ins->sreg1 = val->dreg + 1;
1440 ins->sreg2 = val->dreg + 2;
1441 MONO_ADD_INS (cfg->cbb, ins);
1442 return;
1444 #ifdef MONO_ARCH_SOFT_FLOAT
1445 if (ret->type == MONO_TYPE_R8) {
1446 MonoInst *ins;
1448 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1449 ins->dreg = cfg->ret->dreg;
1450 ins->sreg1 = val->dreg;
1451 MONO_ADD_INS (cfg->cbb, ins);
1452 return;
1454 if (ret->type == MONO_TYPE_R4) {
1455 /* Already converted to an int in method_to_ir () */
1456 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1457 return;
1459 #elif defined(ARM_FPU_VFP)
1460 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1461 MonoInst *ins;
1463 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1464 ins->dreg = cfg->ret->dreg;
1465 ins->sreg1 = val->dreg;
1466 MONO_ADD_INS (cfg->cbb, ins);
1467 return;
1469 #else
/* FPA: the return value register is an FP register, a plain FP move works. */
1470 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1471 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1472 return;
1474 #endif
1477 /* FIXME: */
1478 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1481 gboolean
1482 mono_arch_is_inst_imm (gint64 imm)
1484 return TRUE;
/* Number of stack argument slots (beyond the argument registers) supported
 * by the generic dynamic-call path. */
1487 #define DYN_CALL_STACK_ARGS 6
/* Per-signature data computed once by mono_arch_dyn_call_prepare (). */
1489 typedef struct {
1490 MonoMethodSignature *sig;
1491 CallInfo *cinfo;
1492 } ArchDynCallInfo;
/* Marshalling buffer shared between mono_arch_start_dyn_call () and
 * mono_arch_finish_dyn_call (): 'regs' holds the PARAM_REGS argument
 * registers followed by the stack slots, 'res'/'res2' receive the raw
 * return register pair, 'ret' points to the caller's return buffer. */
1494 typedef struct {
1495 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
1496 mgreg_t res, res2;
1497 guint8 *ret;
1498 } DynCallArgs;
/*
 * dyn_call_supported:
 *
 *   Return whether SIG can be invoked through the dynamic-call machinery:
 *   all arguments must fit in the register + DYN_CALL_STACK_ARGS window,
 *   and the return/argument kinds must be ones start/finish_dyn_call can
 *   marshal (no I8/U8 arguments, FP only with VFP).
 */
1500 static gboolean
1501 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
1503 int i;
/* Everything must fit in the DynCallArgs 'regs' array. */
1505 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1506 return FALSE;
1508 switch (cinfo->ret.regtype) {
1509 case RegTypeNone:
1510 case RegTypeGeneral:
1511 case RegTypeIRegPair:
1512 case RegTypeStructByAddr:
1513 break;
1514 case RegTypeFP:
1515 #ifdef ARM_FPU_FPA
1516 return FALSE;
1517 #elif defined(ARM_FPU_VFP)
1518 break;
1519 #else
1520 return FALSE;
1521 #endif
1522 default:
1523 return FALSE;
1526 for (i = 0; i < cinfo->nargs; ++i) {
1527 switch (cinfo->args [i].regtype) {
1528 case RegTypeGeneral:
1529 break;
1530 case RegTypeIRegPair:
1531 break;
1532 case RegTypeBase:
/* Stack arguments beyond the supported window are rejected. */
1533 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1534 return FALSE;
1535 break;
1536 case RegTypeStructByVal:
1537 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1538 return FALSE;
1539 break;
1540 default:
1541 return FALSE;
1545 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1546 for (i = 0; i < sig->param_count; ++i) {
1547 MonoType *t = sig->params [i];
1549 if (t->byref)
1550 continue;
1552 switch (t->type) {
1553 case MONO_TYPE_R4:
1554 case MONO_TYPE_R8:
1555 #ifdef MONO_ARCH_SOFT_FLOAT
1556 return FALSE;
1557 #else
1558 break;
1559 #endif
1561 case MONO_TYPE_I8:
1562 case MONO_TYPE_U8:
1563 return FALSE;
1565 default:
1566 break;
1570 return TRUE;
1573 MonoDynCallInfo*
1574 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1576 ArchDynCallInfo *info;
1577 CallInfo *cinfo;
1579 cinfo = get_call_info (sig, FALSE);
1581 if (!dyn_call_supported (cinfo, sig)) {
1582 g_free (cinfo);
1583 return NULL;
1586 info = g_new0 (ArchDynCallInfo, 1);
1587 // FIXME: Preprocess the info to speed up start_dyn_call ()
1588 info->sig = sig;
1589 info->cinfo = cinfo;
1591 return (MonoDynCallInfo*)info;
1594 void
1595 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1597 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1599 g_free (ainfo->cinfo);
1600 g_free (ainfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS into the DynCallArgs structure in BUF according to the
 *   precomputed INFO, ready for the dyn-call trampoline to load into
 *   registers/stack. RET is remembered so mono_arch_finish_dyn_call ()
 *   can store the result there afterwards.
 */
1603 void
1604 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1606 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1607 DynCallArgs *p = (DynCallArgs*)buf;
1608 int arg_index, greg, i, j;
1609 MonoMethodSignature *sig = dinfo->sig;
1611 g_assert (buf_len >= sizeof (DynCallArgs));
1613 p->res = 0;
1614 p->ret = ret;
1616 arg_index = 0;
1617 greg = 0;
/* Struct returns: the hidden return-address argument goes first. */
1619 if (dinfo->cinfo->vtype_retaddr)
1620 p->regs [greg ++] = (mgreg_t)ret;
1622 if (sig->hasthis)
1623 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1625 for (i = 0; i < sig->param_count; i++) {
1626 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1627 gpointer *arg = args [arg_index ++];
1628 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
1629 int slot = -1;
/* Map the argument to its index in p->regs: argument registers first,
 * then the stack slots starting at PARAM_REGS. */
1631 if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair || ainfo->regtype == RegTypeStructByVal)
1632 slot = ainfo->reg;
1633 else if (ainfo->regtype == RegTypeBase)
1634 slot = PARAM_REGS + (ainfo->offset / 4);
1635 else
1636 g_assert_not_reached ();
1638 if (t->byref) {
1639 p->regs [slot] = (mgreg_t)*arg;
1640 continue;
1643 switch (t->type) {
1644 case MONO_TYPE_STRING:
1645 case MONO_TYPE_CLASS:
1646 case MONO_TYPE_ARRAY:
1647 case MONO_TYPE_SZARRAY:
1648 case MONO_TYPE_OBJECT:
1649 case MONO_TYPE_PTR:
1650 case MONO_TYPE_I:
1651 case MONO_TYPE_U:
1652 p->regs [slot] = (mgreg_t)*arg;
1653 break;
/* Narrow integer types are widened to a full register. */
1654 case MONO_TYPE_BOOLEAN:
1655 case MONO_TYPE_U1:
1656 p->regs [slot] = *(guint8*)arg;
1657 break;
1658 case MONO_TYPE_I1:
1659 p->regs [slot] = *(gint8*)arg;
1660 break;
1661 case MONO_TYPE_I2:
1662 p->regs [slot] = *(gint16*)arg;
1663 break;
1664 case MONO_TYPE_U2:
1665 case MONO_TYPE_CHAR:
1666 p->regs [slot] = *(guint16*)arg;
1667 break;
1668 case MONO_TYPE_I4:
1669 p->regs [slot] = *(gint32*)arg;
1670 break;
1671 case MONO_TYPE_U4:
1672 p->regs [slot] = *(guint32*)arg;
1673 break;
1674 case MONO_TYPE_I8:
1675 case MONO_TYPE_U8:
1676 p->regs [slot ++] = (mgreg_t)arg [0];
1677 p->regs [slot] = (mgreg_t)arg [1];
1678 break;
1679 case MONO_TYPE_R4:
1680 p->regs [slot] = *(mgreg_t*)arg;
1681 break;
1682 case MONO_TYPE_R8:
1683 p->regs [slot ++] = (mgreg_t)arg [0];
1684 p->regs [slot] = (mgreg_t)arg [1];
1685 break;
1686 case MONO_TYPE_GENERICINST:
1687 if (MONO_TYPE_IS_REFERENCE (t)) {
1688 p->regs [slot] = (mgreg_t)*arg;
1689 break;
1690 } else {
1691 /* Fall though */
1693 case MONO_TYPE_VALUETYPE:
1694 g_assert (ainfo->regtype == RegTypeStructByVal);
/* size == 0 means the struct was passed entirely on the stack. */
1696 if (ainfo->size == 0)
1697 slot = PARAM_REGS + (ainfo->offset / 4);
1698 else
1699 slot = ainfo->reg;
1701 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1702 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1703 break;
1704 default:
1705 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Convert the raw return register values left in BUF by the dyn-call
 *   trampoline (res/res2) into the properly typed value expected by the
 *   caller, storing it at the 'ret' pointer recorded by
 *   mono_arch_start_dyn_call ().
 */
1710 void
1711 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1713 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1714 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1715 guint8 *ret = ((DynCallArgs*)buf)->ret;
1716 mgreg_t res = ((DynCallArgs*)buf)->res;
1717 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1719 switch (mono_type_get_underlying_type (sig->ret)->type) {
1720 case MONO_TYPE_VOID:
1721 *(gpointer*)ret = NULL;
1722 break;
1723 case MONO_TYPE_STRING:
1724 case MONO_TYPE_CLASS:
1725 case MONO_TYPE_ARRAY:
1726 case MONO_TYPE_SZARRAY:
1727 case MONO_TYPE_OBJECT:
1728 case MONO_TYPE_I:
1729 case MONO_TYPE_U:
1730 case MONO_TYPE_PTR:
1731 *(gpointer*)ret = (gpointer)res;
1732 break;
1733 case MONO_TYPE_I1:
1734 *(gint8*)ret = res;
1735 break;
1736 case MONO_TYPE_U1:
1737 case MONO_TYPE_BOOLEAN:
1738 *(guint8*)ret = res;
1739 break;
1740 case MONO_TYPE_I2:
1741 *(gint16*)ret = res;
1742 break;
1743 case MONO_TYPE_U2:
1744 case MONO_TYPE_CHAR:
1745 *(guint16*)ret = res;
1746 break;
1747 case MONO_TYPE_I4:
1748 *(gint32*)ret = res;
1749 break;
1750 case MONO_TYPE_U4:
1751 *(guint32*)ret = res;
1752 break;
1753 case MONO_TYPE_I8:
1754 case MONO_TYPE_U8:
1755 /* This handles endianness as well */
1756 ((gint32*)ret) [0] = res;
1757 ((gint32*)ret) [1] = res2;
1758 break;
1759 case MONO_TYPE_GENERICINST:
1760 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1761 *(gpointer*)ret = (gpointer)res;
1762 break;
1763 } else {
1764 /* Fall though */
1766 case MONO_TYPE_VALUETYPE:
/* The callee already wrote through the hidden return address. */
1767 g_assert (ainfo->cinfo->vtype_retaddr);
1768 /* Nothing to do */
1769 break;
1770 #if defined(ARM_FPU_VFP)
1771 case MONO_TYPE_R4:
1772 *(float*)ret = *(float*)&res;
1773 break;
1774 case MONO_TYPE_R8: {
/* Reassemble the double from the two return registers. */
1775 mgreg_t regs [2];
1777 regs [0] = res;
1778 regs [1] = res2;
1780 *(double*)ret = *(double*)&regs;
1781 break;
1783 #endif
1784 default:
1785 g_assert_not_reached ();
1790 * Allow tracing to work with this interface (with an optional argument)
1793 void*
1794 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1796 guchar *code = p;
1798 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1799 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1800 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1801 code = emit_call_reg (code, ARMREG_R2);
1802 return code;
/* What has to be saved/restored around the instrumentation call emitted by
 * mono_arch_instrument_epilog_full (), chosen from the return type. */
1805 enum {
1806 SAVE_NONE,
1807 SAVE_STRUCT,
1808 SAVE_ONE,
1809 SAVE_TWO,
1810 SAVE_FP
1813 void*
1814 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1816 guchar *code = p;
1817 int save_mode = SAVE_NONE;
1818 int offset;
1819 MonoMethod *method = cfg->method;
1820 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1821 int save_offset = cfg->param_area;
1822 save_offset += 7;
1823 save_offset &= ~7;
1825 offset = code - cfg->native_code;
1826 /* we need about 16 instructions */
1827 if (offset > (cfg->code_size - 16 * 4)) {
1828 cfg->code_size *= 2;
1829 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1830 code = cfg->native_code + offset;
1832 switch (rtype) {
1833 case MONO_TYPE_VOID:
1834 /* special case string .ctor icall */
1835 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1836 save_mode = SAVE_ONE;
1837 else
1838 save_mode = SAVE_NONE;
1839 break;
1840 case MONO_TYPE_I8:
1841 case MONO_TYPE_U8:
1842 save_mode = SAVE_TWO;
1843 break;
1844 case MONO_TYPE_R4:
1845 case MONO_TYPE_R8:
1846 save_mode = SAVE_FP;
1847 break;
1848 case MONO_TYPE_VALUETYPE:
1849 save_mode = SAVE_STRUCT;
1850 break;
1851 default:
1852 save_mode = SAVE_ONE;
1853 break;
1856 switch (save_mode) {
1857 case SAVE_TWO:
1858 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1859 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1860 if (enable_arguments) {
1861 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1862 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1864 break;
1865 case SAVE_ONE:
1866 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1867 if (enable_arguments) {
1868 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1870 break;
1871 case SAVE_FP:
1872 /* FIXME: what reg? */
1873 if (enable_arguments) {
1874 /* FIXME: what reg? */
1876 break;
1877 case SAVE_STRUCT:
1878 if (enable_arguments) {
1879 /* FIXME: get the actual address */
1880 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1882 break;
1883 case SAVE_NONE:
1884 default:
1885 break;
1888 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1889 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1890 code = emit_call_reg (code, ARMREG_IP);
1892 switch (save_mode) {
1893 case SAVE_TWO:
1894 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1895 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1896 break;
1897 case SAVE_ONE:
1898 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1899 break;
1900 case SAVE_FP:
1901 /* FIXME */
1902 break;
1903 case SAVE_NONE:
1904 default:
1905 break;
1908 return code;
/*
 * EMIT_COND_BRANCH_FLAGS: emit a conditional branch to ins->inst_true_bb.
 * The target offset is not known yet, so a MONO_PATCH_INFO_BB patch entry
 * is recorded and the displacement is filled in at patch time; the
 * 'if (0 && ...)' direct-encoding fast path is intentionally disabled.
 * Both macros below expect 'code' and 'cfg' to be in scope at the use site.
 */
1912 * The immediate field for cond branches is big enough for all reasonable methods
1914 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1915 if (0 && ins->inst_true_bb->native_offset) { \
1916 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1917 } else { \
1918 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1919 ARM_B_COND (code, (condcode), 0); \
1922 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1924 /* emit an exception if condition is fail
1926 * We assign the extra code used to throw the implicit exceptions
1927 * to cfg->bb_exit as far as the big branch handling is concerned
/*
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS: emit a conditional call (patched later
 * via MONO_PATCH_INFO_EXC) which raises the named runtime exception when
 * the condition holds.
 */
1929 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1930 do { \
1931 mono_add_patch_info (cfg, code - cfg->native_code, \
1932 MONO_PATCH_INFO_EXC, exc_name); \
1933 ARM_BL_COND (code, (condcode), 0); \
1934 } while (0);
1936 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/*
 * mono_arch_peephole_pass_1:
 *   First arch-specific peephole pass. No peepholes are performed here on
 *   ARM (the body appears empty in this view); everything is done in
 *   mono_arch_peephole_pass_2 () below.
 */
1938 void
1939 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Second arch-specific peephole pass. Scans each basic block and removes
 *   or simplifies redundant instruction pairs: store followed by a load of
 *   the same slot, duplicate loads, narrow store/load round trips, and
 *   no-op or mutually cancelling moves.
 */
1943 void
1944 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1946 MonoInst *ins, *n, *last_ins = NULL;
1948 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1949 switch (ins->opcode) {
1950 case OP_MUL_IMM:
1951 case OP_IMUL_IMM:
1952 /* Already done by an arch-independent pass */
1953 break;
1954 case OP_LOAD_MEMBASE:
1955 case OP_LOADI4_MEMBASE:
/* A load right after a store to the same [basereg+offset] can reuse
 * the stored register instead of touching memory. */
1957 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1958 * OP_LOAD_MEMBASE offset(basereg), reg
1960 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1961 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1962 ins->inst_basereg == last_ins->inst_destbasereg &&
1963 ins->inst_offset == last_ins->inst_offset) {
1964 if (ins->dreg == last_ins->sreg1) {
1965 MONO_DELETE_INS (bb, ins);
1966 continue;
1967 } else {
1968 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1969 ins->opcode = OP_MOVE;
1970 ins->sreg1 = last_ins->sreg1;
1974 * Note: reg1 must be different from the basereg in the second load
1975 * OP_LOAD_MEMBASE offset(basereg), reg1
1976 * OP_LOAD_MEMBASE offset(basereg), reg2
1977 * -->
1978 * OP_LOAD_MEMBASE offset(basereg), reg1
1979 * OP_MOVE reg1, reg2
1981 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1982 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1983 ins->inst_basereg != last_ins->dreg &&
1984 ins->inst_basereg == last_ins->inst_basereg &&
1985 ins->inst_offset == last_ins->inst_offset) {
1987 if (ins->dreg == last_ins->dreg) {
1988 MONO_DELETE_INS (bb, ins);
1989 continue;
1990 } else {
1991 ins->opcode = OP_MOVE;
1992 ins->sreg1 = last_ins->dreg;
1995 //g_assert_not_reached ();
1997 #if 0
1999 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2000 * OP_LOAD_MEMBASE offset(basereg), reg
2001 * -->
2002 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2003 * OP_ICONST reg, imm
2005 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2006 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2007 ins->inst_basereg == last_ins->inst_destbasereg &&
2008 ins->inst_offset == last_ins->inst_offset) {
2009 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2010 ins->opcode = OP_ICONST;
2011 ins->inst_c0 = last_ins->inst_imm;
2012 g_assert_not_reached (); // check this rule
2013 #endif
2015 break;
2016 case OP_LOADU1_MEMBASE:
2017 case OP_LOADI1_MEMBASE:
/* Narrow store followed by a load of the same byte: replace the load
 * with a sign/zero extension of the stored register. */
2018 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2019 ins->inst_basereg == last_ins->inst_destbasereg &&
2020 ins->inst_offset == last_ins->inst_offset) {
2021 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2022 ins->sreg1 = last_ins->sreg1;
2024 break;
2025 case OP_LOADU2_MEMBASE:
2026 case OP_LOADI2_MEMBASE:
2027 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2028 ins->inst_basereg == last_ins->inst_destbasereg &&
2029 ins->inst_offset == last_ins->inst_offset) {
2030 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2031 ins->sreg1 = last_ins->sreg1;
2033 break;
2034 case OP_MOVE:
2035 ins->opcode = OP_MOVE;
2037 * OP_MOVE reg, reg
/* Self-move: delete. */
2039 if (ins->dreg == ins->sreg1) {
2040 MONO_DELETE_INS (bb, ins);
2041 continue;
2044 * OP_MOVE sreg, dreg
2045 * OP_MOVE dreg, sreg
/* A move immediately undone by the previous move: delete. */
2047 if (last_ins && last_ins->opcode == OP_MOVE &&
2048 ins->sreg1 == last_ins->dreg &&
2049 ins->dreg == last_ins->sreg1) {
2050 MONO_DELETE_INS (bb, ins);
2051 continue;
2053 break;
2055 last_ins = ins;
2056 ins = ins->next;
2058 bb->last_ins = last_ins;
2062 * the branch_cc_table should maintain the order of these
2063 * opcodes.
2064 case CEE_BEQ:
2065 case CEE_BGE:
2066 case CEE_BGT:
2067 case CEE_BLE:
2068 case CEE_BLT:
2069 case CEE_BNE_UN:
2070 case CEE_BGE_UN:
2071 case CEE_BGT_UN:
2072 case CEE_BLE_UN:
2073 case CEE_BLT_UN:
/*
 * branch_cc_table maps the CEE_B* opcode ordering (listed in the comment
 * above) to ARM condition codes: the five signed conditions first, then
 * the corresponding unsigned ones.
 */
2075 static const guchar
2076 branch_cc_table [] = {
2077 ARMCOND_EQ,
2078 ARMCOND_GE,
2079 ARMCOND_GT,
2080 ARMCOND_LE,
2081 ARMCOND_LT,
2083 ARMCOND_NE,
2084 ARMCOND_HS,
2085 ARMCOND_HI,
2086 ARMCOND_LS,
2087 ARMCOND_LO
/*
 * NEW_INS: allocate a new instruction and insert it before 'ins' in the
 * basic block 'bb'; both names must be in scope at the use site (see
 * mono_arch_lowering_pass ()).
 */
2090 #define NEW_INS(cfg,dest,op) do { \
2091 MONO_INST_NEW ((cfg), (dest), (op)); \
2092 mono_bblock_insert_before_ins (bb, ins, (dest)); \
2093 } while (0)
2095 static int
2096 map_to_reg_reg_op (int op)
2098 switch (op) {
2099 case OP_ADD_IMM:
2100 return OP_IADD;
2101 case OP_SUB_IMM:
2102 return OP_ISUB;
2103 case OP_AND_IMM:
2104 return OP_IAND;
2105 case OP_COMPARE_IMM:
2106 return OP_COMPARE;
2107 case OP_ICOMPARE_IMM:
2108 return OP_ICOMPARE;
2109 case OP_ADDCC_IMM:
2110 return OP_ADDCC;
2111 case OP_ADC_IMM:
2112 return OP_ADC;
2113 case OP_SUBCC_IMM:
2114 return OP_SUBCC;
2115 case OP_SBB_IMM:
2116 return OP_SBB;
2117 case OP_OR_IMM:
2118 return OP_IOR;
2119 case OP_XOR_IMM:
2120 return OP_IXOR;
2121 case OP_LOAD_MEMBASE:
2122 return OP_LOAD_MEMINDEX;
2123 case OP_LOADI4_MEMBASE:
2124 return OP_LOADI4_MEMINDEX;
2125 case OP_LOADU4_MEMBASE:
2126 return OP_LOADU4_MEMINDEX;
2127 case OP_LOADU1_MEMBASE:
2128 return OP_LOADU1_MEMINDEX;
2129 case OP_LOADI2_MEMBASE:
2130 return OP_LOADI2_MEMINDEX;
2131 case OP_LOADU2_MEMBASE:
2132 return OP_LOADU2_MEMINDEX;
2133 case OP_LOADI1_MEMBASE:
2134 return OP_LOADI1_MEMINDEX;
2135 case OP_STOREI1_MEMBASE_REG:
2136 return OP_STOREI1_MEMINDEX;
2137 case OP_STOREI2_MEMBASE_REG:
2138 return OP_STOREI2_MEMINDEX;
2139 case OP_STOREI4_MEMBASE_REG:
2140 return OP_STOREI4_MEMINDEX;
2141 case OP_STORE_MEMBASE_REG:
2142 return OP_STORE_MEMINDEX;
2143 case OP_STORER4_MEMBASE_REG:
2144 return OP_STORER4_MEMINDEX;
2145 case OP_STORER8_MEMBASE_REG:
2146 return OP_STORER8_MEMINDEX;
2147 case OP_STORE_MEMBASE_IMM:
2148 return OP_STORE_MEMBASE_REG;
2149 case OP_STOREI1_MEMBASE_IMM:
2150 return OP_STOREI1_MEMBASE_REG;
2151 case OP_STOREI2_MEMBASE_IMM:
2152 return OP_STOREI2_MEMBASE_REG;
2153 case OP_STOREI4_MEMBASE_IMM:
2154 return OP_STOREI4_MEMBASE_REG;
2156 g_assert_not_reached ();
2160 * Remove from the instruction list the instructions that can't be
2161 * represented with very simple instructions with no register
2162 * requirements.
2164 void
2165 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2167 MonoInst *ins, *temp, *last_ins = NULL;
2168 int rot_amount, imm8, low_imm;
2170 MONO_BB_FOR_EACH_INS (bb, ins) {
2171 loop_start:
2172 switch (ins->opcode) {
2173 case OP_ADD_IMM:
2174 case OP_SUB_IMM:
2175 case OP_AND_IMM:
2176 case OP_COMPARE_IMM:
2177 case OP_ICOMPARE_IMM:
2178 case OP_ADDCC_IMM:
2179 case OP_ADC_IMM:
2180 case OP_SUBCC_IMM:
2181 case OP_SBB_IMM:
2182 case OP_OR_IMM:
2183 case OP_XOR_IMM:
2184 case OP_IADD_IMM:
2185 case OP_ISUB_IMM:
2186 case OP_IAND_IMM:
2187 case OP_IADC_IMM:
2188 case OP_ISBB_IMM:
2189 case OP_IOR_IMM:
2190 case OP_IXOR_IMM:
2191 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2192 NEW_INS (cfg, temp, OP_ICONST);
2193 temp->inst_c0 = ins->inst_imm;
2194 temp->dreg = mono_alloc_ireg (cfg);
2195 ins->sreg2 = temp->dreg;
2196 ins->opcode = mono_op_imm_to_op (ins->opcode);
2198 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
2199 goto loop_start;
2200 else
2201 break;
2202 case OP_MUL_IMM:
2203 case OP_IMUL_IMM:
2204 if (ins->inst_imm == 1) {
2205 ins->opcode = OP_MOVE;
2206 break;
2208 if (ins->inst_imm == 0) {
2209 ins->opcode = OP_ICONST;
2210 ins->inst_c0 = 0;
2211 break;
2213 imm8 = mono_is_power_of_two (ins->inst_imm);
2214 if (imm8 > 0) {
2215 ins->opcode = OP_SHL_IMM;
2216 ins->inst_imm = imm8;
2217 break;
2219 NEW_INS (cfg, temp, OP_ICONST);
2220 temp->inst_c0 = ins->inst_imm;
2221 temp->dreg = mono_alloc_ireg (cfg);
2222 ins->sreg2 = temp->dreg;
2223 ins->opcode = OP_IMUL;
2224 break;
2225 case OP_SBB:
2226 case OP_ISBB:
2227 case OP_SUBCC:
2228 case OP_ISUBCC:
2229 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2230 /* ARM sets the C flag to 1 if there was _no_ overflow */
2231 ins->next->opcode = OP_COND_EXC_NC;
2232 break;
2233 case OP_LOCALLOC_IMM:
2234 NEW_INS (cfg, temp, OP_ICONST);
2235 temp->inst_c0 = ins->inst_imm;
2236 temp->dreg = mono_alloc_ireg (cfg);
2237 ins->sreg1 = temp->dreg;
2238 ins->opcode = OP_LOCALLOC;
2239 break;
2240 case OP_LOAD_MEMBASE:
2241 case OP_LOADI4_MEMBASE:
2242 case OP_LOADU4_MEMBASE:
2243 case OP_LOADU1_MEMBASE:
2244 /* we can do two things: load the immed in a register
2245 * and use an indexed load, or see if the immed can be
2246 * represented as an ad_imm + a load with a smaller offset
2247 * that fits. We just do the first for now, optimize later.
2249 if (arm_is_imm12 (ins->inst_offset))
2250 break;
2251 NEW_INS (cfg, temp, OP_ICONST);
2252 temp->inst_c0 = ins->inst_offset;
2253 temp->dreg = mono_alloc_ireg (cfg);
2254 ins->sreg2 = temp->dreg;
2255 ins->opcode = map_to_reg_reg_op (ins->opcode);
2256 break;
2257 case OP_LOADI2_MEMBASE:
2258 case OP_LOADU2_MEMBASE:
2259 case OP_LOADI1_MEMBASE:
2260 if (arm_is_imm8 (ins->inst_offset))
2261 break;
2262 NEW_INS (cfg, temp, OP_ICONST);
2263 temp->inst_c0 = ins->inst_offset;
2264 temp->dreg = mono_alloc_ireg (cfg);
2265 ins->sreg2 = temp->dreg;
2266 ins->opcode = map_to_reg_reg_op (ins->opcode);
2267 break;
2268 case OP_LOADR4_MEMBASE:
2269 case OP_LOADR8_MEMBASE:
2270 if (arm_is_fpimm8 (ins->inst_offset))
2271 break;
2272 low_imm = ins->inst_offset & 0x1ff;
2273 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2274 NEW_INS (cfg, temp, OP_ADD_IMM);
2275 temp->inst_imm = ins->inst_offset & ~0x1ff;
2276 temp->sreg1 = ins->inst_basereg;
2277 temp->dreg = mono_alloc_ireg (cfg);
2278 ins->inst_basereg = temp->dreg;
2279 ins->inst_offset = low_imm;
2280 break;
2282 /* VFP/FPA doesn't have indexed load instructions */
2283 g_assert_not_reached ();
2284 break;
2285 case OP_STORE_MEMBASE_REG:
2286 case OP_STOREI4_MEMBASE_REG:
2287 case OP_STOREI1_MEMBASE_REG:
2288 if (arm_is_imm12 (ins->inst_offset))
2289 break;
2290 NEW_INS (cfg, temp, OP_ICONST);
2291 temp->inst_c0 = ins->inst_offset;
2292 temp->dreg = mono_alloc_ireg (cfg);
2293 ins->sreg2 = temp->dreg;
2294 ins->opcode = map_to_reg_reg_op (ins->opcode);
2295 break;
2296 case OP_STOREI2_MEMBASE_REG:
2297 if (arm_is_imm8 (ins->inst_offset))
2298 break;
2299 NEW_INS (cfg, temp, OP_ICONST);
2300 temp->inst_c0 = ins->inst_offset;
2301 temp->dreg = mono_alloc_ireg (cfg);
2302 ins->sreg2 = temp->dreg;
2303 ins->opcode = map_to_reg_reg_op (ins->opcode);
2304 break;
2305 case OP_STORER4_MEMBASE_REG:
2306 case OP_STORER8_MEMBASE_REG:
2307 if (arm_is_fpimm8 (ins->inst_offset))
2308 break;
2309 low_imm = ins->inst_offset & 0x1ff;
2310 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2311 NEW_INS (cfg, temp, OP_ADD_IMM);
2312 temp->inst_imm = ins->inst_offset & ~0x1ff;
2313 temp->sreg1 = ins->inst_destbasereg;
2314 temp->dreg = mono_alloc_ireg (cfg);
2315 ins->inst_destbasereg = temp->dreg;
2316 ins->inst_offset = low_imm;
2317 break;
2319 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2320 /* VFP/FPA doesn't have indexed store instructions */
2321 g_assert_not_reached ();
2322 break;
2323 case OP_STORE_MEMBASE_IMM:
2324 case OP_STOREI1_MEMBASE_IMM:
2325 case OP_STOREI2_MEMBASE_IMM:
2326 case OP_STOREI4_MEMBASE_IMM:
2327 NEW_INS (cfg, temp, OP_ICONST);
2328 temp->inst_c0 = ins->inst_imm;
2329 temp->dreg = mono_alloc_ireg (cfg);
2330 ins->sreg1 = temp->dreg;
2331 ins->opcode = map_to_reg_reg_op (ins->opcode);
2332 last_ins = temp;
2333 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2334 case OP_FCOMPARE: {
2335 gboolean swap = FALSE;
2336 int reg;
2338 /* Some fp compares require swapped operands */
2339 g_assert (ins->next);
2340 switch (ins->next->opcode) {
2341 case OP_FBGT:
2342 ins->next->opcode = OP_FBLT;
2343 swap = TRUE;
2344 break;
2345 case OP_FBGT_UN:
2346 ins->next->opcode = OP_FBLT_UN;
2347 swap = TRUE;
2348 break;
2349 case OP_FBLE:
2350 ins->next->opcode = OP_FBGE;
2351 swap = TRUE;
2352 break;
2353 case OP_FBLE_UN:
2354 ins->next->opcode = OP_FBGE_UN;
2355 swap = TRUE;
2356 break;
2357 default:
2358 break;
2360 if (swap) {
2361 reg = ins->sreg1;
2362 ins->sreg1 = ins->sreg2;
2363 ins->sreg2 = reg;
2365 break;
2369 last_ins = ins;
2371 bb->last_ins = last_ins;
2372 bb->max_vreg = cfg->next_vreg;
2375 void
2376 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2378 MonoInst *ins;
2380 if (long_ins->opcode == OP_LNEG) {
2381 ins = long_ins;
2382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
2384 NULLIFY_INS (ins);
2388 static guchar*
2389 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2391 /* sreg is a float, dreg is an integer reg */
2392 #ifdef ARM_FPU_FPA
2393 ARM_FIXZ (code, dreg, sreg);
2394 #elif defined(ARM_FPU_VFP)
2395 if (is_signed)
2396 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2397 else
2398 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2399 ARM_FMRS (code, dreg, ARM_VFP_F0);
2400 #endif
2401 if (!is_signed) {
2402 if (size == 1)
2403 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2404 else if (size == 2) {
2405 ARM_SHL_IMM (code, dreg, dreg, 16);
2406 ARM_SHR_IMM (code, dreg, dreg, 16);
2408 } else {
2409 if (size == 1) {
2410 ARM_SHL_IMM (code, dreg, dreg, 24);
2411 ARM_SAR_IMM (code, dreg, dreg, 24);
2412 } else if (size == 2) {
2413 ARM_SHL_IMM (code, dreg, dreg, 16);
2414 ARM_SAR_IMM (code, dreg, dreg, 16);
2417 return code;
/*
 * State passed to search_thunk_slot () while walking the domain's code
 * chunks looking for a branch thunk that can reach a patch target.
 */
typedef struct {
	guchar *code;         /* the call site being patched */
	const guchar *target; /* the address the call must reach */
	int absolute;
	int found;            /* 0: not found yet, 1: patched, 2: accept first free slot */
} PatchData;
/* TRUE if diff fits in the signed +/-32MB displacement range of an ARM B/BL instruction */
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_domain_code_foreach () callback. Scans one code chunk (DATA, with
 * CSIZE bytes of code and BSIZE bytes total) for either an existing thunk
 * jumping to pdata->target, or an all-zero free slot in which a new thunk
 * is emitted. Returns 1 to stop the iteration once the call site at
 * pdata->code has been patched, 0 to continue with the next chunk.
 */
static int
search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
	PatchData *pdata = (PatchData*)user_data;
	guchar *code = data;
	guint32 *thunks = data;
	guint32 *endthunks = (guint32*)(code + bsize);
	int count = 0;
	int difflow, diffhigh;

	/* always ensure a call from pdata->code can reach to the thunks without further thunks */
	difflow = (char*)pdata->code - (char*)thunks;
	diffhigh = (char*)pdata->code - (char*)endthunks;
	if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
		return 0;

	/*
	 * The thunk is composed of 3 words:
	 * load constant from thunks [2] into ARM_IP
	 * bx to ARM_IP
	 * address constant
	 * Note that the LR register is already setup
	 */
	//g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
	if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
		while (thunks < endthunks) {
			//g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
			if (thunks [2] == (guint32)pdata->target) {
				/* an existing thunk already points at the target: reuse it */
				arm_patch (pdata->code, (guchar*)thunks);
				mono_arch_flush_icache (pdata->code, 4);
				pdata->found = 1;
				return 1;
			} else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
				/* found a free slot instead: emit thunk */
				/* ARMREG_IP is fine to use since this can't be an IMT call
				 * which is indirect
				 */
				code = (guchar*)thunks;
				ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
				if (thumb_supported)
					ARM_BX (code, ARMREG_IP);
				else
					ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
				thunks [2] = (guint32)pdata->target;
				mono_arch_flush_icache ((guchar*)thunks, 12);

				arm_patch (pdata->code, (guchar*)thunks);
				mono_arch_flush_icache (pdata->code, 4);
				pdata->found = 1;
				return 1;
			}
			/* skip 12 bytes, the size of the thunk */
			thunks += 3;
			count++;
		}
		//g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
	}
	return 0;
}
2488 static void
2489 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2491 PatchData pdata;
2493 if (!domain)
2494 domain = mono_domain_get ();
2496 pdata.code = code;
2497 pdata.target = target;
2498 pdata.absolute = absolute;
2499 pdata.found = 0;
2501 mono_domain_lock (domain);
2502 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2504 if (!pdata.found) {
2505 /* this uses the first available slot */
2506 pdata.found = 2;
2507 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2509 mono_domain_unlock (domain);
2511 if (pdata.found != 1)
2512 g_print ("thunk failed for %p from %p\n", target, code);
2513 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the branch/call instruction (sequence) at CODE so control
 * transfers to TARGET. Handles: direct B/BL (routing through a thunk in
 * DOMAIN when out of the +/-32MB range, and rewriting BL into BLX when the
 * target is Thumb code), BX- and BLX-based sequences with an inline
 * address constant, and the generic ldr/mov far-call sequence.
 */
static void
arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
{
	guint32 *code32 = (void*)code;
	guint32 ins = *code32;
	guint32 prim = (ins >> 25) & 7; /* the primary opcode field */
	guint32 tval = GPOINTER_TO_UINT (target);

	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
	if (prim == 5) { /* 101b */
		/* the diff starts 8 bytes from the branch opcode */
		gint diff = target - code - 8;
		gint tbits;
		gint tmask = 0xffffffff;
		if (tval & 1) { /* entering thumb mode */
			diff = target - 1 - code - 8;
			g_assert (thumb_supported);
			tbits = 0xf << 28; /* bl->blx bit pattern */
			g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
			/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
			if (diff & 2) {
				tbits |= 1 << 24;
			}
			tmask = ~(1 << 24); /* clear the link bit */
			/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
		} else {
			tbits = 0;
		}
		if (diff >= 0) {
			if (diff <= 33554431) {
				diff >>= 2;
				ins = (ins & 0xff000000) | diff;
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		} else {
			/* diff between 0 and -33554432 */
			if (diff >= -33554432) {
				diff >>= 2;
				ins = (ins & 0xff000000) | (diff & ~0xff000000);
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		}
		/* out of direct branch range: go through a thunk */
		handle_thunk (domain, TRUE, code, target);
		return;
	}

	/*
	 * The alternative call sequences looks like this:
	 *
	 * 	ldr ip, [pc] // loads the address constant
	 * 	b 1f         // jumps around the constant
	 * 	address constant embedded in the code
	 *    1f:
	 * 	mov lr, pc
	 * 	mov pc, ip
	 *
	 * There are two cases for patching:
	 * a) at the end of method emission: in this case code points to the start
	 *    of the call sequence
	 * b) during runtime patching of the call site: in this case code points
	 *    to the mov pc, ip instruction
	 *
	 * We have to handle also the thunk jump code sequence:
	 *
	 * 	ldr ip, [pc]
	 * 	mov pc, ip
	 * 	address constant // execution never reaches here
	 */
	if ((ins & 0x0ffffff0) == 0x12fff10) {
		/* Branch and exchange: the address is constructed in a reg
		 * We can patch BX when the code sequence is the following:
		 *  ldr     ip, [pc, #0]    ; 0x8
		 *  b       0xc
		 *  .word code_ptr
		 *  mov     lr, pc
		 *  bx      ips
		 * */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		/* build the reference sequence in ccode to compare against memory */
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_BX (emit, ARMREG_IP);

		/*patching from magic trampoline*/
		if (ins == ccode [3]) {
			g_assert (code32 [-4] == ccode [0]);
			g_assert (code32 [-3] == ccode [1]);
			g_assert (code32 [-1] == ccode [2]);
			code32 [-2] = (guint32)target;
			return;
		}
		/*patching from JIT*/
		if (ins == ccode [0]) {
			g_assert (code32 [1] == ccode [1]);
			g_assert (code32 [3] == ccode [2]);
			g_assert (code32 [4] == ccode [3]);
			code32 [2] = (guint32)target;
			return;
		}
		g_assert_not_reached ();
	} else if ((ins & 0x0ffffff0) == 0x12fff30) {
		/*
		 * ldr ip, [pc, #0]
		 * b 0xc
		 * .word code_ptr
		 * blx ip
		 */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_BLX_REG (emit, ARMREG_IP);

		g_assert (code32 [-3] == ccode [0]);
		g_assert (code32 [-2] == ccode [1]);
		g_assert (code32 [0] == ccode [2]);

		/* the address constant sits right before the blx */
		code32 [-1] = (guint32)target;
	} else {
		guint32 ccode [4];
		guint32 *tmp = ccode;
		guint8 *emit = (guint8*)tmp;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
		ARM_BX (emit, ARMREG_IP);
		if (ins == ccode [2]) {
			g_assert_not_reached (); // should be -2 ...
			code32 [-1] = (guint32)target;
			return;
		}
		if (ins == ccode [0]) {
			/* handles both thunk jump code and the far call sequence */
			code32 [2] = (guint32)target;
			return;
		}
		g_assert_not_reached ();
	}
//	g_print ("patched with 0x%08x\n", ins);
}
/*
 * arm_patch:
 *
 *   Patch the branch/call at CODE to transfer to TARGET, using the current
 * domain for thunk allocation when the target is out of branch range.
 */
void
arm_patch (guchar *code, const guchar *target)
{
	arm_patch_general (NULL, code, target);
}
2670 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2671 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2672 * to be used with the emit macros.
2673 * Return -1 otherwise.
2676 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
2678 guint32 res, i;
2679 for (i = 0; i < 31; i+= 2) {
2680 res = (val << (32 - i)) | (val >> i);
2681 if (res & ~0xff)
2682 continue;
2683 *rot_amount = i? 32 - i: 0;
2684 return res;
2686 return -1;
/*
 * Emits in code a sequence of instructions that load the value 'val'
 * into the dreg register. Uses at most 4 instructions.
 */
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
	int imm8, rot_amount;
#if 0
	/* disabled alternative: load from an inline constant pool */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* skip the constant pool */
	ARM_B (code, 0);
	*(int*)code = val;
	code += 4;
	return code;
#endif
	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
		/* val fits a single rotated-immediate MOV */
		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
		/* the bitwise complement fits: use MVN instead */
		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
	} else {
		if (v7_supported) {
			/* ARMv7: MOVW/MOVT build any 32 bit constant in at most two insns */
			ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
			if (val >> 16)
				ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
			return code;
		}
		/* pre-v7 fallback: build the constant one byte at a time,
		 * MOVing the lowest non-zero byte and ADDing the higher ones */
		if (val & 0xFF) {
			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
			if (val & 0xFF00) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
			}
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF00) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF0000) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		}
		/* NOTE: val == 0 reaches here without emitting anything; presumably
		 * zero is always caught by the rotated-imm8 case above */
		//g_assert_not_reached ();
	}
	return code;
}
/*
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
 */
static guint8*
emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *inst;
	CallInfo *cinfo;
	guint32 i, pos;

	/* FIXME: Generate intermediate code instead */

	sig = mono_method_signature (method);

	/* This is the opposite of the code in emit_prolog */

	pos = 0;

	cinfo = get_call_info (sig, sig->pinvoke);

	/* reload the hidden valuetype-return address from its stack slot */
	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
	}
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];

		if (cfg->verbose_level > 2)
			g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
		if (inst->opcode == OP_REGVAR) {
			/* argument lives in a register: move it back to the input reg */
			if (ainfo->regtype == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->regtype == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->regtype == RegTypeBase) {
				// FIXME:
				NOT_IMPLEMENTED;
				/* NOTE(review): the code below is unreachable (NOT_IMPLEMENTED
				 * aborts first) and references prev_sp_offset, which is not
				 * declared in this function -- confirm before enabling. */
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
				}
			} else
				g_assert_not_reached ();
		} else {
			/* argument was spilled to the stack: reload into the input reg(s) */
			if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair) {
				switch (ainfo->size) {
				case 1:
				case 2:
					// FIXME:
					NOT_IMPLEMENTED;
					break;
				case 8:
					/* 64 bit argument: reload both halves of the register pair */
					g_assert (arm_is_imm12 (inst->inst_offset));
					ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					g_assert (arm_is_imm12 (inst->inst_offset + 4));
					ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				}
			} else if (ainfo->regtype == RegTypeBaseGen) {
				// FIXME:
				NOT_IMPLEMENTED;
			} else if (ainfo->regtype == RegTypeBase) {
				/* Nothing to do */
			} else if (ainfo->regtype == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->regtype == RegTypeStructByVal) {
				/* struct passed (partly) in registers: reload it word by word */
				int doffset = inst->inst_offset;
				int soffset = 0;
				int cur_reg;
				int size = 0;
				if (mono_class_from_mono_type (inst->inst_vtype))
					size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
						ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					}
					soffset += sizeof (gpointer);
					doffset += sizeof (gpointer);
				}
				if (ainfo->vtsize)
					// FIXME:
					NOT_IMPLEMENTED;
			} else if (ainfo->regtype == RegTypeStructByAddr) {
			} else {
				// FIXME:
				NOT_IMPLEMENTED;
			}
		}
		pos ++;
	}

	g_free (cinfo);

	return code;
}
2865 #ifndef DISABLE_JIT
2867 void
2868 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2870 MonoInst *ins;
2871 MonoCallInst *call;
2872 guint offset;
2873 guint8 *code = cfg->native_code + cfg->code_len;
2874 MonoInst *last_ins = NULL;
2875 guint last_offset = 0;
2876 int max_len, cpos;
2877 int imm8, rot_amount;
2879 /* we don't align basic blocks of loops on arm */
2881 if (cfg->verbose_level > 2)
2882 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2884 cpos = bb->max_offset;
2886 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2887 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2888 //g_assert (!mono_compile_aot);
2889 //cpos += 6;
2890 //if (bb->cil_code)
2891 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2892 /* this is not thread save, but good enough */
2893 /* fixme: howto handle overflows? */
2894 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2897 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2898 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2899 (gpointer)"mono_break");
2900 code = emit_call_seq (cfg, code);
2903 MONO_BB_FOR_EACH_INS (bb, ins) {
2904 offset = code - cfg->native_code;
2906 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2908 if (offset > (cfg->code_size - max_len - 16)) {
2909 cfg->code_size *= 2;
2910 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2911 code = cfg->native_code + offset;
2913 // if (ins->cil_code)
2914 // g_print ("cil code\n");
2915 mono_debug_record_line_number (cfg, ins, offset);
2917 switch (ins->opcode) {
2918 case OP_MEMORY_BARRIER:
2919 break;
2920 case OP_TLS_GET:
2921 #ifdef HAVE_AEABI_READ_TP
2922 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2923 (gpointer)"__aeabi_read_tp");
2924 code = emit_call_seq (cfg, code);
2926 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
2927 #else
2928 g_assert_not_reached ();
2929 #endif
2930 break;
2931 /*case OP_BIGMUL:
2932 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2933 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2934 break;
2935 case OP_BIGMUL_UN:
2936 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2937 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2938 break;*/
2939 case OP_STOREI1_MEMBASE_IMM:
2940 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2941 g_assert (arm_is_imm12 (ins->inst_offset));
2942 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2943 break;
2944 case OP_STOREI2_MEMBASE_IMM:
2945 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2946 g_assert (arm_is_imm8 (ins->inst_offset));
2947 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2948 break;
2949 case OP_STORE_MEMBASE_IMM:
2950 case OP_STOREI4_MEMBASE_IMM:
2951 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2952 g_assert (arm_is_imm12 (ins->inst_offset));
2953 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2954 break;
2955 case OP_STOREI1_MEMBASE_REG:
2956 g_assert (arm_is_imm12 (ins->inst_offset));
2957 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2958 break;
2959 case OP_STOREI2_MEMBASE_REG:
2960 g_assert (arm_is_imm8 (ins->inst_offset));
2961 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2962 break;
2963 case OP_STORE_MEMBASE_REG:
2964 case OP_STOREI4_MEMBASE_REG:
2965 /* this case is special, since it happens for spill code after lowering has been called */
2966 if (arm_is_imm12 (ins->inst_offset)) {
2967 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2968 } else {
2969 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2970 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2972 break;
2973 case OP_STOREI1_MEMINDEX:
2974 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2975 break;
2976 case OP_STOREI2_MEMINDEX:
2977 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2978 break;
2979 case OP_STORE_MEMINDEX:
2980 case OP_STOREI4_MEMINDEX:
2981 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2982 break;
2983 case OP_LOADU4_MEM:
2984 g_assert_not_reached ();
2985 break;
2986 case OP_LOAD_MEMINDEX:
2987 case OP_LOADI4_MEMINDEX:
2988 case OP_LOADU4_MEMINDEX:
2989 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2990 break;
2991 case OP_LOADI1_MEMINDEX:
2992 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2993 break;
2994 case OP_LOADU1_MEMINDEX:
2995 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2996 break;
2997 case OP_LOADI2_MEMINDEX:
2998 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2999 break;
3000 case OP_LOADU2_MEMINDEX:
3001 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3002 break;
3003 case OP_LOAD_MEMBASE:
3004 case OP_LOADI4_MEMBASE:
3005 case OP_LOADU4_MEMBASE:
3006 /* this case is special, since it happens for spill code after lowering has been called */
3007 if (arm_is_imm12 (ins->inst_offset)) {
3008 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3009 } else {
3010 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3011 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3013 break;
3014 case OP_LOADI1_MEMBASE:
3015 g_assert (arm_is_imm8 (ins->inst_offset));
3016 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3017 break;
3018 case OP_LOADU1_MEMBASE:
3019 g_assert (arm_is_imm12 (ins->inst_offset));
3020 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3021 break;
3022 case OP_LOADU2_MEMBASE:
3023 g_assert (arm_is_imm8 (ins->inst_offset));
3024 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3025 break;
3026 case OP_LOADI2_MEMBASE:
3027 g_assert (arm_is_imm8 (ins->inst_offset));
3028 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3029 break;
3030 case OP_ICONV_TO_I1:
3031 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3032 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3033 break;
3034 case OP_ICONV_TO_I2:
3035 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3036 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3037 break;
3038 case OP_ICONV_TO_U1:
3039 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3040 break;
3041 case OP_ICONV_TO_U2:
3042 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3043 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3044 break;
3045 case OP_COMPARE:
3046 case OP_ICOMPARE:
3047 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3048 break;
3049 case OP_COMPARE_IMM:
3050 case OP_ICOMPARE_IMM:
3051 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3052 g_assert (imm8 >= 0);
3053 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3054 break;
3055 case OP_BREAK:
3057 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3058 * So instead of emitting a trap, we emit a call a C function and place a
3059 * breakpoint there.
3061 //*(int*)code = 0xef9f0001;
3062 //code += 4;
3063 //ARM_DBRK (code);
3064 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3065 (gpointer)"mono_break");
3066 code = emit_call_seq (cfg, code);
3067 break;
3068 case OP_RELAXED_NOP:
3069 ARM_NOP (code);
3070 break;
3071 case OP_NOP:
3072 case OP_DUMMY_USE:
3073 case OP_DUMMY_STORE:
3074 case OP_NOT_REACHED:
3075 case OP_NOT_NULL:
3076 break;
3077 case OP_SEQ_POINT: {
3078 int i, il_offset;
3079 MonoInst *info_var = cfg->arch.seq_point_info_var;
3080 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3081 MonoInst *var;
3082 int dreg = ARMREG_LR;
3085 * For AOT, we use one got slot per method, which will point to a
3086 * SeqPointInfo structure, containing all the information required
3087 * by the code below.
3089 if (cfg->compile_aot) {
3090 g_assert (info_var);
3091 g_assert (info_var->opcode == OP_REGOFFSET);
3092 g_assert (arm_is_imm12 (info_var->inst_offset));
3096 * Read from the single stepping trigger page. This will cause a
3097 * SIGSEGV when single stepping is enabled.
3098 * We do this _before_ the breakpoint, so single stepping after
3099 * a breakpoint is hit will step to the next IL offset.
3101 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3103 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3104 if (cfg->compile_aot) {
3105 /* Load the trigger page addr from the variable initialized in the prolog */
3106 var = ss_trigger_page_var;
3107 g_assert (var);
3108 g_assert (var->opcode == OP_REGOFFSET);
3109 g_assert (arm_is_imm12 (var->inst_offset));
3110 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3111 } else {
3112 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3113 ARM_B (code, 0);
3114 *(int*)code = (int)ss_trigger_page;
3115 code += 4;
3117 ARM_LDR_IMM (code, dreg, dreg, 0);
3120 il_offset = ins->inst_imm;
3122 if (!cfg->seq_points)
3123 cfg->seq_points = g_ptr_array_new ();
3124 g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (il_offset));
3125 g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (code - cfg->native_code));
3127 if (cfg->compile_aot) {
3128 guint32 offset = code - cfg->native_code;
3129 guint32 val;
3131 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3132 /* Add the offset */
3133 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3134 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3136 * Have to emit nops to keep the difference between the offset
3137 * stored in seq_points and breakpoint instruction constant,
3138 * mono_arch_get_ip_for_breakpoint () depends on this.
3140 if (val & 0xFF00)
3141 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3142 else
3143 ARM_NOP (code);
3144 if (val & 0xFF0000)
3145 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3146 else
3147 ARM_NOP (code);
3148 g_assert (!(val & 0xFF000000));
3149 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3150 ARM_LDR_IMM (code, dreg, dreg, 0);
3152 /* What is faster, a branch or a load ? */
3153 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3154 /* The breakpoint instruction */
3155 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3156 } else {
3158 * A placeholder for a possible breakpoint inserted by
3159 * mono_arch_set_breakpoint ().
3161 for (i = 0; i < 4; ++i)
3162 ARM_NOP (code);
3164 break;
3166 case OP_ADDCC:
3167 case OP_IADDCC:
3168 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3169 break;
3170 case OP_IADD:
3171 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3172 break;
3173 case OP_ADC:
3174 case OP_IADC:
3175 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3176 break;
3177 case OP_ADDCC_IMM:
3178 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3179 g_assert (imm8 >= 0);
3180 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3181 break;
3182 case OP_ADD_IMM:
3183 case OP_IADD_IMM:
3184 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3185 g_assert (imm8 >= 0);
3186 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3187 break;
3188 case OP_ADC_IMM:
3189 case OP_IADC_IMM:
3190 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3191 g_assert (imm8 >= 0);
3192 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3193 break;
3194 case OP_IADD_OVF:
3195 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3196 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3197 break;
3198 case OP_IADD_OVF_UN:
3199 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3200 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3201 break;
3202 case OP_ISUB_OVF:
3203 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3204 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3205 break;
3206 case OP_ISUB_OVF_UN:
3207 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3208 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3209 break;
3210 case OP_ADD_OVF_CARRY:
3211 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3212 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3213 break;
3214 case OP_ADD_OVF_UN_CARRY:
3215 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3216 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3217 break;
3218 case OP_SUB_OVF_CARRY:
3219 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3220 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3221 break;
3222 case OP_SUB_OVF_UN_CARRY:
3223 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3224 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3225 break;
3226 case OP_SUBCC:
3227 case OP_ISUBCC:
3228 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3229 break;
3230 case OP_SUBCC_IMM:
3231 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3232 g_assert (imm8 >= 0);
3233 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3234 break;
3235 case OP_ISUB:
3236 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3237 break;
3238 case OP_SBB:
3239 case OP_ISBB:
3240 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3241 break;
3242 case OP_SUB_IMM:
3243 case OP_ISUB_IMM:
3244 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3245 g_assert (imm8 >= 0);
3246 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3247 break;
3248 case OP_SBB_IMM:
3249 case OP_ISBB_IMM:
3250 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3251 g_assert (imm8 >= 0);
3252 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3253 break;
3254 case OP_ARM_RSBS_IMM:
3255 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3256 g_assert (imm8 >= 0);
3257 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3258 break;
3259 case OP_ARM_RSC_IMM:
3260 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3261 g_assert (imm8 >= 0);
3262 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3263 break;
3264 case OP_IAND:
3265 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3266 break;
3267 case OP_AND_IMM:
3268 case OP_IAND_IMM:
3269 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3270 g_assert (imm8 >= 0);
3271 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3272 break;
3273 case OP_IDIV:
3274 case OP_IDIV_UN:
3275 case OP_DIV_IMM:
3276 case OP_IREM:
3277 case OP_IREM_UN:
3278 case OP_REM_IMM:
3279 /* crappy ARM arch doesn't have a DIV instruction */
3280 g_assert_not_reached ();
3281 case OP_IOR:
3282 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3283 break;
3284 case OP_OR_IMM:
3285 case OP_IOR_IMM:
3286 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3287 g_assert (imm8 >= 0);
3288 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3289 break;
3290 case OP_IXOR:
3291 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3292 break;
3293 case OP_XOR_IMM:
3294 case OP_IXOR_IMM:
3295 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3296 g_assert (imm8 >= 0);
3297 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3298 break;
3299 case OP_ISHL:
3300 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3301 break;
3302 case OP_SHL_IMM:
3303 case OP_ISHL_IMM:
3304 if (ins->inst_imm)
3305 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3306 else if (ins->dreg != ins->sreg1)
3307 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3308 break;
3309 case OP_ISHR:
3310 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3311 break;
3312 case OP_SHR_IMM:
3313 case OP_ISHR_IMM:
3314 if (ins->inst_imm)
3315 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3316 else if (ins->dreg != ins->sreg1)
3317 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3318 break;
3319 case OP_SHR_UN_IMM:
3320 case OP_ISHR_UN_IMM:
3321 if (ins->inst_imm)
3322 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3323 else if (ins->dreg != ins->sreg1)
3324 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3325 break;
3326 case OP_ISHR_UN:
3327 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3328 break;
3329 case OP_INOT:
3330 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3331 break;
3332 case OP_INEG:
3333 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3334 break;
3335 case OP_IMUL:
3336 if (ins->dreg == ins->sreg2)
3337 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3338 else
3339 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3340 break;
3341 case OP_MUL_IMM:
3342 g_assert_not_reached ();
3343 break;
3344 case OP_IMUL_OVF:
3345 /* FIXME: handle ovf/ sreg2 != dreg */
3346 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3347 /* FIXME: MUL doesn't set the C/O flags on ARM */
3348 break;
3349 case OP_IMUL_OVF_UN:
3350 /* FIXME: handle ovf/ sreg2 != dreg */
3351 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3352 /* FIXME: MUL doesn't set the C/O flags on ARM */
3353 break;
3354 case OP_ICONST:
3355 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3356 break;
3357 case OP_AOTCONST:
3358 /* Load the GOT offset */
3359 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3360 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3361 ARM_B (code, 0);
3362 *(gpointer*)code = NULL;
3363 code += 4;
3364 /* Load the value from the GOT */
3365 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3366 break;
3367 case OP_ICONV_TO_I4:
3368 case OP_ICONV_TO_U4:
3369 case OP_MOVE:
3370 if (ins->dreg != ins->sreg1)
3371 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3372 break;
3373 case OP_SETLRET: {
3374 int saved = ins->sreg2;
3375 if (ins->sreg2 == ARM_LSW_REG) {
3376 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3377 saved = ARMREG_LR;
3379 if (ins->sreg1 != ARM_LSW_REG)
3380 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3381 if (saved != ARM_MSW_REG)
3382 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3383 break;
3385 case OP_FMOVE:
3386 #ifdef ARM_FPU_FPA
3387 ARM_MVFD (code, ins->dreg, ins->sreg1);
3388 #elif defined(ARM_FPU_VFP)
3389 ARM_CPYD (code, ins->dreg, ins->sreg1);
3390 #endif
3391 break;
3392 case OP_FCONV_TO_R4:
3393 #ifdef ARM_FPU_FPA
3394 ARM_MVFS (code, ins->dreg, ins->sreg1);
3395 #elif defined(ARM_FPU_VFP)
3396 ARM_CVTD (code, ins->dreg, ins->sreg1);
3397 ARM_CVTS (code, ins->dreg, ins->dreg);
3398 #endif
3399 break;
3400 case OP_JMP:
3402 * Keep in sync with mono_arch_emit_epilog
3404 g_assert (!cfg->method->save_lmf);
3406 code = emit_load_volatile_arguments (cfg, code);
3408 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3409 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3410 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3411 if (cfg->compile_aot) {
3412 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3413 ARM_B (code, 0);
3414 *(gpointer*)code = NULL;
3415 code += 4;
3416 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3417 } else {
3418 ARM_B (code, 0);
3420 break;
3421 case OP_CHECK_THIS:
3422 /* ensure ins->sreg1 is not NULL */
3423 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3424 break;
3425 case OP_ARGLIST: {
3426 #if ARM_PORT
3427 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
3428 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
3429 } else {
3430 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
3431 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
3433 ppc_stw (code, ppc_r11, 0, ins->sreg1);
3434 #endif
3435 break;
3437 case OP_FCALL:
3438 case OP_LCALL:
3439 case OP_VCALL:
3440 case OP_VCALL2:
3441 case OP_VOIDCALL:
3442 case OP_CALL:
3443 call = (MonoCallInst*)ins;
3444 if (ins->flags & MONO_INST_HAS_METHOD)
3445 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3446 else
3447 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3448 code = emit_call_seq (cfg, code);
3449 code = emit_move_return_value (cfg, ins, code);
3450 break;
3451 case OP_FCALL_REG:
3452 case OP_LCALL_REG:
3453 case OP_VCALL_REG:
3454 case OP_VCALL2_REG:
3455 case OP_VOIDCALL_REG:
3456 case OP_CALL_REG:
3457 code = emit_call_reg (code, ins->sreg1);
3458 code = emit_move_return_value (cfg, ins, code);
3459 break;
3460 case OP_FCALL_MEMBASE:
3461 case OP_LCALL_MEMBASE:
3462 case OP_VCALL_MEMBASE:
3463 case OP_VCALL2_MEMBASE:
3464 case OP_VOIDCALL_MEMBASE:
3465 case OP_CALL_MEMBASE:
3466 g_assert (arm_is_imm12 (ins->inst_offset));
3467 g_assert (ins->sreg1 != ARMREG_LR);
3468 call = (MonoCallInst*)ins;
3469 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3470 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3471 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3473 * We can't embed the method in the code stream in PIC code, or
3474 * in gshared code.
3475 * Instead, we put it in V5 in code emitted by
3476 * mono_arch_emit_imt_argument (), and embed NULL here to
3477 * signal the IMT thunk that the value is in V5.
3479 if (call->dynamic_imt_arg)
3480 *((gpointer*)code) = NULL;
3481 else
3482 *((gpointer*)code) = (gpointer)call->method;
3483 code += 4;
3484 } else {
3485 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3486 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3488 code = emit_move_return_value (cfg, ins, code);
3489 break;
3490 case OP_LOCALLOC: {
3491 /* keep alignment */
3492 int alloca_waste = cfg->param_area;
3493 alloca_waste += 7;
3494 alloca_waste &= ~7;
3495 /* round the size to 8 bytes */
3496 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3497 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3498 if (alloca_waste)
3499 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3500 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3501 /* memzero the area: dreg holds the size, sp is the pointer */
3502 if (ins->flags & MONO_INST_INIT) {
3503 guint8 *start_loop, *branch_to_cond;
3504 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3505 branch_to_cond = code;
3506 ARM_B (code, 0);
3507 start_loop = code;
3508 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3509 arm_patch (branch_to_cond, code);
3510 /* decrement by 4 and set flags */
3511 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3512 ARM_B_COND (code, ARMCOND_GE, 0);
3513 arm_patch (code - 4, start_loop);
3515 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3516 break;
3518 case OP_DYN_CALL: {
3519 int i;
3520 MonoInst *var = cfg->dyn_call_var;
3522 g_assert (var->opcode == OP_REGOFFSET);
3523 g_assert (arm_is_imm12 (var->inst_offset));
3525 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3526 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3527 /* ip = ftn */
3528 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3530 /* Save args buffer */
3531 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3533 /* Set stack slots using R0 as scratch reg */
3534 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3535 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3536 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3537 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3540 /* Set argument registers */
3541 for (i = 0; i < PARAM_REGS; ++i)
3542 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3544 /* Make the call */
3545 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3546 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3548 /* Save result */
3549 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3550 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3551 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3552 break;
3554 case OP_THROW: {
3555 if (ins->sreg1 != ARMREG_R0)
3556 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3557 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3558 (gpointer)"mono_arch_throw_exception");
3559 code = emit_call_seq (cfg, code);
3560 break;
3562 case OP_RETHROW: {
3563 if (ins->sreg1 != ARMREG_R0)
3564 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3565 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3566 (gpointer)"mono_arch_rethrow_exception");
3567 code = emit_call_seq (cfg, code);
3568 break;
3570 case OP_START_HANDLER: {
3571 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3573 if (arm_is_imm12 (spvar->inst_offset)) {
3574 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3575 } else {
3576 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3577 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3579 break;
3581 case OP_ENDFILTER: {
3582 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3584 if (ins->sreg1 != ARMREG_R0)
3585 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3586 if (arm_is_imm12 (spvar->inst_offset)) {
3587 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3588 } else {
3589 g_assert (ARMREG_IP != spvar->inst_basereg);
3590 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3591 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3593 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3594 break;
3596 case OP_ENDFINALLY: {
3597 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3599 if (arm_is_imm12 (spvar->inst_offset)) {
3600 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3601 } else {
3602 g_assert (ARMREG_IP != spvar->inst_basereg);
3603 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3604 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3606 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3607 break;
3609 case OP_CALL_HANDLER:
3610 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3611 ARM_BL (code, 0);
3612 break;
3613 case OP_LABEL:
3614 ins->inst_c0 = code - cfg->native_code;
3615 break;
3616 case OP_BR:
3617 /*if (ins->inst_target_bb->native_offset) {
3618 ARM_B (code, 0);
3619 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3620 } else*/ {
3621 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3622 ARM_B (code, 0);
3624 break;
3625 case OP_BR_REG:
3626 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3627 break;
3628 case OP_SWITCH:
3630 * In the normal case we have:
3631 * ldr pc, [pc, ins->sreg1 << 2]
3632 * nop
3633 * If aot, we have:
3634 * ldr lr, [pc, ins->sreg1 << 2]
3635 * add pc, pc, lr
3636 * After follows the data.
3637 * FIXME: add aot support.
3639 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3640 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3641 if (offset > (cfg->code_size - max_len - 16)) {
3642 cfg->code_size += max_len;
3643 cfg->code_size *= 2;
3644 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3645 code = cfg->native_code + offset;
3647 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3648 ARM_NOP (code);
3649 code += 4 * GPOINTER_TO_INT (ins->klass);
3650 break;
3651 case OP_CEQ:
3652 case OP_ICEQ:
3653 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3654 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3655 break;
3656 case OP_CLT:
3657 case OP_ICLT:
3658 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3659 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3660 break;
3661 case OP_CLT_UN:
3662 case OP_ICLT_UN:
3663 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3664 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3665 break;
3666 case OP_CGT:
3667 case OP_ICGT:
3668 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3669 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3670 break;
3671 case OP_CGT_UN:
3672 case OP_ICGT_UN:
3673 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3674 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3675 break;
3676 case OP_COND_EXC_EQ:
3677 case OP_COND_EXC_NE_UN:
3678 case OP_COND_EXC_LT:
3679 case OP_COND_EXC_LT_UN:
3680 case OP_COND_EXC_GT:
3681 case OP_COND_EXC_GT_UN:
3682 case OP_COND_EXC_GE:
3683 case OP_COND_EXC_GE_UN:
3684 case OP_COND_EXC_LE:
3685 case OP_COND_EXC_LE_UN:
3686 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3687 break;
3688 case OP_COND_EXC_IEQ:
3689 case OP_COND_EXC_INE_UN:
3690 case OP_COND_EXC_ILT:
3691 case OP_COND_EXC_ILT_UN:
3692 case OP_COND_EXC_IGT:
3693 case OP_COND_EXC_IGT_UN:
3694 case OP_COND_EXC_IGE:
3695 case OP_COND_EXC_IGE_UN:
3696 case OP_COND_EXC_ILE:
3697 case OP_COND_EXC_ILE_UN:
3698 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3699 break;
3700 case OP_COND_EXC_C:
3701 case OP_COND_EXC_IC:
3702 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3703 break;
3704 case OP_COND_EXC_OV:
3705 case OP_COND_EXC_IOV:
3706 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3707 break;
3708 case OP_COND_EXC_NC:
3709 case OP_COND_EXC_INC:
3710 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3711 break;
3712 case OP_COND_EXC_NO:
3713 case OP_COND_EXC_INO:
3714 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3715 break;
3716 case OP_IBEQ:
3717 case OP_IBNE_UN:
3718 case OP_IBLT:
3719 case OP_IBLT_UN:
3720 case OP_IBGT:
3721 case OP_IBGT_UN:
3722 case OP_IBGE:
3723 case OP_IBGE_UN:
3724 case OP_IBLE:
3725 case OP_IBLE_UN:
3726 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3727 break;
3729 /* floating point opcodes */
3730 #ifdef ARM_FPU_FPA
3731 case OP_R8CONST:
3732 if (cfg->compile_aot) {
3733 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3734 ARM_B (code, 1);
3735 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3736 code += 4;
3737 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3738 code += 4;
3739 } else {
3740 /* FIXME: we can optimize the imm load by dealing with part of
3741 * the displacement in LDFD (aligning to 512).
3743 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3744 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3746 break;
3747 case OP_R4CONST:
3748 if (cfg->compile_aot) {
3749 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3750 ARM_B (code, 0);
3751 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3752 code += 4;
3753 } else {
3754 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3755 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3757 break;
3758 case OP_STORER8_MEMBASE_REG:
3759 /* This is generated by the local regalloc pass which runs after the lowering pass */
3760 if (!arm_is_fpimm8 (ins->inst_offset)) {
3761 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3762 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3763 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3764 } else {
3765 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3767 break;
3768 case OP_LOADR8_MEMBASE:
3769 /* This is generated by the local regalloc pass which runs after the lowering pass */
3770 if (!arm_is_fpimm8 (ins->inst_offset)) {
3771 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3772 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3773 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3774 } else {
3775 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3777 break;
3778 case OP_STORER4_MEMBASE_REG:
3779 g_assert (arm_is_fpimm8 (ins->inst_offset));
3780 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3781 break;
3782 case OP_LOADR4_MEMBASE:
3783 g_assert (arm_is_fpimm8 (ins->inst_offset));
3784 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3785 break;
3786 case OP_ICONV_TO_R_UN: {
3787 int tmpreg;
3788 tmpreg = ins->dreg == 0? 1: 0;
3789 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3790 ARM_FLTD (code, ins->dreg, ins->sreg1);
3791 ARM_B_COND (code, ARMCOND_GE, 8);
3792 /* save the temp register */
3793 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3794 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3795 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3796 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3797 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3798 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3799 /* skip the constant pool */
3800 ARM_B (code, 8);
3801 code += 4;
3802 *(int*)code = 0x41f00000;
3803 code += 4;
3804 *(int*)code = 0;
3805 code += 4;
3806 /* FIXME: adjust:
3807 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3808 * adfltd fdest, fdest, ftemp
3810 break;
3812 case OP_ICONV_TO_R4:
3813 ARM_FLTS (code, ins->dreg, ins->sreg1);
3814 break;
3815 case OP_ICONV_TO_R8:
3816 ARM_FLTD (code, ins->dreg, ins->sreg1);
3817 break;
3819 #elif defined(ARM_FPU_VFP)
3821 case OP_R8CONST:
3822 if (cfg->compile_aot) {
3823 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3824 ARM_B (code, 1);
3825 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3826 code += 4;
3827 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3828 code += 4;
3829 } else {
3830 /* FIXME: we can optimize the imm load by dealing with part of
3831 * the displacement in LDFD (aligning to 512).
3833 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3834 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3836 break;
3837 case OP_R4CONST:
3838 if (cfg->compile_aot) {
3839 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3840 ARM_B (code, 0);
3841 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3842 code += 4;
3843 ARM_CVTS (code, ins->dreg, ins->dreg);
3844 } else {
3845 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3846 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3847 ARM_CVTS (code, ins->dreg, ins->dreg);
3849 break;
3850 case OP_STORER8_MEMBASE_REG:
3851 /* This is generated by the local regalloc pass which runs after the lowering pass */
3852 if (!arm_is_fpimm8 (ins->inst_offset)) {
3853 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3854 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3855 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
3856 } else {
3857 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3859 break;
3860 case OP_LOADR8_MEMBASE:
3861 /* This is generated by the local regalloc pass which runs after the lowering pass */
3862 if (!arm_is_fpimm8 (ins->inst_offset)) {
3863 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3864 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3865 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3866 } else {
3867 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3869 break;
3870 case OP_STORER4_MEMBASE_REG:
3871 g_assert (arm_is_fpimm8 (ins->inst_offset));
3872 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
3873 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
3874 break;
3875 case OP_LOADR4_MEMBASE:
3876 g_assert (arm_is_fpimm8 (ins->inst_offset));
3877 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
3878 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
3879 break;
3880 case OP_ICONV_TO_R_UN: {
3881 g_assert_not_reached ();
3882 break;
3884 case OP_ICONV_TO_R4:
3885 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
3886 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
3887 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
3888 break;
3889 case OP_ICONV_TO_R8:
3890 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
3891 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
3892 break;
3894 case OP_SETFRET:
3895 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
3896 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
3897 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
3898 } else {
3899 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
3901 break;
3903 #endif
3905 case OP_FCONV_TO_I1:
3906 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3907 break;
3908 case OP_FCONV_TO_U1:
3909 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3910 break;
3911 case OP_FCONV_TO_I2:
3912 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3913 break;
3914 case OP_FCONV_TO_U2:
3915 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3916 break;
3917 case OP_FCONV_TO_I4:
3918 case OP_FCONV_TO_I:
3919 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3920 break;
3921 case OP_FCONV_TO_U4:
3922 case OP_FCONV_TO_U:
3923 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3924 break;
3925 case OP_FCONV_TO_I8:
3926 case OP_FCONV_TO_U8:
3927 g_assert_not_reached ();
3928 /* Implemented as helper calls */
3929 break;
3930 case OP_LCONV_TO_R_UN:
3931 g_assert_not_reached ();
3932 /* Implemented as helper calls */
3933 break;
3934 case OP_LCONV_TO_OVF_I4_2: {
3935 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
3937 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3940 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3941 high_bit_not_set = code;
3942 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
3944 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3945 valid_negative = code;
3946 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3947 invalid_negative = code;
3948 ARM_B_COND (code, ARMCOND_AL, 0);
3950 arm_patch (high_bit_not_set, code);
3952 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
3953 valid_positive = code;
3954 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3956 arm_patch (invalid_negative, code);
3957 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
3959 arm_patch (valid_negative, code);
3960 arm_patch (valid_positive, code);
3962 if (ins->dreg != ins->sreg1)
3963 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3964 break;
3966 #ifdef ARM_FPU_FPA
3967 case OP_FADD:
3968 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3969 break;
3970 case OP_FSUB:
3971 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3972 break;
3973 case OP_FMUL:
3974 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3975 break;
3976 case OP_FDIV:
3977 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3978 break;
3979 case OP_FNEG:
3980 ARM_MNFD (code, ins->dreg, ins->sreg1);
3981 break;
3982 #elif defined(ARM_FPU_VFP)
3983 case OP_FADD:
3984 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3985 break;
3986 case OP_FSUB:
3987 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3988 break;
3989 case OP_FMUL:
3990 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3991 break;
3992 case OP_FDIV:
3993 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3994 break;
3995 case OP_FNEG:
3996 ARM_NEGD (code, ins->dreg, ins->sreg1);
3997 break;
3998 #endif
3999 case OP_FREM:
4000 /* emulated */
4001 g_assert_not_reached ();
4002 break;
4003 case OP_FCOMPARE:
4004 #ifdef ARM_FPU_FPA
4005 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4006 #elif defined(ARM_FPU_VFP)
4007 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4008 ARM_FMSTAT (code);
4009 #endif
4010 break;
4011 case OP_FCEQ:
4012 #ifdef ARM_FPU_FPA
4013 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4014 #elif defined(ARM_FPU_VFP)
4015 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4016 ARM_FMSTAT (code);
4017 #endif
4018 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4019 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4020 break;
4021 case OP_FCLT:
4022 #ifdef ARM_FPU_FPA
4023 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4024 #elif defined(ARM_FPU_VFP)
4025 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4026 ARM_FMSTAT (code);
4027 #endif
4028 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4029 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4030 break;
4031 case OP_FCLT_UN:
4032 #ifdef ARM_FPU_FPA
4033 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4034 #elif defined(ARM_FPU_VFP)
4035 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4036 ARM_FMSTAT (code);
4037 #endif
4038 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4039 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4040 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4041 break;
4042 case OP_FCGT:
4043 /* swapped */
4044 #ifdef ARM_FPU_FPA
4045 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4046 #elif defined(ARM_FPU_VFP)
4047 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4048 ARM_FMSTAT (code);
4049 #endif
4050 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4051 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4052 break;
4053 case OP_FCGT_UN:
4054 /* swapped */
4055 #ifdef ARM_FPU_FPA
4056 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4057 #elif defined(ARM_FPU_VFP)
4058 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4059 ARM_FMSTAT (code);
4060 #endif
4061 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4062 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4063 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4064 break;
4065 /* ARM FPA flags table:
4066 * N Less than ARMCOND_MI
4067 * Z Equal ARMCOND_EQ
4068 * C Greater Than or Equal ARMCOND_CS
4069 * V Unordered ARMCOND_VS
4071 case OP_FBEQ:
4072 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4073 break;
4074 case OP_FBNE_UN:
4075 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4076 break;
4077 case OP_FBLT:
4078 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4079 break;
4080 case OP_FBLT_UN:
4081 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4082 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4083 break;
4084 case OP_FBGT:
4085 case OP_FBGT_UN:
4086 case OP_FBLE:
4087 case OP_FBLE_UN:
4088 g_assert_not_reached ();
4089 break;
4090 case OP_FBGE:
4091 #ifdef ARM_FPU_VFP
4092 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4093 #else
4094 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4095 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4096 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4097 #endif
4098 break;
4099 case OP_FBGE_UN:
4100 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4101 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4102 break;
4104 case OP_CKFINITE: {
4105 #ifdef ARM_FPU_FPA
4106 if (ins->dreg != ins->sreg1)
4107 ARM_MVFD (code, ins->dreg, ins->sreg1);
4108 #elif defined(ARM_FPU_VFP)
4109 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4110 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4111 ARM_B (code, 1);
4112 *(guint32*)code = 0xffffffff;
4113 code += 4;
4114 *(guint32*)code = 0x7fefffff;
4115 code += 4;
4116 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4117 ARM_FMSTAT (code);
4118 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4119 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4120 ARM_FMSTAT (code);
4121 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4123 ARM_CPYD (code, ins->dreg, ins->sreg1);
4124 #endif
4125 break;
4127 default:
4128 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4129 g_assert_not_reached ();
4132 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4133 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4134 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4135 g_assert_not_reached ();
4138 cpos += max_len;
4140 last_ins = ins;
4141 last_offset = offset;
4144 cfg->code_len = code - cfg->native_code;
4147 #endif /* DISABLE_JIT */
4149 #ifdef HAVE_AEABI_READ_TP
4150 void __aeabi_read_tp (void);
4151 #endif
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level runtime helpers as JIT icalls so the
 * code generator can emit calls to them by name. Registers the two exception
 * throw helpers unconditionally, and the EABI TLS accessor __aeabi_read_tp
 * only on targets where it exists (see HAVE_AEABI_READ_TP in the file header).
 */
void
mono_arch_register_lowlevel_calls (void)
	/* The signature doesn't matter */
	mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
	mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);

#ifdef HAVE_AEABI_READ_TP
	/* Used to read the thread pointer on ARM EABI Linux (not Android) */
	mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
#endif
/*
 * patch_lis_ori:
 *
 *   Patch the immediate fields of a lis/ori instruction pair at IP so they
 * load the 32 bit constant VAL: the high 16 bits go into the second halfword
 * of the first instruction, the low 16 bits into the second halfword of the
 * second instruction.
 *   NOTE(review): this is a PowerPC-style patching helper; every use below in
 * mono_arch_patch_code () sits after a g_assert_not_reached (), so it appears
 * to be dead leftover from the PPC port — confirm before relying on it.
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip); \
		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
		__lis_ori [3] = ((guint32)(val)) & 0xffff; \
	} while (0)
/*
 * mono_arch_patch_code:
 *
 *   Walk the jump-info list JI and patch the native code starting at CODE so
 * each patch site points at its resolved target.
 *
 * METHOD/DOMAIN are passed through to mono_resolve_patch_target ().
 * RUN_CCTORS doubles as the JIT/AOT discriminator here: AOT compilation does
 * not run class constructors, so compile_aot == !run_cctors.
 *
 * Special cases handled before the generic path:
 *   - SWITCH in JIT mode: the jump table is inlined in the code stream 8
 *     bytes (2 instructions) after the patch ip, and is filled with absolute
 *     addresses computed from the per-case displacements.
 *   - In AOT mode only BB and LABEL patches are applied; everything else is
 *     handled elsewhere (or at load time).
 *   - A group of cases guarded by g_assert_not_reached () below are dead
 *     PPC-era code paths (lis/ori patching) kept from the port this file was
 *     derived from.
 * All remaining patch types go through arm_patch_general ().
 */
void
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
	MonoJumpInfo *patch_info;
	gboolean compile_aot = !run_cctors;

	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
		unsigned char *ip = patch_info->ip.i + code;
		const unsigned char *target;

		if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
			gpointer *jt = (gpointer*)(ip + 8);
			int i;
			/* jt is the inlined jump table, 2 instructions after ip
			 * In the normal case we store the absolute addresses,
			 * otherwise the displacements.
			 */
			for (i = 0; i < patch_info->data.table->table_size; i++)
				jt [i] = code + (int)patch_info->data.table->table [i];
			continue;
		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);

		if (compile_aot) {
			switch (patch_info->type) {
			case MONO_PATCH_INFO_BB:
			case MONO_PATCH_INFO_LABEL:
				break;
			default:
				/* No need to patch these */
				continue;

		switch (patch_info->type) {
		/* The cases below up to MONO_PATCH_INFO_EXC_NAME are unreachable on
		 * ARM (each starts with g_assert_not_reached ()); the patching code
		 * after the asserts is PPC leftover. */
		case MONO_PATCH_INFO_IP:
			g_assert_not_reached ();
			patch_lis_ori (ip, ip);
			continue;
		case MONO_PATCH_INFO_METHOD_REL:
			g_assert_not_reached ();
			*((gpointer *)(ip)) = code + patch_info->data.offset;
			continue;
		case MONO_PATCH_INFO_METHODCONST:
		case MONO_PATCH_INFO_CLASS:
		case MONO_PATCH_INFO_IMAGE:
		case MONO_PATCH_INFO_FIELD:
		case MONO_PATCH_INFO_VTABLE:
		case MONO_PATCH_INFO_IID:
		case MONO_PATCH_INFO_SFLDA:
		case MONO_PATCH_INFO_LDSTR:
		case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
		case MONO_PATCH_INFO_LDTOKEN:
			g_assert_not_reached ();
			/* from OP_AOTCONST : lis + ori */
			patch_lis_ori (ip, target);
			continue;
		case MONO_PATCH_INFO_R4:
		case MONO_PATCH_INFO_R8:
			g_assert_not_reached ();
			*((gconstpointer *)(ip + 2)) = patch_info->data.target;
			continue;
		case MONO_PATCH_INFO_EXC_NAME:
			g_assert_not_reached ();
			*((gconstpointer *)(ip + 1)) = patch_info->data.name;
			continue;
		case MONO_PATCH_INFO_NONE:
		case MONO_PATCH_INFO_BB_OVF:
		case MONO_PATCH_INFO_EXC_OVF:
			/* everything is dealt with at epilog output time */
			continue;
		default:
			break;
		/* Generic case: rewrite the branch/load at ip to reach target */
		arm_patch_general (domain, ip, target);
4250 * Stack frame layout:
4252 * ------------------- fp
4253 * MonoLMF structure or saved registers
4254 * -------------------
4255 * locals
4256 * -------------------
4257 * spilled regs
4258 * -------------------
4259 * optional 8 bytes for tracing
4260 * -------------------
4261 * param area size is cfg->param_area
4262 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the native prolog for CFG->method into a freshly allocated buffer
 * (stored in cfg->native_code).  The prolog: saves callee-saved registers
 * (or the full MonoLMF register area when method->save_lmf), allocates the
 * aligned stack frame, stores the rgctx register, moves incoming arguments
 * from registers/caller stack into their home locations, optionally attaches
 * the thread (native-to-managed wrappers), links the LMF, and initializes
 * the sequence-point variables used by the soft debugger.
 * Returns the updated code pointer; cfg->code_len/stack_usage are set.
 */
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoBasicBlock *bb;
	MonoMethodSignature *sig;
	MonoInst *inst;
	int alloc_size, pos, max_offset, i, rot_amount;
	guint8 *code;
	CallInfo *cinfo;
	int tracing = 0;
	int lmf_offset = 0;
	int prev_sp_offset, reg_offset;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		tracing = 1;

	sig = mono_method_signature (method);
	/* Initial buffer size estimate; emit_* reallocs are not done here,
	 * the g_assert at the end checks we stayed within it. */
	cfg->code_size = 256 + sig->param_count * 20;
	code = cfg->native_code = g_malloc (cfg->code_size);

	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);

	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);

	alloc_size = cfg->stack_offset;
	pos = 0;

	if (!method->save_lmf) {
		/* We save SP by storing it into IP and saving IP */
		ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
		prev_sp_offset = 8; /* ip and lr */
		for (i = 0; i < 16; ++i) {
			if (cfg->used_int_regs & (1 << i))
				prev_sp_offset += 4;
		}
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		/* Emit unwind info for every pushed register */
		reg_offset = 0;
		for (i = 0; i < 16; ++i) {
			if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				reg_offset += 4;
			}
		}
	} else {
		/* 0x5ff0 == push r4-r12, lr: the register area of the MonoLMF */
		ARM_PUSH (code, 0x5ff0);
		prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		reg_offset = 0;
		for (i = 0; i < 16; ++i) {
			if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				reg_offset += 4;
			}
		}
		/* Reserve the rest of the MonoLMF below the pushed registers */
		pos += sizeof (MonoLMF) - prev_sp_offset;
		lmf_offset = pos;
	}
	alloc_size += pos;
	// align to MONO_ARCH_FRAME_ALIGNMENT bytes
	if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
		alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
		alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
	}

	/* the stack used in the pushed regs */
	if (prev_sp_offset & 4)
		alloc_size += 4;
	cfg->stack_usage = alloc_size;
	if (alloc_size) {
		/* Prefer a single SUB with a rotated imm8; fall back to a load */
		if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
			ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
			ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		}
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
	}
	if (cfg->frame_reg != ARMREG_SP) {
		ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
		mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
	}
	//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
	prev_sp_offset += alloc_size;

	/* compute max_offset in order to use short forward jumps
	 * we could skip do it on arm because the immediate displacement
	 * for jumps is large enough, it may be useful later for constant pools
	 */
	max_offset = 0;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		bb->max_offset = max_offset;

		if (cfg->prof_options & MONO_PROFILE_COVERAGE)
			max_offset += 6;

		MONO_BB_FOR_EACH_INS (bb, ins)
			max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
	}

	/* store runtime generic context */
	if (cfg->rgctx_var) {
		MonoInst *ins = cfg->rgctx_var;

		g_assert (ins->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
		}
	}

	/* load arguments allocated to register from the stack */
	pos = 0;

	cinfo = get_call_info (sig, sig->pinvoke);

	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* Spill the hidden valuetype-return address argument */
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
	}
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];

		if (cfg->verbose_level > 2)
			g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
		if (inst->opcode == OP_REGVAR) {
			/* Argument lives in a register: move it there */
			if (ainfo->regtype == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->regtype == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->regtype == RegTypeBase) {
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					/* NOTE(review): the imm12 test above uses
					 * prev_sp_offset + ainfo->offset, but this fallback
					 * loads inst->inst_offset — looks inconsistent; confirm. */
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
				}
			} else
				g_assert_not_reached ();

			if (cfg->verbose_level > 2)
				g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
		} else {
			/* the argument should be put on the stack: FIXME handle size != word */
			if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair) {
				/* Incoming register argument spilled to its stack home */
				switch (ainfo->size) {
				case 1:
					if (arm_is_imm12 (inst->inst_offset))
						ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				case 2:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				case 8:
					/* Register pair: store both words */
					g_assert (arm_is_imm12 (inst->inst_offset));
					ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					g_assert (arm_is_imm12 (inst->inst_offset + 4));
					ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				}
			} else if (ainfo->regtype == RegTypeBaseGen) {
				/* 8-byte argument split between r3 and the caller stack */
				g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
				g_assert (arm_is_imm12 (inst->inst_offset));
				ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
				ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
			} else if (ainfo->regtype == RegTypeBase) {
				/* Argument passed on the caller stack: copy it to its home */
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
				}

				switch (ainfo->size) {
				case 1:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					break;
				case 2:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					break;
				case 8:
					/* Copy the low word, then the high word */
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
						ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
						ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
					}
					if (arm_is_imm12 (inst->inst_offset + 4)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					}
					break;
				}
			} else if (ainfo->regtype == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->regtype == RegTypeStructByVal) {
				/* Struct passed partially in registers, rest on the stack */
				int doffset = inst->inst_offset;
				int soffset = 0;
				int cur_reg;
				int size = 0;
				size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
						ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					}
					soffset += sizeof (gpointer);
					doffset += sizeof (gpointer);
				}
				if (ainfo->vtsize) {
					/* FIXME: handle overrun! with struct sizes not multiple of 4 */
					//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
					code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
				}
			} else if (ainfo->regtype == RegTypeStructByAddr) {
				g_assert_not_reached ();
				/* FIXME: handle overrun! with struct sizes not multiple of 4 */
				code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
			} else
				g_assert_not_reached ();
		}
		pos++;
	}

	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
		/* Attach the calling native thread to the runtime */
		if (cfg->compile_aot)
			/* AOT code is only used in the root domain */
			code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
		else
			code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
			(gpointer)"mono_jit_thread_attach");
		code = emit_call_seq (cfg, code);
	}

	if (method->save_lmf) {
		gboolean get_lmf_fast = FALSE;

#ifdef HAVE_AEABI_READ_TP
		gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();

		if (lmf_addr_tls_offset != -1) {
			get_lmf_fast = TRUE;

			/* Read the LMF address directly from TLS via __aeabi_read_tp */
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
				(gpointer)"__aeabi_read_tp");
			code = emit_call_seq (cfg, code);

			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
			/* (redundant: already set above) */
			get_lmf_fast = TRUE;
		}
#endif
		if (!get_lmf_fast) {
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
				(gpointer)"mono_get_lmf_addr");
			code = emit_call_seq (cfg, code);
		}
		/* we build the MonoLMF structure on the stack - see mini-arm.h */
		/* lmf_offset is the offset from the previous stack pointer,
		 * alloc_size is the total stack space allocated, so the offset
		 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
		 * The pointer to the struct is put in r1 (new_lmf).
		 * r2 is used as scratch
		 * The callee-saved registers are already in the MonoLMF structure
		 */
		code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
		/* r0 is the result from mono_get_lmf_addr () */
		ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
		/* new_lmf->previous_lmf = *lmf_addr */
		ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* *(lmf_addr) = r1 */
		ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* Skip method (only needed for trampoline LMF frames) */
		ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
		/* save the current IP */
		ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
		ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
	}

	if (tracing)
		code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);

	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins = cfg->arch.seq_point_info_var;

		/* Initialize the variable from a GOT slot */
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
		/* ldr r0, [pc]; b +4; <slot>; ldr r0, [pc + r0] */
		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);

		g_assert (ins->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
		}
	}

	/* Initialize ss_trigger_page_var */
	{
		MonoInst *info_var = cfg->arch.seq_point_info_var;
		MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
		int dreg = ARMREG_LR;

		if (info_var) {
			g_assert (info_var->opcode == OP_REGOFFSET);
			g_assert (arm_is_imm12 (info_var->inst_offset));

			ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
			/* Load the trigger page addr */
			ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
			ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
		}
	}

	cfg->code_len = code - cfg->native_code;
	g_assert (cfg->code_len < cfg->code_size);
	g_free (cinfo);

	return code;
}
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: optional leave-method tracing, LMF unlinking
 * (when method->save_lmf), restoration of the callee-saved registers, and
 * the return (pop into pc).  Grows cfg->native_code if the remaining space
 * is insufficient.
 */
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	int max_epilog_size = 16 + 20*4;
	guint8 *code;

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 50;

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		max_epilog_size += 50;

	/* Make sure the buffer can hold the worst-case epilog */
	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	/*
	 * Keep in sync with OP_JMP
	 */
	code = cfg->native_code + cfg->code_len;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
	}
	pos = 0;

	if (method->save_lmf) {
		int lmf_offset;
		/* all but r0-r3, sp and pc */
		pos += sizeof (MonoLMF) - (4 * 10);
		lmf_offset = pos;
		/* r2 contains the pointer to the current LMF */
		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
		/* ip = previous_lmf */
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* lr = lmf_addr */
		ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
		/* *(lmf_addr) = previous_lmf */
		ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* FIXME: speedup: there is no actual need to restore the registers if
		 * we didn't actually change them (idea from Zoltan).
		 */
		/* restore iregs */
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
		ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
	} else {
		/* Deallocate the frame, then pop the saved registers and return */
		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		}
		/* FIXME: add v4 thumb interworking support */
		ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
4715 /* remove once throw_exception_by_name is eliminated */
4716 static int
4717 exception_id_by_name (const char *name)
4719 if (strcmp (name, "IndexOutOfRangeException") == 0)
4720 return MONO_EXC_INDEX_OUT_OF_RANGE;
4721 if (strcmp (name, "OverflowException") == 0)
4722 return MONO_EXC_OVERFLOW;
4723 if (strcmp (name, "ArithmeticException") == 0)
4724 return MONO_EXC_ARITHMETIC;
4725 if (strcmp (name, "DivideByZeroException") == 0)
4726 return MONO_EXC_DIVIDE_BY_ZERO;
4727 if (strcmp (name, "InvalidCastException") == 0)
4728 return MONO_EXC_INVALID_CAST;
4729 if (strcmp (name, "NullReferenceException") == 0)
4730 return MONO_EXC_NULL_REF;
4731 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4732 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4733 g_error ("Unknown intrinsic exception %s\n", name);
4734 return -1;
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception throwing code referenced by the method
 * body.  Each distinct intrinsic exception gets one shared throw sequence;
 * MONO_PATCH_INFO_EXC entries are retargeted either at an already emitted
 * sequence or at a new call to mono_arch_throw_corlib_exception (with the
 * exception type token stored inline after the BL).
 */
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;
	guint8 *code;
	const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
	int max_epilog_size = 50;

	/* count the number of exception infos */

	/*
	 * make sure we have enough space for exceptions
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = exception_id_by_name (patch_info->data.target);
			if (!exc_throw_found [i]) {
				/* Only one throw sequence per distinct exception type */
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;
			}
		}
	}

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	code = cfg->native_code + cfg->code_len;

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;

			i = exception_id_by_name (patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* Reuse the throw sequence emitted for this exception */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
				break;
			} else {
				exc_throw_pos [i] = code;
			}
			arm_patch (ip, code);

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);

			/* r1 = return address of the faulting site */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
			/* r0 = type token, stored inline right after the BL below */
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			/* Repurpose this patch entry for the throw helper call */
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			ARM_BL (code, 0);
			*(guint32*)(gpointer)code = exc_class->type_token;
			code += 4;
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
4812 static gboolean tls_offset_inited = FALSE;
4814 void
4815 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4817 if (!tls_offset_inited) {
4818 tls_offset_inited = TRUE;
4820 lmf_tls_offset = mono_get_lmf_tls_offset ();
4821 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* No per-thread arch-specific JIT data is allocated, so nothing to free. */
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}
/*
 * mono_arch_emit_inst_for_method:
 *
 *   No ARM intrinsics are implemented yet; returning NULL makes the JIT
 * fall back to the generic implementation of CMETHOD.
 */
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	/* FIXME: */
	return NULL;
}
/* No arch-specific tree printing on ARM; always report "not handled". */
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}
/* Delegate to the generic domain intrinsic; no ARM fast path. */
MonoInst*
mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	return mono_get_domain_intrinsic (cfg);
}
/* Delegate to the generic thread intrinsic; no ARM fast path. */
MonoInst*
mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
	return mono_get_thread_intrinsic (cfg);
}
/*
 * mono_arch_get_patch_offset:
 *
 *   Byte offset, within an OP_AOTCONST sequence, of the word that gets
 * patched (fixed 8 bytes on this backend).
 */
guint32
mono_arch_get_patch_offset (guint8 *code)
{
	/* OP_AOTCONST */
	return 8;
}
/* ARM has no register windows (SPARC-ism); nothing to flush. */
void
mono_arch_flush_register_windows (void)
{
}
4867 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed in V5.
 * For AOT and generic-shared code the argument is always materialized in a
 * register (dynamic_imt_arg): either IMT_ARG's dreg, or the method itself
 * loaded via OP_AOTCONST/OP_PCONST.
 */
void
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	if (cfg->compile_aot) {
		int method_reg = mono_alloc_ireg (cfg);
		MonoInst *ins;

		call->dynamic_imt_arg = TRUE;

		if (imt_arg) {
			mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		} else {
			/* No IMT arg: pass the method constant (patched at AOT load) */
			MONO_INST_NEW (cfg, ins, OP_AOTCONST);
			ins->dreg = method_reg;
			ins->inst_p0 = call->method;
			ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
			MONO_ADD_INS (cfg->cbb, ins);

			mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
		}
	} else if (cfg->generic_context) {

		/* Always pass in a register for simplicity */
		call->dynamic_imt_arg = TRUE;

		cfg->uses_rgctx_reg = TRUE;

		if (imt_arg) {
			mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		} else {
			MonoInst *ins;
			int method_reg = mono_alloc_preg (cfg);

			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);

			mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
		}
	}
}
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for a call site: CODE points just past the
 * two-word "ldr pc, ..." + inline-value sequence emitted at the call.
 * A zero inline value means AOT code, where the method travels in V5.
 */
MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
	guint32 *code_ptr = (guint32*)code;
	code_ptr -= 2;
	/* The IMT value is stored in the code stream right after the LDC instruction. */
	if (!IS_LDR_PC (code_ptr [0])) {
		g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
		g_assert (IS_LDR_PC (code_ptr [0]));
	}
	if (code_ptr [1] == 0)
		/* This is AOTed code, the IMT method is in V5 */
		return (MonoMethod*)regs [ARMREG_V5];
	else
		return (MonoMethod*) code_ptr [1];
}
/* Extract the 'this' argument of METHOD from the saved register state. */
MonoObject*
mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
{
	return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
}
/* The static-call VTable is passed in the rgctx register. */
MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
4941 #define ENABLE_WRONG_METHOD_CHECK 0
4942 #define BASE_SIZE (6 * 4)
4943 #define BSEARCH_ENTRY_SIZE (4 * 4)
4944 #define CMP_SIZE (3 * 4)
4945 #define BRANCH_SIZE (1 * 4)
4946 #define CALL_SIZE (2 * 4)
4947 #define WMC_SIZE (5 * 4)
4948 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
4950 static arminstr_t *
4951 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
4953 guint32 delta = DISTANCE (target, code);
4954 delta -= 8;
4955 g_assert (delta >= 0 && delta <= 0xFFF);
4956 *target = *target | delta;
4957 *code = value;
4958 return code + 1;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT dispatch thunk for VTABLE.  Pass 1 sizes each
 * entry's chunk; pass 2 emits compare/branch sequences for equality
 * entries and bsearch comparisons for range entries, spilling literal
 * values (keys, vtable address, large vtable offsets) into constant pools
 * patched via arm_emit_value_and_patch_ldr ().  On entry the thunk receives
 * the IMT method in the word after the call site (or in V5 when r0 == 0,
 * i.e. AOT).  Returns the start of the reserved, icache-flushed code.
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int size, i, extra_space = 0;
	arminstr_t *code, *start, *vtable_target = NULL;
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;

	size = BASE_SIZE;
	constant_pool_starts = g_new0 (guint32*, count);

	/*
	 * We might be called with a fail_tramp from the IMT builder code even if
	 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
	 */
	//g_assert (!fail_tramp);

	/* Pass 1: compute per-entry chunk sizes and the total size */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;
			}

			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;
			} else {
#if ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += WMC_SIZE;
#endif
			}
			item->chunk_size += CALL_SIZE;
		} else {
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}

	if (large_offsets)
		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */

	start = code = mono_domain_code_reserve (domain, size);

#if DEBUG_IMT
	printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif

	/* Thunk prologue: save scratch regs (plus ip/pc for the LDM-based
	 * branch when large offsets are in play) and load the IMT method */
	if (large_offsets)
		ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
	else
		ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);

	/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
	ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
	ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);

	/* Pass 2: emit the dispatch code */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
		gint32 vtable_offset;

		item->code_target = (guint8*)code;

		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done) {
					imt_method = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				}
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0);
			} else {
				/*Enable the commented code to assert on wrong method*/
#if ENABLE_WRONG_METHOD_CHECK
				imt_method = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				ARM_B_COND (code, ARMCOND_NE, 1);

				ARM_DBRK (code);
#endif
			}

			vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
			if (!arm_is_imm12 (vtable_offset)) {
				/*
				 * We need to branch to a computed address but we don't have
				 * a free register to store it, since IP must contain the
				 * vtable address. So we push the two values to the stack, and
				 * load them both using LDM.
				 */
				/* Compute target address */
				vtable_offset_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
			} else {
				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
				if (large_offsets)
					ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
			}

			if (imt_method)
				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);

			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;
			}

			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
			if (extra_space) {
				code += extra_space;
				extra_space = 0;
			}
		} else {
			/* bsearch range check entry */
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);

			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_GE, 0);
			++extra_space;
		}
	}

	/* Pass 3: patch forward branches and fill the bsearch constant pools */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		}
		if (i > 0 && item->is_equals) {
			int j;
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
			}
		}
	}

#if DEBUG_IMT
	{
		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
		mono_disassemble_code (NULL, (guint8*)start, size, buff);
		g_free (buff);
	}
#endif

	g_free (constant_pool_starts);

	mono_arch_flush_icache ((guint8*)start, size);
	mono_stats.imt_thunks_size += code - start;

	g_assert (DISTANCE (start, code) <= size);
	return start;
}
5140 #endif
5142 gpointer
5143 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5145 if (reg == ARMREG_SP)
5146 return (gpointer)ctx->esp;
5147 else
5148 return (gpointer)ctx->regs [reg];
5152 * mono_arch_set_breakpoint:
5154 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5155 * The location should contain code emitted by OP_SEQ_POINT.
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		/* AOT code can't be rewritten: flip the per-offset slot in the
		 * SeqPointInfo table that the emitted code reads through */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = bp_trigger_page;
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		/* Overwrite the 16-byte OP_SEQ_POINT pad:
		 * ldr lr, [pc]; b +4; <bp_trigger_page>; ldr lr, [lr]
		 * The load faults while the trigger page is protected. */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
5193 * mono_arch_clear_breakpoint:
5195 * Clear the breakpoint at IP.
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		/* Undo mono_arch_set_breakpoint: clear the SeqPointInfo slot */
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
		info->bp_addrs [native_offset / 4] = 0;
	} else {
		/* Replace the 4-instruction breakpoint sequence with NOPs */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
5219 * mono_arch_start_single_stepping:
5221 * Start single stepping.
5223 void
5224 mono_arch_start_single_stepping (void)
5226 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5230 * mono_arch_stop_single_stepping:
5232 * Stop single stepping.
5234 void
5235 mono_arch_stop_single_stepping (void)
5237 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
5240 #if __APPLE__
5241 #define DBG_SIGNAL SIGBUS
5242 #else
5243 #define DBG_SIGNAL SIGSEGV
5244 #endif
5247 * mono_arch_is_single_step_event:
5249 * Return whenever the machine state in SIGCTX corresponds to a single
5250 * step event.
5252 gboolean
5253 mono_arch_is_single_step_event (void *info, void *sigctx)
5255 siginfo_t *sinfo = info;
5257 /* Sometimes the address is off by 4 */
5258 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5259 return TRUE;
5260 else
5261 return FALSE;
5265 * mono_arch_is_breakpoint_event:
5267 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
5269 gboolean
5270 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5272 siginfo_t *sinfo = info;
5274 if (sinfo->si_signo == DBG_SIGNAL) {
5275 /* Sometimes the address is off by 4 */
5276 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
5277 return TRUE;
5278 else
5279 return FALSE;
5280 } else {
5281 return FALSE;
5285 guint8*
5286 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5288 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5290 if (ji->from_aot)
5291 ip -= 6 * 4;
5292 else
5293 ip -= 12;
5295 return ip;
5298 guint8*
5299 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5301 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5303 ip += 4;
5305 return ip;
5309 * mono_arch_skip_breakpoint:
5311 * See mini-amd64.c for docs.
5313 void
5314 mono_arch_skip_breakpoint (MonoContext *ctx)
5316 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5320 * mono_arch_skip_single_step:
5322 * See mini-amd64.c for docs.
5324 void
5325 mono_arch_skip_single_step (MonoContext *ctx)
5327 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5331 * mono_arch_get_seq_point_info:
5333 * See mini-amd64.c for docs.
5335 gpointer
5336 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5338 SeqPointInfo *info;
5339 MonoJitInfo *ji;
5341 // FIXME: Add a free function
5343 mono_domain_lock (domain);
5344 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5345 code);
5346 mono_domain_unlock (domain);
5348 if (!info) {
5349 ji = mono_jit_info_table_find (domain, (char*)code);
5350 g_assert (ji);
5352 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5354 info->ss_trigger_page = ss_trigger_page;
5355 info->bp_trigger_page = bp_trigger_page;
5357 mono_domain_lock (domain);
5358 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5359 code, info);
5360 mono_domain_unlock (domain);
5363 return info;