Apply changes from https://github.com/dotnet/runtime/commit/eb1756e97d23df13bc6fe798e...
[mono-project.git] / mono / mini / mini-arm.c
blob03670f787d989a468582cda914fc0da3d59f92ce
1 /**
2 * \file
3 * ARM backend for the Mono code generator
5 * Authors:
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
10 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include "mini.h"
15 #include <string.h>
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/utils/mono-mmap.h>
22 #include <mono/utils/mono-hwcap.h>
23 #include <mono/utils/mono-memory-model.h>
24 #include <mono/utils/mono-threads-coop.h>
25 #include <mono/utils/unlocked.h>
27 #include "interp/interp.h"
29 #include "mini-arm.h"
30 #include "cpu-arm.h"
31 #include "ir-emit.h"
32 #include "debugger-agent.h"
33 #include "mini-gc.h"
34 #include "mini-runtime.h"
35 #include "aot-runtime.h"
36 #include "mono/arch/arm/arm-vfp-codegen.h"
37 #include "mono/utils/mono-tls-inline.h"
39 /* Sanity check: This makes no sense */
40 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
41 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
42 #endif
45 * IS_SOFT_FLOAT: Is full software floating point used?
46 * IS_HARD_FLOAT: Is full hardware floating point used?
47 * IS_VFP: Is hardware floating point with software ABI used?
49 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
50 * IS_VFP may delegate to mono_arch_is_soft_float ().
53 #if defined(ARM_FPU_VFP_HARD)
54 #define IS_SOFT_FLOAT (FALSE)
55 #define IS_HARD_FLOAT (TRUE)
56 #define IS_VFP (TRUE)
57 #elif defined(ARM_FPU_NONE)
58 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
59 #define IS_HARD_FLOAT (FALSE)
60 #define IS_VFP (!mono_arch_is_soft_float ())
61 #else
62 #define IS_SOFT_FLOAT (FALSE)
63 #define IS_HARD_FLOAT (FALSE)
64 #define IS_VFP (TRUE)
65 #endif
67 #define THUNK_SIZE (3 * 4)
69 #if __APPLE__
70 G_BEGIN_DECLS
71 void sys_icache_invalidate (void *start, size_t len);
72 G_END_DECLS
73 #endif
75 /* This mutex protects architecture specific caches */
76 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
77 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
78 static mono_mutex_t mini_arch_mutex;
80 static gboolean v5_supported = FALSE;
81 static gboolean v6_supported = FALSE;
82 static gboolean v7_supported = FALSE;
83 static gboolean v7s_supported = FALSE;
84 static gboolean v7k_supported = FALSE;
85 static gboolean thumb_supported = FALSE;
86 static gboolean thumb2_supported = FALSE;
88 * Whenever to use the ARM EABI
90 static gboolean eabi_supported = FALSE;
92 /*
93 * Whenever to use the iphone ABI extensions:
94 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
95 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
96 * This is required for debugging/profiling tools to work, but it has some overhead so it should
97 * only be turned on in debug builds.
99 static gboolean iphone_abi = FALSE;
102 * The FPU we are generating code for. This is NOT runtime configurable right now,
103 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
105 static MonoArmFPU arm_fpu;
107 #if defined(ARM_FPU_VFP_HARD)
109 * On armhf, d0-d7 are used for argument passing and d8-d15
110 * must be preserved across calls, which leaves us no room
111 * for scratch registers. So we use d14-d15 but back up their
112 * previous contents to a stack slot before using them - see
113 * mono_arm_emit_vfp_scratch_save/_restore ().
115 static int vfp_scratch1 = ARM_VFP_D14;
116 static int vfp_scratch2 = ARM_VFP_D15;
117 #else
119 * On armel, d0-d7 do not need to be preserved, so we can
120 * freely make use of them as scratch registers.
122 static int vfp_scratch1 = ARM_VFP_D0;
123 static int vfp_scratch2 = ARM_VFP_D1;
124 #endif
126 static int i8_align;
128 static gpointer single_step_tramp, breakpoint_tramp;
131 * The code generated for sequence points reads from this location, which is
132 * made read-only when single stepping is enabled.
134 static gpointer ss_trigger_page;
136 /* Enabled breakpoints read from this trigger page */
137 static gpointer bp_trigger_page;
140 * TODO:
141 * floating point support: on ARM it is a mess, there are at least 3
142 * different setups, each of which binary incompat with the other.
143 * 1) FPA: old and ugly, but unfortunately what current distros use
144 * the double binary format has the two words swapped. 8 double registers.
145 * Implemented usually by kernel emulation.
146 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
147 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
148 * 3) VFP: the new and actually sensible and useful FP support. Implemented
149 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
151 * We do not care about FPA. We will support soft float and VFP.
153 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
154 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
155 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
157 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
158 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
159 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
161 //#define DEBUG_IMT 0
163 #ifndef DISABLE_JIT
164 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
165 #endif
167 static guint8*
168 emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data);
/*
 * mono_arch_regname:
 *
 *   Return the symbolic name of the ARM core register REG, or "unknown"
 * if REG is outside the 0-15 range.
 */
const char*
mono_arch_regname (int reg)
{
	static const char *const names [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};

	if (reg < 0 || reg >= 16)
		return "unknown";
	return names [reg];
}
/*
 * mono_arch_fregname:
 *
 *   Return the symbolic name of the ARM floating point register REG, or
 * "unknown" if REG is outside the 0-31 range.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *const names [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};

	if (reg < 0 || reg >= 32)
		return "unknown";
	return names [reg];
}
202 #ifndef DISABLE_JIT
203 static guint8*
204 emit_big_add_temp (guint8 *code, int dreg, int sreg, int imm, int temp)
206 int imm8, rot_amount;
208 g_assert (temp == ARMREG_IP || temp == ARMREG_LR);
210 if (imm == 0) {
211 if (sreg != dreg)
212 ARM_MOV_REG_REG (code, dreg, sreg);
213 } else if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
214 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
215 return code;
217 if (dreg == sreg) {
218 code = mono_arm_emit_load_imm (code, temp, imm);
219 ARM_ADD_REG_REG (code, dreg, sreg, temp);
220 } else {
221 code = mono_arm_emit_load_imm (code, dreg, imm);
222 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
224 return code;
227 static guint8*
228 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
230 return emit_big_add_temp (code, dreg, sreg, imm, ARMREG_IP);
233 static guint8*
234 emit_ldr_imm (guint8 *code, int dreg, int sreg, int imm)
236 if (!arm_is_imm12 (imm)) {
237 g_assert (dreg != sreg);
238 code = emit_big_add (code, dreg, sreg, imm);
239 ARM_LDR_IMM (code, dreg, dreg, 0);
240 } else {
241 ARM_LDR_IMM (code, dreg, sreg, imm);
243 return code;
246 /* If dreg == sreg, this clobbers IP */
247 static guint8*
248 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
250 int imm8, rot_amount;
251 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
252 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
253 return code;
255 if (dreg == sreg) {
256 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
257 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
258 } else {
259 code = mono_arm_emit_load_imm (code, dreg, imm);
260 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
262 return code;
265 static guint8*
266 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
268 /* we can use r0-r3, since this is called only for incoming args on the stack */
269 if (size > sizeof (target_mgreg_t) * 4) {
270 guint8 *start_loop;
271 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
272 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
273 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
274 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
275 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
276 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
277 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
278 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
279 ARM_B_COND (code, ARMCOND_NE, 0);
280 arm_patch (code - 4, start_loop);
281 return code;
283 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
284 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
285 while (size >= 4) {
286 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
287 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
288 doffset += 4;
289 soffset += 4;
290 size -= 4;
292 } else if (size) {
293 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
294 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
295 doffset = soffset = 0;
296 while (size >= 4) {
297 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
298 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
299 doffset += 4;
300 soffset += 4;
301 size -= 4;
304 g_assert (size == 0);
305 return code;
308 static guint8*
309 emit_jmp_reg (guint8 *code, int reg)
311 if (thumb_supported)
312 ARM_BX (code, reg);
313 else
314 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
315 return code;
318 static guint8*
319 emit_call_reg (guint8 *code, int reg)
321 if (v5_supported) {
322 ARM_BLX_REG (code, reg);
323 } else {
324 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
325 return emit_jmp_reg (code, reg);
327 return code;
330 static guint8*
331 emit_call_seq (MonoCompile *cfg, guint8 *code)
333 if (cfg->method->dynamic) {
334 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
335 ARM_B (code, 0);
336 *(gpointer*)code = NULL;
337 code += 4;
338 code = emit_call_reg (code, ARMREG_IP);
339 } else {
340 ARM_BL (code, 0);
342 cfg->thunk_area += THUNK_SIZE;
343 return code;
346 guint8*
347 mono_arm_patchable_b (guint8 *code, int cond)
349 ARM_B_COND (code, cond, 0);
350 return code;
353 guint8*
354 mono_arm_patchable_bl (guint8 *code, int cond)
356 ARM_BL_COND (code, cond, 0);
357 return code;
360 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
361 #define HAVE_AEABI_READ_TP 1
362 #endif
364 #ifdef HAVE_AEABI_READ_TP
365 G_BEGIN_DECLS
366 gpointer __aeabi_read_tp (void);
367 G_END_DECLS
368 #endif
370 gboolean
371 mono_arch_have_fast_tls (void)
373 #ifdef HAVE_AEABI_READ_TP
374 static gboolean have_fast_tls = FALSE;
375 static gboolean inited = FALSE;
377 if (mini_debug_options.use_fallback_tls)
378 return FALSE;
380 if (inited)
381 return have_fast_tls;
383 if (v7_supported) {
384 gpointer tp1, tp2;
386 tp1 = __aeabi_read_tp ();
387 asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2));
389 have_fast_tls = tp1 && tp1 == tp2;
391 inited = TRUE;
392 return have_fast_tls;
393 #else
394 return FALSE;
395 #endif
398 static guint8*
399 emit_tls_get (guint8 *code, int dreg, int tls_offset)
401 g_assert (v7_supported);
402 ARM_MRC (code, 15, 0, dreg, 13, 0, 3);
403 ARM_LDR_IMM (code, dreg, dreg, tls_offset);
404 return code;
407 static guint8*
408 emit_tls_set (guint8 *code, int sreg, int tls_offset)
410 int tp_reg = (sreg != ARMREG_R0) ? ARMREG_R0 : ARMREG_R1;
411 g_assert (v7_supported);
412 ARM_MRC (code, 15, 0, tp_reg, 13, 0, 3);
413 ARM_STR_IMM (code, sreg, tp_reg, tls_offset);
414 return code;
418 * emit_save_lmf:
420 * Emit code to push an LMF structure on the LMF stack.
421 * On arm, this is intermixed with the initialization of other fields of the structure.
423 static guint8*
424 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
426 int i;
428 if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR) != -1) {
429 code = emit_tls_get (code, ARMREG_R0, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR));
430 } else {
431 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
432 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern));
433 code = emit_call_seq (cfg, code);
435 /* we build the MonoLMF structure on the stack - see mini-arm.h */
436 /* lmf_offset is the offset from the previous stack pointer,
437 * alloc_size is the total stack space allocated, so the offset
438 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
439 * The pointer to the struct is put in r1 (new_lmf).
440 * ip is used as scratch
441 * The callee-saved registers are already in the MonoLMF structure
443 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
444 /* r0 is the result from mono_get_lmf_addr () */
445 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
446 /* new_lmf->previous_lmf = *lmf_addr */
447 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
448 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
449 /* *(lmf_addr) = r1 */
450 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
451 /* Skip method (only needed for trampoline LMF frames) */
452 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
453 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
454 /* save the current IP */
455 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
456 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
458 for (i = 0; i < MONO_ABI_SIZEOF (MonoLMF); i += sizeof (target_mgreg_t))
459 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
461 return code;
464 typedef struct {
465 gint32 vreg;
466 gint32 hreg;
467 } FloatArgData;
469 static guint8 *
470 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
472 GSList *list;
474 set_code_cursor (cfg, code);
476 for (list = inst->float_args; list; list = list->next) {
477 FloatArgData *fad = (FloatArgData*)list->data;
478 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
479 gboolean imm = arm_is_fpimm8 (var->inst_offset);
481 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
482 if (!imm)
483 *max_len += 20 + 4;
485 *max_len += 4;
487 code = realloc_code (cfg, *max_len);
489 if (!imm) {
490 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
491 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
492 } else
493 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
495 set_code_cursor (cfg, code);
496 *offset = code - cfg->native_code;
499 return code;
502 static guint8 *
503 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
505 MonoInst *inst;
507 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
509 inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
511 if (IS_HARD_FLOAT) {
512 if (!arm_is_fpimm8 (inst->inst_offset)) {
513 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
514 ARM_FSTD (code, reg, ARMREG_LR, 0);
515 } else
516 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
519 return code;
522 static guint8 *
523 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
525 MonoInst *inst;
527 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
529 inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
531 if (IS_HARD_FLOAT) {
532 if (!arm_is_fpimm8 (inst->inst_offset)) {
533 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
534 ARM_FLDD (code, reg, ARMREG_LR, 0);
535 } else
536 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
539 return code;
543 * emit_restore_lmf:
545 * Emit code to pop an LMF structure from the LMF stack.
547 static guint8*
548 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
550 int basereg, offset;
552 if (lmf_offset < 32) {
553 basereg = cfg->frame_reg;
554 offset = lmf_offset;
555 } else {
556 basereg = ARMREG_R2;
557 offset = 0;
558 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
561 /* ip = previous_lmf */
562 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
563 /* lr = lmf_addr */
564 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
565 /* *(lmf_addr) = previous_lmf */
566 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
568 return code;
571 #endif /* #ifndef DISABLE_JIT */
574 * mono_arch_get_argument_info:
575 * @csig: a method signature
576 * @param_count: the number of parameters to consider
577 * @arg_info: an array to store the result infos
579 * Gathers information on parameters such as size, alignment and
580 * padding. arg_info should be large enought to hold param_count + 1 entries.
582 * Returns the size of the activation frame.
585 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
587 int k, frame_size = 0;
588 guint32 size, align, pad;
589 int offset = 8;
590 MonoType *t;
592 t = mini_get_underlying_type (csig->ret);
593 if (MONO_TYPE_ISSTRUCT (t)) {
594 frame_size += sizeof (target_mgreg_t);
595 offset += 4;
598 arg_info [0].offset = offset;
600 if (csig->hasthis) {
601 frame_size += sizeof (target_mgreg_t);
602 offset += 4;
605 arg_info [0].size = frame_size;
607 for (k = 0; k < param_count; k++) {
608 size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke);
610 /* ignore alignment for now */
611 align = 1;
613 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
614 arg_info [k].pad = pad;
615 frame_size += size;
616 arg_info [k + 1].pad = 0;
617 arg_info [k + 1].size = size;
618 offset += pad;
619 arg_info [k + 1].offset = offset;
620 offset += size;
623 align = MONO_ARCH_FRAME_ALIGNMENT;
624 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
625 arg_info [k].pad = pad;
627 return frame_size;
630 #define MAX_ARCH_DELEGATE_PARAMS 3
632 static guint8*
633 get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, gboolean param_count)
635 guint8 *code, *start;
636 GSList *unwind_ops = mono_arch_get_cie_program ();
638 if (has_target) {
639 start = code = mono_global_codeman_reserve (12);
641 /* Replace the this argument with the target */
642 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
643 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
644 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
646 g_assert ((code - start) <= 12);
648 mono_arch_flush_icache (start, 12);
649 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
650 } else {
651 int size, i;
653 size = 8 + param_count * 4;
654 start = code = mono_global_codeman_reserve (size);
656 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
657 /* slide down the arguments */
658 for (i = 0; i < param_count; ++i) {
659 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
661 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
663 g_assert ((code - start) <= size);
665 mono_arch_flush_icache (start, size);
666 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
669 if (has_target) {
670 *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
671 } else {
672 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
673 *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
674 g_free (name);
677 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
679 return start;
683 * mono_arch_get_delegate_invoke_impls:
685 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
686 * trampolines.
688 GSList*
689 mono_arch_get_delegate_invoke_impls (void)
691 GSList *res = NULL;
692 MonoTrampInfo *info;
693 int i;
695 get_delegate_invoke_impl (&info, TRUE, 0);
696 res = g_slist_prepend (res, info);
698 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
699 get_delegate_invoke_impl (&info, FALSE, i);
700 res = g_slist_prepend (res, info);
703 return res;
706 gpointer
707 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
709 guint8 *code, *start;
710 MonoType *sig_ret;
712 /* FIXME: Support more cases */
713 sig_ret = mini_get_underlying_type (sig->ret);
714 if (MONO_TYPE_ISSTRUCT (sig_ret))
715 return NULL;
717 if (has_target) {
718 static guint8* cached = NULL;
719 mono_mini_arch_lock ();
720 if (cached) {
721 mono_mini_arch_unlock ();
722 return cached;
725 if (mono_ee_features.use_aot_trampolines) {
726 start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
727 } else {
728 MonoTrampInfo *info;
729 start = get_delegate_invoke_impl (&info, TRUE, 0);
730 mono_tramp_info_register (info, NULL);
732 cached = start;
733 mono_mini_arch_unlock ();
734 return cached;
735 } else {
736 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
737 int i;
739 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
740 return NULL;
741 for (i = 0; i < sig->param_count; ++i)
742 if (!mono_is_regsize_var (sig->params [i]))
743 return NULL;
745 mono_mini_arch_lock ();
746 code = cache [sig->param_count];
747 if (code) {
748 mono_mini_arch_unlock ();
749 return code;
752 if (mono_ee_features.use_aot_trampolines) {
753 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
754 start = (guint8*)mono_aot_get_trampoline (name);
755 g_free (name);
756 } else {
757 MonoTrampInfo *info;
758 start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
759 mono_tramp_info_register (info, NULL);
761 cache [sig->param_count] = start;
762 mono_mini_arch_unlock ();
763 return start;
766 return NULL;
769 gpointer
770 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
772 return NULL;
775 gpointer
776 mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
778 return (gpointer)regs [ARMREG_R0];
782 * Initialize the cpu to execute managed code.
784 void
785 mono_arch_cpu_init (void)
787 i8_align = MONO_ABI_ALIGNOF (gint64);
788 #ifdef MONO_CROSS_COMPILE
789 /* Need to set the alignment of i8 since it can different on the target */
790 #ifdef TARGET_ANDROID
791 /* linux gnueabi */
792 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
793 #endif
794 #endif
798 * Initialize architecture specific code.
800 void
801 mono_arch_init (void)
803 char *cpu_arch;
805 #ifdef TARGET_WATCHOS
806 mini_debug_options.soft_breakpoints = TRUE;
807 #endif
809 mono_os_mutex_init_recursive (&mini_arch_mutex);
810 if (mini_debug_options.soft_breakpoints) {
811 if (!mono_aot_only)
812 breakpoint_tramp = mini_get_breakpoint_trampoline ();
813 } else {
814 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER);
815 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ, MONO_MEM_ACCOUNT_OTHER);
816 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
819 #if defined(__ARM_EABI__)
820 eabi_supported = TRUE;
821 #endif
823 #if defined(ARM_FPU_VFP_HARD)
824 arm_fpu = MONO_ARM_FPU_VFP_HARD;
825 #else
826 arm_fpu = MONO_ARM_FPU_VFP;
828 #if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
830 * If we're compiling with a soft float fallback and it
831 * turns out that no VFP unit is available, we need to
832 * switch to soft float. We don't do this for iOS, since
833 * iOS devices always have a VFP unit.
835 if (!mono_hwcap_arm_has_vfp)
836 arm_fpu = MONO_ARM_FPU_NONE;
839 * This environment variable can be useful in testing
840 * environments to make sure the soft float fallback
841 * works. Most ARM devices have VFP units these days, so
842 * normally soft float code would not be exercised much.
844 char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
846 if (soft && !strncmp (soft, "1", 1))
847 arm_fpu = MONO_ARM_FPU_NONE;
848 g_free (soft);
849 #endif
850 #endif
852 v5_supported = mono_hwcap_arm_is_v5;
853 v6_supported = mono_hwcap_arm_is_v6;
854 v7_supported = mono_hwcap_arm_is_v7;
857 * On weird devices, the hwcap code may fail to detect
858 * the ARM version. In that case, we can at least safely
859 * assume the version the runtime was compiled for.
861 #ifdef HAVE_ARMV5
862 v5_supported = TRUE;
863 #endif
864 #ifdef HAVE_ARMV6
865 v6_supported = TRUE;
866 #endif
867 #ifdef HAVE_ARMV7
868 v7_supported = TRUE;
869 #endif
871 #if defined(TARGET_IOS)
872 /* iOS is special-cased here because we don't yet
873 have a way to properly detect CPU features on it. */
874 thumb_supported = TRUE;
875 iphone_abi = TRUE;
876 #else
877 thumb_supported = mono_hwcap_arm_has_thumb;
878 thumb2_supported = mono_hwcap_arm_has_thumb2;
879 #endif
881 /* Format: armv(5|6|7[s])[-thumb[2]] */
882 cpu_arch = g_getenv ("MONO_CPU_ARCH");
884 /* Do this here so it overrides any detection. */
885 if (cpu_arch) {
886 if (strncmp (cpu_arch, "armv", 4) == 0) {
887 v5_supported = cpu_arch [4] >= '5';
888 v6_supported = cpu_arch [4] >= '6';
889 v7_supported = cpu_arch [4] >= '7';
890 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
891 v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0;
894 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
895 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
896 g_free (cpu_arch);
/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}
909 * This function returns the optimizations supported on this cpu.
911 guint32
912 mono_arch_cpu_optimizations (guint32 *exclude_mask)
914 /* no arm-specific optimizations yet */
915 *exclude_mask = 0;
916 return 0;
919 gboolean
920 mono_arm_is_hard_float (void)
922 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
925 #ifndef DISABLE_JIT
927 gboolean
928 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
930 if (v7s_supported || v7k_supported) {
931 switch (opcode) {
932 case OP_IDIV:
933 case OP_IREM:
934 case OP_IDIV_UN:
935 case OP_IREM_UN:
936 return FALSE;
937 default:
938 break;
941 return TRUE;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* TRUE when no VFP unit was found and full software FP is in effect. */
gboolean
mono_arch_is_soft_float (void)
{
	return arm_fpu == MONO_ARM_FPU_NONE;
}
#endif
952 static gboolean
953 is_regsize_var (MonoType *t)
955 if (t->byref)
956 return TRUE;
957 t = mini_get_underlying_type (t);
958 switch (t->type) {
959 case MONO_TYPE_I4:
960 case MONO_TYPE_U4:
961 case MONO_TYPE_I:
962 case MONO_TYPE_U:
963 case MONO_TYPE_PTR:
964 case MONO_TYPE_FNPTR:
965 return TRUE;
966 case MONO_TYPE_OBJECT:
967 return TRUE;
968 case MONO_TYPE_GENERICINST:
969 if (!mono_type_generic_inst_is_valuetype (t))
970 return TRUE;
971 return FALSE;
972 case MONO_TYPE_VALUETYPE:
973 return FALSE;
975 return FALSE;
978 GList *
979 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
981 GList *vars = NULL;
982 int i;
984 for (i = 0; i < cfg->num_varinfo; i++) {
985 MonoInst *ins = cfg->varinfo [i];
986 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
988 /* unused vars */
989 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
990 continue;
992 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
993 continue;
995 /* we can only allocate 32 bit values */
996 if (is_regsize_var (ins->inst_vtype)) {
997 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
998 g_assert (i == vmv->idx);
999 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1003 return vars;
1006 GList *
1007 mono_arch_get_global_int_regs (MonoCompile *cfg)
1009 GList *regs = NULL;
1011 mono_arch_compute_omit_fp (cfg);
1014 * FIXME: Interface calls might go through a static rgctx trampoline which
1015 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1016 * avoid using it.
1018 if (cfg->flags & MONO_CFG_HAS_CALLS)
1019 cfg->uses_rgctx_reg = TRUE;
1021 if (cfg->arch.omit_fp)
1022 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1023 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1024 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1025 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1026 if (iphone_abi)
1027 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1028 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1029 else
1030 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1031 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1032 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1033 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1034 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1035 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1037 return regs;
1041 * mono_arch_regalloc_cost:
1043 * Return the cost, in number of memory references, of the action of
1044 * allocating the variable VMV into a register during global register
1045 * allocation.
1047 guint32
1048 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1050 /* FIXME: */
1051 return 2;
1054 #endif /* #ifndef DISABLE_JIT */
1056 void
1057 mono_arch_flush_icache (guint8 *code, gint size)
1059 #if defined(MONO_CROSS_COMPILE)
1060 #elif __APPLE__
1061 sys_icache_invalidate (code, size);
1062 #else
1063 __builtin___clear_cache ((char*)code, (char*)code + size);
1064 #endif
1067 #define DEBUG(a)
1069 static void inline
1070 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1072 if (simple) {
1073 if (*gr > ARMREG_R3) {
1074 ainfo->size = 4;
1075 ainfo->offset = *stack_size;
1076 ainfo->reg = ARMREG_SP; /* in the caller */
1077 ainfo->storage = RegTypeBase;
1078 *stack_size += 4;
1079 } else {
1080 ainfo->storage = RegTypeGeneral;
1081 ainfo->reg = *gr;
1083 } else {
1084 gboolean split;
1086 if (eabi_supported)
1087 split = i8_align == 4;
1088 else
1089 split = TRUE;
1091 ainfo->size = 8;
1092 if (*gr == ARMREG_R3 && split) {
1093 /* first word in r3 and the second on the stack */
1094 ainfo->offset = *stack_size;
1095 ainfo->reg = ARMREG_SP; /* in the caller */
1096 ainfo->storage = RegTypeBaseGen;
1097 *stack_size += 4;
1098 } else if (*gr >= ARMREG_R3) {
1099 if (eabi_supported) {
1100 /* darwin aligns longs to 4 byte only */
1101 if (i8_align == 8) {
1102 *stack_size += 7;
1103 *stack_size &= ~7;
1106 ainfo->offset = *stack_size;
1107 ainfo->reg = ARMREG_SP; /* in the caller */
1108 ainfo->storage = RegTypeBase;
1109 *stack_size += 8;
1110 } else {
1111 if (eabi_supported) {
1112 if (i8_align == 8 && ((*gr) & 1))
1113 (*gr) ++;
1115 ainfo->storage = RegTypeIRegPair;
1116 ainfo->reg = *gr;
1118 (*gr) ++;
1120 (*gr) ++;
/*
 * add_float:
 *   Reserve the next VFP argument location per the hard-float (AAPCS VFP)
 * rules.  *FPR counts single-precision register slots; *FLOAT_SPARE holds a
 * back-filled single-precision slot left over by double alignment, or -1.
 * Falls back to the stack (RegTypeBase) once the VFP registers are exhausted.
 */
1123 static void inline
1124 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1127 * If we're calling a function like this:
1129 * void foo(float a, double b, float c)
1131 * We pass a in s0 and b in d1. That leaves us
1132 * with s1 being unused. The armhf ABI recognizes
1133 * this and requires register assignment to then
1134 * use that for the next single-precision arg,
1135 * i.e. c in this example. So float_spare either
1136 * tells us which reg to use for the next single-
1137 * precision arg, or it's -1, meaning use *fpr.
1139 * Note that even though most of the JIT speaks
1140 * double-precision, fpr represents single-
1141 * precision registers.
1143 * See parts 5.5 and 6.1.2 of the AAPCS for how
1144 * this all works.
1147 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1148 ainfo->storage = RegTypeFP;
1150 if (is_double) {
1152 * If we're passing a double-precision value
1153 * and *fpr is odd (e.g. it's s1, s3, ...)
1154 * we need to use the next even register. So
1155 * we mark the current *fpr as a spare that
1156 * can be used for the next single-precision
1157 * value.
1159 if (*fpr % 2) {
1160 *float_spare = *fpr;
1161 (*fpr)++;
1165 * At this point, we have an even register
1166 * so we assign that and move along.
1168 ainfo->reg = *fpr;
/* A double occupies two single-precision slots. */
1169 *fpr += 2;
1170 } else if (*float_spare >= 0) {
1172 * We're passing a single-precision value
1173 * and it looks like a spare single-
1174 * precision register is available. Let's
1175 * use it.
1178 ainfo->reg = *float_spare;
1179 *float_spare = -1;
1180 } else {
1182 * If we hit this branch, we're passing a
1183 * single-precision value and we can simply
1184 * use the next available register.
1187 ainfo->reg = *fpr;
1188 (*fpr)++;
1190 } else {
1192 * We've exhausted available floating point
1193 * regs, so pass the rest on the stack.
/* Stack-passed doubles must be 8-byte aligned. */
1196 if (is_double) {
1197 *stack_size += 7;
1198 *stack_size &= ~7;
1201 ainfo->offset = *stack_size;
1202 ainfo->reg = ARMREG_SP;
1203 ainfo->storage = RegTypeBase;
1205 *stack_size += is_double ? 8 : 4;
/*
 * is_hfa:
 *   Return TRUE if T is a homogeneous floating-point aggregate: a struct
 * (possibly containing nested structs) whose 1-4 flattened instance fields
 * are all R4 or all R8.  On success *OUT_NFIELDS receives the flattened
 * field count and *OUT_ESIZE the element size (4 or 8).
 */
1209 static gboolean
1210 is_hfa (MonoType *t, int *out_nfields, int *out_esize)
1212 MonoClass *klass;
1213 gpointer iter;
1214 MonoClassField *field;
1215 MonoType *ftype, *prev_ftype = NULL;
1216 int nfields = 0;
1218 klass = mono_class_from_mono_type_internal (t);
1219 iter = NULL;
1220 while ((field = mono_class_get_fields_internal (klass, &iter))) {
/* Static fields do not contribute to the instance layout. */
1221 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
1222 continue;
1223 ftype = mono_field_get_type_internal (field);
1224 ftype = mini_get_underlying_type (ftype);
1226 if (MONO_TYPE_ISSTRUCT (ftype)) {
1227 int nested_nfields, nested_esize;
/* Nested structs must themselves be HFAs of the same element type. */
1229 if (!is_hfa (ftype, &nested_nfields, &nested_esize))
1230 return FALSE;
1231 if (nested_esize == 4)
1232 ftype = m_class_get_byval_arg (mono_defaults.single_class);
1233 else
1234 ftype = m_class_get_byval_arg (mono_defaults.double_class);
1235 if (prev_ftype && prev_ftype->type != ftype->type)
1236 return FALSE;
1237 prev_ftype = ftype;
1238 nfields += nested_nfields;
1239 } else {
1240 if (!(!ftype->byref && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
1241 return FALSE;
/* All fields must share a single element type. */
1242 if (prev_ftype && prev_ftype->type != ftype->type)
1243 return FALSE;
1244 prev_ftype = ftype;
1245 nfields ++;
1248 if (nfields == 0 || nfields > 4)
1249 return FALSE;
1250 *out_nfields = nfields;
1251 *out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
1252 return TRUE;
/*
 * get_call_info:
 *   Compute the ARM calling-convention description (CallInfo) for SIG:
 * where the return value lives and where each argument is placed (core
 * registers, VFP registers, register pairs, by-value struct words, or the
 * stack).  The result is allocated from MP when non-NULL; otherwise it is
 * g_malloc0'ed and the caller must g_free it.
 */
1255 static CallInfo*
1256 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
1258 guint i, gr, fpr, pstart;
1259 gint float_spare;
1260 int n = sig->hasthis + sig->param_count;
1261 int nfields, esize;
1262 guint32 align;
1263 MonoType *t;
1264 guint32 stack_size = 0;
1265 CallInfo *cinfo;
1266 gboolean is_pinvoke = sig->pinvoke;
1267 gboolean vtype_retaddr = FALSE;
1269 if (mp)
1270 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1271 else
1272 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1274 cinfo->nargs = n;
1275 gr = ARMREG_R0;
1276 fpr = ARM_VFP_F0;
1277 float_spare = -1;
/* First decide how the return value is passed back. */
1279 t = mini_get_underlying_type (sig->ret);
1280 switch (t->type) {
1281 case MONO_TYPE_I1:
1282 case MONO_TYPE_U1:
1283 case MONO_TYPE_I2:
1284 case MONO_TYPE_U2:
1285 case MONO_TYPE_I4:
1286 case MONO_TYPE_U4:
1287 case MONO_TYPE_I:
1288 case MONO_TYPE_U:
1289 case MONO_TYPE_PTR:
1290 case MONO_TYPE_FNPTR:
1291 case MONO_TYPE_OBJECT:
1292 cinfo->ret.storage = RegTypeGeneral;
1293 cinfo->ret.reg = ARMREG_R0;
1294 break;
1295 case MONO_TYPE_U8:
1296 case MONO_TYPE_I8:
1297 cinfo->ret.storage = RegTypeIRegPair;
1298 cinfo->ret.reg = ARMREG_R0;
1299 break;
1300 case MONO_TYPE_R4:
1301 case MONO_TYPE_R8:
1302 cinfo->ret.storage = RegTypeFP;
1304 if (t->type == MONO_TYPE_R4)
1305 cinfo->ret.size = 4;
1306 else
1307 cinfo->ret.size = 8;
/* Hard-float returns in d0/s0, soft-float in the core registers. */
1309 if (IS_HARD_FLOAT) {
1310 cinfo->ret.reg = ARM_VFP_F0;
1311 } else {
1312 cinfo->ret.reg = ARMREG_R0;
1314 break;
1315 case MONO_TYPE_GENERICINST:
1316 if (!mono_type_generic_inst_is_valuetype (t)) {
1317 cinfo->ret.storage = RegTypeGeneral;
1318 cinfo->ret.reg = ARMREG_R0;
1319 break;
1321 if (mini_is_gsharedvt_variable_type (t)) {
1322 cinfo->ret.storage = RegTypeStructByAddr;
1323 break;
1325 /* Fall through */
1326 case MONO_TYPE_VALUETYPE:
1327 case MONO_TYPE_TYPEDBYREF:
1328 if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
1329 cinfo->ret.storage = RegTypeHFA;
1330 cinfo->ret.reg = 0;
1331 cinfo->ret.nregs = nfields;
1332 cinfo->ret.esize = esize;
1333 } else {
1334 if (is_pinvoke) {
1335 int native_size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
1336 int max_size;
1338 #ifdef TARGET_WATCHOS
1339 max_size = 16;
1340 #else
1341 max_size = 4;
1342 #endif
/* Small native structs are returned in registers, larger ones by address. */
1343 if (native_size <= max_size) {
1344 cinfo->ret.storage = RegTypeStructByVal;
1345 cinfo->ret.struct_size = native_size;
1346 cinfo->ret.nregs = ALIGN_TO (native_size, 4) / 4;
1347 } else {
1348 cinfo->ret.storage = RegTypeStructByAddr;
1350 } else {
1351 cinfo->ret.storage = RegTypeStructByAddr;
1354 break;
1355 case MONO_TYPE_VAR:
1356 case MONO_TYPE_MVAR:
1357 g_assert (mini_is_gsharedvt_type (t));
1358 cinfo->ret.storage = RegTypeStructByAddr;
1359 break;
1360 case MONO_TYPE_VOID:
1361 break;
1362 default:
1363 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1366 vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr;
1368 pstart = 0;
1369 n = 0;
1371 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1372 * the first argument, allowing 'this' to be always passed in the first arg reg.
1373 * Also do this if the first argument is a reference type, since virtual calls
1374 * are sometimes made using calli without sig->hasthis set, like in the delegate
1375 * invoke wrappers.
1377 if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
1378 if (sig->hasthis) {
1379 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1380 } else {
1381 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1382 pstart = 1;
1384 n ++;
1385 cinfo->ret.reg = gr;
1386 gr ++;
1387 cinfo->vret_arg_index = 1;
1388 } else {
1389 /* this */
1390 if (sig->hasthis) {
1391 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1392 n ++;
1394 if (vtype_retaddr) {
1395 cinfo->ret.reg = gr;
1396 gr ++;
/* Now place each formal parameter. */
1400 DEBUG(g_print("params: %d\n", sig->param_count));
1401 for (i = pstart; i < sig->param_count; ++i) {
1402 ArgInfo *ainfo = &cinfo->args [n];
1404 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1405 /* Prevent implicit arguments and sig_cookie from
1406 being passed in registers */
1407 gr = ARMREG_R3 + 1;
1408 fpr = ARM_VFP_F16;
1409 /* Emit the signature cookie just before the implicit arguments */
1410 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1412 DEBUG(g_print("param %d: ", i));
1413 if (sig->params [i]->byref) {
1414 DEBUG(g_print("byref\n"));
1415 add_general (&gr, &stack_size, ainfo, TRUE);
1416 n++;
1417 continue;
1419 t = mini_get_underlying_type (sig->params [i]);
1420 switch (t->type) {
1421 case MONO_TYPE_I1:
1422 cinfo->args [n].is_signed = 1;
/* fall through: I1 differs from U1 only by signedness */
1423 case MONO_TYPE_U1:
1424 cinfo->args [n].size = 1;
1425 add_general (&gr, &stack_size, ainfo, TRUE);
1426 break;
1427 case MONO_TYPE_I2:
1428 cinfo->args [n].is_signed = 1;
/* fall through: I2 differs from U2 only by signedness */
1429 case MONO_TYPE_U2:
1430 cinfo->args [n].size = 2;
1431 add_general (&gr, &stack_size, ainfo, TRUE);
1432 break;
1433 case MONO_TYPE_I4:
1434 case MONO_TYPE_U4:
1435 cinfo->args [n].size = 4;
1436 add_general (&gr, &stack_size, ainfo, TRUE);
1437 break;
1438 case MONO_TYPE_I:
1439 case MONO_TYPE_U:
1440 case MONO_TYPE_PTR:
1441 case MONO_TYPE_FNPTR:
1442 case MONO_TYPE_OBJECT:
1443 cinfo->args [n].size = sizeof (target_mgreg_t);
1444 add_general (&gr, &stack_size, ainfo, TRUE);
1445 break;
1446 case MONO_TYPE_GENERICINST:
1447 if (!mono_type_generic_inst_is_valuetype (t)) {
1448 cinfo->args [n].size = sizeof (target_mgreg_t);
1449 add_general (&gr, &stack_size, ainfo, TRUE);
1450 break;
1452 if (mini_is_gsharedvt_variable_type (t)) {
1453 /* gsharedvt arguments are passed by ref */
1454 g_assert (mini_is_gsharedvt_type (t));
1455 add_general (&gr, &stack_size, ainfo, TRUE);
1456 switch (ainfo->storage) {
1457 case RegTypeGeneral:
1458 ainfo->storage = RegTypeGSharedVtInReg;
1459 break;
1460 case RegTypeBase:
1461 ainfo->storage = RegTypeGSharedVtOnStack;
1462 break;
1463 default:
1464 g_assert_not_reached ();
1466 break;
1468 /* Fall through */
1469 case MONO_TYPE_TYPEDBYREF:
1470 case MONO_TYPE_VALUETYPE: {
1471 gint size;
1472 int align_size;
1473 int nwords, nfields, esize;
1474 guint32 align;
/* HFAs go entirely in VFP registers when enough are left. */
1476 if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
1477 if (fpr + nfields < ARM_VFP_F16) {
1478 ainfo->storage = RegTypeHFA;
1479 ainfo->reg = fpr;
1480 ainfo->nregs = nfields;
1481 ainfo->esize = esize;
1482 if (esize == 4)
1483 fpr += nfields;
1484 else
1485 fpr += nfields * 2;
1486 break;
1487 } else {
1488 fpr = ARM_VFP_F16;
1492 if (t->type == MONO_TYPE_TYPEDBYREF) {
1493 size = MONO_ABI_SIZEOF (MonoTypedRef);
1494 align = sizeof (target_mgreg_t);
1495 } else {
1496 MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]);
1497 if (is_pinvoke)
1498 size = mono_class_native_size (klass, &align);
1499 else
1500 size = mini_type_stack_size_full (t, &align, FALSE);
1502 DEBUG(g_print ("load %d bytes struct\n", size));
1504 #ifdef TARGET_WATCHOS
1505 /* Watchos pass large structures by ref */
1506 /* We only do this for pinvoke to make gsharedvt/dyncall simpler */
1507 if (sig->pinvoke && size > 16) {
1508 add_general (&gr, &stack_size, ainfo, TRUE);
1509 switch (ainfo->storage) {
1510 case RegTypeGeneral:
1511 ainfo->storage = RegTypeStructByAddr;
1512 break;
1513 case RegTypeBase:
1514 ainfo->storage = RegTypeStructByAddrOnStack;
1515 break;
1516 default:
1517 g_assert_not_reached ();
1518 break;
1520 break;
1522 #endif
/* Split the struct into words passed partly in r0-r3, partly on the stack. */
1524 align_size = size;
1525 nwords = 0;
1526 align_size += (sizeof (target_mgreg_t) - 1);
1527 align_size &= ~(sizeof (target_mgreg_t) - 1);
1528 nwords = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t);
1529 ainfo->storage = RegTypeStructByVal;
1530 ainfo->struct_size = size;
1531 ainfo->align = align;
1533 if (eabi_supported) {
1534 if (align >= 8 && (gr & 1))
1535 gr ++;
1537 if (gr > ARMREG_R3) {
1538 ainfo->size = 0;
1539 ainfo->vtsize = nwords;
1540 } else {
1541 int rest = ARMREG_R3 - gr + 1;
1542 int n_in_regs = rest >= nwords? nwords: rest;
1544 ainfo->size = n_in_regs;
1545 ainfo->vtsize = nwords - n_in_regs;
1546 ainfo->reg = gr;
1547 gr += n_in_regs;
1548 nwords -= n_in_regs;
1550 stack_size = ALIGN_TO (stack_size, align);
1552 ainfo->offset = stack_size;
1553 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1554 stack_size += nwords * sizeof (target_mgreg_t);
1555 break;
1557 case MONO_TYPE_U8:
1558 case MONO_TYPE_I8:
1559 ainfo->size = 8;
1560 add_general (&gr, &stack_size, ainfo, FALSE);
1561 break;
1562 case MONO_TYPE_R4:
1563 ainfo->size = 4;
1565 if (IS_HARD_FLOAT)
1566 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1567 else
1568 add_general (&gr, &stack_size, ainfo, TRUE);
1569 break;
1570 case MONO_TYPE_R8:
1571 ainfo->size = 8;
1573 if (IS_HARD_FLOAT)
1574 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1575 else
1576 add_general (&gr, &stack_size, ainfo, FALSE);
1577 break;
1578 case MONO_TYPE_VAR:
1579 case MONO_TYPE_MVAR:
1580 /* gsharedvt arguments are passed by ref */
1581 g_assert (mini_is_gsharedvt_type (t));
1582 add_general (&gr, &stack_size, ainfo, TRUE);
1583 switch (ainfo->storage) {
1584 case RegTypeGeneral:
1585 ainfo->storage = RegTypeGSharedVtInReg;
1586 break;
1587 case RegTypeBase:
1588 ainfo->storage = RegTypeGSharedVtOnStack;
1589 break;
1590 default:
1591 g_assert_not_reached ();
1593 break;
1594 default:
1595 g_error ("Can't handle 0x%x", sig->params [i]->type);
1597 n ++;
1600 /* Handle the case where there are no implicit arguments */
1601 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1602 /* Prevent implicit arguments and sig_cookie from
1603 being passed in registers */
1604 gr = ARMREG_R3 + 1;
1605 fpr = ARM_VFP_F16;
1606 /* Emit the signature cookie just before the implicit arguments */
1607 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1610 DEBUG (g_print (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1611 stack_size = ALIGN_TO (stack_size, MONO_ARCH_FRAME_ALIGNMENT);
1613 cinfo->stack_usage = stack_size;
1614 return cinfo;
1618 * We need to create a temporary value if the argument is not stored in
1619 * a linear memory range in the ccontext (this normally happens for
1620 * value types if they are passed both by stack and regs).
1622 static int
1623 arg_need_temp (ArgInfo *ainfo)
1625 if (ainfo->storage == RegTypeStructByVal && ainfo->vtsize)
1626 return ainfo->struct_size;
1627 return 0;
1630 static gpointer
1631 arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
1633 switch (ainfo->storage) {
1634 case RegTypeIRegPair:
1635 case RegTypeGeneral:
1636 case RegTypeStructByVal:
1637 return &ccontext->gregs [ainfo->reg];
1638 case RegTypeHFA:
1639 case RegTypeFP:
1640 if (IS_HARD_FLOAT)
1641 return &ccontext->fregs [ainfo->reg];
1642 else
1643 return &ccontext->gregs [ainfo->reg];
1644 case RegTypeBase:
1645 return ccontext->stack + ainfo->offset;
1646 default:
1647 g_error ("Arg storage type not yet supported");
1651 static void
1652 arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
1654 int reg_size = ainfo->size * sizeof (host_mgreg_t);
1655 g_assert (arg_need_temp (ainfo));
1656 memcpy (dest, &ccontext->gregs [ainfo->reg], reg_size);
1657 memcpy ((host_mgreg_t*)dest + ainfo->size, ccontext->stack + ainfo->offset, ainfo->struct_size - reg_size);
1660 static void
1661 arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
1663 int reg_size = ainfo->size * sizeof (host_mgreg_t);
1664 g_assert (arg_need_temp (ainfo));
1665 memcpy (&ccontext->gregs [ainfo->reg], src, reg_size);
1666 memcpy (ccontext->stack + ainfo->offset, (host_mgreg_t*)src + ainfo->size, ainfo->struct_size - reg_size);
1669 /* Set arguments in the ccontext (for i2n entry) */
/*
 * Marshals the interpreter frame's arguments into a freshly zeroed
 * CallContext, allocating the outgoing stack area when one is needed.
 * The returned cinfo is temporary and freed before returning.
 */
1670 void
1671 mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1673 const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
1674 CallInfo *cinfo = get_call_info (NULL, sig);
1675 gpointer storage;
1676 ArgInfo *ainfo;
1678 memset (ccontext, 0, sizeof (CallContext));
1680 ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
1681 if (ccontext->stack_size)
1682 ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);
/* For by-address returns, pass the interp frame's return buffer address. */
1684 if (sig->ret->type != MONO_TYPE_VOID) {
1685 ainfo = &cinfo->ret;
1686 if (ainfo->storage == RegTypeStructByAddr) {
1687 storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
1688 ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)(gsize)storage;
1692 g_assert (!sig->hasthis);
1694 for (int i = 0; i < sig->param_count; i++) {
1695 ainfo = &cinfo->args [i];
1696 int temp_size = arg_need_temp (ainfo);
/* Split reg/stack args are built in a temp and then scattered. */
1698 if (temp_size)
1699 storage = alloca (temp_size); // FIXME? alloca in a loop
1700 else
1701 storage = arg_get_storage (ccontext, ainfo);
1703 interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
1704 if (temp_size)
1705 arg_set_val (ccontext, ainfo, storage);
1708 g_free (cinfo);
1711 /* Set return value in the ccontext (for n2i return) */
/*
 * Copies the interpreter frame's return value into the CallContext's
 * register storage.  By-address returns need no copy here: the native
 * caller supplied the destination buffer.
 */
1712 void
1713 mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1715 const MonoEECallbacks *interp_cb;
1716 CallInfo *cinfo;
1717 gpointer storage;
1718 ArgInfo *ainfo;
1720 if (sig->ret->type == MONO_TYPE_VOID)
1721 return;
1723 interp_cb = mini_get_interp_callbacks ();
1724 cinfo = get_call_info (NULL, sig);
1725 ainfo = &cinfo->ret;
1727 if (ainfo->storage != RegTypeStructByAddr) {
1728 g_assert (!arg_need_temp (ainfo));
/* Resolve the target slot first, then clear the context before filling it. */
1729 storage = arg_get_storage (ccontext, ainfo);
1730 memset (ccontext, 0, sizeof (CallContext)); // FIXME
1731 interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
1734 g_free (cinfo);
1737 /* Gets the arguments from ccontext (for n2i entry) */
/*
 * Transfers the native caller's arguments out of the CallContext into the
 * interpreter frame; split reg/stack arguments are first gathered into a
 * temporary buffer.
 */
1738 void
1739 mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1741 const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
1742 CallInfo *cinfo = get_call_info (NULL, sig);
1743 gpointer storage;
1744 ArgInfo *ainfo;
/* Record the caller-provided return buffer for by-address returns. */
1746 if (sig->ret->type != MONO_TYPE_VOID) {
1747 ainfo = &cinfo->ret;
1748 if (ainfo->storage == RegTypeStructByAddr) {
1749 storage = (gpointer)(gsize)ccontext->gregs [cinfo->ret.reg];
1750 interp_cb->frame_arg_set_storage ((MonoInterpFrameHandle)frame, sig, -1, storage);
1754 for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
1755 ainfo = &cinfo->args [i];
1756 int temp_size = arg_need_temp (ainfo);
1758 if (temp_size) {
1759 storage = alloca (temp_size); // FIXME? alloca in a loop
1760 arg_get_val (ccontext, ainfo, storage);
1761 } else {
1762 storage = arg_get_storage (ccontext, ainfo);
1764 interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
1767 g_free (cinfo);
1770 /* Gets the return value from ccontext (for i2n exit) */
/*
 * Copies the native callee's return value from the CallContext into the
 * interpreter frame.  By-address returns were already written through the
 * buffer pointer, so nothing is copied in that case.
 */
1771 void
1772 mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1774 const MonoEECallbacks *interp_cb;
1775 CallInfo *cinfo;
1776 ArgInfo *ainfo;
1777 gpointer storage;
1779 if (sig->ret->type == MONO_TYPE_VOID)
1780 return;
1782 interp_cb = mini_get_interp_callbacks ();
1783 cinfo = get_call_info (NULL, sig);
1784 ainfo = &cinfo->ret;
1786 if (ainfo->storage != RegTypeStructByAddr) {
1787 g_assert (!arg_need_temp (ainfo));
1788 storage = arg_get_storage (ccontext, ainfo);
1789 interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
1792 g_free (cinfo);
1795 #ifndef DISABLE_JIT
1797 gboolean
1798 mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
1800 g_assert (caller_sig);
1801 g_assert (callee_sig);
1803 CallInfo *caller_info = get_call_info (NULL, caller_sig);
1804 CallInfo *callee_info = get_call_info (NULL, callee_sig);
1807 * Tailcalls with more callee stack usage than the caller cannot be supported, since
1808 * the extra stack space would be left on the stack after the tailcall.
1810 gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
1811 && IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);
1813 // FIXME The limit here is that moving the parameters requires addressing the parameters
1814 // with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
1815 res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (4096 - 4));
1816 res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (4096 - 4));
1818 g_free (caller_info);
1819 g_free (callee_info);
1821 return res;
/*
 * debug_omit_fp:
 *   Debug hook for frame-pointer omission; with the #if 0 branch enabled it
 * can be bisected via mono_debug_count (), otherwise omission is always
 * permitted.
 */
1824 static gboolean
1825 debug_omit_fp (void)
1827 #if 0
1828 return mono_debug_count ();
1829 #else
1830 return TRUE;
1831 #endif
1835 * mono_arch_compute_omit_fp:
1836 * Determine whether the frame pointer can be eliminated.
1838 static void
1839 mono_arch_compute_omit_fp (MonoCompile *cfg)
1841 MonoMethodSignature *sig;
1842 MonoMethodHeader *header;
1843 int i, locals_size;
1844 CallInfo *cinfo;
/* The result is cached in cfg->arch; compute it only once. */
1846 if (cfg->arch.omit_fp_computed)
1847 return;
1849 header = cfg->header;
1851 sig = mono_method_signature_internal (cfg->method);
1853 if (!cfg->arch.cinfo)
1854 cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
1855 cinfo = cfg->arch.cinfo;
1858 * FIXME: Remove some of the restrictions.
/* Start optimistic, then veto omission for each disqualifying condition. */
1860 cfg->arch.omit_fp = TRUE;
1861 cfg->arch.omit_fp_computed = TRUE;
1863 if (cfg->disable_omit_fp)
1864 cfg->arch.omit_fp = FALSE;
1865 if (!debug_omit_fp ())
1866 cfg->arch.omit_fp = FALSE;
1868 if (cfg->method->save_lmf)
1869 cfg->arch.omit_fp = FALSE;
1871 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1872 cfg->arch.omit_fp = FALSE;
1873 if (header->num_clauses)
1874 cfg->arch.omit_fp = FALSE;
1875 if (cfg->param_area)
1876 cfg->arch.omit_fp = FALSE;
1877 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1878 cfg->arch.omit_fp = FALSE;
1879 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)))
1880 cfg->arch.omit_fp = FALSE;
1881 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1882 ArgInfo *ainfo = &cinfo->args [i];
1884 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1886 * The stack offset can only be determined when the frame
1887 * size is known.
1889 cfg->arch.omit_fp = FALSE;
/* NOTE(review): locals_size is computed below but never read afterwards
 * in this function — apparently dead code; confirm before removing. */
1893 locals_size = 0;
1894 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1895 MonoInst *ins = cfg->varinfo [i];
1896 int ialign;
1898 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1903 * Set var information according to the calling convention. arm version.
1904 * The locals var stuff should most likely be split in another method.
1906 void
1907 mono_arch_allocate_vars (MonoCompile *cfg)
1909 MonoMethodSignature *sig;
1910 MonoMethodHeader *header;
1911 MonoInst *ins;
1912 MonoType *sig_ret;
1913 int i, offset, size, align, curinst;
1914 CallInfo *cinfo;
1915 ArgInfo *ainfo;
1916 guint32 ualign;
1918 sig = mono_method_signature_internal (cfg->method);
1920 if (!cfg->arch.cinfo)
1921 cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
1922 cinfo = cfg->arch.cinfo;
1923 sig_ret = mini_get_underlying_type (sig->ret);
1925 mono_arch_compute_omit_fp (cfg);
/* Frame is addressed off SP when the frame pointer is omitted. */
1927 if (cfg->arch.omit_fp)
1928 cfg->frame_reg = ARMREG_SP;
1929 else
1930 cfg->frame_reg = ARMREG_FP;
1932 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1934 /* allow room for the vararg method args: void* and long/double */
1935 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1936 cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8);
1938 header = cfg->header;
1940 /* See mono_arch_get_global_int_regs () */
1941 if (cfg->flags & MONO_CFG_HAS_CALLS)
1942 cfg->uses_rgctx_reg = TRUE;
1944 if (cfg->frame_reg != ARMREG_SP)
1945 cfg->used_int_regs |= 1 << cfg->frame_reg;
1947 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1948 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1949 cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG);
1951 offset = 0;
1952 curinst = 0;
/* Simple scalar returns stay in r0 and need no stack slot. */
1953 if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) {
1954 if (sig_ret->type != MONO_TYPE_VOID) {
1955 cfg->ret->opcode = OP_REGVAR;
1956 cfg->ret->inst_c0 = ARMREG_R0;
1959 /* local vars are at a positive offset from the stack pointer */
1961 * also note that if the function uses alloca, we use FP
1962 * to point at the local variables.
1964 offset = 0; /* linkage area */
1965 /* align the offset to 16 bytes: not sure this is needed here */
1966 //offset += 8 - 1;
1967 //offset &= ~(8 - 1);
1969 /* add parameter area size for called functions */
1970 offset += cfg->param_area;
1971 offset += 8 - 1;
1972 offset &= ~(8 - 1);
1973 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1974 offset += 8;
1976 /* allow room to save the return value */
1977 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1978 offset += 8;
1980 switch (cinfo->ret.storage) {
1981 case RegTypeStructByVal:
1982 case RegTypeHFA:
1983 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1984 offset = ALIGN_TO (offset, 8);
1985 cfg->ret->opcode = OP_REGOFFSET;
1986 cfg->ret->inst_basereg = cfg->frame_reg;
1987 cfg->ret->inst_offset = offset;
1988 if (cinfo->ret.storage == RegTypeStructByVal)
1989 offset += cinfo->ret.nregs * sizeof (target_mgreg_t);
1990 else
1991 offset += 32;
1992 break;
1993 case RegTypeStructByAddr:
/* The hidden return-address argument gets its own stack slot. */
1994 ins = cfg->vret_addr;
1995 offset += sizeof (target_mgreg_t) - 1;
1996 offset &= ~(sizeof (target_mgreg_t) - 1);
1997 ins->inst_offset = offset;
1998 ins->opcode = OP_REGOFFSET;
1999 ins->inst_basereg = cfg->frame_reg;
2000 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2001 g_print ("vret_addr =");
2002 mono_print_ins (cfg->vret_addr);
2004 offset += sizeof (target_mgreg_t);
2005 break;
2006 default:
2007 break;
2010 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
2011 if (cfg->arch.seq_point_info_var) {
2012 MonoInst *ins;
2014 ins = cfg->arch.seq_point_info_var;
2016 size = 4;
2017 align = 4;
2018 offset += align - 1;
2019 offset &= ~(align - 1);
2020 ins->opcode = OP_REGOFFSET;
2021 ins->inst_basereg = cfg->frame_reg;
2022 ins->inst_offset = offset;
2023 offset += size;
2025 if (cfg->arch.ss_trigger_page_var) {
2026 MonoInst *ins;
2028 ins = cfg->arch.ss_trigger_page_var;
2029 size = 4;
2030 align = 4;
2031 offset += align - 1;
2032 offset &= ~(align - 1);
2033 ins->opcode = OP_REGOFFSET;
2034 ins->inst_basereg = cfg->frame_reg;
2035 ins->inst_offset = offset;
2036 offset += size;
2039 if (cfg->arch.seq_point_ss_method_var) {
2040 MonoInst *ins;
2042 ins = cfg->arch.seq_point_ss_method_var;
2043 size = 4;
2044 align = 4;
2045 offset += align - 1;
2046 offset &= ~(align - 1);
2047 ins->opcode = OP_REGOFFSET;
2048 ins->inst_basereg = cfg->frame_reg;
2049 ins->inst_offset = offset;
2050 offset += size;
2052 if (cfg->arch.seq_point_bp_method_var) {
2053 MonoInst *ins;
2055 ins = cfg->arch.seq_point_bp_method_var;
2056 size = 4;
2057 align = 4;
2058 offset += align - 1;
2059 offset &= ~(align - 1);
2060 ins->opcode = OP_REGOFFSET;
2061 ins->inst_basereg = cfg->frame_reg;
2062 ins->inst_offset = offset;
2063 offset += size;
2066 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
2067 /* Allocate a temporary used by the atomic ops */
2068 size = 4;
2069 align = 4;
2071 /* Allocate a local slot to hold the sig cookie address */
2072 offset += align - 1;
2073 offset &= ~(align - 1);
2074 cfg->arch.atomic_tmp_offset = offset;
2075 offset += size;
2076 } else {
2077 cfg->arch.atomic_tmp_offset = -1;
2080 cfg->locals_min_stack_offset = offset;
/* Lay out the method's local variables. */
2082 curinst = cfg->locals_start;
2083 for (i = curinst; i < cfg->num_varinfo; ++i) {
2084 MonoType *t;
2086 ins = cfg->varinfo [i];
2087 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2088 continue;
2090 t = ins->inst_vtype;
2091 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
2092 continue;
2094 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2095 * pinvoke wrappers when they call functions returning structure */
2096 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2097 size = mono_class_native_size (mono_class_from_mono_type_internal (t), &ualign);
2098 align = ualign;
2100 else
2101 size = mono_type_size (t, &align);
2103 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2104 * since it loads/stores misaligned words, which don't do the right thing.
2106 if (align < 4 && size >= 4)
2107 align = 4;
2108 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2109 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2110 offset += align - 1;
2111 offset &= ~(align - 1);
2112 ins->opcode = OP_REGOFFSET;
2113 ins->inst_offset = offset;
2114 ins->inst_basereg = cfg->frame_reg;
2115 offset += size;
2116 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2119 cfg->locals_max_stack_offset = offset;
/* Lay out the incoming arguments, starting with 'this'. */
2121 curinst = 0;
2122 if (sig->hasthis) {
2123 ins = cfg->args [curinst];
2124 if (ins->opcode != OP_REGVAR) {
2125 ins->opcode = OP_REGOFFSET;
2126 ins->inst_basereg = cfg->frame_reg;
2127 offset += sizeof (target_mgreg_t) - 1;
2128 offset &= ~(sizeof (target_mgreg_t) - 1);
2129 ins->inst_offset = offset;
2130 offset += sizeof (target_mgreg_t);
2132 curinst++;
2135 if (sig->call_convention == MONO_CALL_VARARG) {
2136 size = 4;
2137 align = 4;
2139 /* Allocate a local slot to hold the sig cookie address */
2140 offset += align - 1;
2141 offset &= ~(align - 1);
2142 cfg->sig_cookie = offset;
2143 offset += size;
2146 for (i = 0; i < sig->param_count; ++i) {
2147 ainfo = cinfo->args + i;
2149 ins = cfg->args [curinst];
2151 switch (ainfo->storage) {
2152 case RegTypeHFA:
2153 offset = ALIGN_TO (offset, 8);
2154 ins->opcode = OP_REGOFFSET;
2155 ins->inst_basereg = cfg->frame_reg;
2156 /* These arguments are saved to the stack in the prolog */
2157 ins->inst_offset = offset;
2158 if (cfg->verbose_level >= 2)
2159 g_print ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
2160 // FIXME:
2161 offset += 32;
2162 break;
2163 default:
2164 break;
2167 if (ins->opcode != OP_REGVAR) {
2168 ins->opcode = OP_REGOFFSET;
2169 ins->inst_basereg = cfg->frame_reg;
2170 size = mini_type_stack_size_full (sig->params [i], &ualign, sig->pinvoke);
2171 align = ualign;
2172 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2173 * since it loads/stores misaligned words, which don't do the right thing.
2175 if (align < 4 && size >= 4)
2176 align = 4;
2177 /* The code in the prolog () stores words when storing vtypes received in a register */
2178 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2179 align = 4;
2180 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2181 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2182 offset += align - 1;
2183 offset &= ~(align - 1);
2184 ins->inst_offset = offset;
2185 offset += size;
2187 curinst++;
2190 /* align the offset to 8 bytes */
2191 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2192 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2193 offset += 8 - 1;
2194 offset &= ~(8 - 1);
2196 /* change sign? */
2197 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *   Create the ARM-specific compile-time variables: VFP scratch slots for
 * hard-float, the vret address argument for by-address returns, and the
 * sequence-point variables used by the soft debugger.
 */
2200 void
2201 mono_arch_create_vars (MonoCompile *cfg)
2203 MonoMethodSignature *sig;
2204 CallInfo *cinfo;
2205 int i;
2207 sig = mono_method_signature_internal (cfg->method);
2209 if (!cfg->arch.cinfo)
2210 cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
2211 cinfo = cfg->arch.cinfo;
/* Two double-sized scratch slots used when spilling VFP values. */
2213 if (IS_HARD_FLOAT) {
2214 for (i = 0; i < 2; i++) {
2215 MonoInst *inst = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL);
2216 inst->flags |= MONO_INST_VOLATILE;
2218 cfg->arch.vfp_scratch_slots [i] = inst;
2222 if (cinfo->ret.storage == RegTypeStructByVal)
2223 cfg->ret_var_is_local = TRUE;
2225 if (cinfo->ret.storage == RegTypeStructByAddr) {
2226 cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
2227 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2228 g_print ("vret_addr = ");
2229 mono_print_ins (cfg->vret_addr);
2233 if (cfg->gen_sdb_seq_points) {
2234 if (cfg->compile_aot) {
2235 MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2236 ins->flags |= MONO_INST_VOLATILE;
2237 cfg->arch.seq_point_info_var = ins;
2239 if (!cfg->soft_breakpoints) {
2240 /* Allocate a separate variable for this to save 1 load per seq point */
2241 ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2242 ins->flags |= MONO_INST_VOLATILE;
2243 cfg->arch.ss_trigger_page_var = ins;
2246 if (cfg->soft_breakpoints) {
2247 MonoInst *ins;
2249 ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2250 ins->flags |= MONO_INST_VOLATILE;
2251 cfg->arch.seq_point_ss_method_var = ins;
2253 ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2254 ins->flags |= MONO_INST_VOLATILE;
2255 cfg->arch.seq_point_bp_method_var = ins;
2260 static void
2261 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2263 MonoMethodSignature *tmp_sig;
2264 int sig_reg;
2266 if (MONO_IS_TAILCALL_OPCODE (call))
2267 NOT_IMPLEMENTED;
2269 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2272 * mono_ArgIterator_Setup assumes the signature cookie is
2273 * passed first and all the arguments which were before it are
2274 * passed on the stack after the signature. So compensate by
2275 * passing a different signature.
2277 tmp_sig = mono_metadata_signature_dup (call->signature);
2278 tmp_sig->param_count -= call->signature->sentinelpos;
2279 tmp_sig->sentinelpos = 0;
2280 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2282 sig_reg = mono_alloc_ireg (cfg);
2283 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2285 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
2288 #ifdef ENABLE_LLVM
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate the ARM CallInfo produced by get_call_info () into the
 * LLVMCallInfo representation consumed by the LLVM backend. On storage
 * classes the backend cannot model, sets cfg->disable_llvm with a message
 * and returns early.
 * NOTE(review): text below still carries fused gutter line numbers and is
 * missing dropped brace lines from the extraction.
 */
2289 LLVMCallInfo*
2290 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2292 int i, n;
2293 CallInfo *cinfo;
2294 ArgInfo *ainfo;
2295 LLVMCallInfo *linfo;
2297 n = sig->param_count + sig->hasthis;
2299 cinfo = get_call_info (cfg->mempool, sig);
2301 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2304 * LLVM always uses the native ABI while we use our own ABI, the
2305 * only difference is the handling of vtypes:
2306 * - we only pass/receive them in registers in some cases, and only
2307 * in 1 or 2 integer registers.
/* Map the return value's ARM storage class to an LLVM return class */
2309 switch (cinfo->ret.storage) {
2310 case RegTypeGeneral:
2311 case RegTypeNone:
2312 case RegTypeFP:
2313 case RegTypeIRegPair:
2314 break;
2315 case RegTypeStructByAddr:
2316 if (sig->pinvoke) {
2317 linfo->ret.storage = LLVMArgVtypeByRef;
2318 } else {
2319 /* Vtype returned using a hidden argument */
2320 linfo->ret.storage = LLVMArgVtypeRetAddr;
2321 linfo->vret_arg_index = cinfo->vret_arg_index;
2323 break;
2324 #if TARGET_WATCHOS
2325 case RegTypeStructByVal:
2326 /* LLVM models this by returning an int array */
2327 linfo->ret.storage = LLVMArgAsIArgs;
2328 linfo->ret.nslots = cinfo->ret.nregs;
2329 break;
2330 #endif
2331 case RegTypeHFA:
2332 linfo->ret.storage = LLVMArgFpStruct;
2333 linfo->ret.nslots = cinfo->ret.nregs;
2334 linfo->ret.esize = cinfo->ret.esize;
2335 break;
2336 default:
2337 cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage);
2338 cfg->disable_llvm = TRUE;
2339 return linfo;
/* Map each argument's ARM storage class to an LLVM argument class */
2342 for (i = 0; i < n; ++i) {
2343 LLVMArgInfo *lainfo = &linfo->args [i];
2344 ainfo = cinfo->args + i;
2346 lainfo->storage = LLVMArgNone;
2348 switch (ainfo->storage) {
2349 case RegTypeGeneral:
2350 case RegTypeIRegPair:
2351 case RegTypeBase:
2352 case RegTypeBaseGen:
2353 case RegTypeFP:
2354 lainfo->storage = LLVMArgNormal;
2355 break;
2356 case RegTypeStructByVal: {
2357 lainfo->storage = LLVMArgAsIArgs;
/* EABI 8-byte-aligned structs occupy 8-byte slots, otherwise 4 */
2358 int slotsize = eabi_supported && ainfo->align == 8 ? 8 : 4;
2359 lainfo->nslots = ALIGN_TO (ainfo->struct_size, slotsize) / slotsize;
2360 lainfo->esize = slotsize;
2361 break;
2363 case RegTypeStructByAddr:
2364 case RegTypeStructByAddrOnStack:
2365 lainfo->storage = LLVMArgVtypeByRef;
2366 break;
2367 case RegTypeHFA: {
2368 int j;
2370 lainfo->storage = LLVMArgAsFpArgs;
2371 lainfo->nslots = ainfo->nregs;
2372 lainfo->esize = ainfo->esize;
2373 for (j = 0; j < ainfo->nregs; ++j)
2374 lainfo->pair_storage [j] = LLVMArgInFPReg;
2375 break;
2377 default:
2378 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2379 cfg->disable_llvm = TRUE;
2380 break;
2384 return linfo;
2386 #endif
/*
 * mono_arch_emit_call:
 *
 *   Emit IR to pass the arguments of CALL according to the ARM calling
 * convention computed by get_call_info (): integer/longs into registers or
 * onto the stack, floats via soft-float int moves or VFP registers, vtypes
 * through OP_OUTARG_VT, and the vararg signature cookie where required.
 * NOTE(review): text below still carries fused gutter line numbers and is
 * missing dropped brace/comment-delimiter lines from the extraction.
 */
2388 void
2389 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2391 MonoInst *in, *ins;
2392 MonoMethodSignature *sig;
2393 int i, n;
2394 CallInfo *cinfo;
2396 sig = call->signature;
2397 n = sig->param_count + sig->hasthis;
2399 cinfo = get_call_info (cfg->mempool, sig);
/* First set up how the return value will be received */
2401 switch (cinfo->ret.storage) {
2402 case RegTypeStructByVal:
2403 case RegTypeHFA:
2404 if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
2405 /* The JIT will transform this into a normal call */
2406 call->vret_in_reg = TRUE;
2407 break;
2409 if (MONO_IS_TAILCALL_OPCODE (call))
2410 break;
2412 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2413 * the location pointed to by it after call in emit_move_return_value ().
2415 if (!cfg->arch.vret_addr_loc) {
2416 cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2417 /* Prevent it from being register allocated or optimized away */
2418 cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
2421 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
2422 break;
2423 case RegTypeStructByAddr: {
2424 MonoInst *vtarg;
2425 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2426 vtarg->sreg1 = call->vret_var->dreg;
2427 vtarg->dreg = mono_alloc_preg (cfg);
2428 MONO_ADD_INS (cfg->cbb, vtarg);
2430 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2431 break;
2433 default:
2434 break;
/* Now marshal each argument according to its computed storage class */
2437 for (i = 0; i < n; ++i) {
2438 ArgInfo *ainfo = cinfo->args + i;
2439 MonoType *t;
2441 if (i >= sig->hasthis)
2442 t = sig->params [i - sig->hasthis];
2443 else
2444 t = mono_get_int_type ();
2445 t = mini_get_underlying_type (t);
2447 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2448 /* Emit the signature cookie just before the implicit arguments */
2449 emit_sig_cookie (cfg, call, cinfo);
2452 in = call->args [i];
2454 switch (ainfo->storage) {
2455 case RegTypeGeneral:
2456 case RegTypeIRegPair:
2457 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2458 MONO_INST_NEW (cfg, ins, OP_MOVE);
2459 ins->dreg = mono_alloc_ireg (cfg);
2460 ins->sreg1 = MONO_LVREG_LS (in->dreg);
2461 MONO_ADD_INS (cfg->cbb, ins);
2462 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2464 MONO_INST_NEW (cfg, ins, OP_MOVE);
2465 ins->dreg = mono_alloc_ireg (cfg);
2466 ins->sreg1 = MONO_LVREG_MS (in->dreg);
2467 MONO_ADD_INS (cfg->cbb, ins);
2468 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2469 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2470 if (ainfo->size == 4) {
2471 if (IS_SOFT_FLOAT) {
2472 /* mono_emit_call_args () have already done the r8->r4 conversion */
2473 /* The converted value is in an int vreg */
2474 MONO_INST_NEW (cfg, ins, OP_MOVE);
2475 ins->dreg = mono_alloc_ireg (cfg);
2476 ins->sreg1 = in->dreg;
2477 MONO_ADD_INS (cfg->cbb, ins);
2478 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2479 } else {
2480 int creg;
/* Bounce the float through the param area to move it into a core register */
2482 cfg->param_area = MAX (cfg->param_area, 8);
2483 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2484 creg = mono_alloc_ireg (cfg);
2485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2486 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2488 } else {
2489 if (IS_SOFT_FLOAT) {
2490 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2491 ins->dreg = mono_alloc_ireg (cfg);
2492 ins->sreg1 = in->dreg;
2493 MONO_ADD_INS (cfg->cbb, ins);
2494 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2496 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2497 ins->dreg = mono_alloc_ireg (cfg);
2498 ins->sreg1 = in->dreg;
2499 MONO_ADD_INS (cfg->cbb, ins);
2500 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2501 } else {
2502 int creg;
/* Bounce the double through the param area into a core register pair */
2504 cfg->param_area = MAX (cfg->param_area, 8);
2505 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2506 creg = mono_alloc_ireg (cfg);
2507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2508 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2509 creg = mono_alloc_ireg (cfg);
2510 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2511 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2514 cfg->flags |= MONO_CFG_HAS_FPOUT;
2515 } else {
2516 MONO_INST_NEW (cfg, ins, OP_MOVE);
2517 ins->dreg = mono_alloc_ireg (cfg);
2518 ins->sreg1 = in->dreg;
2519 MONO_ADD_INS (cfg->cbb, ins);
2521 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2523 break;
2524 case RegTypeStructByVal:
2525 case RegTypeGSharedVtInReg:
2526 case RegTypeGSharedVtOnStack:
2527 case RegTypeHFA:
2528 case RegTypeStructByAddr:
2529 case RegTypeStructByAddrOnStack:
/* Vtype arguments are handled later by mono_arch_emit_outarg_vt () */
2530 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2531 ins->opcode = OP_OUTARG_VT;
2532 ins->sreg1 = in->dreg;
2533 ins->klass = in->klass;
2534 ins->inst_p0 = call;
2535 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2536 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2537 mono_call_inst_add_outarg_vt (cfg, call, ins);
2538 MONO_ADD_INS (cfg->cbb, ins);
2539 break;
2540 case RegTypeBase:
2541 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2542 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2543 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2544 if (t->type == MONO_TYPE_R8) {
2545 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2546 } else {
2547 if (IS_SOFT_FLOAT)
2548 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2549 else
2550 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2552 } else {
2553 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2555 break;
2556 case RegTypeBaseGen:
/* Argument straddles the last register (r3) and the first stack slot */
2557 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2558 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? MONO_LVREG_LS (in->dreg) : MONO_LVREG_MS (in->dreg));
2559 MONO_INST_NEW (cfg, ins, OP_MOVE);
2560 ins->dreg = mono_alloc_ireg (cfg);
2561 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? MONO_LVREG_MS (in->dreg) : MONO_LVREG_LS (in->dreg);
2562 MONO_ADD_INS (cfg->cbb, ins);
2563 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2564 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2565 int creg;
2567 /* This should work for soft-float as well */
2569 cfg->param_area = MAX (cfg->param_area, 8);
2570 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2571 creg = mono_alloc_ireg (cfg);
2572 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2574 creg = mono_alloc_ireg (cfg);
2575 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2576 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2577 cfg->flags |= MONO_CFG_HAS_FPOUT;
2578 } else {
2579 g_assert_not_reached ();
2581 break;
2582 case RegTypeFP: {
2583 int fdreg = mono_alloc_freg (cfg);
2585 if (ainfo->size == 8) {
2586 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2587 ins->sreg1 = in->dreg;
2588 ins->dreg = fdreg;
2589 MONO_ADD_INS (cfg->cbb, ins);
2591 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2592 } else {
2593 FloatArgData *fad;
2596 * Mono's register allocator doesn't speak single-precision registers that
2597 * overlap double-precision registers (i.e. armhf). So we have to work around
2598 * the register allocator and load the value from memory manually.
2600 * So we create a variable for the float argument and an instruction to store
2601 * the argument into the variable. We then store the list of these arguments
2602 * in call->float_args. This list is then used by emit_float_args later to
2603 * pass the arguments in the various call opcodes.
2605 * This is not very nice, and we should really try to fix the allocator.
2608 MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
2610 /* Make sure the instruction isn't seen as pointless and removed.
2612 float_arg->flags |= MONO_INST_VOLATILE;
2614 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2616 /* We use the dreg to look up the instruction later. The hreg is used to
2617 * emit the instruction that loads the value into the FP reg.
2619 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2620 fad->vreg = float_arg->dreg;
2621 fad->hreg = ainfo->reg;
2623 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2626 call->used_iregs |= 1 << ainfo->reg;
2627 cfg->flags |= MONO_CFG_HAS_FPOUT;
2628 break;
2630 default:
2631 g_assert_not_reached ();
2635 /* Handle the case where there are no implicit arguments */
2636 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2637 emit_sig_cookie (cfg, call, cinfo);
2639 call->call_info = cinfo;
2640 call->stack_usage = cinfo->stack_usage;
2643 static void
2644 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
2646 MonoInst *ins;
2648 switch (storage) {
2649 case RegTypeFP:
2650 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2651 ins->dreg = mono_alloc_freg (cfg);
2652 ins->sreg1 = arg->dreg;
2653 MONO_ADD_INS (cfg->cbb, ins);
2654 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
2655 break;
2656 default:
2657 g_assert_not_reached ();
2658 break;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Emit IR to pass the vtype argument SRC described by the ArgInfo saved
 * in ins->inst_p1: by hidden address (register or stack), as an HFA in VFP
 * registers, or by value split across core registers with any overflow
 * copied to the outgoing stack area.
 * NOTE(review): text below still carries fused gutter line numbers and is
 * missing dropped brace lines from the extraction.
 */
2662 void
2663 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2665 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2666 MonoInst *load;
2667 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
2668 int ovf_size = ainfo->vtsize;
2669 int doffset = ainfo->offset;
2670 int struct_size = ainfo->struct_size;
2671 int i, soffset, dreg, tmpreg;
2673 switch (ainfo->storage) {
2674 case RegTypeGSharedVtInReg:
2675 case RegTypeStructByAddr:
2676 /* Pass by addr */
2677 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2678 break;
2679 case RegTypeGSharedVtOnStack:
2680 case RegTypeStructByAddrOnStack:
2681 /* Pass by addr on stack */
2682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
2683 break;
2684 case RegTypeHFA:
/* Load each HFA element and route it into consecutive VFP registers */
2685 for (i = 0; i < ainfo->nregs; ++i) {
2686 if (ainfo->esize == 4)
2687 MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
2688 else
2689 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
2690 load->dreg = mono_alloc_freg (cfg);
2691 load->inst_basereg = src->dreg;
2692 load->inst_offset = i * ainfo->esize;
2693 MONO_ADD_INS (cfg->cbb, load);
2695 if (ainfo->esize == 4) {
2696 FloatArgData *fad;
2698 /* See RegTypeFP in mono_arch_emit_call () */
2699 MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
2700 float_arg->flags |= MONO_INST_VOLATILE;
2701 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg);
2703 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2704 fad->vreg = float_arg->dreg;
2705 fad->hreg = ainfo->reg + i;
2707 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2708 } else {
2709 add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + (i * 2), load);
2712 break;
2713 default:
/* Pass by value: ainfo->size register slots, remainder memcpy'd to stack */
2714 soffset = 0;
2715 for (i = 0; i < ainfo->size; ++i) {
2716 dreg = mono_alloc_ireg (cfg);
2717 switch (struct_size) {
2718 case 1:
2719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2720 break;
2721 case 2:
2722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2723 break;
2724 case 3:
/* Assemble 3 bytes into one register without reading past the struct */
2725 tmpreg = mono_alloc_ireg (cfg);
2726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2729 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2730 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2732 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2733 break;
2734 default:
2735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2736 break;
2738 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2739 soffset += sizeof (target_mgreg_t);
2740 struct_size -= sizeof (target_mgreg_t);
2742 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2743 if (ovf_size != 0)
2744 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (target_mgreg_t), struct_size), struct_size < 4 ? 1 : 4);
2745 break;
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR to move VAL into the method's return location: OP_SETLRET for
 * 64-bit results, OP_SETFRET / OP_MOVE for floating point depending on the
 * configured FPU, and a plain OP_MOVE otherwise.
 * NOTE(review): text below still carries fused gutter line numbers and is
 * missing dropped brace lines from the extraction.
 */
2749 void
2750 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2752 MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
2754 if (!ret->byref) {
2755 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2756 MonoInst *ins;
2758 if (COMPILE_LLVM (cfg)) {
2759 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2760 } else {
2761 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2762 ins->sreg1 = MONO_LVREG_LS (val->dreg);
2763 ins->sreg2 = MONO_LVREG_MS (val->dreg);
2764 MONO_ADD_INS (cfg->cbb, ins);
2766 return;
2768 switch (arm_fpu) {
2769 case MONO_ARM_FPU_NONE:
2770 if (ret->type == MONO_TYPE_R8) {
2771 MonoInst *ins;
2773 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2774 ins->dreg = cfg->ret->dreg;
2775 ins->sreg1 = val->dreg;
2776 MONO_ADD_INS (cfg->cbb, ins);
2777 return;
2779 if (ret->type == MONO_TYPE_R4) {
2780 /* Already converted to an int in method_to_ir () */
2781 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2782 return;
2784 break;
2785 case MONO_ARM_FPU_VFP:
2786 case MONO_ARM_FPU_VFP_HARD:
2787 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2788 MonoInst *ins;
2790 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2791 ins->dreg = cfg->ret->dreg;
2792 ins->sreg1 = val->dreg;
2793 MONO_ADD_INS (cfg->cbb, ins);
2794 return;
2796 break;
2797 default:
2798 g_assert_not_reached ();
/* Fallback: plain integer/pointer (or byref) return */
2802 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2805 #endif /* #ifndef DISABLE_JIT */
2807 gboolean
2808 mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
2810 return TRUE;
/*
 * ArchDynCallInfo:
 * Precomputed per-signature data used by the dyn-call path
 * (mono_arch_start_dyn_call () / mono_arch_finish_dyn_call ()).
 */
2813 typedef struct {
2814 MonoMethodSignature *sig; /* the call signature (not owned) */
2815 CallInfo *cinfo; /* ARM arg/ret storage classes; freed in mono_arch_dyn_call_free () */
2816 MonoType *rtype; /* underlying return type of sig->ret */
2817 MonoType **param_types; /* underlying types of the parameters, g_new0-allocated */
2818 } ArchDynCallInfo;
/*
 * dyn_call_supported:
 *
 *   Return whether the dyn-call path can handle SIG with the storage
 * classes in CINFO. Rejects unsupported return conventions, unsupported
 * argument storage classes, soft-float R4/R8 arguments and I8/U8
 * arguments.
 * NOTE(review): text below still carries fused gutter line numbers and is
 * missing dropped brace lines from the extraction.
 */
2820 static gboolean
2821 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2823 int i;
2825 switch (cinfo->ret.storage) {
2826 case RegTypeNone:
2827 case RegTypeGeneral:
2828 case RegTypeIRegPair:
2829 case RegTypeStructByAddr:
2830 break;
2831 case RegTypeFP:
2832 if (IS_VFP)
2833 break;
2834 else
2835 return FALSE;
2836 default:
2837 return FALSE;
2840 for (i = 0; i < cinfo->nargs; ++i) {
2841 ArgInfo *ainfo = &cinfo->args [i];
2842 int last_slot;
2844 switch (ainfo->storage) {
2845 case RegTypeGeneral:
2846 case RegTypeIRegPair:
2847 case RegTypeBaseGen:
2848 case RegTypeFP:
2849 break;
2850 case RegTypeBase:
2851 break;
2852 case RegTypeStructByVal:
2853 if (ainfo->size == 0)
2854 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2855 else
2856 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2857 break;
2858 default:
2859 return FALSE;
2863 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2864 for (i = 0; i < sig->param_count; ++i) {
2865 MonoType *t = sig->params [i];
2867 if (t->byref)
2868 continue;
2870 t = mini_get_underlying_type (t);
2872 switch (t->type) {
2873 case MONO_TYPE_R4:
2874 case MONO_TYPE_R8:
2875 if (IS_SOFT_FLOAT)
2876 return FALSE;
2877 else
2878 break;
2880 case MONO_TYPE_I8:
2881 case MONO_TYPE_U8:
2882 return FALSE;
2884 default:
2885 break;
2889 return TRUE;
2892 MonoDynCallInfo*
2893 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2895 ArchDynCallInfo *info;
2896 CallInfo *cinfo;
2897 int i;
2899 cinfo = get_call_info (NULL, sig);
2901 if (!dyn_call_supported (cinfo, sig)) {
2902 g_free (cinfo);
2903 return NULL;
2906 info = g_new0 (ArchDynCallInfo, 1);
2907 // FIXME: Preprocess the info to speed up start_dyn_call ()
2908 info->sig = sig;
2909 info->cinfo = cinfo;
2910 info->rtype = mini_get_underlying_type (sig->ret);
2911 info->param_types = g_new0 (MonoType*, sig->param_count);
2912 for (i = 0; i < sig->param_count; ++i)
2913 info->param_types [i] = mini_get_underlying_type (sig->params [i]);
2915 return (MonoDynCallInfo*)info;
2918 void
2919 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2921 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2923 g_free (ainfo->cinfo);
2924 g_free (ainfo);
2928 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
2930 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2932 g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
2933 return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage;
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS into the DynCallArgs buffer BUF according to the
 * precomputed ArchDynCallInfo: `this`/vret-address first, then each
 * parameter into its register slot (p->regs) or stack slot
 * (PARAM_REGS + offset/4), with VFP doubles/floats going into p->fpregs.
 * NOTE(review): text below still carries fused gutter line numbers and is
 * missing dropped brace lines from the extraction.
 */
2936 void
2937 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
2939 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2940 CallInfo *cinfo = dinfo->cinfo;
2941 DynCallArgs *p = (DynCallArgs*)buf;
2942 int arg_index, greg, i, j, pindex;
2943 MonoMethodSignature *sig = dinfo->sig;
2945 p->res = 0;
2946 p->ret = ret;
2947 p->has_fpregs = 0;
2948 p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);
2950 arg_index = 0;
2951 greg = 0;
2952 pindex = 0;
2954 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2955 p->regs [greg ++] = (host_mgreg_t)(gsize)*(args [arg_index ++]);
2956 if (!sig->hasthis)
2957 pindex = 1;
2960 if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
2961 p->regs [greg ++] = (host_mgreg_t)(gsize)ret;
/* Marshal the remaining parameters into their register/stack slots */
2963 for (i = pindex; i < sig->param_count; i++) {
2964 MonoType *t = dinfo->param_types [i];
2965 gpointer *arg = args [arg_index ++];
2966 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
2967 int slot = -1;
2969 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
2970 slot = ainfo->reg;
2971 } else if (ainfo->storage == RegTypeFP) {
2972 } else if (ainfo->storage == RegTypeBase) {
2973 slot = PARAM_REGS + (ainfo->offset / 4);
2974 } else if (ainfo->storage == RegTypeBaseGen) {
2975 /* slot + 1 is the first stack slot, so the code below will work */
2976 slot = 3;
2977 } else {
2978 g_assert_not_reached ();
2981 if (t->byref) {
2982 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
2983 continue;
2986 switch (t->type) {
2987 case MONO_TYPE_OBJECT:
2988 case MONO_TYPE_PTR:
2989 case MONO_TYPE_I:
2990 case MONO_TYPE_U:
2991 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
2992 break;
2993 case MONO_TYPE_U1:
2994 p->regs [slot] = *(guint8*)arg;
2995 break;
2996 case MONO_TYPE_I1:
2997 p->regs [slot] = *(gint8*)arg;
2998 break;
2999 case MONO_TYPE_I2:
3000 p->regs [slot] = *(gint16*)arg;
3001 break;
3002 case MONO_TYPE_U2:
3003 p->regs [slot] = *(guint16*)arg;
3004 break;
3005 case MONO_TYPE_I4:
3006 p->regs [slot] = *(gint32*)arg;
3007 break;
3008 case MONO_TYPE_U4:
3009 p->regs [slot] = *(guint32*)arg;
3010 break;
3011 case MONO_TYPE_I8:
3012 case MONO_TYPE_U8:
3013 p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
3014 p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
3015 break;
3016 case MONO_TYPE_R4:
3017 if (ainfo->storage == RegTypeFP) {
3018 float f = *(float*)arg;
3019 p->fpregs [ainfo->reg / 2] = *(double*)&f;
3020 p->has_fpregs = 1;
3021 } else {
3022 p->regs [slot] = *(host_mgreg_t*)arg;
3024 break;
3025 case MONO_TYPE_R8:
3026 if (ainfo->storage == RegTypeFP) {
3027 p->fpregs [ainfo->reg / 2] = *(double*)arg;
3028 p->has_fpregs = 1;
3029 } else {
3030 p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
3031 p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
3033 break;
3034 case MONO_TYPE_GENERICINST:
3035 if (MONO_TYPE_IS_REFERENCE (t)) {
3036 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
3037 break;
3038 } else {
3039 if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
3040 MonoClass *klass = mono_class_from_mono_type_internal (t);
3041 guint8 *nullable_buf;
3042 int size;
3044 size = mono_class_value_size (klass, NULL);
3045 nullable_buf = g_alloca (size);
3046 g_assert (nullable_buf);
3048 /* The argument pointed to by arg is either a boxed vtype or null */
3049 mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
3051 arg = (gpointer*)nullable_buf;
3052 /* Fall though */
3053 } else {
3054 /* Fall though */
3057 case MONO_TYPE_VALUETYPE:
3058 g_assert (ainfo->storage == RegTypeStructByVal);
3060 if (ainfo->size == 0)
3061 slot = PARAM_REGS + (ainfo->offset / 4);
3062 else
3063 slot = ainfo->reg;
3065 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
3066 p->regs [slot ++] = ((host_mgreg_t*)arg) [j];
3067 break;
3068 default:
3069 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Store the raw return value captured in the DynCallArgs buffer
 * (p->res/p->res2 for core registers, p->fpregs [0] for VFP) into the
 * caller-supplied return buffer p->ret, widened/typed per the signature's
 * underlying return type.
 * NOTE(review): text below still carries fused gutter line numbers and is
 * missing dropped brace lines from the extraction.
 */
3074 void
3075 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
3077 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
3078 DynCallArgs *p = (DynCallArgs*)buf;
3079 MonoType *ptype = ainfo->rtype;
3080 guint8 *ret = p->ret;
3081 host_mgreg_t res = p->res;
3082 host_mgreg_t res2 = p->res2;
3084 switch (ptype->type) {
3085 case MONO_TYPE_VOID:
3086 *(gpointer*)ret = NULL;
3087 break;
3088 case MONO_TYPE_OBJECT:
3089 case MONO_TYPE_I:
3090 case MONO_TYPE_U:
3091 case MONO_TYPE_PTR:
3092 *(gpointer*)ret = (gpointer)(gsize)res;
3093 break;
3094 case MONO_TYPE_I1:
3095 *(gint8*)ret = res;
3096 break;
3097 case MONO_TYPE_U1:
3098 *(guint8*)ret = res;
3099 break;
3100 case MONO_TYPE_I2:
3101 *(gint16*)ret = res;
3102 break;
3103 case MONO_TYPE_U2:
3104 *(guint16*)ret = res;
3105 break;
3106 case MONO_TYPE_I4:
3107 *(gint32*)ret = res;
3108 break;
3109 case MONO_TYPE_U4:
3110 *(guint32*)ret = res;
3111 break;
3112 case MONO_TYPE_I8:
3113 case MONO_TYPE_U8:
3114 /* This handles endianness as well */
3115 ((gint32*)ret) [0] = res;
3116 ((gint32*)ret) [1] = res2;
3117 break;
3118 case MONO_TYPE_GENERICINST:
3119 if (MONO_TYPE_IS_REFERENCE (ptype)) {
3120 *(gpointer*)ret = (gpointer)res;
3121 break;
3122 } else {
3123 /* Fall though */
3125 case MONO_TYPE_VALUETYPE:
3126 g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr);
3127 /* Nothing to do */
3128 break;
/* Float results: VFP register on hard-float, core register(s) otherwise */
3129 case MONO_TYPE_R4:
3130 g_assert (IS_VFP);
3131 if (IS_HARD_FLOAT)
3132 *(float*)ret = *(float*)&p->fpregs [0];
3133 else
3134 *(float*)ret = *(float*)&res;
3135 break;
3136 case MONO_TYPE_R8: {
3137 host_mgreg_t regs [2];
3139 g_assert (IS_VFP);
3140 if (IS_HARD_FLOAT) {
3141 *(double*)ret = p->fpregs [0];
3142 } else {
3143 regs [0] = res;
3144 regs [1] = res2;
3146 *(double*)ret = *(double*)&regs;
3148 break;
3150 default:
3151 g_assert_not_reached ();
/*
 * Code-emission helper macros for the native-code generation pass:
 * conditional branches are emitted with a 0 displacement plus a patch-info
 * entry (MONO_PATCH_INFO_BB / MONO_PATCH_INFO_EXC) so the real target is
 * filled in once basic-block offsets are known.
 * NOTE(review): the macro bodies below still carry fused gutter line
 * numbers and are missing dropped brace lines from the extraction; no
 * comments are inserted between backslash-continued lines.
 */
3155 #ifndef DISABLE_JIT
3158 * The immediate field for cond branches is big enough for all reasonable methods
3160 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3161 if (0 && ins->inst_true_bb->native_offset) { \
3162 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3163 } else { \
3164 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3165 ARM_B_COND (code, (condcode), 0); \
3168 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3170 /* emit an exception if condition is fail
3172 * We assign the extra code used to throw the implicit exceptions
3173 * to cfg->bb_exit as far as the big branch handling is concerned
3175 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3176 do { \
3177 mono_add_patch_info (cfg, code - cfg->native_code, \
3178 MONO_PATCH_INFO_EXC, exc_name); \
3179 ARM_BL_COND (code, (condcode), 0); \
3180 } while (0);
3182 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3184 void
3185 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Second arch-specific peephole pass over BB: forwards stored values
 * into subsequent loads of the same [basereg+offset] (turning the load
 * into a move or deleting it), narrows store+load pairs into sign/zero
 * extensions, and removes redundant register moves.
 * NOTE(review): text below still carries fused gutter line numbers and is
 * missing dropped brace/comment-delimiter lines from the extraction.
 */
3189 void
3190 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3192 MonoInst *ins, *n;
3194 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3195 MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
3197 switch (ins->opcode) {
3198 case OP_MUL_IMM:
3199 case OP_IMUL_IMM:
3200 /* Already done by an arch-independent pass */
3201 break;
3202 case OP_LOAD_MEMBASE:
3203 case OP_LOADI4_MEMBASE:
3205 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3206 * OP_LOAD_MEMBASE offset(basereg), reg
3208 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3209 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3210 ins->inst_basereg == last_ins->inst_destbasereg &&
3211 ins->inst_offset == last_ins->inst_offset) {
3212 if (ins->dreg == last_ins->sreg1) {
3213 MONO_DELETE_INS (bb, ins);
3214 continue;
3215 } else {
3216 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3217 ins->opcode = OP_MOVE;
3218 ins->sreg1 = last_ins->sreg1;
3222 * Note: reg1 must be different from the basereg in the second load
3223 * OP_LOAD_MEMBASE offset(basereg), reg1
3224 * OP_LOAD_MEMBASE offset(basereg), reg2
3225 * -->
3226 * OP_LOAD_MEMBASE offset(basereg), reg1
3227 * OP_MOVE reg1, reg2
3229 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3230 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3231 ins->inst_basereg != last_ins->dreg &&
3232 ins->inst_basereg == last_ins->inst_basereg &&
3233 ins->inst_offset == last_ins->inst_offset) {
3235 if (ins->dreg == last_ins->dreg) {
3236 MONO_DELETE_INS (bb, ins);
3237 continue;
3238 } else {
3239 ins->opcode = OP_MOVE;
3240 ins->sreg1 = last_ins->dreg;
3243 //g_assert_not_reached ();
3245 #if 0
3247 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3248 * OP_LOAD_MEMBASE offset(basereg), reg
3249 * -->
3250 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3251 * OP_ICONST reg, imm
3253 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3254 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3255 ins->inst_basereg == last_ins->inst_destbasereg &&
3256 ins->inst_offset == last_ins->inst_offset) {
3257 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3258 ins->opcode = OP_ICONST;
3259 ins->inst_c0 = last_ins->inst_imm;
3260 g_assert_not_reached (); // check this rule
3261 #endif
3263 break;
/* Byte store followed by byte load of the same slot -> extension of the stored reg */
3264 case OP_LOADU1_MEMBASE:
3265 case OP_LOADI1_MEMBASE:
3266 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3267 ins->inst_basereg == last_ins->inst_destbasereg &&
3268 ins->inst_offset == last_ins->inst_offset) {
3269 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3270 ins->sreg1 = last_ins->sreg1;
3272 break;
3273 case OP_LOADU2_MEMBASE:
3274 case OP_LOADI2_MEMBASE:
3275 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3276 ins->inst_basereg == last_ins->inst_destbasereg &&
3277 ins->inst_offset == last_ins->inst_offset) {
3278 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3279 ins->sreg1 = last_ins->sreg1;
3281 break;
3282 case OP_MOVE:
3283 ins->opcode = OP_MOVE;
3285 * OP_MOVE reg, reg
3287 if (ins->dreg == ins->sreg1) {
3288 MONO_DELETE_INS (bb, ins);
3289 continue;
3292 * OP_MOVE sreg, dreg
3293 * OP_MOVE dreg, sreg
3295 if (last_ins && last_ins->opcode == OP_MOVE &&
3296 ins->sreg1 == last_ins->dreg &&
3297 ins->dreg == last_ins->sreg1) {
3298 MONO_DELETE_INS (bb, ins);
3299 continue;
3301 break;
3307 * the branch_cc_table should maintain the order of these
3308 * opcodes.
3309 case CEE_BEQ:
3310 case CEE_BGE:
3311 case CEE_BGT:
3312 case CEE_BLE:
3313 case CEE_BLT:
3314 case CEE_BNE_UN:
3315 case CEE_BGE_UN:
3316 case CEE_BGT_UN:
3317 case CEE_BLE_UN:
3318 case CEE_BLT_UN:
3320 static const guchar
3321 branch_cc_table [] = {
3322 ARMCOND_EQ,
3323 ARMCOND_GE,
3324 ARMCOND_GT,
3325 ARMCOND_LE,
3326 ARMCOND_LT,
3328 ARMCOND_NE,
3329 ARMCOND_HS,
3330 ARMCOND_HI,
3331 ARMCOND_LS,
3332 ARMCOND_LO
/*
 * Allocate a fresh MonoInst DEST with opcode OP and insert it into the
 * instruction list immediately before the instruction being lowered.
 * NOTE: deliberately captures the 'bb' and 'ins' locals of the enclosing
 * function (mono_arch_lowering_pass), so it is only usable there.
 */
#define ADD_NEW_INS(cfg,dest,op) do {	\
		MONO_INST_NEW ((cfg), (dest), (op)); \
		mono_bblock_insert_before_ins (bb, ins, (dest)); \
	} while (0)
/*
 * Map an immediate-operand opcode to the equivalent opcode that takes its
 * second operand in a register (or a reg-indexed addressing mode for
 * loads/stores).  Used by the lowering pass after it has materialized the
 * immediate/offset into a scratch register with ADD_NEW_INS.
 * Aborts on opcodes that have no mapping.
 */
static int
map_to_reg_reg_op (int op)
{
	switch (op) {
	case OP_ADD_IMM:
		return OP_IADD;
	case OP_SUB_IMM:
		return OP_ISUB;
	case OP_AND_IMM:
		return OP_IAND;
	case OP_COMPARE_IMM:
		return OP_COMPARE;
	case OP_ICOMPARE_IMM:
		return OP_ICOMPARE;
	case OP_ADDCC_IMM:
		return OP_ADDCC;
	case OP_ADC_IMM:
		return OP_ADC;
	case OP_SUBCC_IMM:
		return OP_SUBCC;
	case OP_SBB_IMM:
		return OP_SBB;
	case OP_OR_IMM:
		return OP_IOR;
	case OP_XOR_IMM:
		return OP_IXOR;
	/* membase loads with an out-of-range offset become reg+reg indexed loads */
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEMINDEX;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEMINDEX;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEMINDEX;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEMINDEX;
	case OP_LOADI2_MEMBASE:
		return OP_LOADI2_MEMINDEX;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEMINDEX;
	case OP_LOADI1_MEMBASE:
		return OP_LOADI1_MEMINDEX;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMINDEX;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMINDEX;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMINDEX;
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMINDEX;
	case OP_STORER4_MEMBASE_REG:
		return OP_STORER4_MEMINDEX;
	case OP_STORER8_MEMBASE_REG:
		return OP_STORER8_MEMINDEX;
	/*
	 * store-immediate only maps the *value* into a register; the offset may
	 * still be lowered in a second pass (see the goto loop_start in
	 * mono_arch_lowering_pass).
	 */
	case OP_STORE_MEMBASE_IMM:
		return OP_STORE_MEMBASE_REG;
	case OP_STOREI1_MEMBASE_IMM:
		return OP_STOREI1_MEMBASE_REG;
	case OP_STOREI2_MEMBASE_IMM:
		return OP_STOREI2_MEMBASE_REG;
	case OP_STOREI4_MEMBASE_IMM:
		return OP_STOREI4_MEMBASE_REG;
	}
	g_assert_not_reached ();
}
/*
 * Remove from the instruction list the instructions that can't be
 * represented with very simple instructions with no register
 * requirements.
 *
 * Rewrites opcodes whose immediate/offset does not fit the ARM encodings
 * into equivalent sequences that first materialize the constant into a
 * fresh vreg (via ADD_NEW_INS) and then use a register form of the
 * operation (via map_to_reg_reg_op).
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *temp, *last_ins = NULL;
	int rot_amount, imm8, low_imm;

	MONO_BB_FOR_EACH_INS (bb, ins) {
loop_start:
		switch (ins->opcode) {
		case OP_ADD_IMM:
		case OP_SUB_IMM:
		case OP_AND_IMM:
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		case OP_ADDCC_IMM:
		case OP_ADC_IMM:
		case OP_SUBCC_IMM:
		case OP_SBB_IMM:
		case OP_OR_IMM:
		case OP_XOR_IMM:
		case OP_IADD_IMM:
		case OP_ISUB_IMM:
		case OP_IAND_IMM:
		case OP_IADC_IMM:
		case OP_ISBB_IMM:
		case OP_IOR_IMM:
		case OP_IXOR_IMM:
			/* ALU immediates must be encodable as a rotated 8-bit constant */
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
				int opcode2 = mono_op_imm_to_op (ins->opcode);
				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				if (opcode2 == -1)
					g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
				ins->opcode = opcode2;
			}
			/* the reg form of these opcodes needs further fixing up below */
			if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
				goto loop_start;
			else
				break;
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* strength-reduce multiplies by 1, 0 and powers of two */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			if (ins->inst_imm == 0) {
				ins->opcode = OP_ICONST;
				ins->inst_c0 = 0;
				break;
			}
			imm8 = mono_is_power_of_two (ins->inst_imm);
			if (imm8 > 0) {
				ins->opcode = OP_SHL_IMM;
				ins->inst_imm = imm8;
				break;
			}
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = OP_IMUL;
			break;
		case OP_SBB:
		case OP_ISBB:
		case OP_SUBCC:
		case OP_ISUBCC: {
			int try_count = 2;
			MonoInst *current = ins;

			/* may require a look-ahead of a couple instructions due to spilling */
			while (try_count-- && current->next) {
				if (current->next->opcode == OP_COND_EXC_C || current->next->opcode == OP_COND_EXC_IC) {
					/* ARM sets the C flag to 1 if there was _no_ overflow */
					current->next->opcode = OP_COND_EXC_NC;
					break;
				}
				current = current->next;
			}
			break;
		}
		case OP_IDIV_IMM:
		case OP_IDIV_UN_IMM:
		case OP_IREM_IMM:
		case OP_IREM_UN_IMM: {
			/* no div-by-immediate instruction: load divisor into a register */
			int opcode2 = mono_op_imm_to_op (ins->opcode);
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			if (opcode2 == -1)
				g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
			ins->opcode = opcode2;
			break;
		}
		case OP_LOCALLOC_IMM:
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = OP_LOCALLOC;
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADU1_MEMBASE:
			/* we can do two things: load the immed in a register
			 * and use an indexed load, or see if the immed can be
			 * represented as an add_imm + a load with a smaller offset
			 * that fits. We just do the first for now, optimize later.
			 */
			if (arm_is_imm12 (ins->inst_offset))
				break;
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI1_MEMBASE:
			/* halfword/signed-byte loads only take an 8-bit offset */
			if (arm_is_imm8 (ins->inst_offset))
				break;
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
			if (arm_is_fpimm8 (ins->inst_offset))
				break;
			/*
			 * VFP loads have no reg+reg form: either split the offset into
			 * an encodable add_imm + small residue, or compute the full
			 * address into a fresh register.
			 */
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
				ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_basereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = temp->dreg;
				ins->inst_offset = low_imm;
			} else {
				MonoInst *add_ins;

				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);

				ADD_NEW_INS (cfg, add_ins, OP_IADD);
				add_ins->sreg1 = ins->inst_basereg;
				add_ins->sreg2 = temp->dreg;
				add_ins->dreg = mono_alloc_ireg (cfg);

				ins->inst_basereg = add_ins->dreg;
				ins->inst_offset = 0;
			}
			break;
		case OP_STORE_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI1_MEMBASE_REG:
			if (arm_is_imm12 (ins->inst_offset))
				break;
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_STOREI2_MEMBASE_REG:
			if (arm_is_imm8 (ins->inst_offset))
				break;
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
			if (arm_is_fpimm8 (ins->inst_offset))
				break;
			/* same offset-splitting strategy as the VFP loads above */
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
				ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_destbasereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_destbasereg = temp->dreg;
				ins->inst_offset = low_imm;
			} else {
				MonoInst *add_ins;

				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);

				ADD_NEW_INS (cfg, add_ins, OP_IADD);
				add_ins->sreg1 = ins->inst_destbasereg;
				add_ins->sreg2 = temp->dreg;
				add_ins->dreg = mono_alloc_ireg (cfg);

				ins->inst_destbasereg = add_ins->dreg;
				ins->inst_offset = 0;
			}
			break;
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
			/* first move the value into a register ... */
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			last_ins = temp;
			goto loop_start; /* make it handle the possibly big ins->inst_offset */
		case OP_FCOMPARE:
		case OP_RCOMPARE: {
			gboolean swap = FALSE;
			int reg;

			if (!ins->next) {
				/* Optimized away */
				NULLIFY_INS (ins);
				break;
			}

			/* Some fp compares require swapped operands */
			switch (ins->next->opcode) {
			case OP_FBGT:
				ins->next->opcode = OP_FBLT;
				swap = TRUE;
				break;
			case OP_FBGT_UN:
				ins->next->opcode = OP_FBLT_UN;
				swap = TRUE;
				break;
			case OP_FBLE:
				ins->next->opcode = OP_FBGE;
				swap = TRUE;
				break;
			case OP_FBLE_UN:
				ins->next->opcode = OP_FBGE_UN;
				swap = TRUE;
				break;
			default:
				break;
			}
			if (swap) {
				reg = ins->sreg1;
				ins->sreg1 = ins->sreg2;
				ins->sreg2 = reg;
			}
			break;
		}
		}

		last_ins = ins;
	}
	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
}
3674 void
3675 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3677 MonoInst *ins;
3679 if (long_ins->opcode == OP_LNEG) {
3680 ins = long_ins;
3681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0);
3682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0);
3683 NULLIFY_INS (ins);
/*
 * Emit code that converts the double in VFP register 'sreg' into an integer
 * in core register 'dreg' (truncating conversion), then narrows the result
 * to 'size' bytes: zero-extension when !is_signed, sign-extension otherwise.
 * Sizes other than 1 and 2 keep the full 32-bit result.  Returns the
 * advanced code pointer.
 */
static guchar*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a float, dreg is an integer reg */
	if (IS_VFP) {
		code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
		if (is_signed)
			ARM_TOSIZD (code, vfp_scratch1, sreg);
		else
			ARM_TOUIZD (code, vfp_scratch1, sreg);
		/* move the converted value from the VFP scratch reg into the core reg */
		ARM_FMRS (code, dreg, vfp_scratch1);
		code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
	}
	if (!is_signed) {
		if (size == 1)
			ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
		else if (size == 2) {
			/* zero-extend 16 bits: shift up, then logical shift back down */
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SHR_IMM (code, dreg, dreg, 16);
		}
	} else {
		if (size == 1) {
			/* sign-extend 8 bits: shift up, then arithmetic shift back down */
			ARM_SHL_IMM (code, dreg, dreg, 24);
			ARM_SAR_IMM (code, dreg, dreg, 24);
		} else if (size == 2) {
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SAR_IMM (code, dreg, dreg, 16);
		}
	}
	return code;
}
/*
 * Single-precision counterpart of emit_float_to_int: convert the float in
 * VFP register 'sreg' to an integer in core register 'dreg' (truncating),
 * then zero-/sign-extend to 'size' bytes.  Requires VFP hardware.
 */
static guchar*
emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a float, dreg is an integer reg */
	g_assert (IS_VFP);
	code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
	if (is_signed)
		ARM_TOSIZS (code, vfp_scratch1, sreg);
	else
		ARM_TOUIZS (code, vfp_scratch1, sreg);
	/* move the converted value from the VFP scratch reg into the core reg */
	ARM_FMRS (code, dreg, vfp_scratch1);
	code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);

	if (!is_signed) {
		if (size == 1)
			ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
		else if (size == 2) {
			/* zero-extend 16 bits */
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SHR_IMM (code, dreg, dreg, 16);
		}
	} else {
		if (size == 1) {
			/* sign-extend 8 bits */
			ARM_SHL_IMM (code, dreg, dreg, 24);
			ARM_SAR_IMM (code, dreg, dreg, 24);
		} else if (size == 2) {
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SAR_IMM (code, dreg, dreg, 16);
		}
	}
	return code;
}
3751 #endif /* #ifndef DISABLE_JIT */
/* TRUE if 'diff' fits the signed +/-32MB displacement range of an ARM B/BL instruction */
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * Write a branch thunk at 'code' that transfers control to 'target'.
 * Layout (3 words): ldr ip, [pc, #0]  -- loads the inline target word
 *                   bx ip / mov pc, ip
 *                   .word target      -- at PC+8 relative to the ldr
 * Flushes the icache for the emitted range.
 */
static void
emit_thunk (guint8 *code, gconstpointer target)
{
	guint8 *p = code;

	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
	if (thumb_supported)
		ARM_BX (code, ARMREG_IP);		/* bx allows Thumb interworking */
	else
		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
	*(guint32*)code = (guint32)(gsize)target;
	code += 4;
	mono_arch_flush_icache (p, code - p);
}
/*
 * Redirect the call at 'code' to 'target' through a branch thunk; used when
 * the target is outside direct B/BL range.  With a cfg (JIT time), thunk
 * slots are handed out sequentially from the method's reserved thunk area.
 * Without a cfg (runtime patching), the thunk area of the containing
 * MonoJitInfo is searched under the arch lock for a free slot or one that
 * already points at 'target'.  Aborts if no slot is available.
 */
static void
handle_thunk (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
{
	MonoJitInfo *ji = NULL;
	MonoThunkJitInfo *info;
	guint8 *thunks, *p;
	int thunks_size;
	guint8 *orig_target;
	guint8 *target_thunk;

	if (!domain)
		domain = mono_domain_get ();

	if (cfg) {
		/*
		 * This can be called multiple times during JITting,
		 * save the current position in cfg->arch to avoid
		 * doing a O(n^2) search.
		 */
		if (!cfg->arch.thunks) {
			cfg->arch.thunks = cfg->thunks;
			cfg->arch.thunks_size = cfg->thunk_area;
		}
		thunks = cfg->arch.thunks;
		thunks_size = cfg->arch.thunks_size;
		if (!thunks_size) {
			g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
			g_assert_not_reached ();
		}

		/* the next slot must still be unused (first word zero) */
		g_assert (*(guint32*)thunks == 0);
		emit_thunk (thunks, target);
		arm_patch (code, thunks);

		cfg->arch.thunks += THUNK_SIZE;
		cfg->arch.thunks_size -= THUNK_SIZE;
	} else {
		ji = mini_jit_info_table_find (domain, (char*)code, NULL);
		g_assert (ji);
		info = mono_jit_info_get_thunk_info (ji);
		g_assert (info);

		thunks = (guint8*)ji->code_start + info->thunks_offset;
		thunks_size = info->thunks_size;

		orig_target = mono_arch_get_call_target (code + 4);

		mono_mini_arch_lock ();

		target_thunk = NULL;
		if (orig_target >= thunks && orig_target < thunks + thunks_size) {
			/* The call already points to a thunk, because of trampolines etc. */
			target_thunk = orig_target;
		} else {
			for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
				if (((guint32*)p) [0] == 0) {
					/* Free entry */
					target_thunk = p;
					break;
				} else if (((guint32*)p) [2] == (guint32)(gsize)target) {
					/* Thunk already points to target (word 2 holds the address) */
					target_thunk = p;
					break;
				}
			}
		}

		//g_print ("THUNK: %p %p %p\n", code, target, target_thunk);

		if (!target_thunk) {
			mono_mini_arch_unlock ();
			g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
			g_assert_not_reached ();
		}

		emit_thunk (target_thunk, target);
		arm_patch (code, target_thunk);
		mono_arch_flush_icache (code, 4);

		mono_mini_arch_unlock ();
	}
}
/*
 * Patch the call/branch site at 'code' so it transfers to 'target'.
 * Handles: direct B/BL/BLX (rewriting the 24-bit displacement, falling back
 * to a thunk when out of range), and the several constant-pool based call
 * sequences emitted by this backend (the address word is overwritten in
 * place).  A target with the low bit set means a Thumb-mode destination.
 */
static void
arm_patch_general (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
{
	guint32 *code32 = (guint32*)code;
	guint32 ins = *code32;
	guint32 prim = (ins >> 25) & 7;		/* primary opcode field; 101b == B/BL */
	guint32 tval = GPOINTER_TO_UINT (target);

	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
	if (prim == 5) { /* 101b */
		/* the diff starts 8 bytes from the branch opcode */
		gint diff = target - code - 8;
		gint tbits;
		gint tmask = 0xffffffff;
		if (tval & 1) { /* entering thumb mode */
			diff = target - 1 - code - 8;
			g_assert (thumb_supported);
			tbits = 0xf << 28; /* bl->blx bit pattern */
			g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
			/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
			if (diff & 2) {
				tbits |= 1 << 24;
			}
			tmask = ~(1 << 24); /* clear the link bit */
			/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
		} else {
			tbits = 0;
		}
		if (diff >= 0) {
			if (diff <= 33554431) {
				/* rewrite the 24-bit word displacement in place */
				diff >>= 2;
				ins = (ins & 0xff000000) | diff;
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		} else {
			/* diff between 0 and -33554432 */
			if (diff >= -33554432) {
				diff >>= 2;
				ins = (ins & 0xff000000) | (diff & ~0xff000000);
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		}

		/* out of direct branch range: route the call through a thunk */
		handle_thunk (cfg, domain, code, target);
		return;
	}

	/*
	 * The alternative call sequences looks like this:
	 *
	 * 	ldr ip, [pc] // loads the address constant
	 * 	b 1f         // jumps around the constant
	 * 	address constant embedded in the code
	 *   1f:
	 * 	mov lr, pc
	 * 	mov pc, ip
	 *
	 * There are two cases for patching:
	 * a) at the end of method emission: in this case code points to the start
	 *    of the call sequence
	 * b) during runtime patching of the call site: in this case code points
	 *    to the mov pc, ip instruction
	 *
	 * We have to handle also the thunk jump code sequence:
	 *
	 * 	ldr ip, [pc]
	 * 	mov pc, ip
	 * 	address constant // execution never reaches here
	 */
	if ((ins & 0x0ffffff0) == 0x12fff10) {
		/* Branch and exchange: the address is constructed in a reg
		 * We can patch BX when the code sequence is the following:
		 *  ldr     ip, [pc, #0]    ; 0x8
		 *  b       0xc
		 *  .word code_ptr
		 *  mov     lr, pc
		 *  bx      ips
		 * */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		/* re-emit the expected sequence into a scratch buffer to compare against */
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_BX (emit, ARMREG_IP);

		/*patching from magic trampoline*/
		if (ins == ccode [3]) {
			g_assert (code32 [-4] == ccode [0]);
			g_assert (code32 [-3] == ccode [1]);
			g_assert (code32 [-1] == ccode [2]);
			code32 [-2] = (guint32)(gsize)target;
			return;
		}
		/*patching from JIT*/
		if (ins == ccode [0]) {
			g_assert (code32 [1] == ccode [1]);
			g_assert (code32 [3] == ccode [2]);
			g_assert (code32 [4] == ccode [3]);
			code32 [2] = (guint32)(gsize)target;
			return;
		}
		g_assert_not_reached ();
	} else if ((ins & 0x0ffffff0) == 0x12fff30) {
		/*
		 * ldr ip, [pc, #0]
		 * b 0xc
		 * .word code_ptr
		 * blx ip
		 */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_BLX_REG (emit, ARMREG_IP);

		g_assert (code32 [-3] == ccode [0]);
		g_assert (code32 [-2] == ccode [1]);
		g_assert (code32 [0] == ccode [2]);

		/* overwrite the inline address constant */
		code32 [-1] = (guint32)(gsize)target;
	} else {
		guint32 ccode [4];
		guint32 *tmp = ccode;
		guint8 *emit = (guint8*)tmp;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
		ARM_BX (emit, ARMREG_IP);
		if (ins == ccode [2]) {
			g_assert_not_reached (); // should be -2 ...
			code32 [-1] = (guint32)(gsize)target;
			return;
		}
		if (ins == ccode [0]) {
			/* handles both thunk jump code and the far call sequence */
			code32 [2] = (guint32)(gsize)target;
			return;
		}
		g_assert_not_reached ();
	}
//	g_print ("patched with 0x%08x\n", ins);
}
/* Convenience wrapper: patch the call/branch at 'code' to 'target' with no cfg/domain context. */
void
arm_patch (guchar *code, const guchar *target)
{
	arm_patch_general (NULL, NULL, code, target);
}
/*
 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
 * to be used with the emit macros).
 * Return -1 otherwise.
 */
int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
	guint32 i;

	/* try every even right-rotation; ARM immediates rotate in steps of 2 */
	for (i = 0; i < 31; i += 2) {
		/* i == 0 is handled separately: a shift by 32 would be undefined */
		guint32 rotated = (i == 0) ? val : ((val << (32 - i)) | (val >> i));
		if ((rotated & ~(guint32)0xff) == 0) {
			*rot_amount = i ? 32 - i : 0;
			return rotated;
		}
	}
	return -1;
}
/*
 * Emits in code a sequence of instructions that load the value 'val'
 * into the dreg register. Uses at most 4 instructions.
 * Tries, in order: a single rotated-imm8 MOV, a rotated-imm8 MVN of the
 * complement, a MOVW/MOVT pair on ARMv7, and finally a MOV + up to three
 * rotated ADDs building the value byte by byte.
 */
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
	int imm8, rot_amount;
#if 0
	/* disabled constant-pool variant, kept for reference */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* skip the constant pool */
	ARM_B (code, 0);
	*(int*)code = val;
	code += 4;
	return code;
#endif
	/* debug mode: force a fixed-size 2-instruction sequence on v7 */
	if (mini_debug_options.single_imm_size && v7_supported) {
		ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
		ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
		return code;
	}

	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
		/* the complement is encodable: use move-not */
		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
	} else {
		if (v7_supported) {
			ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
			if (val >> 16)
				ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
			return code;
		}
		/* pre-v7: build the value from its non-zero bytes, lowest first */
		if (val & 0xFF) {
			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
			if (val & 0xFF00) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
			}
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF00) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF0000) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		}
		//g_assert_not_reached ();
	}
	return code;
}
/* Returns the cached thumb_supported capability flag. */
gboolean
mono_arm_thumb_supported (void)
{
	return thumb_supported;
}
/* Returns the cached eabi_supported capability flag. */
gboolean
mono_arm_eabi_supported (void)
{
	return eabi_supported;
}
/* Returns the cached alignment (in bytes) used for 64-bit values on this platform. */
int
mono_arm_i8_align (void)
{
	return i8_align;
}
4110 #ifndef DISABLE_JIT
/*
 * Emit the code that moves a call's native return value into the vreg/slot
 * the JIT expects.  Structs returned in registers (RegTypeStructByVal) and
 * homogeneous FP aggregates (RegTypeHFA) are stored to the caller-provided
 * return buffer whose address lives at cfg->arch.vret_addr_loc; float
 * returns are moved out of r0/r1 or the VFP return registers depending on
 * the float ABI.  Returns the advanced code pointer.
 */
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
	CallInfo *cinfo;
	MonoCallInst *call;

	call = (MonoCallInst*)ins;
	cinfo = call->call_info;

	switch (cinfo->ret.storage) {
	case RegTypeStructByVal:
	case RegTypeHFA: {
		MonoInst *loc = cfg->arch.vret_addr_loc;
		int i;

		if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
			/* The JIT treats this as a normal call */
			break;
		}

		/* Load the destination address */
		g_assert (loc && loc->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (loc->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset);
			ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR);
		}

		if (cinfo->ret.storage == RegTypeStructByVal) {
			int rsize = cinfo->ret.struct_size;

			/* store r0..r(nregs-1); the tail register may hold a partial word */
			for (i = 0; i < cinfo->ret.nregs; ++i) {
				g_assert (rsize >= 0);
				switch (rsize) {
				case 0:
					break;
				case 1:
					ARM_STRB_IMM (code, i, ARMREG_LR, i * 4);
					break;
				case 2:
					ARM_STRH_IMM (code, i, ARMREG_LR, i * 4);
					break;
				default:
					ARM_STR_IMM (code, i, ARMREG_LR, i * 4);
					break;
				}
				rsize -= 4;
			}
		} else {
			/* HFA: store each float/double element from its VFP register */
			for (i = 0; i < cinfo->ret.nregs; ++i) {
				if (cinfo->ret.esize == 4)
					ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4);
				else
					ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8);
			}
		}
		return code;
	}
	default:
		break;
	}

	switch (ins->opcode) {
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
		if (IS_VFP) {
			MonoType *sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
			if (sig_ret->type == MONO_TYPE_R4) {
				if (IS_HARD_FLOAT) {
					ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
				} else {
					/* softfp: r4 result arrives in r0 */
					ARM_FMSR (code, ins->dreg, ARMREG_R0);
					ARM_CVTS (code, ins->dreg, ins->dreg);
				}
			} else {
				if (IS_HARD_FLOAT) {
					ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
				} else {
					/* softfp: r8 result arrives in r0:r1 */
					ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
				}
			}
		}
		break;
	case OP_RCALL:
	case OP_RCALL_REG:
	case OP_RCALL_MEMBASE: {
		MonoType *sig_ret;

		g_assert (IS_VFP);

		sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
		g_assert (sig_ret->type == MONO_TYPE_R4);
		if (IS_HARD_FLOAT) {
			ARM_CPYS (code, ins->dreg, ARM_VFP_F0);
		} else {
			ARM_FMSR (code, ins->dreg, ARMREG_R0);
			ARM_CPYS (code, ins->dreg, ins->dreg);
		}
		break;
	}
	default:
		break;
	}

	return code;
}
4222 void
4223 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4225 MonoInst *ins;
4226 MonoCallInst *call;
4227 guint8 *code = cfg->native_code + cfg->code_len;
4228 MonoInst *last_ins = NULL;
4229 int max_len, cpos;
4230 int imm8, rot_amount;
4232 /* we don't align basic blocks of loops on arm */
4234 if (cfg->verbose_level > 2)
4235 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4237 cpos = bb->max_offset;
4239 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4240 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4241 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
4242 code = emit_call_seq (cfg, code);
4245 MONO_BB_FOR_EACH_INS (bb, ins) {
4246 guint offset = code - cfg->native_code;
4247 set_code_cursor (cfg, code);
4248 max_len = ins_get_size (ins->opcode);
4249 code = realloc_code (cfg, max_len);
4250 // if (ins->cil_code)
4251 // g_print ("cil code\n");
4252 mono_debug_record_line_number (cfg, ins, offset);
4254 switch (ins->opcode) {
4255 case OP_MEMORY_BARRIER:
4256 if (v7_supported) {
4257 ARM_DMB (code, ARM_DMB_ISH);
4258 } else if (v6_supported) {
4259 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4260 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4262 break;
4263 case OP_TLS_GET:
4264 code = emit_tls_get (code, ins->dreg, ins->inst_offset);
4265 break;
4266 case OP_TLS_SET:
4267 code = emit_tls_set (code, ins->sreg1, ins->inst_offset);
4268 break;
4269 case OP_ATOMIC_EXCHANGE_I4:
4270 case OP_ATOMIC_CAS_I4:
4271 case OP_ATOMIC_ADD_I4: {
4272 int tmpreg;
4273 guint8 *buf [16];
4275 g_assert (v7_supported);
4277 /* Free up a reg */
4278 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4279 tmpreg = ARMREG_IP;
4280 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4281 tmpreg = ARMREG_R0;
4282 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4283 tmpreg = ARMREG_R1;
4284 else
4285 tmpreg = ARMREG_R2;
4286 g_assert (cfg->arch.atomic_tmp_offset != -1);
4287 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4289 switch (ins->opcode) {
4290 case OP_ATOMIC_EXCHANGE_I4:
4291 buf [0] = code;
4292 ARM_DMB (code, ARM_DMB_ISH);
4293 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4294 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4295 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4296 buf [1] = code;
4297 ARM_B_COND (code, ARMCOND_NE, 0);
4298 arm_patch (buf [1], buf [0]);
4299 break;
4300 case OP_ATOMIC_CAS_I4:
4301 ARM_DMB (code, ARM_DMB_ISH);
4302 buf [0] = code;
4303 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4304 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4305 buf [1] = code;
4306 ARM_B_COND (code, ARMCOND_NE, 0);
4307 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4308 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4309 buf [2] = code;
4310 ARM_B_COND (code, ARMCOND_NE, 0);
4311 arm_patch (buf [2], buf [0]);
4312 arm_patch (buf [1], code);
4313 break;
4314 case OP_ATOMIC_ADD_I4:
4315 buf [0] = code;
4316 ARM_DMB (code, ARM_DMB_ISH);
4317 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4318 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4319 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4320 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4321 buf [1] = code;
4322 ARM_B_COND (code, ARMCOND_NE, 0);
4323 arm_patch (buf [1], buf [0]);
4324 break;
4325 default:
4326 g_assert_not_reached ();
4329 ARM_DMB (code, ARM_DMB_ISH);
4330 if (tmpreg != ins->dreg)
4331 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4332 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4333 break;
4335 case OP_ATOMIC_LOAD_I1:
4336 case OP_ATOMIC_LOAD_U1:
4337 case OP_ATOMIC_LOAD_I2:
4338 case OP_ATOMIC_LOAD_U2:
4339 case OP_ATOMIC_LOAD_I4:
4340 case OP_ATOMIC_LOAD_U4:
4341 case OP_ATOMIC_LOAD_R4:
4342 case OP_ATOMIC_LOAD_R8: {
4343 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4344 ARM_DMB (code, ARM_DMB_ISH);
4346 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4348 switch (ins->opcode) {
4349 case OP_ATOMIC_LOAD_I1:
4350 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4351 break;
4352 case OP_ATOMIC_LOAD_U1:
4353 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4354 break;
4355 case OP_ATOMIC_LOAD_I2:
4356 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4357 break;
4358 case OP_ATOMIC_LOAD_U2:
4359 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4360 break;
4361 case OP_ATOMIC_LOAD_I4:
4362 case OP_ATOMIC_LOAD_U4:
4363 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4364 break;
4365 case OP_ATOMIC_LOAD_R4:
4366 if (cfg->r4fp) {
4367 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4368 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4369 } else {
4370 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4371 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4372 ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0);
4373 ARM_CVTS (code, ins->dreg, vfp_scratch1);
4374 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4376 break;
4377 case OP_ATOMIC_LOAD_R8:
4378 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4379 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4380 break;
4383 if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
4384 ARM_DMB (code, ARM_DMB_ISH);
4385 break;
4387 case OP_ATOMIC_STORE_I1:
4388 case OP_ATOMIC_STORE_U1:
4389 case OP_ATOMIC_STORE_I2:
4390 case OP_ATOMIC_STORE_U2:
4391 case OP_ATOMIC_STORE_I4:
4392 case OP_ATOMIC_STORE_U4:
4393 case OP_ATOMIC_STORE_R4:
4394 case OP_ATOMIC_STORE_R8: {
4395 if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
4396 ARM_DMB (code, ARM_DMB_ISH);
4398 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4400 switch (ins->opcode) {
4401 case OP_ATOMIC_STORE_I1:
4402 case OP_ATOMIC_STORE_U1:
4403 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4404 break;
4405 case OP_ATOMIC_STORE_I2:
4406 case OP_ATOMIC_STORE_U2:
4407 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4408 break;
4409 case OP_ATOMIC_STORE_I4:
4410 case OP_ATOMIC_STORE_U4:
4411 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4412 break;
4413 case OP_ATOMIC_STORE_R4:
4414 if (cfg->r4fp) {
4415 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4416 ARM_FSTS (code, ins->sreg1, ARMREG_LR, 0);
4417 } else {
4418 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4419 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4420 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4421 ARM_FSTS (code, vfp_scratch1, ARMREG_LR, 0);
4422 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4424 break;
4425 case OP_ATOMIC_STORE_R8:
4426 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4427 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4428 break;
4431 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4432 ARM_DMB (code, ARM_DMB_ISH);
4433 break;
4435 case OP_BIGMUL:
4436 ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
4437 break;
4438 case OP_BIGMUL_UN:
4439 ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
4440 break;
4441 case OP_STOREI1_MEMBASE_IMM:
4442 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4443 g_assert (arm_is_imm12 (ins->inst_offset));
4444 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4445 break;
4446 case OP_STOREI2_MEMBASE_IMM:
4447 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4448 g_assert (arm_is_imm8 (ins->inst_offset));
4449 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4450 break;
4451 case OP_STORE_MEMBASE_IMM:
4452 case OP_STOREI4_MEMBASE_IMM:
4453 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4454 g_assert (arm_is_imm12 (ins->inst_offset));
4455 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4456 break;
4457 case OP_STOREI1_MEMBASE_REG:
4458 g_assert (arm_is_imm12 (ins->inst_offset));
4459 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4460 break;
4461 case OP_STOREI2_MEMBASE_REG:
4462 g_assert (arm_is_imm8 (ins->inst_offset));
4463 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4464 break;
4465 case OP_STORE_MEMBASE_REG:
4466 case OP_STOREI4_MEMBASE_REG:
4467 /* this case is special, since it happens for spill code after lowering has been called */
4468 if (arm_is_imm12 (ins->inst_offset)) {
4469 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4470 } else {
4471 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4472 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4474 break;
4475 case OP_STOREI1_MEMINDEX:
4476 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4477 break;
4478 case OP_STOREI2_MEMINDEX:
4479 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4480 break;
4481 case OP_STORE_MEMINDEX:
4482 case OP_STOREI4_MEMINDEX:
4483 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4484 break;
4485 case OP_LOADU4_MEM:
4486 g_assert_not_reached ();
4487 break;
4488 case OP_LOAD_MEMINDEX:
4489 case OP_LOADI4_MEMINDEX:
4490 case OP_LOADU4_MEMINDEX:
4491 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4492 break;
4493 case OP_LOADI1_MEMINDEX:
4494 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4495 break;
4496 case OP_LOADU1_MEMINDEX:
4497 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4498 break;
4499 case OP_LOADI2_MEMINDEX:
4500 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4501 break;
4502 case OP_LOADU2_MEMINDEX:
4503 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4504 break;
4505 case OP_LOAD_MEMBASE:
4506 case OP_LOADI4_MEMBASE:
4507 case OP_LOADU4_MEMBASE:
4508 /* this case is special, since it happens for spill code after lowering has been called */
4509 if (arm_is_imm12 (ins->inst_offset)) {
4510 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4511 } else {
4512 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4513 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4515 break;
4516 case OP_LOADI1_MEMBASE:
4517 g_assert (arm_is_imm8 (ins->inst_offset));
4518 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4519 break;
4520 case OP_LOADU1_MEMBASE:
4521 g_assert (arm_is_imm12 (ins->inst_offset));
4522 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4523 break;
4524 case OP_LOADU2_MEMBASE:
4525 g_assert (arm_is_imm8 (ins->inst_offset));
4526 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4527 break;
4528 case OP_LOADI2_MEMBASE:
4529 g_assert (arm_is_imm8 (ins->inst_offset));
4530 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4531 break;
4532 case OP_ICONV_TO_I1:
4533 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4534 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4535 break;
4536 case OP_ICONV_TO_I2:
4537 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4538 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4539 break;
4540 case OP_ICONV_TO_U1:
4541 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4542 break;
4543 case OP_ICONV_TO_U2:
4544 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4545 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4546 break;
4547 case OP_COMPARE:
4548 case OP_ICOMPARE:
4549 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4550 break;
4551 case OP_COMPARE_IMM:
4552 case OP_ICOMPARE_IMM:
4553 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4554 g_assert (imm8 >= 0);
4555 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4556 break;
4557 case OP_BREAK:
4559 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4560 * So instead of emitting a trap, we emit a call a C function and place a
4561 * breakpoint there.
4563 //*(int*)code = 0xef9f0001;
4564 //code += 4;
4565 //ARM_DBRK (code);
4566 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4567 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
4568 code = emit_call_seq (cfg, code);
4569 break;
4570 case OP_RELAXED_NOP:
4571 ARM_NOP (code);
4572 break;
4573 case OP_NOP:
4574 case OP_DUMMY_USE:
4575 case OP_DUMMY_ICONST:
4576 case OP_DUMMY_R8CONST:
4577 case OP_DUMMY_R4CONST:
4578 case OP_NOT_REACHED:
4579 case OP_NOT_NULL:
4580 break;
4581 case OP_IL_SEQ_POINT:
4582 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4583 break;
4584 case OP_SEQ_POINT: {
4585 int i;
4586 MonoInst *info_var = cfg->arch.seq_point_info_var;
4587 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4588 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4589 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4590 MonoInst *var;
4591 int dreg = ARMREG_LR;
4593 #if 0
4594 if (cfg->soft_breakpoints) {
4595 g_assert (!cfg->compile_aot);
4597 #endif
4600 * For AOT, we use one got slot per method, which will point to a
4601 * SeqPointInfo structure, containing all the information required
4602 * by the code below.
4604 if (cfg->compile_aot) {
4605 g_assert (info_var);
4606 g_assert (info_var->opcode == OP_REGOFFSET);
4609 if (!cfg->soft_breakpoints && !cfg->compile_aot) {
4611 * Read from the single stepping trigger page. This will cause a
4612 * SIGSEGV when single stepping is enabled.
4613 * We do this _before_ the breakpoint, so single stepping after
4614 * a breakpoint is hit will step to the next IL offset.
4616 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4619 /* Single step check */
4620 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4621 if (cfg->soft_breakpoints) {
4622 /* Load the address of the sequence point method variable. */
4623 var = ss_method_var;
4624 g_assert (var);
4625 g_assert (var->opcode == OP_REGOFFSET);
4626 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4627 /* Read the value and check whether it is non-zero. */
4628 ARM_LDR_IMM (code, dreg, dreg, 0);
4629 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4630 /* Call it conditionally. */
4631 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4632 } else {
4633 if (cfg->compile_aot) {
4634 /* Load the trigger page addr from the variable initialized in the prolog */
4635 var = ss_trigger_page_var;
4636 g_assert (var);
4637 g_assert (var->opcode == OP_REGOFFSET);
4638 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4639 } else {
4640 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4641 ARM_B (code, 0);
4642 *(int*)code = (int)(gsize)ss_trigger_page;
4643 code += 4;
4645 ARM_LDR_IMM (code, dreg, dreg, 0);
4649 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4651 /* Breakpoint check */
4652 if (cfg->compile_aot) {
4653 const guint32 offset = code - cfg->native_code;
4654 guint32 val;
4656 var = info_var;
4657 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4658 /* Add the offset */
4659 val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4660 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4661 if (arm_is_imm12 ((int)val)) {
4662 ARM_LDR_IMM (code, dreg, dreg, val);
4663 } else {
4664 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4665 if (val & 0xFF00)
4666 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4667 if (val & 0xFF0000)
4668 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4669 g_assert (!(val & 0xFF000000));
4671 ARM_LDR_IMM (code, dreg, dreg, 0);
4673 /* What is faster, a branch or a load ? */
4674 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4675 /* The breakpoint instruction */
4676 if (cfg->soft_breakpoints)
4677 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4678 else
4679 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4680 } else if (cfg->soft_breakpoints) {
4681 /* Load the address of the breakpoint method into ip. */
4682 var = bp_method_var;
4683 g_assert (var);
4684 g_assert (var->opcode == OP_REGOFFSET);
4685 g_assert (arm_is_imm12 (var->inst_offset));
4686 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4689 * A placeholder for a possible breakpoint inserted by
4690 * mono_arch_set_breakpoint ().
4692 ARM_NOP (code);
4693 } else {
4695 * A placeholder for a possible breakpoint inserted by
4696 * mono_arch_set_breakpoint ().
4698 for (i = 0; i < 4; ++i)
4699 ARM_NOP (code);
4703 * Add an additional nop so skipping the bp doesn't cause the ip to point
4704 * to another IL offset.
4707 ARM_NOP (code);
4708 break;
4710 case OP_ADDCC:
4711 case OP_IADDCC:
4712 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4713 break;
4714 case OP_IADD:
4715 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4716 break;
4717 case OP_ADC:
4718 case OP_IADC:
4719 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4720 break;
4721 case OP_ADDCC_IMM:
4722 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4723 g_assert (imm8 >= 0);
4724 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4725 break;
4726 case OP_ADD_IMM:
4727 case OP_IADD_IMM:
4728 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4729 g_assert (imm8 >= 0);
4730 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4731 break;
4732 case OP_ADC_IMM:
4733 case OP_IADC_IMM:
4734 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4735 g_assert (imm8 >= 0);
4736 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4737 break;
4738 case OP_IADD_OVF:
4739 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4740 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4741 break;
4742 case OP_IADD_OVF_UN:
4743 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4744 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4745 break;
4746 case OP_ISUB_OVF:
4747 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4748 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4749 break;
4750 case OP_ISUB_OVF_UN:
4751 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4752 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4753 break;
4754 case OP_ADD_OVF_CARRY:
4755 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4756 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4757 break;
4758 case OP_ADD_OVF_UN_CARRY:
4759 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4760 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4761 break;
4762 case OP_SUB_OVF_CARRY:
4763 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4764 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4765 break;
4766 case OP_SUB_OVF_UN_CARRY:
4767 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4768 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4769 break;
4770 case OP_SUBCC:
4771 case OP_ISUBCC:
4772 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4773 break;
4774 case OP_SUBCC_IMM:
4775 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4776 g_assert (imm8 >= 0);
4777 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4778 break;
4779 case OP_ISUB:
4780 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4781 break;
4782 case OP_SBB:
4783 case OP_ISBB:
4784 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4785 break;
4786 case OP_SUB_IMM:
4787 case OP_ISUB_IMM:
4788 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4789 g_assert (imm8 >= 0);
4790 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4791 break;
4792 case OP_SBB_IMM:
4793 case OP_ISBB_IMM:
4794 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4795 g_assert (imm8 >= 0);
4796 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4797 break;
4798 case OP_ARM_RSBS_IMM:
4799 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4800 g_assert (imm8 >= 0);
4801 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4802 break;
4803 case OP_ARM_RSC_IMM:
4804 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4805 g_assert (imm8 >= 0);
4806 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4807 break;
4808 case OP_IAND:
4809 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4810 break;
4811 case OP_AND_IMM:
4812 case OP_IAND_IMM:
4813 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4814 g_assert (imm8 >= 0);
4815 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4816 break;
4817 case OP_IDIV:
4818 g_assert (v7s_supported || v7k_supported);
4819 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4820 break;
4821 case OP_IDIV_UN:
4822 g_assert (v7s_supported || v7k_supported);
4823 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4824 break;
4825 case OP_IREM:
4826 g_assert (v7s_supported || v7k_supported);
4827 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4828 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4829 break;
4830 case OP_IREM_UN:
4831 g_assert (v7s_supported || v7k_supported);
4832 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4833 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4834 break;
4835 case OP_DIV_IMM:
4836 case OP_REM_IMM:
4837 g_assert_not_reached ();
4838 case OP_IOR:
4839 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4840 break;
4841 case OP_OR_IMM:
4842 case OP_IOR_IMM:
4843 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4844 g_assert (imm8 >= 0);
4845 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4846 break;
4847 case OP_IXOR:
4848 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4849 break;
4850 case OP_XOR_IMM:
4851 case OP_IXOR_IMM:
4852 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4853 g_assert (imm8 >= 0);
4854 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4855 break;
4856 case OP_ISHL:
4857 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4858 break;
4859 case OP_SHL_IMM:
4860 case OP_ISHL_IMM:
4861 if (ins->inst_imm)
4862 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4863 else if (ins->dreg != ins->sreg1)
4864 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4865 break;
4866 case OP_ISHR:
4867 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4868 break;
4869 case OP_SHR_IMM:
4870 case OP_ISHR_IMM:
4871 if (ins->inst_imm)
4872 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4873 else if (ins->dreg != ins->sreg1)
4874 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4875 break;
4876 case OP_SHR_UN_IMM:
4877 case OP_ISHR_UN_IMM:
4878 if (ins->inst_imm)
4879 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4880 else if (ins->dreg != ins->sreg1)
4881 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4882 break;
4883 case OP_ISHR_UN:
4884 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4885 break;
4886 case OP_INOT:
4887 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4888 break;
4889 case OP_INEG:
4890 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4891 break;
4892 case OP_IMUL:
4893 if (ins->dreg == ins->sreg2)
4894 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4895 else
4896 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4897 break;
4898 case OP_MUL_IMM:
4899 g_assert_not_reached ();
4900 break;
4901 case OP_IMUL_OVF:
4902 /* FIXME: handle ovf/ sreg2 != dreg */
4903 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4904 /* FIXME: MUL doesn't set the C/O flags on ARM */
4905 break;
4906 case OP_IMUL_OVF_UN:
4907 /* FIXME: handle ovf/ sreg2 != dreg */
4908 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4909 /* FIXME: MUL doesn't set the C/O flags on ARM */
4910 break;
4911 case OP_ICONST:
4912 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4913 break;
4914 case OP_AOTCONST:
4915 /* Load the GOT offset */
4916 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
4917 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4918 ARM_B (code, 0);
4919 *(gpointer*)code = NULL;
4920 code += 4;
4921 /* Load the value from the GOT */
4922 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4923 break;
4924 case OP_OBJC_GET_SELECTOR:
4925 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4926 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4927 ARM_B (code, 0);
4928 *(gpointer*)code = NULL;
4929 code += 4;
4930 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4931 break;
4932 case OP_ICONV_TO_I4:
4933 case OP_ICONV_TO_U4:
4934 case OP_MOVE:
4935 if (ins->dreg != ins->sreg1)
4936 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4937 break;
4938 case OP_SETLRET: {
4939 int saved = ins->sreg2;
4940 if (ins->sreg2 == ARM_LSW_REG) {
4941 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4942 saved = ARMREG_LR;
4944 if (ins->sreg1 != ARM_LSW_REG)
4945 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4946 if (saved != ARM_MSW_REG)
4947 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4948 break;
4950 case OP_FMOVE:
4951 if (IS_VFP && ins->dreg != ins->sreg1)
4952 ARM_CPYD (code, ins->dreg, ins->sreg1);
4953 break;
4954 case OP_RMOVE:
4955 if (IS_VFP && ins->dreg != ins->sreg1)
4956 ARM_CPYS (code, ins->dreg, ins->sreg1);
4957 break;
4958 case OP_MOVE_F_TO_I4:
4959 if (cfg->r4fp) {
4960 ARM_FMRS (code, ins->dreg, ins->sreg1);
4961 } else {
4962 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4963 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4964 ARM_FMRS (code, ins->dreg, vfp_scratch1);
4965 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4967 break;
4968 case OP_MOVE_I4_TO_F:
4969 if (cfg->r4fp) {
4970 ARM_FMSR (code, ins->dreg, ins->sreg1);
4971 } else {
4972 ARM_FMSR (code, ins->dreg, ins->sreg1);
4973 ARM_CVTS (code, ins->dreg, ins->dreg);
4975 break;
4976 case OP_FCONV_TO_R4:
4977 if (IS_VFP) {
4978 if (cfg->r4fp) {
4979 ARM_CVTD (code, ins->dreg, ins->sreg1);
4980 } else {
4981 ARM_CVTD (code, ins->dreg, ins->sreg1);
4982 ARM_CVTS (code, ins->dreg, ins->dreg);
4985 break;
4987 case OP_TAILCALL_PARAMETER:
4988 // This opcode helps compute sizes, i.e.
4989 // of the subsequent OP_TAILCALL, but contributes no code.
4990 g_assert (ins->next);
4991 break;
4993 case OP_TAILCALL:
4994 case OP_TAILCALL_MEMBASE:
4995 case OP_TAILCALL_REG: {
4996 gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE;
4997 gboolean const tailcall_reg = ins->opcode == OP_TAILCALL_REG;
4998 MonoCallInst *call = (MonoCallInst*)ins;
5000 max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
5002 if (IS_HARD_FLOAT)
5003 code = emit_float_args (cfg, call, code, &max_len, &offset);
5005 code = realloc_code (cfg, max_len);
5007 // For reg and membase, get destination in IP.
5009 if (tailcall_reg) {
5010 g_assert (ins->sreg1 > -1);
5011 if (ins->sreg1 != ARMREG_IP)
5012 ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg1);
5013 } else if (tailcall_membase) {
5014 g_assert (ins->sreg1 > -1);
5015 if (!arm_is_imm12 (ins->inst_offset)) {
5016 g_assert (ins->sreg1 != ARMREG_IP); // temp in emit_big_add
5017 code = emit_big_add (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
5018 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
5019 } else {
5020 ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
5025 * The stack looks like the following:
5026 * <caller argument area>
5027 * <saved regs etc>
5028 * <rest of frame>
5029 * <callee argument area>
5030 * <optionally saved IP> (about to be)
5031 * Need to copy the arguments from the callee argument area to
5032 * the caller argument area, and pop the frame.
5034 if (call->stack_usage) {
5035 int i, prev_sp_offset = 0;
5037 // When we get here, the parameters to the tailcall are already formed,
5038 // in registers and at the bottom of the grow-down stack.
5040 // Our goal is generally preserve parameters, and trim the stack,
5041 // and, before trimming stack, move parameters from the bottom of the
5042 // frame to the bottom of the trimmed frame.
5044 // For the case of large frames, and presently therefore always,
5045 // IP is used as an adjusted frame_reg.
5046 // Be conservative and save IP around the movement
5047 // of parameters from the bottom of frame to top of the frame.
5048 const gboolean save_ip = tailcall_membase || tailcall_reg;
5049 if (save_ip)
5050 ARM_PUSH (code, 1 << ARMREG_IP);
5052 // When moving stacked parameters from the bottom
5053 // of the frame (sp) to the top of the frame (ip),
5054 // account, 0 or 4, for the conditional save of IP.
5055 const int offset_sp = save_ip ? 4 : 0;
5056 const int offset_ip = (save_ip && (cfg->frame_reg == ARMREG_SP)) ? 4 : 0;
5058 /* Compute size of saved registers restored below */
5059 if (iphone_abi)
5060 prev_sp_offset = 2 * 4;
5061 else
5062 prev_sp_offset = 1 * 4;
5063 for (i = 0; i < 16; ++i) {
5064 if (cfg->used_int_regs & (1 << i))
5065 prev_sp_offset += 4;
5068 // Point IP at the start of where the parameters will go after trimming stack.
5069 // After locals and saved registers.
5070 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
5072 /* Copy arguments on the stack to our argument area */
5073 // FIXME a fixed size memcpy is desirable here,
5074 // at least for larger values of stack_usage.
5076 // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
5077 // See https://github.com/mono/mono/pull/12079
5078 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
5079 for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
5080 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i + offset_sp);
5081 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i + offset_ip);
5084 if (save_ip)
5085 ARM_POP (code, 1 << ARMREG_IP);
5089 * Keep in sync with mono_arch_emit_epilog
5091 g_assert (!cfg->method->save_lmf);
5092 code = emit_big_add_temp (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage, ARMREG_LR);
5093 if (iphone_abi) {
5094 if (cfg->used_int_regs)
5095 ARM_POP (code, cfg->used_int_regs);
5096 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5097 } else {
5098 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
5101 if (tailcall_reg || tailcall_membase) {
5102 code = emit_jmp_reg (code, ARMREG_IP);
5103 } else {
5104 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
5106 if (cfg->compile_aot) {
5107 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5108 ARM_B (code, 0);
5109 *(gpointer*)code = NULL;
5110 code += 4;
5111 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
5112 } else {
5113 code = mono_arm_patchable_b (code, ARMCOND_AL);
5114 cfg->thunk_area += THUNK_SIZE;
5117 break;
5119 case OP_CHECK_THIS:
5120 /* ensure ins->sreg1 is not NULL */
5121 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
5122 break;
5123 case OP_ARGLIST: {
5124 g_assert (cfg->sig_cookie < 128);
5125 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5126 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
5127 break;
5129 case OP_FCALL:
5130 case OP_RCALL:
5131 case OP_LCALL:
5132 case OP_VCALL:
5133 case OP_VCALL2:
5134 case OP_VOIDCALL:
5135 case OP_CALL:
5136 call = (MonoCallInst*)ins;
5138 if (IS_HARD_FLOAT)
5139 code = emit_float_args (cfg, call, code, &max_len, &offset);
5141 mono_call_add_patch_info (cfg, call, code - cfg->native_code);
5143 code = emit_call_seq (cfg, code);
5144 ins->flags |= MONO_INST_GC_CALLSITE;
5145 ins->backend.pc_offset = code - cfg->native_code;
5146 code = emit_move_return_value (cfg, ins, code);
5147 break;
5148 case OP_FCALL_REG:
5149 case OP_RCALL_REG:
5150 case OP_LCALL_REG:
5151 case OP_VCALL_REG:
5152 case OP_VCALL2_REG:
5153 case OP_VOIDCALL_REG:
5154 case OP_CALL_REG:
5155 if (IS_HARD_FLOAT)
5156 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
5158 code = emit_call_reg (code, ins->sreg1);
5159 ins->flags |= MONO_INST_GC_CALLSITE;
5160 ins->backend.pc_offset = code - cfg->native_code;
5161 code = emit_move_return_value (cfg, ins, code);
5162 break;
5163 case OP_FCALL_MEMBASE:
5164 case OP_RCALL_MEMBASE:
5165 case OP_LCALL_MEMBASE:
5166 case OP_VCALL_MEMBASE:
5167 case OP_VCALL2_MEMBASE:
5168 case OP_VOIDCALL_MEMBASE:
5169 case OP_CALL_MEMBASE: {
5170 g_assert (ins->sreg1 != ARMREG_LR);
5171 call = (MonoCallInst*)ins;
5173 if (IS_HARD_FLOAT)
5174 code = emit_float_args (cfg, call, code, &max_len, &offset);
5175 if (!arm_is_imm12 (ins->inst_offset)) {
5176 /* sreg1 might be IP */
5177 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
5178 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
5179 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_LR);
5180 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5181 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
5182 } else {
5183 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5184 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
5186 ins->flags |= MONO_INST_GC_CALLSITE;
5187 ins->backend.pc_offset = code - cfg->native_code;
5188 code = emit_move_return_value (cfg, ins, code);
5189 break;
5191 case OP_GENERIC_CLASS_INIT: {
5192 int byte_offset;
5193 guint8 *jump;
5195 byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);
5197 g_assert (arm_is_imm8 (byte_offset));
5198 ARM_LDRSB_IMM (code, ARMREG_IP, ins->sreg1, byte_offset);
5199 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
5200 jump = code;
5201 ARM_B_COND (code, ARMCOND_NE, 0);
5203 /* Uninitialized case */
5204 g_assert (ins->sreg1 == ARMREG_R0);
5206 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5207 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
5208 code = emit_call_seq (cfg, code);
5210 /* Initialized case */
5211 arm_patch (jump, code);
5212 break;
5214 case OP_LOCALLOC: {
5215 /* round the size to 8 bytes */
5216 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
5217 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1));
5218 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
5219 /* memzero the area: dreg holds the size, sp is the pointer */
5220 if (ins->flags & MONO_INST_INIT) {
5221 guint8 *start_loop, *branch_to_cond;
5222 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
5223 branch_to_cond = code;
5224 ARM_B (code, 0);
5225 start_loop = code;
5226 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
5227 arm_patch (branch_to_cond, code);
5228 /* decrement by 4 and set flags */
5229 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (target_mgreg_t));
5230 ARM_B_COND (code, ARMCOND_GE, 0);
5231 arm_patch (code - 4, start_loop);
5233 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
5234 if (cfg->param_area)
5235 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
5236 break;
5238 case OP_DYN_CALL: {
5239 int i;
5240 MonoInst *var = cfg->dyn_call_var;
5241 guint8 *labels [16];
5243 g_assert (var->opcode == OP_REGOFFSET);
5244 g_assert (arm_is_imm12 (var->inst_offset));
5246 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5247 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
5248 /* ip = ftn */
5249 ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg2);
5251 /* Save args buffer */
5252 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5254 /* Set fp argument registers */
5255 if (IS_HARD_FLOAT) {
5256 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, has_fpregs));
5257 ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0);
5258 labels [0] = code;
5259 ARM_B_COND (code, ARMCOND_EQ, 0);
5260 for (i = 0; i < FP_PARAM_REGS; ++i) {
5261 const int offset = MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * sizeof (double));
5262 g_assert (arm_is_fpimm8 (offset));
5263 ARM_FLDD (code, i * 2, ARMREG_LR, offset);
5265 arm_patch (labels [0], code);
5268 /* Allocate callee area */
5269 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
5270 ARM_SHL_IMM (code, ARMREG_R1, ARMREG_R1, 2);
5271 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R1);
5273 /* Set stack args */
5274 /* R1 = limit */
5275 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
5276 /* R2 = pointer into regs */
5277 code = emit_big_add (code, ARMREG_R2, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (PARAM_REGS * sizeof (target_mgreg_t)));
5278 /* R3 = pointer to stack */
5279 ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP);
5280 /* Loop */
5281 labels [0] = code;
5282 ARM_B_COND (code, ARMCOND_AL, 0);
5283 labels [1] = code;
5284 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R2, 0);
5285 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R3, 0);
5286 ARM_ADD_REG_IMM (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t), 0);
5287 ARM_ADD_REG_IMM (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t), 0);
5288 ARM_SUB_REG_IMM (code, ARMREG_R1, ARMREG_R1, 1, 0);
5289 arm_patch (labels [0], code);
5290 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
5291 labels [2] = code;
5292 ARM_B_COND (code, ARMCOND_GT, 0);
5293 arm_patch (labels [2], labels [1]);
5295 /* Set argument registers */
5296 for (i = 0; i < PARAM_REGS; ++i)
5297 ARM_LDR_IMM (code, i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)));
5299 /* Make the call */
5300 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5301 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5303 /* Save result */
5304 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5305 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5306 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5307 if (IS_HARD_FLOAT)
5308 ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, fpregs));
5309 break;
5311 case OP_THROW: {
5312 if (ins->sreg1 != ARMREG_R0)
5313 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5314 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5315 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
5316 code = emit_call_seq (cfg, code);
5317 break;
5319 case OP_RETHROW: {
5320 if (ins->sreg1 != ARMREG_R0)
5321 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5322 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5323 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
5324 code = emit_call_seq (cfg, code);
5325 break;
5327 case OP_START_HANDLER: {
5328 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5329 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5330 int i, rot_amount;
5332 /* Reserve a param area, see filter-stack.exe */
5333 if (param_area) {
5334 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5335 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5336 } else {
5337 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5338 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5342 if (arm_is_imm12 (spvar->inst_offset)) {
5343 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5344 } else {
5345 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5346 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5348 break;
5350 case OP_ENDFILTER: {
5351 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5352 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5353 int i, rot_amount;
5355 /* Free the param area */
5356 if (param_area) {
5357 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5358 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5359 } else {
5360 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5361 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5365 if (ins->sreg1 != ARMREG_R0)
5366 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5367 if (arm_is_imm12 (spvar->inst_offset)) {
5368 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5369 } else {
5370 g_assert (ARMREG_IP != spvar->inst_basereg);
5371 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5372 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5374 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5375 break;
5377 case OP_ENDFINALLY: {
5378 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5379 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5380 int i, rot_amount;
5382 /* Free the param area */
5383 if (param_area) {
5384 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5385 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5386 } else {
5387 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5388 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5392 if (arm_is_imm12 (spvar->inst_offset)) {
5393 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5394 } else {
5395 g_assert (ARMREG_IP != spvar->inst_basereg);
5396 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5397 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5399 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5400 break;
5402 case OP_CALL_HANDLER:
5403 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5404 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5405 cfg->thunk_area += THUNK_SIZE;
5406 for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
5407 mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
5408 break;
5409 case OP_GET_EX_OBJ:
5410 if (ins->dreg != ARMREG_R0)
5411 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0);
5412 break;
5414 case OP_LABEL:
5415 ins->inst_c0 = code - cfg->native_code;
5416 break;
5417 case OP_BR:
5418 /*if (ins->inst_target_bb->native_offset) {
5419 ARM_B (code, 0);
5420 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5421 } else*/ {
5422 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5423 code = mono_arm_patchable_b (code, ARMCOND_AL);
5425 break;
5426 case OP_BR_REG:
5427 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5428 break;
5429 case OP_SWITCH:
5431 * In the normal case we have:
5432 * ldr pc, [pc, ins->sreg1 << 2]
5433 * nop
5434 * If aot, we have:
5435 * ldr lr, [pc, ins->sreg1 << 2]
5436 * add pc, pc, lr
5437 * After follows the data.
5438 * FIXME: add aot support.
5440 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5441 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5442 code = realloc_code (cfg, max_len);
5443 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5444 ARM_NOP (code);
5445 code += 4 * GPOINTER_TO_INT (ins->klass);
5446 break;
5447 case OP_CEQ:
5448 case OP_ICEQ:
5449 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5450 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5451 break;
5452 case OP_CLT:
5453 case OP_ICLT:
5454 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5455 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5456 break;
5457 case OP_CLT_UN:
5458 case OP_ICLT_UN:
5459 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5460 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5461 break;
5462 case OP_CGT:
5463 case OP_ICGT:
5464 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5465 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5466 break;
5467 case OP_CGT_UN:
5468 case OP_ICGT_UN:
5469 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5470 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5471 break;
5472 case OP_ICNEQ:
5473 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5474 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5475 break;
5476 case OP_ICGE:
5477 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5478 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5479 break;
5480 case OP_ICLE:
5481 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5482 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5483 break;
5484 case OP_ICGE_UN:
5485 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5486 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5487 break;
5488 case OP_ICLE_UN:
5489 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5490 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
5491 break;
5492 case OP_COND_EXC_EQ:
5493 case OP_COND_EXC_NE_UN:
5494 case OP_COND_EXC_LT:
5495 case OP_COND_EXC_LT_UN:
5496 case OP_COND_EXC_GT:
5497 case OP_COND_EXC_GT_UN:
5498 case OP_COND_EXC_GE:
5499 case OP_COND_EXC_GE_UN:
5500 case OP_COND_EXC_LE:
5501 case OP_COND_EXC_LE_UN:
5502 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5503 break;
5504 case OP_COND_EXC_IEQ:
5505 case OP_COND_EXC_INE_UN:
5506 case OP_COND_EXC_ILT:
5507 case OP_COND_EXC_ILT_UN:
5508 case OP_COND_EXC_IGT:
5509 case OP_COND_EXC_IGT_UN:
5510 case OP_COND_EXC_IGE:
5511 case OP_COND_EXC_IGE_UN:
5512 case OP_COND_EXC_ILE:
5513 case OP_COND_EXC_ILE_UN:
5514 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5515 break;
5516 case OP_COND_EXC_C:
5517 case OP_COND_EXC_IC:
5518 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5519 break;
5520 case OP_COND_EXC_OV:
5521 case OP_COND_EXC_IOV:
5522 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5523 break;
5524 case OP_COND_EXC_NC:
5525 case OP_COND_EXC_INC:
5526 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5527 break;
5528 case OP_COND_EXC_NO:
5529 case OP_COND_EXC_INO:
5530 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5531 break;
5532 case OP_IBEQ:
5533 case OP_IBNE_UN:
5534 case OP_IBLT:
5535 case OP_IBLT_UN:
5536 case OP_IBGT:
5537 case OP_IBGT_UN:
5538 case OP_IBGE:
5539 case OP_IBGE_UN:
5540 case OP_IBLE:
5541 case OP_IBLE_UN:
5542 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5543 break;
5545 /* floating point opcodes */
5546 case OP_R8CONST:
5547 if (cfg->compile_aot) {
5548 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5549 ARM_B (code, 1);
5550 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5551 code += 4;
5552 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5553 code += 4;
5554 } else {
5555 /* FIXME: we can optimize the imm load by dealing with part of
5556 * the displacement in LDFD (aligning to 512).
5558 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
5559 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5561 break;
5562 case OP_R4CONST:
5563 if (cfg->compile_aot) {
5564 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5565 ARM_B (code, 0);
5566 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5567 code += 4;
5568 if (!cfg->r4fp)
5569 ARM_CVTS (code, ins->dreg, ins->dreg);
5570 } else {
5571 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
5572 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5573 if (!cfg->r4fp)
5574 ARM_CVTS (code, ins->dreg, ins->dreg);
5576 break;
5577 case OP_STORER8_MEMBASE_REG:
5578 /* This is generated by the local regalloc pass which runs after the lowering pass */
5579 if (!arm_is_fpimm8 (ins->inst_offset)) {
5580 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5581 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5582 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5583 } else {
5584 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5586 break;
5587 case OP_LOADR8_MEMBASE:
5588 /* This is generated by the local regalloc pass which runs after the lowering pass */
5589 if (!arm_is_fpimm8 (ins->inst_offset)) {
5590 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5591 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5592 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5593 } else {
5594 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5596 break;
5597 case OP_STORER4_MEMBASE_REG:
5598 g_assert (arm_is_fpimm8 (ins->inst_offset));
5599 if (cfg->r4fp) {
5600 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5601 } else {
5602 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5603 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5604 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5605 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5607 break;
5608 case OP_LOADR4_MEMBASE:
5609 if (cfg->r4fp) {
5610 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5611 } else {
5612 g_assert (arm_is_fpimm8 (ins->inst_offset));
5613 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5614 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5615 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5616 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5618 break;
5619 case OP_ICONV_TO_R_UN: {
5620 g_assert_not_reached ();
5621 break;
5623 case OP_ICONV_TO_R4:
5624 if (cfg->r4fp) {
5625 ARM_FMSR (code, ins->dreg, ins->sreg1);
5626 ARM_FSITOS (code, ins->dreg, ins->dreg);
5627 } else {
5628 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5629 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5630 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5631 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5632 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5634 break;
5635 case OP_ICONV_TO_R8:
5636 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5637 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5638 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5639 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5640 break;
5642 case OP_SETFRET: {
5643 MonoType *sig_ret = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
5644 if (sig_ret->type == MONO_TYPE_R4) {
5645 if (cfg->r4fp) {
5646 if (IS_HARD_FLOAT) {
5647 if (ins->sreg1 != ARM_VFP_D0)
5648 ARM_CPYS (code, ARM_VFP_D0, ins->sreg1);
5649 } else {
5650 ARM_FMRS (code, ARMREG_R0, ins->sreg1);
5652 } else {
5653 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5655 if (!IS_HARD_FLOAT)
5656 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5658 } else {
5659 if (IS_HARD_FLOAT)
5660 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5661 else
5662 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5664 break;
5666 case OP_FCONV_TO_I1:
5667 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5668 break;
5669 case OP_FCONV_TO_U1:
5670 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5671 break;
5672 case OP_FCONV_TO_I2:
5673 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5674 break;
5675 case OP_FCONV_TO_U2:
5676 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5677 break;
5678 case OP_FCONV_TO_I4:
5679 case OP_FCONV_TO_I:
5680 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5681 break;
5682 case OP_FCONV_TO_U4:
5683 case OP_FCONV_TO_U:
5684 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5685 break;
5686 case OP_FCONV_TO_I8:
5687 case OP_FCONV_TO_U8:
5688 g_assert_not_reached ();
5689 /* Implemented as helper calls */
5690 break;
5691 case OP_LCONV_TO_R_UN:
5692 g_assert_not_reached ();
5693 /* Implemented as helper calls */
5694 break;
5695 case OP_LCONV_TO_OVF_I4_2: {
5696 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5698 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
5701 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5702 high_bit_not_set = code;
5703 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5705 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5706 valid_negative = code;
5707 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5708 invalid_negative = code;
5709 ARM_B_COND (code, ARMCOND_AL, 0);
5711 arm_patch (high_bit_not_set, code);
5713 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5714 valid_positive = code;
5715 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5717 arm_patch (invalid_negative, code);
5718 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5720 arm_patch (valid_negative, code);
5721 arm_patch (valid_positive, code);
5723 if (ins->dreg != ins->sreg1)
5724 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5725 break;
5727 case OP_FADD:
5728 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5729 break;
5730 case OP_FSUB:
5731 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5732 break;
5733 case OP_FMUL:
5734 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5735 break;
5736 case OP_FDIV:
5737 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5738 break;
5739 case OP_FNEG:
5740 ARM_NEGD (code, ins->dreg, ins->sreg1);
5741 break;
5742 case OP_FREM:
5743 /* emulated */
5744 g_assert_not_reached ();
5745 break;
5746 case OP_FCOMPARE:
5747 if (IS_VFP) {
5748 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5749 ARM_FMSTAT (code);
5751 break;
5752 case OP_RCOMPARE:
5753 g_assert (IS_VFP);
5754 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5755 ARM_FMSTAT (code);
5756 break;
5757 case OP_FCEQ:
5758 if (IS_VFP) {
5759 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5760 ARM_FMSTAT (code);
5762 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5763 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5764 break;
5765 case OP_FCLT:
5766 if (IS_VFP) {
5767 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5768 ARM_FMSTAT (code);
5770 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5771 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5772 break;
5773 case OP_FCLT_UN:
5774 if (IS_VFP) {
5775 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5776 ARM_FMSTAT (code);
5778 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5779 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5780 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5781 break;
5782 case OP_FCGT:
5783 if (IS_VFP) {
5784 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5785 ARM_FMSTAT (code);
5787 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5788 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5789 break;
5790 case OP_FCGT_UN:
5791 if (IS_VFP) {
5792 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5793 ARM_FMSTAT (code);
5795 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5796 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5797 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5798 break;
5799 case OP_FCNEQ:
5800 if (IS_VFP) {
5801 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5802 ARM_FMSTAT (code);
5804 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5805 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5806 break;
5807 case OP_FCGE:
5808 if (IS_VFP) {
5809 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5810 ARM_FMSTAT (code);
5812 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5813 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5814 break;
5815 case OP_FCLE:
5816 if (IS_VFP) {
5817 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5818 ARM_FMSTAT (code);
5820 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5821 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5822 break;
5824 /* ARM FPA flags table:
5825 * N Less than ARMCOND_MI
5826 * Z Equal ARMCOND_EQ
5827 * C Greater Than or Equal ARMCOND_CS
5828 * V Unordered ARMCOND_VS
5830 case OP_FBEQ:
5831 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5832 break;
5833 case OP_FBNE_UN:
5834 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5835 break;
5836 case OP_FBLT:
5837 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5838 break;
5839 case OP_FBLT_UN:
5840 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5841 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5842 break;
5843 case OP_FBGT:
5844 case OP_FBGT_UN:
5845 case OP_FBLE:
5846 case OP_FBLE_UN:
5847 g_assert_not_reached ();
5848 break;
5849 case OP_FBGE:
5850 if (IS_VFP) {
5851 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5852 } else {
5853 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5854 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5855 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5857 break;
5858 case OP_FBGE_UN:
5859 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5860 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5861 break;
5863 case OP_CKFINITE: {
5864 if (IS_VFP) {
5865 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5866 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5868 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5869 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5870 ARM_B (code, 1);
5871 *(guint32*)code = 0xffffffff;
5872 code += 4;
5873 *(guint32*)code = 0x7fefffff;
5874 code += 4;
5875 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5876 ARM_FMSTAT (code);
5877 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "OverflowException");
5878 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5879 ARM_FMSTAT (code);
5880 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "OverflowException");
5881 ARM_CPYD (code, ins->dreg, ins->sreg1);
5883 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5884 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5886 break;
5889 case OP_RCONV_TO_I1:
5890 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5891 break;
5892 case OP_RCONV_TO_U1:
5893 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5894 break;
5895 case OP_RCONV_TO_I2:
5896 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5897 break;
5898 case OP_RCONV_TO_U2:
5899 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5900 break;
5901 case OP_RCONV_TO_I4:
5902 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5903 break;
5904 case OP_RCONV_TO_U4:
5905 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5906 break;
5907 case OP_RCONV_TO_R4:
5908 g_assert (IS_VFP);
5909 if (ins->dreg != ins->sreg1)
5910 ARM_CPYS (code, ins->dreg, ins->sreg1);
5911 break;
5912 case OP_RCONV_TO_R8:
5913 g_assert (IS_VFP);
5914 ARM_CVTS (code, ins->dreg, ins->sreg1);
5915 break;
5916 case OP_RADD:
5917 ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
5918 break;
5919 case OP_RSUB:
5920 ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
5921 break;
5922 case OP_RMUL:
5923 ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
5924 break;
5925 case OP_RDIV:
5926 ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
5927 break;
5928 case OP_RNEG:
5929 ARM_NEGS (code, ins->dreg, ins->sreg1);
5930 break;
5931 case OP_RCEQ:
5932 if (IS_VFP) {
5933 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5934 ARM_FMSTAT (code);
5936 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5937 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5938 break;
5939 case OP_RCLT:
5940 if (IS_VFP) {
5941 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5942 ARM_FMSTAT (code);
5944 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5945 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5946 break;
5947 case OP_RCLT_UN:
5948 if (IS_VFP) {
5949 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5950 ARM_FMSTAT (code);
5952 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5953 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5954 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5955 break;
5956 case OP_RCGT:
5957 if (IS_VFP) {
5958 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5959 ARM_FMSTAT (code);
5961 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5962 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5963 break;
5964 case OP_RCGT_UN:
5965 if (IS_VFP) {
5966 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5967 ARM_FMSTAT (code);
5969 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5970 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5971 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5972 break;
5973 case OP_RCNEQ:
5974 if (IS_VFP) {
5975 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5976 ARM_FMSTAT (code);
5978 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5979 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5980 break;
5981 case OP_RCGE:
5982 if (IS_VFP) {
5983 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5984 ARM_FMSTAT (code);
5986 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5987 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5988 break;
5989 case OP_RCLE:
5990 if (IS_VFP) {
5991 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5992 ARM_FMSTAT (code);
5994 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5995 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5996 break;
5998 case OP_GC_LIVENESS_DEF:
5999 case OP_GC_LIVENESS_USE:
6000 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
6001 ins->backend.pc_offset = code - cfg->native_code;
6002 break;
6003 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
6004 ins->backend.pc_offset = code - cfg->native_code;
6005 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
6006 break;
6007 case OP_LIVERANGE_START: {
6008 if (cfg->verbose_level > 1)
6009 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
6010 MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
6011 break;
6013 case OP_LIVERANGE_END: {
6014 if (cfg->verbose_level > 1)
6015 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
6016 MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
6017 break;
6019 case OP_GC_SAFE_POINT: {
6020 guint8 *buf [1];
6022 ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
6023 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
6024 buf [0] = code;
6025 ARM_B_COND (code, ARMCOND_EQ, 0);
6026 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
6027 code = emit_call_seq (cfg, code);
6028 arm_patch (buf [0], code);
6029 break;
6031 case OP_FILL_PROF_CALL_CTX:
6032 for (int i = 0; i < ARMREG_MAX; i++)
6033 if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
6034 ARM_STR_IMM (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
6035 break;
6036 default:
6037 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
6038 g_assert_not_reached ();
6041 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
6042 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
6043 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
6044 g_assert_not_reached ();
6047 cpos += max_len;
6049 last_ins = ins;
6052 set_code_cursor (cfg, code);
6055 #endif /* DISABLE_JIT */
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level helpers (the two exception-throwing
 * trampoline targets and the stack-misalignment diagnostic) as JIT icalls so
 * generated code can reference them. All three are registered with
 * mono_icall_sig_void because, as noted below, the signature is irrelevant
 * for these registrations. NOTE(review): the meaning of the trailing TRUE
 * flag is defined by mono_register_jit_icall — verify against its declaration.
 */
6057 void
6058 mono_arch_register_lowlevel_calls (void)
6060 /* The signature doesn't matter */
6061 mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE);
6062 mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE);
6063 mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE);
/*
 * patch_lis_ori:
 *
 *   Patch the 16-bit immediate fields of a two-instruction constant-load
 * sequence at IP: halfword [1] receives the high 16 bits of VAL, halfword [3]
 * the low 16 bits. "lis"/"ori" are PowerPC mnemonics, and every caller below
 * sits behind a g_assert_not_reached () — presumably dead code inherited from
 * the PPC backend; TODO(review): confirm and consider removing.
 */
6066 #define patch_lis_ori(ip,val) do {\
6067 guint16 *__lis_ori = (guint16*)(ip); \
6068 __lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \
6069 __lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff; \
6070 } while (0)
/*
 * mono_arch_patch_code_new:
 *
 *   Apply the relocation described by JI to the native code buffer starting
 * at CODE; IP (ji->ip.i + code) points at the instruction to patch and TARGET
 * is the resolved patch value. SWITCH patches fill the jump table inlined
 * 8 bytes (two instructions) past IP with absolute addresses computed from
 * the recorded per-entry displacements. Every patch type not handled
 * explicitly below is delegated to arm_patch_general ().
 */
6072 void
6073 mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
6075 unsigned char *ip = ji->ip.i + code;
6077 if (ji->type == MONO_PATCH_INFO_SWITCH) {
6080 switch (ji->type) {
6081 case MONO_PATCH_INFO_SWITCH: {
6082 gpointer *jt = (gpointer*)(ip + 8);
6083 int i;
6084 /* jt is the inlined jump table, 2 instructions after ip
6085 * In the normal case we store the absolute addresses,
6086 * otherwise the displacements.
6088 for (i = 0; i < ji->data.table->table_size; i++)
6089 jt [i] = code + (int)(gsize)ji->data.table->table [i];
6090 break;
/*
 * The cases from here through EXC_NAME are dead: each is guarded by a
 * g_assert_not_reached () before the patching code runs. patch_lis_ori and
 * the raw stores at ip+1/ip+2 look like leftovers from the PPC backend
 * (lis/ori are PowerPC mnemonics) — TODO(review): confirm and remove.
 */
6092 case MONO_PATCH_INFO_IP:
6093 g_assert_not_reached ();
6094 patch_lis_ori (ip, ip);
6095 break;
6096 case MONO_PATCH_INFO_METHODCONST:
6097 case MONO_PATCH_INFO_CLASS:
6098 case MONO_PATCH_INFO_IMAGE:
6099 case MONO_PATCH_INFO_FIELD:
6100 case MONO_PATCH_INFO_VTABLE:
6101 case MONO_PATCH_INFO_IID:
6102 case MONO_PATCH_INFO_SFLDA:
6103 case MONO_PATCH_INFO_LDSTR:
6104 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
6105 case MONO_PATCH_INFO_LDTOKEN:
6106 g_assert_not_reached ();
6107 /* from OP_AOTCONST : lis + ori */
6108 patch_lis_ori (ip, target);
6109 break;
6110 case MONO_PATCH_INFO_R4:
6111 case MONO_PATCH_INFO_R8:
6112 g_assert_not_reached ();
6113 *((gconstpointer *)(ip + 2)) = target;
6114 break;
6115 case MONO_PATCH_INFO_EXC_NAME:
6116 g_assert_not_reached ();
6117 *((gconstpointer *)(ip + 1)) = target;
6118 break;
6119 case MONO_PATCH_INFO_NONE:
6120 case MONO_PATCH_INFO_BB_OVF:
6121 case MONO_PATCH_INFO_EXC_OVF:
6122 /* everything is dealt with at epilog output time */
6123 break;
6124 default:
/* All ordinary ARM relocations (calls, branches, constant loads) go here. */
6125 arm_patch_general (cfg, domain, ip, (const guchar*)target);
6126 break;
/*
 * mono_arm_unaligned_stack:
 *
 *   Diagnostic JIT icall invoked by the stack-alignment check that
 * mono_arch_emit_prolog emits at method entry: reached only when SP is not
 * MONO_ARCH_FRAME_ALIGNMENT-aligned, so it unconditionally asserts.
 * METHOD identifies the offending method (0 is passed under AOT, where the
 * MonoMethod pointer cannot be embedded in the code).
 */
6130 void
6131 mono_arm_unaligned_stack (MonoMethod *method)
6133 g_assert_not_reached ();
6136 #ifndef DISABLE_JIT
6139 * Stack frame layout:
6141 * ------------------- fp
6142 * MonoLMF structure or saved registers
6143 * -------------------
6144 * locals
6145 * -------------------
6146 * spilled regs
6147 * -------------------
6148 * param area size is cfg->param_area
6149 * ------------------- sp
6151 guint8 *
6152 mono_arch_emit_prolog (MonoCompile *cfg)
6154 MonoMethod *method = cfg->method;
6155 MonoBasicBlock *bb;
6156 MonoMethodSignature *sig;
6157 MonoInst *inst;
6158 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part;
6159 guint8 *code;
6160 CallInfo *cinfo;
6161 int lmf_offset = 0;
6162 int prev_sp_offset, reg_offset;
6164 sig = mono_method_signature_internal (method);
6165 cfg->code_size = 256 + sig->param_count * 64;
6166 code = cfg->native_code = g_malloc (cfg->code_size);
6168 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
6170 alloc_size = cfg->stack_offset;
6171 pos = 0;
6172 prev_sp_offset = 0;
6174 if (iphone_abi) {
6176 * The iphone uses R7 as the frame pointer, and it points at the saved
6177 * r7+lr:
6178 * <lr>
6179 * r7 -> <r7>
6180 * <rest of frame>
6181 * We can't use r7 as a frame pointer since it points into the middle of
6182 * the frame, so we keep using our own frame pointer.
6183 * FIXME: Optimize this.
6185 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
6186 prev_sp_offset += 8; /* r7 and lr */
6187 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6188 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
6189 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
6192 if (!method->save_lmf) {
6193 if (iphone_abi) {
6194 /* No need to push LR again */
6195 if (cfg->used_int_regs)
6196 ARM_PUSH (code, cfg->used_int_regs);
6197 } else {
6198 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
6199 prev_sp_offset += 4;
6201 for (i = 0; i < 16; ++i) {
6202 if (cfg->used_int_regs & (1 << i))
6203 prev_sp_offset += 4;
6205 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6206 reg_offset = 0;
6207 for (i = 0; i < 16; ++i) {
6208 if ((cfg->used_int_regs & (1 << i))) {
6209 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6210 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
6211 reg_offset += 4;
6214 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
6215 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
6216 } else {
6217 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
6218 ARM_PUSH (code, 0x5ff0);
6219 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
6220 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6221 reg_offset = 0;
6222 for (i = 0; i < 16; ++i) {
6223 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
6224 /* The original r7 is saved at the start */
6225 if (!(iphone_abi && i == ARMREG_R7))
6226 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6227 reg_offset += 4;
6230 g_assert (reg_offset == 4 * 10);
6231 pos += MONO_ABI_SIZEOF (MonoLMF) - (4 * 10);
6232 lmf_offset = pos;
6234 alloc_size += pos;
6235 orig_alloc_size = alloc_size;
6236 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6237 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
6238 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
6239 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
6242 /* the stack used in the pushed regs */
6243 alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
6244 cfg->stack_usage = alloc_size;
6245 if (alloc_size) {
6246 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
6247 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
6248 } else {
6249 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
6250 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
6252 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
6254 if (cfg->frame_reg != ARMREG_SP) {
6255 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
6256 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
6258 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6259 prev_sp_offset += alloc_size;
6261 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
6262 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
6264 /* compute max_offset in order to use short forward jumps
6265 * we could skip do it on arm because the immediate displacement
6266 * for jumps is large enough, it may be useful later for constant pools
6268 max_offset = 0;
6269 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
6270 MonoInst *ins = bb->code;
6271 bb->max_offset = max_offset;
6273 MONO_BB_FOR_EACH_INS (bb, ins)
6274 max_offset += ins_get_size (ins->opcode);
6277 /* stack alignment check */
6280 guint8 *buf [16];
6281 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6282 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6283 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6284 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6285 buf [0] = code;
6286 ARM_B_COND (code, ARMCOND_EQ, 0);
6287 if (cfg->compile_aot)
6288 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6289 else
6290 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6291 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
6292 code = emit_call_seq (cfg, code);
6293 arm_patch (buf [0], code);
6297 /* store runtime generic context */
6298 if (cfg->rgctx_var) {
6299 MonoInst *ins = cfg->rgctx_var;
6301 g_assert (ins->opcode == OP_REGOFFSET);
6303 if (arm_is_imm12 (ins->inst_offset)) {
6304 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
6305 } else {
6306 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6307 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
6310 mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
6311 mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
6314 /* load arguments allocated to register from the stack */
6315 cinfo = get_call_info (NULL, sig);
6317 if (cinfo->ret.storage == RegTypeStructByAddr) {
6318 ArgInfo *ainfo = &cinfo->ret;
6319 inst = cfg->vret_addr;
6320 g_assert (arm_is_imm12 (inst->inst_offset));
6321 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6324 if (sig->call_convention == MONO_CALL_VARARG) {
6325 ArgInfo *cookie = &cinfo->sig_cookie;
6327 /* Save the sig cookie address */
6328 g_assert (cookie->storage == RegTypeBase);
6330 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
6331 g_assert (arm_is_imm12 (cfg->sig_cookie));
6332 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
6333 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
6336 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6337 ArgInfo *ainfo = cinfo->args + i;
6338 inst = cfg->args [i];
6340 if (cfg->verbose_level > 2)
6341 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
6343 if (inst->opcode == OP_REGVAR) {
6344 if (ainfo->storage == RegTypeGeneral)
6345 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
6346 else if (ainfo->storage == RegTypeFP) {
6347 g_assert_not_reached ();
6348 } else if (ainfo->storage == RegTypeBase) {
6349 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6350 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6351 } else {
6352 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6353 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
6355 } else
6356 g_assert_not_reached ();
6358 if (i == 0 && sig->hasthis) {
6359 g_assert (ainfo->storage == RegTypeGeneral);
6360 mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
6361 mono_add_var_location (cfg, inst, TRUE, inst->dreg, 0, code - cfg->native_code, 0);
6364 if (cfg->verbose_level > 2)
6365 g_print ("Argument %d assigned to register %s\n", i, mono_arch_regname (inst->dreg));
6366 } else {
6367 switch (ainfo->storage) {
6368 case RegTypeHFA:
6369 for (part = 0; part < ainfo->nregs; part ++) {
6370 if (ainfo->esize == 4)
6371 ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
6372 else
6373 ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
6375 break;
6376 case RegTypeGeneral:
6377 case RegTypeIRegPair:
6378 case RegTypeGSharedVtInReg:
6379 case RegTypeStructByAddr:
6380 switch (ainfo->size) {
6381 case 1:
6382 if (arm_is_imm12 (inst->inst_offset))
6383 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6384 else {
6385 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6386 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6388 break;
6389 case 2:
6390 if (arm_is_imm8 (inst->inst_offset)) {
6391 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6392 } else {
6393 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6394 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6396 break;
6397 case 8:
6398 if (arm_is_imm12 (inst->inst_offset)) {
6399 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6400 } else {
6401 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6402 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6404 if (arm_is_imm12 (inst->inst_offset + 4)) {
6405 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
6406 } else {
6407 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6408 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
6410 break;
6411 default:
6412 if (arm_is_imm12 (inst->inst_offset)) {
6413 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6414 } else {
6415 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6416 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6418 break;
6420 if (i == 0 && sig->hasthis) {
6421 g_assert (ainfo->storage == RegTypeGeneral);
6422 mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
6423 mono_add_var_location (cfg, inst, FALSE, inst->inst_basereg, inst->inst_offset, code - cfg->native_code, 0);
6425 break;
6426 case RegTypeBaseGen:
6427 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6428 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6429 } else {
6430 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6431 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6433 if (arm_is_imm12 (inst->inst_offset + 4)) {
6434 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6435 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6436 } else {
6437 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6438 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6439 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6440 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
6442 break;
6443 case RegTypeBase:
6444 case RegTypeGSharedVtOnStack:
6445 case RegTypeStructByAddrOnStack:
6446 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6447 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6448 } else {
6449 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6450 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6453 switch (ainfo->size) {
6454 case 1:
6455 if (arm_is_imm8 (inst->inst_offset)) {
6456 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6457 } else {
6458 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6459 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6461 break;
6462 case 2:
6463 if (arm_is_imm8 (inst->inst_offset)) {
6464 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6465 } else {
6466 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6467 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6469 break;
6470 case 8:
6471 if (arm_is_imm12 (inst->inst_offset)) {
6472 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6473 } else {
6474 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6475 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6477 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6478 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6479 } else {
6480 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6481 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6483 if (arm_is_imm12 (inst->inst_offset + 4)) {
6484 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6485 } else {
6486 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6487 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6489 break;
6490 default:
6491 if (arm_is_imm12 (inst->inst_offset)) {
6492 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6493 } else {
6494 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6495 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6497 break;
6499 break;
6500 case RegTypeFP: {
6501 int imm8, rot_amount;
6503 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6504 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6505 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6506 } else
6507 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6509 if (ainfo->size == 8)
6510 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6511 else
6512 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6513 break;
6515 case RegTypeStructByVal: {
6516 int doffset = inst->inst_offset;
6517 int soffset = 0;
6518 int cur_reg;
6519 int size = 0;
6520 size = mini_type_stack_size_full (inst->inst_vtype, NULL, sig->pinvoke);
6521 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6522 if (arm_is_imm12 (doffset)) {
6523 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6524 } else {
6525 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6526 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6528 soffset += sizeof (target_mgreg_t);
6529 doffset += sizeof (target_mgreg_t);
6531 if (ainfo->vtsize) {
6532 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6533 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6534 code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6536 break;
6538 default:
6539 g_assert_not_reached ();
6540 break;
6545 if (method->save_lmf)
6546 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6548 if (cfg->arch.seq_point_info_var) {
6549 MonoInst *ins = cfg->arch.seq_point_info_var;
6551 /* Initialize the variable from a GOT slot */
6552 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6553 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6554 ARM_B (code, 0);
6555 *(gpointer*)code = NULL;
6556 code += 4;
6557 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6559 g_assert (ins->opcode == OP_REGOFFSET);
6561 if (arm_is_imm12 (ins->inst_offset)) {
6562 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6563 } else {
6564 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6565 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6569 /* Initialize ss_trigger_page_var */
6570 if (!cfg->soft_breakpoints) {
6571 MonoInst *info_var = cfg->arch.seq_point_info_var;
6572 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6573 int dreg = ARMREG_LR;
6575 if (info_var) {
6576 g_assert (info_var->opcode == OP_REGOFFSET);
6578 code = emit_ldr_imm (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6579 /* Load the trigger page addr */
6580 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6581 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6585 if (cfg->arch.seq_point_ss_method_var) {
6586 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6587 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6589 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6590 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6592 if (cfg->compile_aot) {
6593 MonoInst *info_var = cfg->arch.seq_point_info_var;
6594 int dreg = ARMREG_LR;
6596 g_assert (info_var->opcode == OP_REGOFFSET);
6597 g_assert (arm_is_imm12 (info_var->inst_offset));
6599 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6600 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
6601 ARM_STR_IMM (code, dreg, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6602 } else {
6603 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6604 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6606 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6607 ARM_B (code, 1);
6608 *(gpointer*)code = &single_step_tramp;
6609 code += 4;
6610 *(gpointer*)code = breakpoint_tramp;
6611 code += 4;
6613 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6614 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6615 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6616 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6620 set_code_cursor (cfg, code);
6621 g_free (cinfo);
6623 return code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: load a returned value type into its ABI return
 * register(s) if needed, then tear down the frame and return.  Two paths:
 * with save_lmf the saved register block inside the MonoLMF is popped;
 * otherwise SP is pointed back at the prolog's register save area and the
 * callee-saved registers (plus PC, taking the place of the saved LR) are
 * popped.  Unwind ops are emitted alongside each step and the unwind state
 * is saved/restored around the whole sequence so out-of-line code emitted
 * after the epilog still unwinds with the normal frame description.
 * NOTE(review): lone closing braces / blank lines are elided in this dump;
 * the code text itself is unchanged.
 */
6626 void
6627 mono_arch_emit_epilog (MonoCompile *cfg)
6629 MonoMethod *method = cfg->method;
6630 int pos, i, rot_amount;
6631 int max_epilog_size = 16 + 20*4;
6632 guint8 *code;
6633 CallInfo *cinfo;
6635 if (cfg->method->save_lmf)
6636 max_epilog_size += 128;
6638 code = realloc_code (cfg, max_epilog_size);
6640 /* Save the uwind state which is needed by the out-of-line code */
6641 mono_emit_unwind_op_remember_state (cfg, code);
6643 pos = 0;
6645 /* Load returned vtypes into registers if needed */
6646 cinfo = cfg->arch.cinfo;
6647 switch (cinfo->ret.storage) {
6648 case RegTypeStructByVal: {
6649 MonoInst *ins = cfg->ret;
/* Struct returned in integer registers: load each word from the local
 * holding the return value into r0..r(nregs-1). */
6651 if (cinfo->ret.nregs == 1) {
6652 if (arm_is_imm12 (ins->inst_offset)) {
6653 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6654 } else {
6655 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6656 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6658 } else {
6659 for (i = 0; i < cinfo->ret.nregs; ++i) {
6660 int offset = ins->inst_offset + (i * 4);
6661 if (arm_is_imm12 (offset)) {
/* Register number i doubles as the destination register (r0, r1, ...). */
6662 ARM_LDR_IMM (code, i, ins->inst_basereg, offset);
6663 } else {
6664 code = mono_arm_emit_load_imm (code, ARMREG_LR, offset);
6665 ARM_LDR_REG_REG (code, i, ins->inst_basereg, ARMREG_LR);
6669 break;
6671 case RegTypeHFA: {
6672 MonoInst *ins = cfg->ret;
/* Homogeneous FP aggregate: load into consecutive VFP registers,
 * single (esize == 4) or double precision. */
6674 for (i = 0; i < cinfo->ret.nregs; ++i) {
6675 if (cinfo->ret.esize == 4)
6676 ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
6677 else
6678 ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
6680 break;
6682 default:
6683 break;
6686 if (method->save_lmf) {
6687 int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0;
6688 /* all but r0-r3, sp and pc */
6689 pos += MONO_ABI_SIZEOF (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
6690 lmf_offset = pos;
6692 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6694 /* This points to r4 inside MonoLMF->iregs */
6695 sp_adj = (MONO_ABI_SIZEOF (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
6696 reg = ARMREG_R4;
6697 regmask = 0x9ff0; /* restore lr to pc */
6698 /* Skip caller saved registers not used by the method */
6699 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6700 regmask &= ~(1 << reg);
6701 sp_adj += 4;
6702 reg ++;
6704 if (iphone_abi)
6705 /* Restored later */
6706 regmask &= ~(1 << ARMREG_PC);
6707 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6708 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6709 for (i = 0; i < 16; i++) {
6710 if (regmask & (1 << i))
6711 nused_int_regs ++;
6713 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4);
6714 /* restore iregs */
6715 ARM_POP (code, regmask);
6716 if (iphone_abi) {
6717 for (i = 0; i < 16; i++) {
6718 if (regmask & (1 << i))
6719 mono_emit_unwind_op_same_value (cfg, code, i);
6721 /* Restore saved r7, restore LR to PC */
6722 /* Skip lr from the lmf */
6723 mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4);
6724 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (target_mgreg_t), 0);
6725 mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
6726 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6728 } else {
6729 int i, nused_int_regs = 0;
6731 for (i = 0; i < 16; i++) {
6732 if (cfg->used_int_regs & (1 << i))
6733 nused_int_regs ++;
/* Rewind SP to the top of the prolog's register save area; use a
 * rotated immediate when stack_usage encodes as one, else load it
 * through IP. */
6736 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6737 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6738 } else {
6739 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6740 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6743 if (cfg->frame_reg != ARMREG_SP) {
6744 mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP);
6747 if (iphone_abi) {
6748 /* Restore saved gregs */
6749 if (cfg->used_int_regs) {
6750 mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4);
6751 ARM_POP (code, cfg->used_int_regs);
6752 for (i = 0; i < 16; i++) {
6753 if (cfg->used_int_regs & (1 << i))
6754 mono_emit_unwind_op_same_value (cfg, code, i);
6757 mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
6758 /* Restore saved r7, restore LR to PC */
6759 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6760 } else {
/* Non-iphone ABI: pop callee-saved registers and PC (in place of the
 * saved LR) in one instruction, returning to the caller. */
6761 mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4);
6762 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6766 /* Restore the unwind state to be the same as before the epilog */
6767 mono_emit_unwind_op_restore_state (cfg, code);
6769 set_code_cursor (cfg, code);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception throw stubs referenced by MONO_PATCH_INFO_EXC
 * patches.  Identical exception types share one stub: the first occurrence
 * emits the stub (recorded in exc_throw_pos) and later occurrences just patch
 * their branch to it.  Each stub loads the exception type token into r0, the
 * faulting "return address" into r1, and calls mono_arch_throw_corlib_exception.
 * NOTE(review): lone closing braces / blank lines are elided in this dump.
 */
6773 void
6774 mono_arch_emit_exceptions (MonoCompile *cfg)
6776 MonoJumpInfo *patch_info;
6777 int i;
6778 guint8 *code;
6779 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6780 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6781 int max_epilog_size = 50;
6783 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6784 exc_throw_pos [i] = NULL;
6785 exc_throw_found [i] = 0;
6788 /* count the number of exception infos */
6791 * make sure we have enough space for exceptions
6793 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6794 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6795 i = mini_exception_id_by_name ((const char*)patch_info->data.target);
/* Reserve stub space once per distinct exception type. */
6796 if (!exc_throw_found [i]) {
6797 max_epilog_size += 32;
6798 exc_throw_found [i] = TRUE;
6803 code = realloc_code (cfg, max_epilog_size);
6805 /* add code to raise exceptions */
6806 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6807 switch (patch_info->type) {
6808 case MONO_PATCH_INFO_EXC: {
6809 MonoClass *exc_class;
6810 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6812 i = mini_exception_id_by_name ((const char*)patch_info->data.target);
6813 if (exc_throw_pos [i]) {
/* Stub already emitted: redirect this site to it and neutralize the patch. */
6814 arm_patch (ip, exc_throw_pos [i]);
6815 patch_info->type = MONO_PATCH_INFO_NONE;
6816 break;
6817 } else {
6818 exc_throw_pos [i] = code;
6820 arm_patch (ip, code);
6822 exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
/* r1 = throw-site address (from LR); r0 = type token, loaded PC-relative
 * from the literal emitted right after the BL. */
6824 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6825 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6826 patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
6827 patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
6828 patch_info->ip.i = code - cfg->native_code;
6829 ARM_BL (code, 0);
6830 cfg->thunk_area += THUNK_SIZE;
6831 *(guint32*)(gpointer)code = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF;
6832 code += 4;
6833 break;
6835 default:
6836 /* do nothing */
6837 break;
6841 set_code_cursor (cfg, code);
6844 #endif /* #ifndef DISABLE_JIT */
/* Arch-specific late initialization hook.
 * NOTE(review): the body lines are elided in this dump; it appears to be a
 * no-op on ARM — confirm against upstream mini-arm.c. */
6846 void
6847 mono_arch_finish_init (void)
/* Hook for emitting arch-specific intrinsic IR for well-known methods.
 * Returns NULL, meaning no ARM-specific intrinsics are provided here and the
 * generic path is used (the FIXME marks this as unimplemented). */
6851 MonoInst*
6852 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6854 /* FIXME: */
6855 return NULL;
6858 #ifndef DISABLE_JIT
6860 #endif
/* Return the byte offset, inside the code sequence emitted for OP_AOTCONST,
 * at which the patchable constant lives.  Fixed at 8 on this backend. */
6862 guint32
6863 mono_arch_get_patch_offset (guint8 *code)
6865 /* OP_AOTCONST */
6866 return 8;
/* Register-window flush hook (relevant on SPARC-style architectures).
 * NOTE(review): body lines are elided in this dump; ARM has no register
 * windows, so this should be a no-op — confirm against upstream. */
6869 void
6870 mono_arch_flush_register_windows (void)
/* Recover the interface method being invoked from the saved register state:
 * it is passed in the dedicated IMT register (MONO_ARCH_IMT_REG). */
6874 MonoMethod*
6875 mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
6877 return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
/* Recover the vtable argument of a static rgctx call from the saved register
 * state: it is passed in the dedicated RGCTX register (MONO_ARCH_RGCTX_REG). */
6880 MonoVTable*
6881 mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
6883 return (MonoVTable*)(gsize)regs [MONO_ARCH_RGCTX_REG];
/* Build the initial DWARF CIE unwind program for this architecture:
 * at function entry the CFA is SP with offset 0. */
6886 GSList*
6887 mono_arch_get_cie_program (void)
6889 GSList *l = NULL;
6891 mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);
6893 return l;
/* Per-chunk code-size estimates (bytes; N instructions * 4) used by
 * mono_arch_build_imt_trampoline () to size the trampoline allocation.
 * DISTANCE gives a signed 32-bit byte distance between two pointers. */
6896 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6897 #define BASE_SIZE (6 * 4)
6898 #define BSEARCH_ENTRY_SIZE (4 * 4)
6899 #define CMP_SIZE (3 * 4)
6900 #define BRANCH_SIZE (1 * 4)
6901 #define CALL_SIZE (2 * 4)
6902 #define WMC_SIZE (8 * 4)
6903 #define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
6905 static arminstr_t *
6906 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6908 guint32 delta = DISTANCE (target, code);
6909 delta -= 8;
6910 g_assert (delta >= 0 && delta <= 0xFFF);
6911 *target = *target | delta;
6912 *code = value;
6913 return code + 1;
/* Debug-only helper (compiled under ENABLE_WRONG_METHOD_CHECK): print the
 * mismatched IMT key seen at runtime versus the expected one, then abort. */
6916 #ifdef ENABLE_WRONG_METHOD_CHECK
6917 static void
6918 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6920 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
6921 g_assert (0);
6923 #endif
/*
 * mono_arch_build_imt_trampoline:
 *
 *   Build the native IMT dispatch trampoline for VTABLE: a sequence of
 * compare-and-branch chunks (with binary-search branches between them) that
 * match the incoming IMT key in MONO_ARCH_IMT_REG against each entry's key
 * and jump to the matching vtable slot, target code, or FAIL_TRAMP.
 * A first sizing pass accumulates per-entry chunk sizes; literal values
 * (keys, vtable address, targets) are emitted into per-chunk constant pools
 * and wired up with arm_emit_value_and_patch_ldr ().  When any vtable slot
 * is out of LDR's imm12 range ("large_offsets"), r0/r1/ip/pc are spilled so
 * the computed target can be loaded and branched to via the stack.
 * NOTE(review): lone closing braces / blank lines are elided in this dump;
 * the code text itself is unchanged.
 */
6925 gpointer
6926 mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6927 gpointer fail_tramp)
6929 int size, i;
6930 arminstr_t *code, *start;
6931 gboolean large_offsets = FALSE;
6932 guint32 **constant_pool_starts;
6933 arminstr_t *vtable_target = NULL;
6934 int extra_space = 0;
6935 #ifdef ENABLE_WRONG_METHOD_CHECK
6936 char * cond;
6937 #endif
6938 GSList *unwind_ops;
6940 size = BASE_SIZE;
6941 constant_pool_starts = g_new0 (guint32*, count);
/* First pass: compute each entry's chunk size and the total allocation. */
6943 for (i = 0; i < count; ++i) {
6944 MonoIMTCheckItem *item = imt_entries [i];
6945 if (item->is_equals) {
6946 gboolean fail_case = !item->check_target_idx && fail_tramp;
6948 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6949 item->chunk_size += 32;
6950 large_offsets = TRUE;
6953 if (item->check_target_idx || fail_case) {
6954 if (!item->compare_done || fail_case)
6955 item->chunk_size += CMP_SIZE;
6956 item->chunk_size += BRANCH_SIZE;
6957 } else {
6958 #ifdef ENABLE_WRONG_METHOD_CHECK
6959 item->chunk_size += WMC_SIZE;
6960 #endif
6962 if (fail_case) {
6963 item->chunk_size += 16;
6964 large_offsets = TRUE;
6966 item->chunk_size += CALL_SIZE;
6967 } else {
6968 item->chunk_size += BSEARCH_ENTRY_SIZE;
6969 imt_entries [item->check_target_idx]->compare_done = TRUE;
6971 size += item->chunk_size;
6974 if (large_offsets)
6975 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
6977 if (fail_tramp) {
6978 code = mono_method_alloc_generic_virtual_trampoline (mono_domain_ambient_memory_manager (domain), size);
6979 } else {
6980 MonoMemoryManager *mem_manager = m_class_get_mem_manager (domain, vtable->klass);
6981 code = mono_mem_manager_code_reserve (mem_manager, size);
6983 start = code;
6985 unwind_ops = mono_arch_get_cie_program ();
6987 #ifdef DEBUG_IMT
6988 g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6989 for (i = 0; i < count; ++i) {
6990 MonoIMTCheckItem *item = imt_entries [i];
6991 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
6993 #endif
/* Prologue: spill scratch registers (plus ip/pc when large offsets need a
 * stack slot for the branch target), load the IMT key and vtable address. */
6995 if (large_offsets) {
6996 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6997 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (target_mgreg_t));
6998 } else {
6999 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
7000 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
7002 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
7003 vtable_target = code;
7004 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
7005 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Second pass: emit one chunk per entry. */
7007 for (i = 0; i < count; ++i) {
7008 MonoIMTCheckItem *item = imt_entries [i];
7009 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
7010 gint32 vtable_offset;
7012 item->code_target = (guint8*)code;
7014 if (item->is_equals) {
7015 gboolean fail_case = !item->check_target_idx && fail_tramp;
7017 if (item->check_target_idx || fail_case) {
7018 if (!item->compare_done || fail_case) {
7019 imt_method = code;
7020 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7021 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7023 item->jmp_code = (guint8*)code;
7024 ARM_B_COND (code, ARMCOND_NE, 0);
7025 } else {
7026 /*Enable the commented code to assert on wrong method*/
7027 #ifdef ENABLE_WRONG_METHOD_CHECK
7028 imt_method = code;
7029 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7030 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7031 cond = code;
7032 ARM_B_COND (code, ARMCOND_EQ, 0);
7034 /* Define this if your system is so bad that gdb is failing. */
7035 #ifdef BROKEN_DEV_ENV
7036 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
7037 ARM_BL (code, 0);
7038 arm_patch (code - 1, mini_dump_bad_imt);
7039 #else
7040 ARM_DBRK (code);
7041 #endif
7042 arm_patch (cond, code);
7043 #endif
7046 if (item->has_target_code) {
7047 /* Load target address */
7048 target_code_ins = code;
7049 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7050 /* Save it to the fourth slot */
7051 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
7052 /* Restore registers and branch */
7053 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7055 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
7056 } else {
7057 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
7058 if (!arm_is_imm12 (vtable_offset)) {
7060 * We need to branch to a computed address but we don't have
7061 * a free register to store it, since IP must contain the
7062 * vtable address. So we push the two values to the stack, and
7063 * load them both using LDM.
7065 /* Compute target address */
7066 vtable_offset_ins = code;
7067 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7068 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
7069 /* Save it to the fourth slot */
7070 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
7071 /* Restore registers and branch */
7072 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7074 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
7075 } else {
7076 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
7077 if (large_offsets) {
7078 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
7079 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (target_mgreg_t));
7081 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
7082 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
7086 if (fail_case) {
7087 arm_patch (item->jmp_code, (guchar*)code);
7089 target_code_ins = code;
7090 /* Load target address */
7091 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7092 /* Save it to the fourth slot */
7093 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
7094 /* Restore registers and branch */
7095 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7097 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
7098 item->jmp_code = NULL;
7101 if (imt_method)
7102 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)(gsize)item->key);
7104 /*must emit after unconditional branch*/
7105 if (vtable_target) {
7106 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)(gsize)vtable);
7107 item->chunk_size += 4;
7108 vtable_target = NULL;
7111 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
7112 constant_pool_starts [i] = code;
7113 if (extra_space) {
7114 code += extra_space;
7115 extra_space = 0;
7117 } else {
/* Binary-search node: branch onward when the key is >= this pivot. */
7118 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7119 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7121 item->jmp_code = (guint8*)code;
7122 ARM_B_COND (code, ARMCOND_HS, 0);
7123 ++extra_space;
/* Third pass: patch inter-chunk branches and fill the reserved constant
 * pools with the bsearch pivot keys. */
7127 for (i = 0; i < count; ++i) {
7128 MonoIMTCheckItem *item = imt_entries [i];
7129 if (item->jmp_code) {
7130 if (item->check_target_idx)
7131 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
7133 if (i > 0 && item->is_equals) {
7134 int j;
7135 arminstr_t *space_start = constant_pool_starts [i];
7136 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
7137 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)(gsize)imt_entries [j]->key);
7142 #ifdef DEBUG_IMT
7144 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count);
7145 mono_disassemble_code (NULL, (guint8*)start, size, buff);
7146 g_free (buff);
7148 #endif
7150 g_free (constant_pool_starts);
7152 mono_arch_flush_icache ((guint8*)start, size);
7153 MONO_PROFILER_RAISE (jit_code_buffer, ((guint8*)start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
7154 UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
7156 g_assert (DISTANCE (start, code) <= size);
7158 mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), domain);
7160 return start;
/* Read integer register REG out of the saved machine context CTX. */
7163 host_mgreg_t
7164 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
7166 return ctx->regs [reg];
/* Store VAL into integer register REG of the saved machine context CTX. */
7169 void
7170 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
7172 ctx->regs [reg] = val;
7176 * mono_arch_get_trampolines:
7178 * Return a list of MonoTrampInfo structures describing arch specific trampolines
7179 * for AOT.
7181 GSList *
7182 mono_arch_get_trampolines (gboolean aot)
/* On ARM this delegates entirely to the exception-trampoline list. */
7184 return mono_arm_get_exception_trampolines (aot);
7187 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
7189 * mono_arch_set_breakpoint:
7191 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
7192 * The location should contain code emitted by OP_SEQ_POINT.
/* Three mechanisms, chosen by how the method was compiled:
 *  - AOT: record the breakpoint/trigger address in the method's
 *    SeqPointInfo bp_addrs table (slot indexed by native_offset / 4);
 *  - soft breakpoints: overwrite the seq-point NOP with a BLX LR;
 *  - otherwise: patch in a 4-instruction load from bp_trigger_page, which
 *    faults when the page is protected.
 */
7194 void
7195 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
7197 guint8 *code = ip;
7198 guint32 native_offset = ip - (guint8*)ji->code_start;
7200 if (ji->from_aot) {
7201 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);
7203 if (!breakpoint_tramp)
7204 breakpoint_tramp = mini_get_breakpoint_trampoline ();
7206 g_assert (native_offset % 4 == 0);
7207 g_assert (info->bp_addrs [native_offset / 4] == 0);
7208 info->bp_addrs [native_offset / 4] = (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
7209 } else if (mini_debug_options.soft_breakpoints) {
/* Skip the first seq-point instruction, then replace the NOP slot
 * with an indirect call through LR. */
7210 code += 4;
7211 ARM_BLX_REG (code, ARMREG_LR);
7212 mono_arch_flush_icache (code - 4, 4);
7213 } else {
7214 int dreg = ARMREG_LR;
7216 /* Read from another trigger page */
7217 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7218 ARM_B (code, 0);
7219 *(int*)code = (int)(gssize)bp_trigger_page;
7220 code += 4;
7221 ARM_LDR_IMM (code, dreg, dreg, 0);
7223 mono_arch_flush_icache (code - 16, 16);
7225 #if 0
7226 /* This is currently implemented by emitting an SWI instruction, which
7227 * qemu/linux seems to convert to a SIGILL.
7229 *(int*)code = (0xef << 24) | 8;
7230 code += 4;
7231 mono_arch_flush_icache (code - 4, 4);
7232 #endif
7237 * mono_arch_clear_breakpoint:
7239 * Clear the breakpoint at IP.
/* Exact inverse of mono_arch_set_breakpoint: clear the SeqPointInfo slot
 * (AOT), restore the single NOP (soft breakpoints), or NOP out the
 * 4-instruction trigger-page load sequence. */
7241 void
7242 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
7244 guint8 *code = ip;
7245 int i;
7247 if (ji->from_aot) {
7248 guint32 native_offset = ip - (guint8*)ji->code_start;
7249 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);
7251 if (!breakpoint_tramp)
7252 breakpoint_tramp = mini_get_breakpoint_trampoline ();
7254 g_assert (native_offset % 4 == 0);
7255 g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
7256 info->bp_addrs [native_offset / 4] = 0;
7257 } else if (mini_debug_options.soft_breakpoints) {
7258 code += 4;
7259 ARM_NOP (code);
7260 mono_arch_flush_icache (code - 4, 4);
7261 } else {
7262 for (i = 0; i < 4; ++i)
7263 ARM_NOP (code);
7265 mono_arch_flush_icache (ip, code - ip);
7270 * mono_arch_start_single_stepping:
7272 * Start single stepping.
7274 void
7275 mono_arch_start_single_stepping (void)
7277 if (ss_trigger_page)
7278 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7279 else
7280 single_step_tramp = mini_get_single_step_trampoline ();
7284 * mono_arch_stop_single_stepping:
7286 * Stop single stepping.
7288 void
7289 mono_arch_stop_single_stepping (void)
7291 if (ss_trigger_page)
7292 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7293 else
7294 single_step_tramp = NULL;
7297 #if __APPLE__
7298 #define DBG_SIGNAL SIGBUS
7299 #else
7300 #define DBG_SIGNAL SIGSEGV
7301 #endif
7304 * mono_arch_is_single_step_event:
7306 * Return whenever the machine state in SIGCTX corresponds to a single
7307 * step event.
7309 gboolean
7310 mono_arch_is_single_step_event (void *info, void *sigctx)
7312 siginfo_t *sinfo = (siginfo_t*)info;
7314 if (!ss_trigger_page)
7315 return FALSE;
7317 /* Sometimes the address is off by 4 */
7318 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7319 return TRUE;
7320 else
7321 return FALSE;
7325 * mono_arch_is_breakpoint_event:
7327 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7329 gboolean
7330 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7332 siginfo_t *sinfo = (siginfo_t*)info;
7334 if (!ss_trigger_page)
7335 return FALSE;
7337 if (sinfo->si_signo == DBG_SIGNAL) {
7338 /* Sometimes the address is off by 4 */
7339 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7340 return TRUE;
7341 else
7342 return FALSE;
7343 } else {
7344 return FALSE;
7349 * mono_arch_skip_breakpoint:
7351 * See mini-amd64.c for docs.
7353 void
7354 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7356 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7360 * mono_arch_skip_single_step:
7362 * See mini-amd64.c for docs.
7364 void
7365 mono_arch_skip_single_step (MonoContext *ctx)
7367 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;

	// FIXME: Add a free function

	/* Look up a cached entry keyed by the method's code address. */
	mono_domain_lock (domain);
	info = (SeqPointInfo*)g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
				code);
	mono_domain_unlock (domain);

	if (!info) {
		ji = mono_jit_info_table_find (domain, code);
		g_assert (ji);

		/* Trailing storage for bp_addrs: one slot per 4-byte instruction,
		 * indexed by native_offset / 4 (see mono_arch_set_breakpoint ());
		 * assumes 4-byte pointers (32-bit ARM). */
		info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);

		info->ss_trigger_page = ss_trigger_page;
		info->bp_trigger_page = bp_trigger_page;
		info->ss_tramp_addr = &single_step_tramp;

		/* NOTE(review): the domain lock is dropped between the lookup and the
		 * insert, so two threads could both allocate and the loser's info
		 * would leak / be replaced — confirm callers serialize this path. */
		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}
7407 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7410 * mono_arch_set_target:
7412 * Set the target architecture the JIT backend should generate code for, in the form
7413 * of a GNU target triplet. Only used in AOT mode.
7415 void
7416 mono_arch_set_target (char *mtriple)
7418 /* The GNU target triple format is not very well documented */
7419 if (strstr (mtriple, "armv7")) {
7420 v5_supported = TRUE;
7421 v6_supported = TRUE;
7422 v7_supported = TRUE;
7424 if (strstr (mtriple, "armv6")) {
7425 v5_supported = TRUE;
7426 v6_supported = TRUE;
7428 if (strstr (mtriple, "armv7s")) {
7429 v7s_supported = TRUE;
7431 if (strstr (mtriple, "armv7k")) {
7432 v7k_supported = TRUE;
7434 if (strstr (mtriple, "thumbv7s")) {
7435 v5_supported = TRUE;
7436 v6_supported = TRUE;
7437 v7_supported = TRUE;
7438 v7s_supported = TRUE;
7439 thumb_supported = TRUE;
7440 thumb2_supported = TRUE;
7442 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7443 v5_supported = TRUE;
7444 v6_supported = TRUE;
7445 thumb_supported = TRUE;
7446 iphone_abi = TRUE;
7448 if (strstr (mtriple, "gnueabi"))
7449 eabi_supported = TRUE;
7452 gboolean
7453 mono_arch_opcode_supported (int opcode)
7455 switch (opcode) {
7456 case OP_ATOMIC_ADD_I4:
7457 case OP_ATOMIC_EXCHANGE_I4:
7458 case OP_ATOMIC_CAS_I4:
7459 case OP_ATOMIC_LOAD_I1:
7460 case OP_ATOMIC_LOAD_I2:
7461 case OP_ATOMIC_LOAD_I4:
7462 case OP_ATOMIC_LOAD_U1:
7463 case OP_ATOMIC_LOAD_U2:
7464 case OP_ATOMIC_LOAD_U4:
7465 case OP_ATOMIC_STORE_I1:
7466 case OP_ATOMIC_STORE_I2:
7467 case OP_ATOMIC_STORE_I4:
7468 case OP_ATOMIC_STORE_U1:
7469 case OP_ATOMIC_STORE_U2:
7470 case OP_ATOMIC_STORE_U4:
7471 return v7_supported;
7472 case OP_ATOMIC_LOAD_R4:
7473 case OP_ATOMIC_LOAD_R8:
7474 case OP_ATOMIC_STORE_R4:
7475 case OP_ATOMIC_STORE_R8:
7476 return v7_supported && IS_VFP;
7477 default:
7478 return FALSE;
7482 CallInfo*
7483 mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
7485 return get_call_info (mp, sig);
/*
 * mono_arch_get_get_tls_tramp:
 *
 *   No fast TLS-getter trampoline is provided on this backend.
 */
gpointer
mono_arch_get_get_tls_tramp (void)
{
	return NULL;
}
/*
 * emit_aotconst:
 *
 *   Emit code to load an AOT constant into DREG, recording a patch of
 * PATCH_TYPE/DATA at the current offset. The sequence is:
 * ldr dreg, [pc] ; b <skip literal> ; .word 0 ; ldr dreg, [pc, dreg]
 * The literal word presumably gets patched with a GOT offset later —
 * confirm against the OP_AOTCONST patch handling.
 */
static G_GNUC_UNUSED guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
{
	/* OP_AOTCONST */
	mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
/*
 * mono_arm_emit_aotconst:
 *
 *   Variant of emit_aotconst () that records the patch into the caller
 * provided JI_LIST (offsets are relative to BUF) instead of a MonoCompile.
 * Emits the same pc-relative literal + GOT load sequence.
 */
guint8*
mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
{
	MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;

	*ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
7522 gpointer
7523 mono_arch_load_function (MonoJitICallId jit_icall_id)
7525 gpointer target = NULL;
7526 switch (jit_icall_id) {
7527 #undef MONO_AOT_ICALL
7528 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
7529 MONO_AOT_ICALL (mono_arm_resume_unwind)
7530 MONO_AOT_ICALL (mono_arm_start_gsharedvt_call)
7531 MONO_AOT_ICALL (mono_arm_throw_exception)
7532 MONO_AOT_ICALL (mono_arm_throw_exception_by_token)
7533 MONO_AOT_ICALL (mono_arm_unaligned_stack)
7535 return target;