[arm] fix some more host/target pointer size issues
[mono-project.git] / mono / mini / mini-arm.c
blobe35c2e794690e16beaf142a949255674d7bc0d78
1 /**
2 * \file
3 * ARM backend for the Mono code generator
5 * Authors:
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
10 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include "mini.h"
15 #include <string.h>
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/utils/mono-mmap.h>
22 #include <mono/utils/mono-hwcap.h>
23 #include <mono/utils/mono-memory-model.h>
24 #include <mono/utils/mono-threads-coop.h>
25 #include <mono/utils/unlocked.h>
27 #include "interp/interp.h"
29 #include "mini-arm.h"
30 #include "cpu-arm.h"
31 #include "ir-emit.h"
32 #include "debugger-agent.h"
33 #include "mini-gc.h"
34 #include "mini-runtime.h"
35 #include "aot-runtime.h"
36 #include "mono/arch/arm/arm-vfp-codegen.h"
38 /* Sanity check: This makes no sense */
39 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
40 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
41 #endif
44 * IS_SOFT_FLOAT: Is full software floating point used?
45 * IS_HARD_FLOAT: Is full hardware floating point used?
46 * IS_VFP: Is hardware floating point with software ABI used?
48 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
49 * IS_VFP may delegate to mono_arch_is_soft_float ().
52 #if defined(ARM_FPU_VFP_HARD)
53 #define IS_SOFT_FLOAT (FALSE)
54 #define IS_HARD_FLOAT (TRUE)
55 #define IS_VFP (TRUE)
56 #elif defined(ARM_FPU_NONE)
57 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
58 #define IS_HARD_FLOAT (FALSE)
59 #define IS_VFP (!mono_arch_is_soft_float ())
60 #else
61 #define IS_SOFT_FLOAT (FALSE)
62 #define IS_HARD_FLOAT (FALSE)
63 #define IS_VFP (TRUE)
64 #endif
66 #define THUNK_SIZE (3 * 4)
68 #if __APPLE__
69 G_BEGIN_DECLS
70 void sys_icache_invalidate (void *start, size_t len);
71 G_END_DECLS
72 #endif
74 /* This mutex protects architecture specific caches */
75 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
76 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
77 static mono_mutex_t mini_arch_mutex;
79 static gboolean v5_supported = FALSE;
80 static gboolean v6_supported = FALSE;
81 static gboolean v7_supported = FALSE;
82 static gboolean v7s_supported = FALSE;
83 static gboolean v7k_supported = FALSE;
84 static gboolean thumb_supported = FALSE;
85 static gboolean thumb2_supported = FALSE;
87 * Whenever to use the ARM EABI
89 static gboolean eabi_supported = FALSE;
91 /*
92 * Whenever to use the iphone ABI extensions:
93 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
94 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
95 * This is required for debugging/profiling tools to work, but it has some overhead so it should
96 * only be turned on in debug builds.
98 static gboolean iphone_abi = FALSE;
101 * The FPU we are generating code for. This is NOT runtime configurable right now,
102 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
104 static MonoArmFPU arm_fpu;
106 #if defined(ARM_FPU_VFP_HARD)
108 * On armhf, d0-d7 are used for argument passing and d8-d15
109 * must be preserved across calls, which leaves us no room
110 * for scratch registers. So we use d14-d15 but back up their
111 * previous contents to a stack slot before using them - see
112 * mono_arm_emit_vfp_scratch_save/_restore ().
114 static int vfp_scratch1 = ARM_VFP_D14;
115 static int vfp_scratch2 = ARM_VFP_D15;
116 #else
118 * On armel, d0-d7 do not need to be preserved, so we can
119 * freely make use of them as scratch registers.
121 static int vfp_scratch1 = ARM_VFP_D0;
122 static int vfp_scratch2 = ARM_VFP_D1;
123 #endif
125 static int i8_align;
127 static gpointer single_step_tramp, breakpoint_tramp;
130 * The code generated for sequence points reads from this location, which is
131 * made read-only when single stepping is enabled.
133 static gpointer ss_trigger_page;
135 /* Enabled breakpoints read from this trigger page */
136 static gpointer bp_trigger_page;
139 * TODO:
140 * floating point support: on ARM it is a mess, there are at least 3
141 * different setups, each of which binary incompat with the other.
142 * 1) FPA: old and ugly, but unfortunately what current distros use
143 * the double binary format has the two words swapped. 8 double registers.
144 * Implemented usually by kernel emulation.
145 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
146 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
147 * 3) VFP: the new and actually sensible and useful FP support. Implemented
148 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
150 * We do not care about FPA. We will support soft float and VFP.
152 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
153 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
154 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
156 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
157 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
158 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
160 //#define DEBUG_IMT 0
162 #ifndef DISABLE_JIT
163 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
164 #endif
166 static guint8*
167 emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data);
/*
 * mono_arch_regname:
 *   Map a core register number (0-15) to its printable name.
 *   Returns "unknown" for any out-of-range value.
 */
const char*
mono_arch_regname (int reg)
{
	static const char *names [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};

	if (reg < 0 || reg >= (int)(sizeof (names) / sizeof (names [0])))
		return "unknown";
	return names [reg];
}
/*
 * mono_arch_fregname:
 *   Map a VFP register number (0-31) to its printable name.
 *   Returns "unknown" for any out-of-range value.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *names [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};

	if (reg < 0 || reg >= (int)(sizeof (names) / sizeof (names [0])))
		return "unknown";
	return names [reg];
}
201 #ifndef DISABLE_JIT
202 static guint8*
203 emit_big_add_temp (guint8 *code, int dreg, int sreg, int imm, int temp)
205 int imm8, rot_amount;
207 g_assert (temp == ARMREG_IP || temp == ARMREG_LR);
209 if (imm == 0) {
210 if (sreg != dreg)
211 ARM_MOV_REG_REG (code, dreg, sreg);
212 } else if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
213 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
214 return code;
216 if (dreg == sreg) {
217 code = mono_arm_emit_load_imm (code, temp, imm);
218 ARM_ADD_REG_REG (code, dreg, sreg, temp);
219 } else {
220 code = mono_arm_emit_load_imm (code, dreg, imm);
221 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
223 return code;
226 static guint8*
227 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
229 return emit_big_add_temp (code, dreg, sreg, imm, ARMREG_IP);
232 static guint8*
233 emit_ldr_imm (guint8 *code, int dreg, int sreg, int imm)
235 if (!arm_is_imm12 (imm)) {
236 g_assert (dreg != sreg);
237 code = emit_big_add (code, dreg, sreg, imm);
238 ARM_LDR_IMM (code, dreg, dreg, 0);
239 } else {
240 ARM_LDR_IMM (code, dreg, sreg, imm);
242 return code;
245 /* If dreg == sreg, this clobbers IP */
246 static guint8*
247 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
249 int imm8, rot_amount;
250 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
251 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
252 return code;
254 if (dreg == sreg) {
255 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
256 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
257 } else {
258 code = mono_arm_emit_load_imm (code, dreg, imm);
259 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
261 return code;
/*
 * emit_memcpy:
 *   Emit code copying SIZE bytes from sreg+soffset to dreg+doffset.
 *   Large copies use a runtime loop; small ones are fully unrolled.
 *   NOTE(review): SIZE is expected to be a multiple of 4 (see the final
 *   assert) — the loops only move whole words.
 */
static guint8*
emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
{
	/* we can use r0-r3, since this is called only for incoming args on the stack */
	if (size > sizeof (target_mgreg_t) * 4) {
		guint8 *start_loop;
		/* r0 = src, r1 = dst, r2 = remaining byte count */
		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
		start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
		ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
		ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
		ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
		ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
		ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
		/* Loop back while the counter is non-zero; the branch target is
		 * patched to start_loop right below. */
		ARM_B_COND (code, ARMCOND_NE, 0);
		arm_patch (code - 4, start_loop);
		return code;
	}
	if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
			arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
		/* All offsets fit the 12-bit displacement: unrolled word copy in place. */
		while (size >= 4) {
			ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
			ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
			doffset += 4;
			soffset += 4;
			size -= 4;
		}
	} else if (size) {
		/* Offsets too large: fold them into r0/r1 first, then copy from offset 0. */
		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
		doffset = soffset = 0;
		while (size >= 4) {
			ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
			ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
			doffset += 4;
			soffset += 4;
			size -= 4;
		}
	}
	g_assert (size == 0);
	return code;
}
307 static guint8*
308 emit_jmp_reg (guint8 *code, int reg)
310 if (thumb_supported)
311 ARM_BX (code, reg);
312 else
313 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
314 return code;
317 static guint8*
318 emit_call_reg (guint8 *code, int reg)
320 if (v5_supported) {
321 ARM_BLX_REG (code, reg);
322 } else {
323 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
324 return emit_jmp_reg (code, reg);
326 return code;
/*
 * emit_call_seq:
 *   Emit a patchable call sequence and account for its thunk space.
 *   For dynamic methods the target is kept in an inline literal slot
 *   (loaded pc-relative into ip, skipped over by the branch) — presumably
 *   because dynamic-method code cannot use the usual patching scheme;
 *   confirm against arm_patch ().
 */
static guint8*
emit_call_seq (MonoCompile *cfg, guint8 *code)
{
	if (cfg->method->dynamic) {
		/* ip = inline slot below; the unconditional branch jumps over it. */
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		/* Placeholder for the call target, filled in when patched. */
		*(gpointer*)code = NULL;
		code += 4;
		code = emit_call_reg (code, ARMREG_IP);
	} else {
		/* Direct bl, displacement patched later. */
		ARM_BL (code, 0);
	}
	/* Reserve room for a branch thunk in case the target is out of range. */
	cfg->thunk_area += THUNK_SIZE;
	return code;
}
/* Emit a conditional branch with a zero displacement; arm_patch () fills in the target later. */
guint8*
mono_arm_patchable_b (guint8 *code, int cond)
{
	ARM_B_COND (code, cond, 0);
	return code;
}
/* Emit a conditional call with a zero displacement; arm_patch () fills in the target later. */
guint8*
mono_arm_patchable_bl (guint8 *code, int cond)
{
	ARM_BL_COND (code, cond, 0);
	return code;
}
359 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
360 #define HAVE_AEABI_READ_TP 1
361 #endif
363 #ifdef HAVE_AEABI_READ_TP
364 G_BEGIN_DECLS
365 gpointer __aeabi_read_tp (void);
366 G_END_DECLS
367 #endif
/*
 * mono_arch_have_fast_tls:
 *   Whether TLS values can be read directly via the cp15 thread register
 *   (TPIDRURO) instead of going through a function call.  Verified once by
 *   comparing the coprocessor read against __aeabi_read_tp (); the result
 *   is cached in file-scope statics (no locking — assumed to be first
 *   called before any concurrent use; TODO confirm).
 */
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef HAVE_AEABI_READ_TP
	static gboolean have_fast_tls = FALSE;
	static gboolean inited = FALSE;

	if (mini_get_debug_options ()->use_fallback_tls)
		return FALSE;

	if (inited)
		return have_fast_tls;

	if (v7_supported) {
		gpointer tp1, tp2;

		tp1 = __aeabi_read_tp ();
		/* Read the user read-only thread ID register directly. */
		asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2));

		/* Fast TLS only works if the direct read agrees with the ABI helper. */
		have_fast_tls = tp1 && tp1 == tp2;
	}
	inited = TRUE;
	return have_fast_tls;
#else
	return FALSE;
#endif
}
/* Emit a load of the TLS slot at TLS_OFFSET into DREG via the cp15 thread register. */
static guint8*
emit_tls_get (guint8 *code, int dreg, int tls_offset)
{
	g_assert (v7_supported);
	/* dreg = thread pointer (cp15 c13 c0 3), then load the slot. */
	ARM_MRC (code, 15, 0, dreg, 13, 0, 3);
	ARM_LDR_IMM (code, dreg, dreg, tls_offset);
	return code;
}
/* Emit a store of SREG into the TLS slot at TLS_OFFSET via the cp15 thread register. */
static guint8*
emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
	/* Pick a scratch for the thread pointer that does not alias sreg. */
	int tp_reg = (sreg != ARMREG_R0) ? ARMREG_R0 : ARMREG_R1;
	g_assert (v7_supported);
	ARM_MRC (code, 15, 0, tp_reg, 13, 0, 3);
	ARM_STR_IMM (code, sreg, tp_reg, tls_offset);
	return code;
}
/*
 * emit_save_lmf:
 *
 * Emit code to push an LMF structure on the LMF stack.
 * On arm, this is intermixed with the initialization of other fields of the structure.
 */
static guint8*
emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
	int i;

	/* Obtain the per-thread lmf_addr in r0: either inline via fast TLS
	 * or through the mono_tls_get_lmf_addr icall. */
	if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR) != -1) {
		code = emit_tls_get (code, ARMREG_R0, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR));
	} else {
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
							 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr));
		code = emit_call_seq (cfg, code);
	}
	/* we build the MonoLMF structure on the stack - see mini-arm.h */
	/* lmf_offset is the offset from the previous stack pointer,
	 * alloc_size is the total stack space allocated, so the offset
	 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
	 * The pointer to the struct is put in r1 (new_lmf).
	 * ip is used as scratch
	 * The callee-saved registers are already in the MonoLMF structure
	 */
	code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
	/* r0 is the result from mono_get_lmf_addr () */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* new_lmf->previous_lmf = *lmf_addr */
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* *(lmf_addr) = r1 */
	ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* Skip method (only needed for trampoline LMF frames) */
	ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
	ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
	/* save the current IP */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));

	/* Tell the GC the whole LMF area holds no managed references. */
	for (i = 0; i < MONO_ABI_SIZEOF (MonoLMF); i += sizeof (target_mgreg_t))
		mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);

	return code;
}
/* Pairs a float argument's virtual register with the hardware FP register
 * it must be (re)loaded into right before the call (see emit_float_args). */
typedef struct {
	gint32 vreg; /* JIT virtual register holding the value */
	gint32 hreg; /* destination VFP register */
} FloatArgData;
/*
 * emit_float_args:
 *   For each float argument recorded on INST, emit a single-precision load
 *   from the argument's stack slot into its assigned hardware register.
 *   Grows the code buffer as needed, keeping *max_len and *offset (native
 *   code offset) in sync with what was emitted.
 */
static guint8 *
emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
{
	GSList *list;

	set_code_cursor (cfg, code);

	for (list = inst->float_args; list; list = list->next) {
		FloatArgData *fad = (FloatArgData*)list->data;
		MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
		/* Can the slot offset be encoded directly in the FLDS instruction? */
		gboolean imm = arm_is_fpimm8 (var->inst_offset);

		/* 4+1 insns for emit_big_add () and 1 for FLDS. */
		if (!imm)
			*max_len += 20 + 4;

		*max_len += 4;

		code = realloc_code (cfg, *max_len);

		if (!imm) {
			/* Offset out of range: compute the address in lr first. */
			code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
			ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
		} else
			ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);

		set_code_cursor (cfg, code);
		*offset = code - cfg->native_code;
	}

	return code;
}
501 static guint8 *
502 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
504 MonoInst *inst;
506 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
508 inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
510 if (IS_HARD_FLOAT) {
511 if (!arm_is_fpimm8 (inst->inst_offset)) {
512 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
513 ARM_FSTD (code, reg, ARMREG_LR, 0);
514 } else
515 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
518 return code;
521 static guint8 *
522 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
524 MonoInst *inst;
526 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
528 inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
530 if (IS_HARD_FLOAT) {
531 if (!arm_is_fpimm8 (inst->inst_offset)) {
532 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
533 ARM_FLDD (code, reg, ARMREG_LR, 0);
534 } else
535 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
538 return code;
/*
 * emit_restore_lmf:
 *
 * Emit code to pop an LMF structure from the LMF stack.
 */
static guint8*
emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
	int basereg, offset;

	/* Small offsets can be folded into the loads below; otherwise
	 * compute the LMF address into r2 first. */
	if (lmf_offset < 32) {
		basereg = cfg->frame_reg;
		offset = lmf_offset;
	} else {
		basereg = ARMREG_R2;
		offset = 0;
		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
	}

	/* ip = previous_lmf */
	ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* lr = lmf_addr */
	ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* *(lmf_addr) = previous_lmf */
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));

	return code;
}
570 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enought to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k, frame_size = 0;
	guint32 size, align, pad;
	int offset = 8;
	MonoType *t;

	/* A struct return occupies a hidden first slot. */
	t = mini_get_underlying_type (csig->ret);
	if (MONO_TYPE_ISSTRUCT (t)) {
		frame_size += sizeof (target_mgreg_t);
		offset += 4;
	}

	arg_info [0].offset = offset;

	if (csig->hasthis) {
		frame_size += sizeof (target_mgreg_t);
		offset += 4;
	}

	arg_info [0].size = frame_size;

	for (k = 0; k < param_count; k++) {
		size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke);

		/* ignore alignment for now */
		align = 1;

		/* Pad frame_size up to the (currently trivial) alignment. */
		frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
		arg_info [k].pad = pad;
		frame_size += size;
		arg_info [k + 1].pad = 0;
		arg_info [k + 1].size = size;
		offset += pad;
		arg_info [k + 1].offset = offset;
		offset += size;
	}

	/* Round the whole frame to the ABI frame alignment. */
	align = MONO_ARCH_FRAME_ALIGNMENT;
	frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
	arg_info [k].pad = pad;

	return frame_size;
}
629 #define MAX_ARCH_DELEGATE_PARAMS 3
631 static guint8*
632 get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, gboolean param_count)
634 guint8 *code, *start;
635 GSList *unwind_ops = mono_arch_get_cie_program ();
637 if (has_target) {
638 start = code = mono_global_codeman_reserve (12);
640 /* Replace the this argument with the target */
641 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
642 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
643 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
645 g_assert ((code - start) <= 12);
647 mono_arch_flush_icache (start, 12);
648 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
649 } else {
650 int size, i;
652 size = 8 + param_count * 4;
653 start = code = mono_global_codeman_reserve (size);
655 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
656 /* slide down the arguments */
657 for (i = 0; i < param_count; ++i) {
658 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
660 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
662 g_assert ((code - start) <= size);
664 mono_arch_flush_icache (start, size);
665 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
668 if (has_target) {
669 *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
670 } else {
671 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
672 *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
673 g_free (name);
676 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
678 return start;
/*
 * mono_arch_get_delegate_invoke_impls:
 *
 *   Return a list of MonoAotTrampInfo structures for the delegate invoke impl
 * trampolines.
 */
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
	GSList *res = NULL;
	MonoTrampInfo *info;
	int i;

	/* The has-target variant first, then one no-target impl per arity. */
	get_delegate_invoke_impl (&info, TRUE, 0);
	res = g_slist_prepend (res, info);

	for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
		get_delegate_invoke_impl (&info, FALSE, i);
		res = g_slist_prepend (res, info);
	}

	return res;
}
/*
 * mono_arch_get_delegate_invoke_impl:
 *   Return (and cache) the delegate invoke trampoline matching SIG, or
 *   NULL when no specialized trampoline applies (struct returns, too many
 *   or non-regsize parameters).  Caches are guarded by the arch mutex.
 */
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	guint8 *code, *start;
	MonoType *sig_ret;

	/* FIXME: Support more cases */
	sig_ret = mini_get_underlying_type (sig->ret);
	if (MONO_TYPE_ISSTRUCT (sig_ret))
		return NULL;

	if (has_target) {
		static guint8* cached = NULL;
		mono_mini_arch_lock ();
		if (cached) {
			mono_mini_arch_unlock ();
			return cached;
		}

		if (mono_ee_features.use_aot_trampolines) {
			start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
		} else {
			MonoTrampInfo *info;
			start = get_delegate_invoke_impl (&info, TRUE, 0);
			mono_tramp_info_register (info, NULL);
		}
		cached = start;
		mono_mini_arch_unlock ();
		return cached;
	} else {
		/* One cached trampoline per parameter count. */
		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
		int i;

		if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
			return NULL;
		for (i = 0; i < sig->param_count; ++i)
			if (!mono_is_regsize_var (sig->params [i]))
				return NULL;

		mono_mini_arch_lock ();
		code = cache [sig->param_count];
		if (code) {
			mono_mini_arch_unlock ();
			return code;
		}

		if (mono_ee_features.use_aot_trampolines) {
			char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
			start = (guint8*)mono_aot_get_trampoline (name);
			g_free (name);
		} else {
			MonoTrampInfo *info;
			start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
			mono_tramp_info_register (info, NULL);
		}
		cache [sig->param_count] = start;
		mono_mini_arch_unlock ();
		return start;
	}

	return NULL;
}
/* Virtual delegate invoke trampolines are not implemented on ARM. */
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
	return NULL;
}
/* Recover the managed 'this' argument from the register state at a call site;
 * on ARM it is always passed in r0. */
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
	return (gpointer)regs [ARMREG_R0];
}
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
	/* Record the host's 64-bit integer alignment for the arg-passing code. */
	i8_align = MONO_ABI_ALIGNOF (gint64);
#ifdef MONO_CROSS_COMPILE
	/* Need to set the alignment of i8 since it can different on the target */
#ifdef TARGET_ANDROID
	/* linux gnueabi */
	mono_type_set_alignment (MONO_TYPE_I8, i8_align);
#endif
#endif
}
/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
	char *cpu_arch;

#ifdef TARGET_WATCHOS
	mini_get_debug_options ()->soft_breakpoints = TRUE;
#endif

	mono_os_mutex_init_recursive (&mini_arch_mutex);
	if (mini_get_debug_options ()->soft_breakpoints) {
		if (!mono_aot_only)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();
	} else {
		/* Single stepping / breakpoints work by faulting on reads from
		 * these pages; the bp page starts out inaccessible. */
		ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
		bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
		mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
	}

#if defined(__ARM_EABI__)
	eabi_supported = TRUE;
#endif

#if defined(ARM_FPU_VFP_HARD)
	arm_fpu = MONO_ARM_FPU_VFP_HARD;
#else
	arm_fpu = MONO_ARM_FPU_VFP;

#if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
	/*
	 * If we're compiling with a soft float fallback and it
	 * turns out that no VFP unit is available, we need to
	 * switch to soft float. We don't do this for iOS, since
	 * iOS devices always have a VFP unit.
	 */
	if (!mono_hwcap_arm_has_vfp)
		arm_fpu = MONO_ARM_FPU_NONE;

	/*
	 * This environment variable can be useful in testing
	 * environments to make sure the soft float fallback
	 * works. Most ARM devices have VFP units these days, so
	 * normally soft float code would not be exercised much.
	 */
	char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");

	if (soft && !strncmp (soft, "1", 1))
		arm_fpu = MONO_ARM_FPU_NONE;
	g_free (soft);
#endif
#endif

	v5_supported = mono_hwcap_arm_is_v5;
	v6_supported = mono_hwcap_arm_is_v6;
	v7_supported = mono_hwcap_arm_is_v7;

	/*
	 * On weird devices, the hwcap code may fail to detect
	 * the ARM version. In that case, we can at least safely
	 * assume the version the runtime was compiled for.
	 */
#ifdef HAVE_ARMV5
	v5_supported = TRUE;
#endif
#ifdef HAVE_ARMV6
	v6_supported = TRUE;
#endif
#ifdef HAVE_ARMV7
	v7_supported = TRUE;
#endif

#if defined(TARGET_IOS)
	/* iOS is special-cased here because we don't yet
	   have a way to properly detect CPU features on it. */
	thumb_supported = TRUE;
	iphone_abi = TRUE;
#else
	thumb_supported = mono_hwcap_arm_has_thumb;
	thumb2_supported = mono_hwcap_arm_has_thumb2;
#endif

	/* Format: armv(5|6|7[s])[-thumb[2]] */
	cpu_arch = g_getenv ("MONO_CPU_ARCH");

	/* Do this here so it overrides any detection. */
	if (cpu_arch) {
		if (strncmp (cpu_arch, "armv", 4) == 0) {
			v5_supported = cpu_arch [4] >= '5';
			v6_supported = cpu_arch [4] >= '6';
			v7_supported = cpu_arch [4] >= '7';
			v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
			v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0;
		}

		thumb_supported = strstr (cpu_arch, "thumb") != NULL;
		thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
		g_free (cpu_arch);
	}
}
/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}
/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
	/* no arm-specific optimizations yet */
	*exclude_mask = 0;
	return 0;
}
/*
 * This function test for all SIMD functions supported.
 *
 * Returns a bitmask corresponding to all supported versions.
 */
guint32
mono_arch_cpu_enumerate_simd_versions (void)
{
	/* SIMD is currently unimplemented */
	return 0;
}
/* TRUE only when the runtime was built for the armhf (VFP hard-float) ABI. */
gboolean
mono_arm_is_hard_float (void)
{
	return arm_fpu == MONO_ARM_FPU_VFP_HARD;
}
937 #ifndef DISABLE_JIT
939 gboolean
940 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
942 if (v7s_supported || v7k_supported) {
943 switch (opcode) {
944 case OP_IDIV:
945 case OP_IREM:
946 case OP_IDIV_UN:
947 case OP_IREM_UN:
948 return FALSE;
949 default:
950 break;
953 return TRUE;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* TRUE when no VFP unit was found and the soft float fallback is in effect. */
gboolean
mono_arch_is_soft_float (void)
{
	return arm_fpu == MONO_ARM_FPU_NONE;
}
#endif
964 static gboolean
965 is_regsize_var (MonoType *t)
967 if (t->byref)
968 return TRUE;
969 t = mini_get_underlying_type (t);
970 switch (t->type) {
971 case MONO_TYPE_I4:
972 case MONO_TYPE_U4:
973 case MONO_TYPE_I:
974 case MONO_TYPE_U:
975 case MONO_TYPE_PTR:
976 case MONO_TYPE_FNPTR:
977 return TRUE;
978 case MONO_TYPE_OBJECT:
979 return TRUE;
980 case MONO_TYPE_GENERICINST:
981 if (!mono_type_generic_inst_is_valuetype (t))
982 return TRUE;
983 return FALSE;
984 case MONO_TYPE_VALUETYPE:
985 return FALSE;
987 return FALSE;
/*
 * mono_arch_get_allocatable_int_vars:
 *   Collect the method variables eligible for integer register allocation,
 *   sorted for the global allocator.
 */
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		/* Skip vars whose address is taken or that are not plain locals/args. */
		if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		/* we can only allocate 32 bit values */
		if (is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
		}
	}

	return vars;
}
/*
 * mono_arch_get_global_int_regs:
 *   Return the list of callee-saved core registers available to the global
 *   register allocator for this method.
 */
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;

	mono_arch_compute_omit_fp (cfg);

	/*
	 * FIXME: Interface calls might go through a static rgctx trampoline which
	 * sets V5, but it doesn't save it, so we need to save it ourselves, and
	 * avoid using it.
	 */
	if (cfg->flags & MONO_CFG_HAS_CALLS)
		cfg->uses_rgctx_reg = TRUE;

	/* fp is only allocatable when no frame pointer is needed. */
	if (cfg->arch.omit_fp)
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
	if (iphone_abi)
		/* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
	else
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
	if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
	/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
	/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/

	return regs;
}
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	/* FIXME: flat cost for every variable for now */
	return 2;
}
1066 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_flush_icache:
 *   Make SIZE bytes of freshly written code at CODE visible to the
 *   instruction stream (required on ARM's split caches).
 */
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#if defined(MONO_CROSS_COMPILE)
	/* Nothing to do: generated code never runs on the build host. */
#elif __APPLE__
	sys_icache_invalidate (code, size);
#else
	__builtin___clear_cache ((char*)code, (char*)code + size);
#endif
}
1079 #define DEBUG(a)
/*
 * add_general:
 *   Assign the next integer argument to a core register or a stack slot per
 *   the AAPCS.  SIMPLE means a single 32-bit word; otherwise a 64-bit value
 *   needing a register pair (possibly split across r3 and the stack).
 *   Advances *gr / *stack_size accordingly.
 */
static void inline
add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
{
	if (simple) {
		if (*gr > ARMREG_R3) {
			/* r0-r3 exhausted: caller's stack. */
			ainfo->size = 4;
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBase;
			*stack_size += 4;
		} else {
			ainfo->storage = RegTypeGeneral;
			ainfo->reg = *gr;
		}
	} else {
		gboolean split;

		/* Whether an i8 may be split between r3 and the stack. */
		if (eabi_supported)
			split = i8_align == 4;
		else
			split = TRUE;

		ainfo->size = 8;
		if (*gr == ARMREG_R3 && split) {
			/* first word in r3 and the second on the stack */
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBaseGen;
			*stack_size += 4;
		} else if (*gr >= ARMREG_R3) {
			if (eabi_supported) {
				/* darwin aligns longs to 4 byte only */
				if (i8_align == 8) {
					*stack_size += 7;
					*stack_size &= ~7;
				}
			}
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBase;
			*stack_size += 8;
		} else {
			if (eabi_supported) {
				/* EABI requires an even-numbered starting register for pairs. */
				if (i8_align == 8 && ((*gr) & 1))
					(*gr) ++;
			}
			ainfo->storage = RegTypeIRegPair;
			ainfo->reg = *gr;
		}
		/* Second increment for the pair's high word. */
		(*gr) ++;
	}
	(*gr) ++;
}
/*
 * add_float:
 *   Assign the next floating point argument to a VFP register or a stack
 *   slot per the armhf AAPCS, tracking back-fill of spare single-precision
 *   registers via *float_spare.
 */
static void inline
add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
{
	/*
	 * If we're calling a function like this:
	 *
	 * void foo(float a, double b, float c)
	 *
	 * We pass a in s0 and b in d1. That leaves us
	 * with s1 being unused. The armhf ABI recognizes
	 * this and requires register assignment to then
	 * use that for the next single-precision arg,
	 * i.e. c in this example. So float_spare either
	 * tells us which reg to use for the next single-
	 * precision arg, or it's -1, meaning use *fpr.
	 *
	 * Note that even though most of the JIT speaks
	 * double-precision, fpr represents single-
	 * precision registers.
	 *
	 * See parts 5.5 and 6.1.2 of the AAPCS for how
	 * this all works.
	 */

	if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
		ainfo->storage = RegTypeFP;

		if (is_double) {
			/*
			 * If we're passing a double-precision value
			 * and *fpr is odd (e.g. it's s1, s3, ...)
			 * we need to use the next even register. So
			 * we mark the current *fpr as a spare that
			 * can be used for the next single-precision
			 * value.
			 */
			if (*fpr % 2) {
				*float_spare = *fpr;
				(*fpr)++;
			}

			/*
			 * At this point, we have an even register
			 * so we assign that and move along.
			 */
			ainfo->reg = *fpr;
			*fpr += 2;
		} else if (*float_spare >= 0) {
			/*
			 * We're passing a single-precision value
			 * and it looks like a spare single-
			 * precision register is available. Let's
			 * use it.
			 */
			ainfo->reg = *float_spare;
			*float_spare = -1;
		} else {
			/*
			 * If we hit this branch, we're passing a
			 * single-precision value and we can simply
			 * use the next available register.
			 */
			ainfo->reg = *fpr;
			(*fpr)++;
		}
	} else {
		/*
		 * We've exhausted available floating point
		 * regs, so pass the rest on the stack.
		 */
		if (is_double) {
			/* Doubles are 8-byte aligned on the stack. */
			*stack_size += 7;
			*stack_size &= ~7;
		}

		ainfo->offset = *stack_size;
		ainfo->reg = ARMREG_SP;
		ainfo->storage = RegTypeBase;

		*stack_size += 8;
	}
}
1221 static gboolean
1222 is_hfa (MonoType *t, int *out_nfields, int *out_esize)
1224 MonoClass *klass;
1225 gpointer iter;
1226 MonoClassField *field;
1227 MonoType *ftype, *prev_ftype = NULL;
1228 int nfields = 0;
1230 klass = mono_class_from_mono_type_internal (t);
1231 iter = NULL;
1232 while ((field = mono_class_get_fields_internal (klass, &iter))) {
1233 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
1234 continue;
1235 ftype = mono_field_get_type_internal (field);
1236 ftype = mini_get_underlying_type (ftype);
1238 if (MONO_TYPE_ISSTRUCT (ftype)) {
1239 int nested_nfields, nested_esize;
1241 if (!is_hfa (ftype, &nested_nfields, &nested_esize))
1242 return FALSE;
1243 if (nested_esize == 4)
1244 ftype = m_class_get_byval_arg (mono_defaults.single_class);
1245 else
1246 ftype = m_class_get_byval_arg (mono_defaults.double_class);
1247 if (prev_ftype && prev_ftype->type != ftype->type)
1248 return FALSE;
1249 prev_ftype = ftype;
1250 nfields += nested_nfields;
1251 } else {
1252 if (!(!ftype->byref && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
1253 return FALSE;
1254 if (prev_ftype && prev_ftype->type != ftype->type)
1255 return FALSE;
1256 prev_ftype = ftype;
1257 nfields ++;
1260 if (nfields == 0 || nfields > 4)
1261 return FALSE;
1262 *out_nfields = nfields;
1263 *out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
1264 return TRUE;
/*
 * get_call_info:
 *
 *   Compute how every argument and the return value of SIG are passed under
 * the ARM calling convention in use (soft-float, VFP soft ABI or hard-float).
 * The result is allocated from MP if non-NULL, otherwise with g_malloc0 and
 * must be freed by the caller with g_free ().
 */
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	guint i, gr, fpr, pstart;
	gint float_spare;
	int n = sig->hasthis + sig->param_count;
	int nfields, esize;
	guint32 align;
	MonoType *t;
	guint32 stack_size = 0;
	CallInfo *cinfo;
	gboolean is_pinvoke = sig->pinvoke;
	gboolean vtype_retaddr = FALSE;

	if (mp)
		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	else
		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	cinfo->nargs = n;
	gr = ARMREG_R0;
	fpr = ARM_VFP_F0;
	float_spare = -1;

	/* First classify the return value. */
	t = mini_get_underlying_type (sig->ret);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_OBJECT:
		cinfo->ret.storage = RegTypeGeneral;
		cinfo->ret.reg = ARMREG_R0;
		break;
	case MONO_TYPE_U8:
	case MONO_TYPE_I8:
		/* 64-bit results come back in r0/r1. */
		cinfo->ret.storage = RegTypeIRegPair;
		cinfo->ret.reg = ARMREG_R0;
		break;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		cinfo->ret.storage = RegTypeFP;

		if (t->type == MONO_TYPE_R4)
			cinfo->ret.size = 4;
		else
			cinfo->ret.size = 8;

		/* hard-float returns in s0/d0, the soft ABIs in r0(/r1). */
		if (IS_HARD_FLOAT) {
			cinfo->ret.reg = ARM_VFP_F0;
		} else {
			cinfo->ret.reg = ARMREG_R0;
		}
		break;
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (t)) {
			cinfo->ret.storage = RegTypeGeneral;
			cinfo->ret.reg = ARMREG_R0;
			break;
		}
		if (mini_is_gsharedvt_variable_type (t)) {
			cinfo->ret.storage = RegTypeStructByAddr;
			break;
		}
		/* Fall through */
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
			/* Homogeneous float aggregates return in VFP registers. */
			cinfo->ret.storage = RegTypeHFA;
			cinfo->ret.reg = 0;
			cinfo->ret.nregs = nfields;
			cinfo->ret.esize = esize;
		} else {
			if (is_pinvoke) {
				int native_size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
				int max_size;

#ifdef TARGET_WATCHOS
				max_size = 16;
#else
				max_size = 4;
#endif
				if (native_size <= max_size) {
					/* Small structs come back packed in core registers. */
					cinfo->ret.storage = RegTypeStructByVal;
					cinfo->ret.struct_size = native_size;
					cinfo->ret.nregs = ALIGN_TO (native_size, 4) / 4;
				} else {
					cinfo->ret.storage = RegTypeStructByAddr;
				}
			} else {
				cinfo->ret.storage = RegTypeStructByAddr;
			}
		}
		break;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (mini_is_gsharedvt_type (t));
		cinfo->ret.storage = RegTypeStructByAddr;
		break;
	case MONO_TYPE_VOID:
		break;
	default:
		g_error ("Can't handle as return value 0x%x", sig->ret->type);
	}

	vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr;

	pstart = 0;
	n = 0;
	/*
	 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
	 * the first argument, allowing 'this' to be always passed in the first arg reg.
	 * Also do this if the first argument is a reference type, since virtual calls
	 * are sometimes made using calli without sig->hasthis set, like in the delegate
	 * invoke wrappers.
	 */
	if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
		if (sig->hasthis) {
			add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
		} else {
			add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
			pstart = 1;
		}
		n ++;
		cinfo->ret.reg = gr;
		gr ++;
		cinfo->vret_arg_index = 1;
	} else {
		/* this */
		if (sig->hasthis) {
			add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
			n ++;
		}
		if (vtype_retaddr) {
			cinfo->ret.reg = gr;
			gr ++;
		}
	}

	/* Now walk the explicit parameters. */
	DEBUG(g_print("params: %d\n", sig->param_count));
	for (i = pstart; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [n];

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Prevent implicit arguments and sig_cookie from
			   being passed in registers */
			gr = ARMREG_R3 + 1;
			fpr = ARM_VFP_F16;
			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
		}
		DEBUG(g_print("param %d: ", i));
		if (sig->params [i]->byref) {
			DEBUG(g_print("byref\n"));
			/* byref arguments are just a pointer. */
			add_general (&gr, &stack_size, ainfo, TRUE);
			n++;
			continue;
		}
		t = mini_get_underlying_type (sig->params [i]);
		switch (t->type) {
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			cinfo->args [n].size = 1;
			add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			cinfo->args [n].size = 2;
			add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			cinfo->args [n].size = 4;
			add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_OBJECT:
			cinfo->args [n].size = sizeof (target_mgreg_t);
			add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (t)) {
				cinfo->args [n].size = sizeof (target_mgreg_t);
				add_general (&gr, &stack_size, ainfo, TRUE);
				break;
			}
			if (mini_is_gsharedvt_variable_type (t)) {
				/* gsharedvt arguments are passed by ref */
				g_assert (mini_is_gsharedvt_type (t));
				add_general (&gr, &stack_size, ainfo, TRUE);
				switch (ainfo->storage) {
				case RegTypeGeneral:
					ainfo->storage = RegTypeGSharedVtInReg;
					break;
				case RegTypeBase:
					ainfo->storage = RegTypeGSharedVtOnStack;
					break;
				default:
					g_assert_not_reached ();
				}
				break;
			}
			/* Fall through */
		case MONO_TYPE_TYPEDBYREF:
		case MONO_TYPE_VALUETYPE: {
			gint size;
			int align_size;
			int nwords, nfields, esize;
			guint32 align;

			if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
				if (fpr + nfields < ARM_VFP_F16) {
					/* The whole HFA fits in VFP registers. */
					ainfo->storage = RegTypeHFA;
					ainfo->reg = fpr;
					ainfo->nregs = nfields;
					ainfo->esize = esize;
					if (esize == 4)
						fpr += nfields;
					else
						fpr += nfields * 2;
					break;
				} else {
					/* Partial fit: retire the FP regs, fall back below. */
					fpr = ARM_VFP_F16;
				}
			}

			if (t->type == MONO_TYPE_TYPEDBYREF) {
				size = MONO_ABI_SIZEOF (MonoTypedRef);
				align = sizeof (target_mgreg_t);
			} else {
				MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]);
				if (is_pinvoke)
					size = mono_class_native_size (klass, &align);
				else
					size = mini_type_stack_size_full (t, &align, FALSE);
			}
			DEBUG(g_print ("load %d bytes struct\n", size));

#ifdef TARGET_WATCHOS
			/* Watchos pass large structures by ref */
			/* We only do this for pinvoke to make gsharedvt/dyncall simpler */
			if (sig->pinvoke && size > 16) {
				add_general (&gr, &stack_size, ainfo, TRUE);
				switch (ainfo->storage) {
				case RegTypeGeneral:
					ainfo->storage = RegTypeStructByAddr;
					break;
				case RegTypeBase:
					ainfo->storage = RegTypeStructByAddrOnStack;
					break;
				default:
					g_assert_not_reached ();
					break;
				}
				break;
			}
#endif

			/* Round the struct up to whole machine words. */
			align_size = size;
			nwords = 0;
			align_size += (sizeof (target_mgreg_t) - 1);
			align_size &= ~(sizeof (target_mgreg_t) - 1);
			nwords = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t);
			ainfo->storage = RegTypeStructByVal;
			ainfo->struct_size = size;
			ainfo->align = align;

			if (eabi_supported) {
				/* 8-byte aligned structs start in an even register. */
				if (align >= 8 && (gr & 1))
					gr ++;
			}
			if (gr > ARMREG_R3) {
				/* Entirely on the stack. */
				ainfo->size = 0;
				ainfo->vtsize = nwords;
			} else {
				/* size words in registers, vtsize words on the stack. */
				int rest = ARMREG_R3 - gr + 1;
				int n_in_regs = rest >= nwords? nwords: rest;

				ainfo->size = n_in_regs;
				ainfo->vtsize = nwords - n_in_regs;
				ainfo->reg = gr;
				gr += n_in_regs;
				nwords -= n_in_regs;
			}
			stack_size = ALIGN_TO (stack_size, align);

			ainfo->offset = stack_size;
			/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
			stack_size += nwords * sizeof (target_mgreg_t);
			break;
		}
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			ainfo->size = 8;
			add_general (&gr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_R4:
			ainfo->size = 4;

			if (IS_HARD_FLOAT)
				add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
			else
				add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_R8:
			ainfo->size = 8;

			if (IS_HARD_FLOAT)
				add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
			else
				add_general (&gr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_VAR:
		case MONO_TYPE_MVAR:
			/* gsharedvt arguments are passed by ref */
			g_assert (mini_is_gsharedvt_type (t));
			add_general (&gr, &stack_size, ainfo, TRUE);
			switch (ainfo->storage) {
			case RegTypeGeneral:
				ainfo->storage = RegTypeGSharedVtInReg;
				break;
			case RegTypeBase:
				ainfo->storage = RegTypeGSharedVtOnStack;
				break;
			default:
				g_assert_not_reached ();
			}
			break;
		default:
			g_error ("Can't handle 0x%x", sig->params [i]->type);
		}
		n ++;
	}

	/* Handle the case where there are no implicit arguments */
	if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
		/* Prevent implicit arguments and sig_cookie from
		   being passed in registers */
		gr = ARMREG_R3 + 1;
		fpr = ARM_VFP_F16;
		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
	}

	DEBUG (g_print (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
	stack_size = ALIGN_TO (stack_size, MONO_ARCH_FRAME_ALIGNMENT);

	cinfo->stack_usage = stack_size;
	return cinfo;
}
1628 * We need to create a temporary value if the argument is not stored in
1629 * a linear memory range in the ccontext (this normally happens for
1630 * value types if they are passed both by stack and regs).
1632 static int
1633 arg_need_temp (ArgInfo *ainfo)
1635 if (ainfo->storage == RegTypeStructByVal && ainfo->vtsize)
1636 return ainfo->struct_size;
1637 return 0;
1640 static gpointer
1641 arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
1643 switch (ainfo->storage) {
1644 case RegTypeIRegPair:
1645 case RegTypeGeneral:
1646 case RegTypeStructByVal:
1647 return &ccontext->gregs [ainfo->reg];
1648 case RegTypeHFA:
1649 case RegTypeFP:
1650 return &ccontext->fregs [ainfo->reg];
1651 case RegTypeBase:
1652 return ccontext->stack + ainfo->offset;
1653 default:
1654 g_error ("Arg storage type not yet supported");
1658 static void
1659 arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
1661 int reg_size = ainfo->size * sizeof (host_mgreg_t);
1662 g_assert (arg_need_temp (ainfo));
1663 memcpy (dest, &ccontext->gregs [ainfo->reg], reg_size);
1664 memcpy ((host_mgreg_t*)dest + ainfo->size, ccontext->stack + ainfo->offset, ainfo->struct_size - reg_size);
1667 static void
1668 arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
1670 int reg_size = ainfo->size * sizeof (host_mgreg_t);
1671 g_assert (arg_need_temp (ainfo));
1672 memcpy (&ccontext->gregs [ainfo->reg], src, reg_size);
1673 memcpy (ccontext->stack + ainfo->offset, (host_mgreg_t*)src + ainfo->size, ainfo->struct_size - reg_size);
/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
	CallInfo *cinfo = get_call_info (NULL, sig);
	gpointer storage;
	ArgInfo *ainfo;

	memset (ccontext, 0, sizeof (CallContext));

	/* Allocate the outgoing stack area sized from the call info. */
	ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
	if (ccontext->stack_size)
		ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);

	if (sig->ret->type != MONO_TYPE_VOID) {
		ainfo = &cinfo->ret;
		if (ainfo->storage == RegTypeStructByAddr) {
			/* Seed the hidden return-buffer register with the interp frame's slot. */
			storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
			ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)(gsize)storage;
		}
	}

	g_assert (!sig->hasthis);

	for (int i = 0; i < sig->param_count; i++) {
		ainfo = &cinfo->args [i];
		int temp_size = arg_need_temp (ainfo);

		/* Split args are marshalled into a temp, then scattered to regs+stack. */
		if (temp_size)
			storage = alloca (temp_size); // FIXME? alloca in a loop
		else
			storage = arg_get_storage (ccontext, ainfo);

		interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
		if (temp_size)
			arg_set_val (ccontext, ainfo, storage);
	}

	g_free (cinfo);
}
/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	MonoEECallbacks *interp_cb;
	CallInfo *cinfo;
	gpointer storage;
	ArgInfo *ainfo;

	if (sig->ret->type == MONO_TYPE_VOID)
		return;

	interp_cb = mini_get_interp_callbacks ();
	cinfo = get_call_info (NULL, sig);
	ainfo = &cinfo->ret;

	/* RegTypeStructByAddr results were already written through the hidden
	 * return buffer, so only in-register returns need copying out here. */
	if (ainfo->storage != RegTypeStructByAddr) {
		g_assert (!arg_need_temp (ainfo));
		storage = arg_get_storage (ccontext, ainfo);
		/* NOTE: storage points INTO ccontext, so the context must be
		 * cleared before the value is written through it — keep this
		 * memset between arg_get_storage and frame_arg_to_data. */
		memset (ccontext, 0, sizeof (CallContext)); // FIXME
		interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
	}

	g_free (cinfo);
}
1744 /* Gets the arguments from ccontext (for n2i entry) */
1745 void
1746 mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1748 MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
1749 CallInfo *cinfo = get_call_info (NULL, sig);
1750 gpointer storage;
1751 ArgInfo *ainfo;
1753 if (sig->ret->type != MONO_TYPE_VOID) {
1754 ainfo = &cinfo->ret;
1755 if (ainfo->storage == RegTypeStructByAddr) {
1756 storage = (gpointer)(gsize)ccontext->gregs [cinfo->ret.reg];
1757 interp_cb->frame_arg_set_storage ((MonoInterpFrameHandle)frame, sig, -1, storage);
1761 for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
1762 ainfo = &cinfo->args [i];
1763 int temp_size = arg_need_temp (ainfo);
1765 if (temp_size) {
1766 storage = alloca (temp_size); // FIXME? alloca in a loop
1767 arg_get_val (ccontext, ainfo, storage);
1768 } else {
1769 storage = arg_get_storage (ccontext, ainfo);
1771 interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
1774 g_free (cinfo);
1777 /* Gets the return value from ccontext (for i2n exit) */
1778 void
1779 mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1781 MonoEECallbacks *interp_cb;
1782 CallInfo *cinfo;
1783 ArgInfo *ainfo;
1784 gpointer storage;
1786 if (sig->ret->type == MONO_TYPE_VOID)
1787 return;
1789 interp_cb = mini_get_interp_callbacks ();
1790 cinfo = get_call_info (NULL, sig);
1791 ainfo = &cinfo->ret;
1793 if (ainfo->storage != RegTypeStructByAddr) {
1794 g_assert (!arg_need_temp (ainfo));
1795 storage = arg_get_storage (ccontext, ainfo);
1796 interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
1799 g_free (cinfo);
1802 #ifndef DISABLE_JIT
1804 gboolean
1805 mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
1807 g_assert (caller_sig);
1808 g_assert (callee_sig);
1810 CallInfo *caller_info = get_call_info (NULL, caller_sig);
1811 CallInfo *callee_info = get_call_info (NULL, callee_sig);
1814 * Tailcalls with more callee stack usage than the caller cannot be supported, since
1815 * the extra stack space would be left on the stack after the tailcall.
1817 gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
1818 && IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);
1820 // FIXME The limit here is that moving the parameters requires addressing the parameters
1821 // with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
1822 res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (4096 - 4));
1823 res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (4096 - 4));
1825 g_free (caller_info);
1826 g_free (callee_info);
1828 return res;
/*
 * debug_omit_fp:
 *
 *   Debug hook for frame pointer omission. Flip the #if to drive the
 * decision through mono_debug_count () when bisecting omit-fp problems.
 */
static gboolean
debug_omit_fp (void)
{
#if 0
	return mono_debug_count ();
#else
	return TRUE;
#endif
}
/*
 * mono_arch_compute_omit_fp:
 * Determine whether the frame pointer can be eliminated.
 *
 * Sets cfg->arch.omit_fp (and omit_fp_computed); idempotent. Also ensures
 * cfg->arch.cinfo is populated as a side effect.
 */
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i, locals_size;
	CallInfo *cinfo;

	if (cfg->arch.omit_fp_computed)
		return;

	header = cfg->header;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	/*
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;
	cfg->arch.omit_fp_computed = TRUE;

	if (cfg->disable_omit_fp)
		cfg->arch.omit_fp = FALSE;
	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;

	if (cfg->method->save_lmf)
		cfg->arch.omit_fp = FALSE;

	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
			/*
			 * The stack offset can only be determined when the frame
			 * size is known.
			 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	/* NOTE: locals_size is accumulated below but not currently consulted. */
	locals_size = 0;
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		int ialign;

		locals_size += mono_type_size (ins->inst_vtype, &ialign);
	}
}
/*
 * Set var information according to the calling convention. arm version.
 * The locals var stuff should most likely be split in another method.
 *
 * Assigns every variable (return slot, seq-point/atomic temporaries,
 * locals, 'this' and parameters) either a register (OP_REGVAR) or a
 * positive frame offset (OP_REGOFFSET) from cfg->frame_reg, and records
 * the total frame size in cfg->stack_offset.
 */
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *ins;
	MonoType *sig_ret;
	int i, offset, size, align, curinst;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	guint32 ualign;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;
	sig_ret = mini_get_underlying_type (sig->ret);

	mono_arch_compute_omit_fp (cfg);

	if (cfg->arch.omit_fp)
		cfg->frame_reg = ARMREG_SP;
	else
		cfg->frame_reg = ARMREG_FP;

	cfg->flags |= MONO_CFG_HAS_SPILLUP;

	/* allow room for the vararg method args: void* and long/double */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8);

	header = cfg->header;

	/* See mono_arch_get_global_int_regs () */
	if (cfg->flags & MONO_CFG_HAS_CALLS)
		cfg->uses_rgctx_reg = TRUE;

	if (cfg->frame_reg != ARMREG_SP)
		cfg->used_int_regs |= 1 << cfg->frame_reg;

	if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG);

	offset = 0;
	curinst = 0;
	if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) {
		if (sig_ret->type != MONO_TYPE_VOID) {
			/* Scalar return value stays in r0. */
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = ARMREG_R0;
		}
	}
	/* local vars are at a positive offset from the stack pointer */
	/*
	 * also note that if the function uses alloca, we use FP
	 * to point at the local variables.
	 */
	offset = 0; /* linkage area */
	/* align the offset to 16 bytes: not sure this is needed here */
	//offset += 8 - 1;
	//offset &= ~(8 - 1);

	/* add parameter area size for called functions */
	offset += cfg->param_area;
	offset += 8 - 1;
	offset &= ~(8 - 1);
	if (cfg->flags & MONO_CFG_HAS_FPOUT)
		offset += 8;

	/* allow room to save the return value */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		offset += 8;

	switch (cinfo->ret.storage) {
	case RegTypeStructByVal:
	case RegTypeHFA:
		/* Allocate a local to hold the result, the epilog will copy it to the correct place */
		offset = ALIGN_TO (offset, 8);
		cfg->ret->opcode = OP_REGOFFSET;
		cfg->ret->inst_basereg = cfg->frame_reg;
		cfg->ret->inst_offset = offset;
		if (cinfo->ret.storage == RegTypeStructByVal)
			offset += cinfo->ret.nregs * sizeof (target_mgreg_t);
		else
			offset += 32;
		break;
	case RegTypeStructByAddr:
		/* Frame slot holding the hidden return-buffer address. */
		ins = cfg->vret_addr;
		offset += sizeof (target_mgreg_t) - 1;
		offset &= ~(sizeof (target_mgreg_t) - 1);
		ins->inst_offset = offset;
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			g_print ("vret_addr =");
			mono_print_ins (cfg->vret_addr);
		}
		offset += sizeof (target_mgreg_t);
		break;
	default:
		break;
	}

	/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_info_var;

		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	if (cfg->arch.ss_trigger_page_var) {
		MonoInst *ins;

		ins = cfg->arch.ss_trigger_page_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	if (cfg->arch.seq_point_ss_method_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_ss_method_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	if (cfg->arch.seq_point_bp_method_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_bp_method_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
		/* Allocate a temporary used by the atomic ops */
		size = 4;
		align = 4;

		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->arch.atomic_tmp_offset = offset;
		offset += size;
	} else {
		cfg->arch.atomic_tmp_offset = -1;
	}

	cfg->locals_min_stack_offset = offset;

	/* Lay out the method's locals. */
	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		MonoType *t;

		ins = cfg->varinfo [i];
		if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
			continue;

		t = ins->inst_vtype;
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structure */
		if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type_internal (t), &ualign);
			align = ualign;
		}
		else
			size = mono_type_size (t, &align);

		/* FIXME: if a structure is misaligned, our memcpy doesn't work,
		 * since it loads/stores misaligned words, which don't do the right thing.
		 */
		if (align < 4 && size >= 4)
			align = 4;
		if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
			mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_offset = offset;
		ins->inst_basereg = cfg->frame_reg;
		offset += size;
		//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
	}

	cfg->locals_max_stack_offset = offset;

	/* Lay out 'this' and the parameters. */
	curinst = 0;
	if (sig->hasthis) {
		ins = cfg->args [curinst];
		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			offset += sizeof (target_mgreg_t) - 1;
			offset &= ~(sizeof (target_mgreg_t) - 1);
			ins->inst_offset = offset;
			offset += sizeof (target_mgreg_t);
		}
		curinst++;
	}

	if (sig->call_convention == MONO_CALL_VARARG) {
		size = 4;
		align = 4;

		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->sig_cookie = offset;
		offset += size;
	}

	for (i = 0; i < sig->param_count; ++i) {
		ainfo = cinfo->args + i;

		ins = cfg->args [curinst];

		switch (ainfo->storage) {
		case RegTypeHFA:
			offset = ALIGN_TO (offset, 8);
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			/* These arguments are saved to the stack in the prolog */
			ins->inst_offset = offset;
			if (cfg->verbose_level >= 2)
				g_print ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
			// FIXME:
			offset += 32;
			break;
		default:
			break;
		}

		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			size = mini_type_stack_size_full (sig->params [i], &ualign, sig->pinvoke);
			align = ualign;
			/* FIXME: if a structure is misaligned, our memcpy doesn't work,
			 * since it loads/stores misaligned words, which don't do the right thing.
			 */
			if (align < 4 && size >= 4)
				align = 4;
			/* The code in the prolog () stores words when storing vtypes received in a register */
			if (MONO_TYPE_ISSTRUCT (sig->params [i]))
				align = 4;
			if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
				mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
			offset += align - 1;
			offset &= ~(align - 1);
			ins->inst_offset = offset;
			offset += size;
		}
		curinst++;
	}

	/* align the offset to 8 bytes */
	if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
		mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
	offset += 8 - 1;
	offset &= ~(8 - 1);

	/* change sign? */
	cfg->stack_offset = offset;
}
/*
 * mono_arch_create_vars:
 *
 *   Create the arch-specific variables needed by this method before
 * register allocation: VFP scratch slots, the vret address argument, and
 * the sequence point bookkeeping variables for the debugger.
 */
void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;
	int i;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	if (IS_HARD_FLOAT) {
		/* Two double-sized scratch slots used when shuffling VFP values. */
		for (i = 0; i < 2; i++) {
			MonoInst *inst = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL);
			inst->flags |= MONO_INST_VOLATILE;

			cfg->arch.vfp_scratch_slots [i] = inst;
		}
	}

	if (cinfo->ret.storage == RegTypeStructByVal)
		cfg->ret_var_is_local = TRUE;

	if (cinfo->ret.storage == RegTypeStructByAddr) {
		/* Hidden argument carrying the return buffer address. */
		cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			g_print ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}

	if (cfg->gen_sdb_seq_points) {
		if (cfg->compile_aot) {
			MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_info_var = ins;

			if (!cfg->soft_breakpoints) {
				/* Allocate a separate variable for this to save 1 load per seq point */
				ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
				ins->flags |= MONO_INST_VOLATILE;
				cfg->arch.ss_trigger_page_var = ins;
			}
		}
		if (cfg->soft_breakpoints) {
			MonoInst *ins;

			ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_ss_method_var = ins;

			ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_bp_method_var = ins;
		}
	}
}
/*
 * emit_sig_cookie:
 *
 *   Emit IR storing the vararg signature cookie for CALL onto the stack at
 * the offset computed by get_call_info (). Not supported for tailcalls.
 */
2267 static void
2268 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2270 MonoMethodSignature *tmp_sig;
2271 int sig_reg;
2273 if (MONO_IS_TAILCALL_OPCODE (call))
2274 NOT_IMPLEMENTED;
2276 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
/*
2279 * mono_ArgIterator_Setup assumes the signature cookie is
2280 * passed first and all the arguments which were before it are
2281 * passed on the stack after the signature. So compensate by
2282 * passing a different signature.
 */
2284 tmp_sig = mono_metadata_signature_dup (call->signature);
2285 tmp_sig->param_count -= call->signature->sentinelpos;
2286 tmp_sig->sentinelpos = 0;
2287 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2289 sig_reg = mono_alloc_ireg (cfg);
2290 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
/* Store the cookie at its stack slot relative to SP */
2292 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
2295 #ifdef ENABLE_LLVM
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate the ARM CallInfo for SIG into the LLVMCallInfo format used by
 * the LLVM backend. Unsupported storage conventions set cfg->disable_llvm
 * so the method falls back to the JIT.
 */
2296 LLVMCallInfo*
2297 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2299 int i, n;
2300 CallInfo *cinfo;
2301 ArgInfo *ainfo;
2302 LLVMCallInfo *linfo;
2304 n = sig->param_count + sig->hasthis;
2306 cinfo = get_call_info (cfg->mempool, sig);
2308 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
/*
2311 * LLVM always uses the native ABI while we use our own ABI, the
2312 * only difference is the handling of vtypes:
2313 * - we only pass/receive them in registers in some cases, and only
2314 * in 1 or 2 integer registers.
 */
2316 switch (cinfo->ret.storage) {
2317 case RegTypeGeneral:
2318 case RegTypeNone:
2319 case RegTypeFP:
2320 case RegTypeIRegPair:
2321 break;
2322 case RegTypeStructByAddr:
2323 if (sig->pinvoke) {
2324 linfo->ret.storage = LLVMArgVtypeByRef;
2325 } else {
2326 /* Vtype returned using a hidden argument */
2327 linfo->ret.storage = LLVMArgVtypeRetAddr;
2328 linfo->vret_arg_index = cinfo->vret_arg_index;
2330 break;
2331 #if TARGET_WATCHOS
2332 case RegTypeStructByVal:
2333 /* LLVM models this by returning an int array */
2334 linfo->ret.storage = LLVMArgAsIArgs;
2335 linfo->ret.nslots = cinfo->ret.nregs;
2336 break;
2337 #endif
2338 case RegTypeHFA:
2339 linfo->ret.storage = LLVMArgFpStruct;
2340 linfo->ret.nslots = cinfo->ret.nregs;
2341 linfo->ret.esize = cinfo->ret.esize;
2342 break;
2343 default:
2344 cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage);
2345 cfg->disable_llvm = TRUE;
2346 return linfo;
2349 for (i = 0; i < n; ++i) {
2350 LLVMArgInfo *lainfo = &linfo->args [i];
2351 ainfo = cinfo->args + i;
2353 lainfo->storage = LLVMArgNone;
2355 switch (ainfo->storage) {
2356 case RegTypeGeneral:
2357 case RegTypeIRegPair:
2358 case RegTypeBase:
2359 case RegTypeBaseGen:
2360 case RegTypeFP:
2361 lainfo->storage = LLVMArgNormal;
2362 break;
2363 case RegTypeStructByVal: {
2364 lainfo->storage = LLVMArgAsIArgs;
/* EABI can require 8-byte slots for 8-aligned vtypes */
2365 int slotsize = eabi_supported && ainfo->align == 8 ? 8 : 4;
2366 lainfo->nslots = ALIGN_TO (ainfo->struct_size, slotsize) / slotsize;
2367 lainfo->esize = slotsize;
2368 break;
2370 case RegTypeStructByAddr:
2371 case RegTypeStructByAddrOnStack:
2372 lainfo->storage = LLVMArgVtypeByRef;
2373 break;
2374 case RegTypeHFA: {
2375 int j;
2377 lainfo->storage = LLVMArgAsFpArgs;
2378 lainfo->nslots = ainfo->nregs;
2379 lainfo->esize = ainfo->esize;
2380 for (j = 0; j < ainfo->nregs; ++j)
2381 lainfo->pair_storage [j] = LLVMArgInFPReg;
2382 break;
2384 default:
2385 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2386 cfg->disable_llvm = TRUE;
2387 break;
2391 return linfo;
2393 #endif
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR which moves the arguments of CALL into the registers/stack
 * slots assigned by get_call_info (), following the ARM calling conventions
 * (soft-float, VFP with softfp ABI, or hard-float as configured).
 */
2395 void
2396 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2398 MonoInst *in, *ins;
2399 MonoMethodSignature *sig;
2400 int i, n;
2401 CallInfo *cinfo;
2403 sig = call->signature;
2404 n = sig->param_count + sig->hasthis;
2406 cinfo = get_call_info (cfg->mempool, sig);
/* First handle the return value convention */
2408 switch (cinfo->ret.storage) {
2409 case RegTypeStructByVal:
2410 case RegTypeHFA:
2411 if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
2412 /* The JIT will transform this into a normal call */
2413 call->vret_in_reg = TRUE;
2414 break;
2416 if (MONO_IS_TAILCALL_OPCODE (call))
2417 break;
/*
2419 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2420 * the location pointed to by it after call in emit_move_return_value ().
 */
2422 if (!cfg->arch.vret_addr_loc) {
2423 cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2424 /* Prevent it from being register allocated or optimized away */
2425 cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
2428 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
2429 break;
2430 case RegTypeStructByAddr: {
/* Pass the address of the return area as a hidden argument register */
2431 MonoInst *vtarg;
2432 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2433 vtarg->sreg1 = call->vret_var->dreg;
2434 vtarg->dreg = mono_alloc_preg (cfg);
2435 MONO_ADD_INS (cfg->cbb, vtarg);
2437 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2438 break;
2440 default:
2441 break;
/* Now emit the moves for every argument */
2444 for (i = 0; i < n; ++i) {
2445 ArgInfo *ainfo = cinfo->args + i;
2446 MonoType *t;
2448 if (i >= sig->hasthis)
2449 t = sig->params [i - sig->hasthis];
2450 else
2451 t = mono_get_int_type ();
2452 t = mini_get_underlying_type (t);
2454 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2455 /* Emit the signature cookie just before the implicit arguments */
2456 emit_sig_cookie (cfg, call, cinfo);
2459 in = call->args [i];
2461 switch (ainfo->storage) {
2462 case RegTypeGeneral:
2463 case RegTypeIRegPair:
/* I8/U8 occupies a pair of core registers (low word first) */
2464 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2465 MONO_INST_NEW (cfg, ins, OP_MOVE);
2466 ins->dreg = mono_alloc_ireg (cfg);
2467 ins->sreg1 = MONO_LVREG_LS (in->dreg);
2468 MONO_ADD_INS (cfg->cbb, ins);
2469 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2471 MONO_INST_NEW (cfg, ins, OP_MOVE);
2472 ins->dreg = mono_alloc_ireg (cfg);
2473 ins->sreg1 = MONO_LVREG_MS (in->dreg);
2474 MONO_ADD_INS (cfg->cbb, ins);
2475 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2476 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2477 if (ainfo->size == 4) {
2478 if (IS_SOFT_FLOAT) {
2479 /* mono_emit_call_args () have already done the r8->r4 conversion */
2480 /* The converted value is in an int vreg */
2481 MONO_INST_NEW (cfg, ins, OP_MOVE);
2482 ins->dreg = mono_alloc_ireg (cfg);
2483 ins->sreg1 = in->dreg;
2484 MONO_ADD_INS (cfg->cbb, ins);
2485 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2486 } else {
/* softfp: spill the R4 to the param area and reload it into a core reg */
2487 int creg;
2489 cfg->param_area = MAX (cfg->param_area, 8);
2490 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2491 creg = mono_alloc_ireg (cfg);
2492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2493 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2495 } else {
2496 if (IS_SOFT_FLOAT) {
/* Split the R8 into two core registers via FGETLOW32/FGETHIGH32 */
2497 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2498 ins->dreg = mono_alloc_ireg (cfg);
2499 ins->sreg1 = in->dreg;
2500 MONO_ADD_INS (cfg->cbb, ins);
2501 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2503 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2504 ins->dreg = mono_alloc_ireg (cfg);
2505 ins->sreg1 = in->dreg;
2506 MONO_ADD_INS (cfg->cbb, ins);
2507 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2508 } else {
/* softfp: spill the R8 and reload both words into a core reg pair */
2509 int creg;
2511 cfg->param_area = MAX (cfg->param_area, 8);
2512 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2513 creg = mono_alloc_ireg (cfg);
2514 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2515 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2516 creg = mono_alloc_ireg (cfg);
2517 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2518 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2521 cfg->flags |= MONO_CFG_HAS_FPOUT;
2522 } else {
2523 MONO_INST_NEW (cfg, ins, OP_MOVE);
2524 ins->dreg = mono_alloc_ireg (cfg);
2525 ins->sreg1 = in->dreg;
2526 MONO_ADD_INS (cfg->cbb, ins);
2528 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2530 break;
2531 case RegTypeStructByVal:
2532 case RegTypeGSharedVtInReg:
2533 case RegTypeGSharedVtOnStack:
2534 case RegTypeHFA:
2535 case RegTypeStructByAddr:
2536 case RegTypeStructByAddrOnStack:
/* Vtype arguments are handled by mono_arch_emit_outarg_vt () */
2537 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2538 ins->opcode = OP_OUTARG_VT;
2539 ins->sreg1 = in->dreg;
2540 ins->klass = in->klass;
2541 ins->inst_p0 = call;
2542 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2543 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2544 mono_call_inst_add_outarg_vt (cfg, call, ins);
2545 MONO_ADD_INS (cfg->cbb, ins);
2546 break;
2547 case RegTypeBase:
/* Argument passed entirely on the stack */
2548 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2549 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2550 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2551 if (t->type == MONO_TYPE_R8) {
2552 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2553 } else {
2554 if (IS_SOFT_FLOAT)
2555 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2556 else
2557 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2559 } else {
2560 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2562 break;
2563 case RegTypeBaseGen:
/* Argument split between the last core register (R3) and the stack */
2564 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2565 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? MONO_LVREG_LS (in->dreg) : MONO_LVREG_MS (in->dreg));
2566 MONO_INST_NEW (cfg, ins, OP_MOVE);
2567 ins->dreg = mono_alloc_ireg (cfg);
2568 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? MONO_LVREG_MS (in->dreg) : MONO_LVREG_LS (in->dreg);
2569 MONO_ADD_INS (cfg->cbb, ins);
2570 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2571 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2572 int creg;
2574 /* This should work for soft-float as well */
2576 cfg->param_area = MAX (cfg->param_area, 8);
2577 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2578 creg = mono_alloc_ireg (cfg);
2579 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2580 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2581 creg = mono_alloc_ireg (cfg);
2582 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2583 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2584 cfg->flags |= MONO_CFG_HAS_FPOUT;
2585 } else {
2586 g_assert_not_reached ();
2588 break;
2589 case RegTypeFP: {
/* hard-float: argument passed in a VFP register */
2590 int fdreg = mono_alloc_freg (cfg);
2592 if (ainfo->size == 8) {
2593 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2594 ins->sreg1 = in->dreg;
2595 ins->dreg = fdreg;
2596 MONO_ADD_INS (cfg->cbb, ins);
2598 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2599 } else {
2600 FloatArgData *fad;
/*
2603 * Mono's register allocator doesn't speak single-precision registers that
2604 * overlap double-precision registers (i.e. armhf). So we have to work around
2605 * the register allocator and load the value from memory manually.
2607 * So we create a variable for the float argument and an instruction to store
2608 * the argument into the variable. We then store the list of these arguments
2609 * in call->float_args. This list is then used by emit_float_args later to
2610 * pass the arguments in the various call opcodes.
2612 * This is not very nice, and we should really try to fix the allocator.
 */
2615 MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
2617 /* Make sure the instruction isn't seen as pointless and removed.
 */
2619 float_arg->flags |= MONO_INST_VOLATILE;
2621 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2623 /* We use the dreg to look up the instruction later. The hreg is used to
2624 * emit the instruction that loads the value into the FP reg.
 */
2626 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2627 fad->vreg = float_arg->dreg;
2628 fad->hreg = ainfo->reg;
2630 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2633 call->used_iregs |= 1 << ainfo->reg;
2634 cfg->flags |= MONO_CFG_HAS_FPOUT;
2635 break;
2637 default:
2638 g_assert_not_reached ();
2642 /* Handle the case where there are no implicit arguments */
2643 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2644 emit_sig_cookie (cfg, call, cinfo);
2646 call->call_info = cinfo;
2647 call->stack_usage = cinfo->stack_usage;
/*
 * add_outarg_reg:
 *
 *   Emit the IR moving ARG into the hardware register REG. Only RegTypeFP
 * (VFP double register) storage is handled here; everything else asserts.
 */
2650 static void
2651 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
2653 MonoInst *ins;
2655 switch (storage) {
2656 case RegTypeFP:
2657 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2658 ins->dreg = mono_alloc_freg (cfg);
2659 ins->sreg1 = arg->dreg;
2660 MONO_ADD_INS (cfg->cbb, ins);
/* TRUE: the vreg is bound to an FP hardware register */
2661 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
2662 break;
2663 default:
2664 g_assert_not_reached ();
2665 break;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Emit the IR passing the vtype in SRC as an argument, according to the
 * ArgInfo stored in INS->inst_p1 by mono_arch_emit_call (): by address
 * (in a register or on the stack), as an HFA in VFP registers, or split
 * between core registers and a stack memcpy for RegTypeStructByVal.
 */
2669 void
2670 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2672 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2673 MonoInst *load;
2674 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
2675 int ovf_size = ainfo->vtsize;
2676 int doffset = ainfo->offset;
2677 int struct_size = ainfo->struct_size;
2678 int i, soffset, dreg, tmpreg;
2680 switch (ainfo->storage) {
2681 case RegTypeGSharedVtInReg:
2682 case RegTypeStructByAddr:
2683 /* Pass by addr */
2684 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2685 break;
2686 case RegTypeGSharedVtOnStack:
2687 case RegTypeStructByAddrOnStack:
2688 /* Pass by addr on stack */
2689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
2690 break;
2691 case RegTypeHFA:
/* Homogeneous float aggregate: each element goes into its own VFP reg */
2692 for (i = 0; i < ainfo->nregs; ++i) {
2693 if (ainfo->esize == 4)
2694 MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
2695 else
2696 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
2697 load->dreg = mono_alloc_freg (cfg);
2698 load->inst_basereg = src->dreg;
2699 load->inst_offset = i * ainfo->esize;
2700 MONO_ADD_INS (cfg->cbb, load);
2702 if (ainfo->esize == 4) {
2703 FloatArgData *fad;
2705 /* See RegTypeFP in mono_arch_emit_call () */
2706 MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
2707 float_arg->flags |= MONO_INST_VOLATILE;
2708 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg);
2710 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2711 fad->vreg = float_arg->dreg;
2712 fad->hreg = ainfo->reg + i;
2714 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2715 } else {
/* Doubles occupy an even/odd single-precision register pair */
2716 add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + (i * 2), load);
2719 break;
2720 default:
/* RegTypeStructByVal: first ainfo->size words in core regs, rest copied to the stack */
2721 soffset = 0;
2722 for (i = 0; i < ainfo->size; ++i) {
2723 dreg = mono_alloc_ireg (cfg);
2724 switch (struct_size) {
2725 case 1:
2726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2727 break;
2728 case 2:
2729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2730 break;
2731 case 3:
/* 3-byte struct: assemble the word from three byte loads */
2732 tmpreg = mono_alloc_ireg (cfg);
2733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2734 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2735 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2736 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2737 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2739 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2740 break;
2741 default:
2742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2743 break;
2745 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2746 soffset += sizeof (target_mgreg_t);
2747 struct_size -= sizeof (target_mgreg_t);
2749 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2750 if (ovf_size != 0)
2751 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (target_mgreg_t), struct_size), struct_size < 4 ? 1 : 4);
2752 break;
/*
 * mono_arch_emit_setret:
 *
 *   Emit the IR moving VAL into the return register(s) of METHOD:
 * OP_SETLRET for 64-bit values (register pair), OP_SETFRET for floating
 * point depending on the FPU configuration, plain OP_MOVE otherwise.
 */
2756 void
2757 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2759 MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
2761 if (!ret->byref) {
2762 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2763 MonoInst *ins;
2765 if (COMPILE_LLVM (cfg)) {
2766 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2767 } else {
/* 64-bit return: low/most-significant words go into the r0/r1 pair */
2768 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2769 ins->sreg1 = MONO_LVREG_LS (val->dreg);
2770 ins->sreg2 = MONO_LVREG_MS (val->dreg);
2771 MONO_ADD_INS (cfg->cbb, ins);
2773 return;
2775 switch (arm_fpu) {
2776 case MONO_ARM_FPU_NONE:
2777 if (ret->type == MONO_TYPE_R8) {
2778 MonoInst *ins;
2780 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2781 ins->dreg = cfg->ret->dreg;
2782 ins->sreg1 = val->dreg;
2783 MONO_ADD_INS (cfg->cbb, ins);
2784 return;
2786 if (ret->type == MONO_TYPE_R4) {
2787 /* Already converted to an int in method_to_ir () */
2788 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2789 return;
2791 break;
2792 case MONO_ARM_FPU_VFP:
2793 case MONO_ARM_FPU_VFP_HARD:
2794 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2795 MonoInst *ins;
2797 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2798 ins->dreg = cfg->ret->dreg;
2799 ins->sreg1 = val->dreg;
2800 MONO_ADD_INS (cfg->cbb, ins);
2801 return;
2803 break;
2804 default:
2805 g_assert_not_reached ();
/* Default: plain integer/pointer return in r0 */
2809 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2812 #endif /* #ifndef DISABLE_JIT */
2814 gboolean
2815 mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
2817 return TRUE;
/* Precomputed information for a dynamic call (see mono_arch_dyn_call_prepare ()) */
2820 typedef struct {
/* The signature the call is made with */
2821 MonoMethodSignature *sig;
/* The ARM call info computed from sig; owned by this struct */
2822 CallInfo *cinfo;
/* Underlying return type (after mini_get_underlying_type ()) */
2823 MonoType *rtype;
/* Underlying parameter types, one per sig->param_count; owned by this struct */
2824 MonoType **param_types;
2825 } ArchDynCallInfo;
/*
 * dyn_call_supported:
 *
 *   Return whether the dynamic call machinery supports SIG/CINFO: only the
 * simple return/argument storage conventions are handled, FP needs VFP,
 * and soft-float R4/R8 and I8/U8 parameters are rejected.
 */
2827 static gboolean
2828 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2830 int i;
2832 switch (cinfo->ret.storage) {
2833 case RegTypeNone:
2834 case RegTypeGeneral:
2835 case RegTypeIRegPair:
2836 case RegTypeStructByAddr:
2837 break;
2838 case RegTypeFP:
2839 if (IS_VFP)
2840 break;
2841 else
2842 return FALSE;
2843 default:
2844 return FALSE;
2847 for (i = 0; i < cinfo->nargs; ++i) {
2848 ArgInfo *ainfo = &cinfo->args [i];
2849 int last_slot;
2851 switch (ainfo->storage) {
2852 case RegTypeGeneral:
2853 case RegTypeIRegPair:
2854 case RegTypeBaseGen:
2855 case RegTypeFP:
2856 break;
2857 case RegTypeBase:
2858 break;
2859 case RegTypeStructByVal:
2860 if (ainfo->size == 0)
2861 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2862 else
2863 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2864 break;
2865 default:
2866 return FALSE;
2870 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2871 for (i = 0; i < sig->param_count; ++i) {
2872 MonoType *t = sig->params [i];
2874 if (t->byref)
2875 continue;
2877 t = mini_get_underlying_type (t);
2879 switch (t->type) {
2880 case MONO_TYPE_R4:
2881 case MONO_TYPE_R8:
2882 if (IS_SOFT_FLOAT)
2883 return FALSE;
2884 else
2885 break;
2887 case MONO_TYPE_I8:
2888 case MONO_TYPE_U8:
2889 return FALSE;
2891 default:
2892 break;
2896 return TRUE;
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Precompute the information needed to make dynamic calls with SIG, or
 * return NULL if the signature is not supported. The returned structure
 * must be freed with mono_arch_dyn_call_free ().
 */
2899 MonoDynCallInfo*
2900 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2902 ArchDynCallInfo *info;
2903 CallInfo *cinfo;
2904 int i;
/* NULL mempool: cinfo is malloc-ed and owned by the returned info */
2906 cinfo = get_call_info (NULL, sig);
2908 if (!dyn_call_supported (cinfo, sig)) {
2909 g_free (cinfo);
2910 return NULL;
2913 info = g_new0 (ArchDynCallInfo, 1);
2914 // FIXME: Preprocess the info to speed up start_dyn_call ()
2915 info->sig = sig;
2916 info->cinfo = cinfo;
2917 info->rtype = mini_get_underlying_type (sig->ret);
/* Cache the underlying param types so start_dyn_call () avoids repeated lookups */
2918 info->param_types = g_new0 (MonoType*, sig->param_count);
2919 for (i = 0; i < sig->param_count; ++i)
2920 info->param_types [i] = mini_get_underlying_type (sig->params [i]);
2922 return (MonoDynCallInfo*)info;
2925 void
2926 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2928 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2930 g_free (ainfo->cinfo);
2931 g_free (ainfo);
/*
 * mono_arch_dyn_call_get_buf_size:
 *
 *   Return the size of the buffer the caller must allocate for
 * mono_arch_start_dyn_call (): the DynCallArgs header plus the stack
 * argument area. (Return type declared on the preceding line, outside
 * this view.)
 */
2935 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
2937 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2939 g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
2940 return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage;
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal the argument array ARGS into the DynCallArgs structure in BUF
 * according to INFO, filling the core register slots (p->regs), the VFP
 * registers (p->fpregs) and the stack argument area, so the dyn-call
 * trampoline can perform the actual invocation.
 */
2943 void
2944 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
2946 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2947 CallInfo *cinfo = dinfo->cinfo;
2948 DynCallArgs *p = (DynCallArgs*)buf;
2949 int arg_index, greg, i, j, pindex;
2950 MonoMethodSignature *sig = dinfo->sig;
2952 p->res = 0;
2953 p->ret = ret;
2954 p->has_fpregs = 0;
2955 p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);
2957 arg_index = 0;
2958 greg = 0;
2959 pindex = 0;
/* 'this' (or the vret address when it comes first) occupies the first register */
2961 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2962 p->regs [greg ++] = (host_mgreg_t)(gsize)*(args [arg_index ++]);
2963 if (!sig->hasthis)
2964 pindex = 1;
2967 if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
2968 p->regs [greg ++] = (host_mgreg_t)(gsize)ret;
2970 for (i = pindex; i < sig->param_count; i++) {
2971 MonoType *t = dinfo->param_types [i];
2972 gpointer *arg = args [arg_index ++];
2973 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
2974 int slot = -1;
/* Compute the p->regs slot: register args first, then stack args after PARAM_REGS */
2976 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
2977 slot = ainfo->reg;
2978 } else if (ainfo->storage == RegTypeFP) {
2979 } else if (ainfo->storage == RegTypeBase) {
2980 slot = PARAM_REGS + (ainfo->offset / 4);
2981 } else if (ainfo->storage == RegTypeBaseGen) {
2982 /* slot + 1 is the first stack slot, so the code below will work */
2983 slot = 3;
2984 } else {
2985 g_assert_not_reached ();
2988 if (t->byref) {
2989 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
2990 continue;
2993 switch (t->type) {
2994 case MONO_TYPE_OBJECT:
2995 case MONO_TYPE_PTR:
2996 case MONO_TYPE_I:
2997 case MONO_TYPE_U:
2998 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
2999 break;
3000 case MONO_TYPE_U1:
3001 p->regs [slot] = *(guint8*)arg;
3002 break;
3003 case MONO_TYPE_I1:
3004 p->regs [slot] = *(gint8*)arg;
3005 break;
3006 case MONO_TYPE_I2:
3007 p->regs [slot] = *(gint16*)arg;
3008 break;
3009 case MONO_TYPE_U2:
3010 p->regs [slot] = *(guint16*)arg;
3011 break;
3012 case MONO_TYPE_I4:
3013 p->regs [slot] = *(gint32*)arg;
3014 break;
3015 case MONO_TYPE_U4:
3016 p->regs [slot] = *(guint32*)arg;
3017 break;
3018 case MONO_TYPE_I8:
3019 case MONO_TYPE_U8:
/* 64-bit value occupies two consecutive slots */
3020 p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
3021 p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
3022 break;
3023 case MONO_TYPE_R4:
3024 if (ainfo->storage == RegTypeFP) {
/* hard-float: widen the single to fill a double fpreg slot */
3025 float f = *(float*)arg;
3026 p->fpregs [ainfo->reg / 2] = *(double*)&f;
3027 p->has_fpregs = 1;
3028 } else {
3029 p->regs [slot] = *(host_mgreg_t*)arg;
3031 break;
3032 case MONO_TYPE_R8:
3033 if (ainfo->storage == RegTypeFP) {
3034 p->fpregs [ainfo->reg / 2] = *(double*)arg;
3035 p->has_fpregs = 1;
3036 } else {
3037 p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
3038 p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
3040 break;
3041 case MONO_TYPE_GENERICINST:
3042 if (MONO_TYPE_IS_REFERENCE (t)) {
3043 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
3044 break;
3045 } else {
3046 if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
3047 MonoClass *klass = mono_class_from_mono_type_internal (t);
3048 guint8 *nullable_buf;
3049 int size;
3051 size = mono_class_value_size (klass, NULL);
3052 nullable_buf = g_alloca (size);
3053 g_assert (nullable_buf);
3055 /* The argument pointed to by arg is either a boxed vtype or null */
3056 mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
3058 arg = (gpointer*)nullable_buf;
3059 /* Fall though */
3060 } else {
3061 /* Fall though */
3064 case MONO_TYPE_VALUETYPE:
3065 g_assert (ainfo->storage == RegTypeStructByVal);
3067 if (ainfo->size == 0)
3068 slot = PARAM_REGS + (ainfo->offset / 4);
3069 else
3070 slot = ainfo->reg;
/* Copy the whole vtype, both register and overflow words */
3072 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
3073 p->regs [slot ++] = ((host_mgreg_t*)arg) [j];
3074 break;
3075 default:
3076 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Convert the raw return value left in BUF by the dyn-call trampoline
 * (p->res/p->res2 for core registers, p->fpregs [0] for VFP) into the
 * properly typed value at p->ret.
 */
3081 void
3082 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
3084 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
3085 DynCallArgs *p = (DynCallArgs*)buf;
3086 MonoType *ptype = ainfo->rtype;
3087 guint8 *ret = p->ret;
3088 host_mgreg_t res = p->res;
3089 host_mgreg_t res2 = p->res2;
3091 switch (ptype->type) {
3092 case MONO_TYPE_VOID:
3093 *(gpointer*)ret = NULL;
3094 break;
3095 case MONO_TYPE_OBJECT:
3096 case MONO_TYPE_I:
3097 case MONO_TYPE_U:
3098 case MONO_TYPE_PTR:
3099 *(gpointer*)ret = (gpointer)(gsize)res;
3100 break;
3101 case MONO_TYPE_I1:
3102 *(gint8*)ret = res;
3103 break;
3104 case MONO_TYPE_U1:
3105 *(guint8*)ret = res;
3106 break;
3107 case MONO_TYPE_I2:
3108 *(gint16*)ret = res;
3109 break;
3110 case MONO_TYPE_U2:
3111 *(guint16*)ret = res;
3112 break;
3113 case MONO_TYPE_I4:
3114 *(gint32*)ret = res;
3115 break;
3116 case MONO_TYPE_U4:
3117 *(guint32*)ret = res;
3118 break;
3119 case MONO_TYPE_I8:
3120 case MONO_TYPE_U8:
3121 /* This handles endianness as well */
3122 ((gint32*)ret) [0] = res;
3123 ((gint32*)ret) [1] = res2;
3124 break;
3125 case MONO_TYPE_GENERICINST:
3126 if (MONO_TYPE_IS_REFERENCE (ptype)) {
3127 *(gpointer*)ret = (gpointer)res;
3128 break;
3129 } else {
3130 /* Fall though */
3132 case MONO_TYPE_VALUETYPE:
/* Vtype already written through the hidden return address argument */
3133 g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr);
3134 /* Nothing to do */
3135 break;
3136 case MONO_TYPE_R4:
3137 g_assert (IS_VFP);
3138 if (IS_HARD_FLOAT)
3139 *(float*)ret = *(float*)&p->fpregs [0];
3140 else
3141 *(float*)ret = *(float*)&res;
3142 break;
3143 case MONO_TYPE_R8: {
3144 host_mgreg_t regs [2];
3146 g_assert (IS_VFP);
3147 if (IS_HARD_FLOAT) {
3148 *(double*)ret = p->fpregs [0];
3149 } else {
/* softfp: the double comes back in the r0/r1 register pair */
3150 regs [0] = res;
3151 regs [1] = res2;
3153 *(double*)ret = *(double*)&regs;
3155 break;
3157 default:
3158 g_assert_not_reached ();
3162 #ifndef DISABLE_JIT
/*
3165 * The immediate field for cond branches is big enough for all reasonable methods
 */
/* Emit a conditional branch to ins->inst_true_bb, patched later via MONO_PATCH_INFO_BB */
3167 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3168 if (0 && ins->inst_true_bb->native_offset) { \
3169 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3170 } else { \
3171 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3172 ARM_B_COND (code, (condcode), 0); \
3175 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3177 /* emit an exception if condition is fail
3179 * We assign the extra code used to throw the implicit exceptions
3180 * to cfg->bb_exit as far as the big branch handling is concerned
 */
3182 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3183 do { \
3184 mono_add_patch_info (cfg, code - cfg->native_code, \
3185 MONO_PATCH_INFO_EXC, exc_name); \
3186 ARM_BL_COND (code, (condcode), 0); \
3187 } while (0);
3189 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3191 void
3192 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Second arch-specific peephole pass: eliminate redundant loads after
 * stores/loads to the same [basereg + offset], turn narrow load-after-store
 * into sign/zero extensions, and remove useless register moves.
 */
3196 void
3197 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3199 MonoInst *ins, *n;
3201 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3202 MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
3204 switch (ins->opcode) {
3205 case OP_MUL_IMM:
3206 case OP_IMUL_IMM:
3207 /* Already done by an arch-independent pass */
3208 break;
3209 case OP_LOAD_MEMBASE:
3210 case OP_LOADI4_MEMBASE:
/*
3212 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3213 * OP_LOAD_MEMBASE offset(basereg), reg
 */
3215 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3216 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3217 ins->inst_basereg == last_ins->inst_destbasereg &&
3218 ins->inst_offset == last_ins->inst_offset) {
3219 if (ins->dreg == last_ins->sreg1) {
/* Loading back the value just stored: drop the load entirely */
3220 MONO_DELETE_INS (bb, ins);
3221 continue;
3222 } else {
3223 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3224 ins->opcode = OP_MOVE;
3225 ins->sreg1 = last_ins->sreg1;
/*
3229 * Note: reg1 must be different from the basereg in the second load
3230 * OP_LOAD_MEMBASE offset(basereg), reg1
3231 * OP_LOAD_MEMBASE offset(basereg), reg2
3232 * -->
3233 * OP_LOAD_MEMBASE offset(basereg), reg1
3234 * OP_MOVE reg1, reg2
 */
3236 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3237 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3238 ins->inst_basereg != last_ins->dreg &&
3239 ins->inst_basereg == last_ins->inst_basereg &&
3240 ins->inst_offset == last_ins->inst_offset) {
3242 if (ins->dreg == last_ins->dreg) {
3243 MONO_DELETE_INS (bb, ins);
3244 continue;
3245 } else {
3246 ins->opcode = OP_MOVE;
3247 ins->sreg1 = last_ins->dreg;
3250 //g_assert_not_reached ();
3252 #if 0
/*
3254 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3255 * OP_LOAD_MEMBASE offset(basereg), reg
3256 * -->
3257 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3258 * OP_ICONST reg, imm
 */
3260 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3261 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3262 ins->inst_basereg == last_ins->inst_destbasereg &&
3263 ins->inst_offset == last_ins->inst_offset) {
3264 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3265 ins->opcode = OP_ICONST;
3266 ins->inst_c0 = last_ins->inst_imm;
3267 g_assert_not_reached (); // check this rule
3268 #endif
3270 break;
3271 case OP_LOADU1_MEMBASE:
3272 case OP_LOADI1_MEMBASE:
/* Narrow load after store to the same location -> extension of the stored reg */
3273 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3274 ins->inst_basereg == last_ins->inst_destbasereg &&
3275 ins->inst_offset == last_ins->inst_offset) {
3276 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3277 ins->sreg1 = last_ins->sreg1;
3279 break;
3280 case OP_LOADU2_MEMBASE:
3281 case OP_LOADI2_MEMBASE:
3282 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3283 ins->inst_basereg == last_ins->inst_destbasereg &&
3284 ins->inst_offset == last_ins->inst_offset) {
3285 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3286 ins->sreg1 = last_ins->sreg1;
3288 break;
3289 case OP_MOVE:
3290 ins->opcode = OP_MOVE;
/*
3292 * OP_MOVE reg, reg
 */
3294 if (ins->dreg == ins->sreg1) {
3295 MONO_DELETE_INS (bb, ins);
3296 continue;
/*
3299 * OP_MOVE sreg, dreg
3300 * OP_MOVE dreg, sreg
 */
3302 if (last_ins && last_ins->opcode == OP_MOVE &&
3303 ins->sreg1 == last_ins->dreg &&
3304 ins->dreg == last_ins->sreg1) {
3305 MONO_DELETE_INS (bb, ins);
3306 continue;
3308 break;
3314 * the branch_cc_table should maintain the order of these
3315 * opcodes.
3316 case CEE_BEQ:
3317 case CEE_BGE:
3318 case CEE_BGT:
3319 case CEE_BLE:
3320 case CEE_BLT:
3321 case CEE_BNE_UN:
3322 case CEE_BGE_UN:
3323 case CEE_BGT_UN:
3324 case CEE_BLE_UN:
3325 case CEE_BLT_UN:
/* Maps the CEE_B* opcode order (see comment above) to ARM condition codes;
 * first five are signed conditions, last five their unsigned counterparts. */
static const guchar
branch_cc_table [] = {
	ARMCOND_EQ,
	ARMCOND_GE,
	ARMCOND_GT,
	ARMCOND_LE,
	ARMCOND_LT,

	ARMCOND_NE,
	ARMCOND_HS,
	ARMCOND_HI,
	ARMCOND_LS,
	ARMCOND_LO
};
/* Allocate a fresh MonoInst with opcode OP into DEST and insert it into the
 * basic block immediately before the instruction currently being lowered
 * (relies on `bb` and `ins` being in scope at the expansion site). */
#define ADD_NEW_INS(cfg,dest,op) do {       \
		MONO_INST_NEW ((cfg), (dest), (op)); \
		mono_bblock_insert_before_ins (bb, ins, (dest)); \
	} while (0)
/*
 * Map an immediate-operand or membase-addressed opcode to the equivalent
 * register-register / register-indexed form, used by the lowering pass once
 * the immediate has been materialized into a register.  Aborts on opcodes
 * it does not know how to map.
 */
static int
map_to_reg_reg_op (int op)
{
	switch (op) {
	case OP_ADD_IMM:
		return OP_IADD;
	case OP_SUB_IMM:
		return OP_ISUB;
	case OP_AND_IMM:
		return OP_IAND;
	case OP_COMPARE_IMM:
		return OP_COMPARE;
	case OP_ICOMPARE_IMM:
		return OP_ICOMPARE;
	case OP_ADDCC_IMM:
		return OP_ADDCC;
	case OP_ADC_IMM:
		return OP_ADC;
	case OP_SUBCC_IMM:
		return OP_SUBCC;
	case OP_SBB_IMM:
		return OP_SBB;
	case OP_OR_IMM:
		return OP_IOR;
	case OP_XOR_IMM:
		return OP_IXOR;
	/* membase loads become memindex loads (offset in a register) */
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEMINDEX;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEMINDEX;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEMINDEX;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEMINDEX;
	case OP_LOADI2_MEMBASE:
		return OP_LOADI2_MEMINDEX;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEMINDEX;
	case OP_LOADI1_MEMBASE:
		return OP_LOADI1_MEMINDEX;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMINDEX;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMINDEX;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMINDEX;
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMINDEX;
	case OP_STORER4_MEMBASE_REG:
		return OP_STORER4_MEMINDEX;
	case OP_STORER8_MEMBASE_REG:
		return OP_STORER8_MEMINDEX;
	/* immediate stores only lose the immediate here; the offset may still
	 * need a second lowering round (see the goto in the lowering pass) */
	case OP_STORE_MEMBASE_IMM:
		return OP_STORE_MEMBASE_REG;
	case OP_STOREI1_MEMBASE_IMM:
		return OP_STOREI1_MEMBASE_REG;
	case OP_STOREI2_MEMBASE_IMM:
		return OP_STOREI2_MEMBASE_REG;
	case OP_STOREI4_MEMBASE_IMM:
		return OP_STOREI4_MEMBASE_REG;
	}
	g_assert_not_reached ();
}
/*
 * Remove from the instruction list the instructions that can't be
 * represented with very simple instructions with no register
 * requirements.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *temp, *last_ins = NULL;
	int rot_amount, imm8, low_imm;

	MONO_BB_FOR_EACH_INS (bb, ins) {
loop_start:
		switch (ins->opcode) {
		case OP_ADD_IMM:
		case OP_SUB_IMM:
		case OP_AND_IMM:
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		case OP_ADDCC_IMM:
		case OP_ADC_IMM:
		case OP_SUBCC_IMM:
		case OP_SBB_IMM:
		case OP_OR_IMM:
		case OP_XOR_IMM:
		case OP_IADD_IMM:
		case OP_ISUB_IMM:
		case OP_IAND_IMM:
		case OP_IADC_IMM:
		case OP_ISBB_IMM:
		case OP_IOR_IMM:
		case OP_IXOR_IMM:
			/* ARM data-processing immediates must be a rotated 8-bit
			 * value; otherwise materialize the constant in a vreg and
			 * switch to the reg-reg form of the opcode. */
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
				int opcode2 = mono_op_imm_to_op (ins->opcode);
				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				if (opcode2 == -1)
					g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
				ins->opcode = opcode2;
			}
			/* the reg-reg SBB/SUBCC forms need the carry-flag fixup below */
			if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
				goto loop_start;
			else
				break;
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* strength-reduce: *1 -> move, *0 -> const 0, *2^n -> shift */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			if (ins->inst_imm == 0) {
				ins->opcode = OP_ICONST;
				ins->inst_c0 = 0;
				break;
			}
			imm8 = mono_is_power_of_two (ins->inst_imm);
			if (imm8 > 0) {
				ins->opcode = OP_SHL_IMM;
				ins->inst_imm = imm8;
				break;
			}
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = OP_IMUL;
			break;
		case OP_SBB:
		case OP_ISBB:
		case OP_SUBCC:
		case OP_ISUBCC: {
			int try_count = 2;
			MonoInst *current = ins;

			/* may require a look-ahead of a couple instructions due to spilling */
			while (try_count-- && current->next) {
				if (current->next->opcode == OP_COND_EXC_C || current->next->opcode == OP_COND_EXC_IC) {
					/* ARM sets the C flag to 1 if there was _no_ overflow */
					current->next->opcode = OP_COND_EXC_NC;
					break;
				}
				current = current->next;
			}
			break;
		}
		case OP_IDIV_IMM:
		case OP_IDIV_UN_IMM:
		case OP_IREM_IMM:
		case OP_IREM_UN_IMM: {
			/* no div-by-immediate on ARM: load the divisor into a vreg */
			int opcode2 = mono_op_imm_to_op (ins->opcode);
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			if (opcode2 == -1)
				g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
			ins->opcode = opcode2;
			break;
		}
		case OP_LOCALLOC_IMM:
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = OP_LOCALLOC;
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADU1_MEMBASE:
			/* we can do two things: load the immed in a register
			 * and use an indexed load, or see if the immed can be
			 * represented as an ad_imm + a load with a smaller offset
			 * that fits. We just do the first for now, optimize later.
			 */
			if (arm_is_imm12 (ins->inst_offset))
				break;
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI1_MEMBASE:
			/* halfword/signed-byte loads only have an 8-bit offset field */
			if (arm_is_imm8 (ins->inst_offset))
				break;
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
			/* VFP loads have no indexed form: either fold the high part of
			 * the offset into an add-immediate, or compute the full address. */
			if (arm_is_fpimm8 (ins->inst_offset))
				break;
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
				ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_basereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = temp->dreg;
				ins->inst_offset = low_imm;
			} else {
				MonoInst *add_ins;

				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);

				ADD_NEW_INS (cfg, add_ins, OP_IADD);
				add_ins->sreg1 = ins->inst_basereg;
				add_ins->sreg2 = temp->dreg;
				add_ins->dreg = mono_alloc_ireg (cfg);

				ins->inst_basereg = add_ins->dreg;
				ins->inst_offset = 0;
			}
			break;
		case OP_STORE_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI1_MEMBASE_REG:
			if (arm_is_imm12 (ins->inst_offset))
				break;
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_STOREI2_MEMBASE_REG:
			if (arm_is_imm8 (ins->inst_offset))
				break;
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
			if (arm_is_fpimm8 (ins->inst_offset))
				break;
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
				ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_destbasereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_destbasereg = temp->dreg;
				ins->inst_offset = low_imm;
			} else {
				MonoInst *add_ins;

				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);

				ADD_NEW_INS (cfg, add_ins, OP_IADD);
				add_ins->sreg1 = ins->inst_destbasereg;
				add_ins->sreg2 = temp->dreg;
				add_ins->dreg = mono_alloc_ireg (cfg);

				ins->inst_destbasereg = add_ins->dreg;
				ins->inst_offset = 0;
			}
			break;
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
			ADD_NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			last_ins = temp;
			goto loop_start; /* make it handle the possibly big ins->inst_offset */
		case OP_FCOMPARE:
		case OP_RCOMPARE: {
			gboolean swap = FALSE;
			int reg;

			if (!ins->next) {
				/* Optimized away */
				NULLIFY_INS (ins);
				break;
			}

			/* Some fp compares require swapped operands */
			switch (ins->next->opcode) {
			case OP_FBGT:
				ins->next->opcode = OP_FBLT;
				swap = TRUE;
				break;
			case OP_FBGT_UN:
				ins->next->opcode = OP_FBLT_UN;
				swap = TRUE;
				break;
			case OP_FBLE:
				ins->next->opcode = OP_FBGE;
				swap = TRUE;
				break;
			case OP_FBLE_UN:
				ins->next->opcode = OP_FBGE_UN;
				swap = TRUE;
				break;
			default:
				break;
			}
			if (swap) {
				reg = ins->sreg1;
				ins->sreg1 = ins->sreg2;
				ins->sreg2 = reg;
			}
			break;
		}
		}

		last_ins = ins;
	}
	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
}
3681 void
3682 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3684 MonoInst *ins;
3686 if (long_ins->opcode == OP_LNEG) {
3687 ins = long_ins;
3688 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0);
3689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0);
3690 NULLIFY_INS (ins);
/*
 * Emit code converting the double in VFP register `sreg` to an integer of
 * `size` bytes (1, 2 or 4) in core register `dreg`, truncating toward zero.
 * After the VFP convert+move, narrow results are sign- or zero-extended
 * with shift pairs / masking.  Returns the updated code cursor.
 */
static guchar*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a float, dreg is an integer reg  */
	if (IS_VFP) {
		code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
		if (is_signed)
			ARM_TOSIZD (code, vfp_scratch1, sreg);
		else
			ARM_TOUIZD (code, vfp_scratch1, sreg);
		/* move the converted value from the VFP scratch to the core reg */
		ARM_FMRS (code, dreg, vfp_scratch1);
		code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
	}
	if (!is_signed) {
		if (size == 1)
			ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
		else if (size == 2) {
			/* zero-extend 16 bits via shift-up/logical-shift-down */
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SHR_IMM (code, dreg, dreg, 16);
		}
	} else {
		if (size == 1) {
			/* sign-extend 8 bits via shift-up/arithmetic-shift-down */
			ARM_SHL_IMM (code, dreg, dreg, 24);
			ARM_SAR_IMM (code, dreg, dreg, 24);
		} else if (size == 2) {
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SAR_IMM (code, dreg, dreg, 16);
		}
	}
	return code;
}
/*
 * Emit code converting the single-precision float in VFP register `sreg`
 * to an integer of `size` bytes in core register `dreg`, truncating toward
 * zero.  Single-precision counterpart of emit_float_to_int; requires VFP.
 * Returns the updated code cursor.
 */
static guchar*
emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a float, dreg is an integer reg  */
	g_assert (IS_VFP);
	code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
	if (is_signed)
		ARM_TOSIZS (code, vfp_scratch1, sreg);
	else
		ARM_TOUIZS (code, vfp_scratch1, sreg);
	ARM_FMRS (code, dreg, vfp_scratch1);
	code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);

	/* narrow the 32-bit result to the requested size */
	if (!is_signed) {
		if (size == 1)
			ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
		else if (size == 2) {
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SHR_IMM (code, dreg, dreg, 16);
		}
	} else {
		if (size == 1) {
			ARM_SHL_IMM (code, dreg, dreg, 24);
			ARM_SAR_IMM (code, dreg, dreg, 24);
		} else if (size == 2) {
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SAR_IMM (code, dreg, dreg, 16);
		}
	}
	return code;
}
3758 #endif /* #ifndef DISABLE_JIT */
3760 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * Write a branch thunk at `code` that jumps to `target`:
 * a pc-relative load of the address constant stored right after the
 * two-instruction sequence, followed by a BX (when thumb interworking is
 * supported) or a plain mov to pc.  Flushes the icache for the thunk.
 */
static void
emit_thunk (guint8 *code, gconstpointer target)
{
	guint8 *p = code;

	/* load the address word that follows the jump instruction */
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
	if (thumb_supported)
		ARM_BX (code, ARMREG_IP);
	else
		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
	/* target pointers are 32 bits on the target even on a 64-bit host */
	*(guint32*)code = (guint32)(gsize)target;
	code += 4;
	mono_arch_flush_icache (p, code - p);
}
/*
 * Redirect the call at `code` to `target` through a thunk, used when the
 * target is out of direct-branch range.  With a MonoCompile (`cfg`) the
 * thunk space reserved during JITting is consumed sequentially; otherwise
 * the method's thunk area is looked up via the JIT info table and scanned
 * for a free or already-matching entry under the arch lock.
 */
static void
handle_thunk (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
{
	MonoJitInfo *ji = NULL;
	MonoThunkJitInfo *info;
	guint8 *thunks, *p;
	int thunks_size;
	guint8 *orig_target;
	guint8 *target_thunk;

	if (!domain)
		domain = mono_domain_get ();

	if (cfg) {
		/*
		 * This can be called multiple times during JITting,
		 * save the current position in cfg->arch to avoid
		 * doing a O(n^2) search.
		 */
		if (!cfg->arch.thunks) {
			cfg->arch.thunks = cfg->thunks;
			cfg->arch.thunks_size = cfg->thunk_area;
		}
		thunks = cfg->arch.thunks;
		thunks_size = cfg->arch.thunks_size;
		if (!thunks_size) {
			g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
			g_assert_not_reached ();
		}

		/* the next slot must still be unused (first word zero) */
		g_assert (*(guint32*)thunks == 0);
		emit_thunk (thunks, target);
		arm_patch (code, thunks);

		cfg->arch.thunks += THUNK_SIZE;
		cfg->arch.thunks_size -= THUNK_SIZE;
	} else {
		ji = mini_jit_info_table_find (domain, (char*)code, NULL);
		g_assert (ji);
		info = mono_jit_info_get_thunk_info (ji);
		g_assert (info);

		thunks = (guint8*)ji->code_start + info->thunks_offset;
		thunks_size = info->thunks_size;

		orig_target = mono_arch_get_call_target (code + 4);

		mono_mini_arch_lock ();

		target_thunk = NULL;
		if (orig_target >= thunks && orig_target < thunks + thunks_size) {
			/* The call already points to a thunk, because of trampolines etc. */
			target_thunk = orig_target;
		} else {
			/* linear scan for a free slot or one already aimed at target */
			for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
				if (((guint32*)p) [0] == 0) {
					/* Free entry */
					target_thunk = p;
					break;
				} else if (((guint32*)p) [2] == (guint32)(gsize)target) {
					/* Thunk already points to target */
					target_thunk = p;
					break;
				}
			}
		}

		//g_print ("THUNK: %p %p %p\n", code, target, target_thunk);

		if (!target_thunk) {
			mono_mini_arch_unlock ();
			g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
			g_assert_not_reached ();
		}

		emit_thunk (target_thunk, target);
		arm_patch (code, target_thunk);
		mono_arch_flush_icache (code, 4);

		mono_mini_arch_unlock ();
	}
}
/*
 * Patch the branch/call instruction(s) at `code` to transfer to `target`.
 * Handles: direct B/BL (rewriting the 24-bit displacement, switching BL to
 * BLX for Thumb targets, falling back to a thunk when out of range), and
 * the several load-constant call sequences emitted by this backend, where
 * only the embedded 32-bit address word needs to be rewritten.
 */
static void
arm_patch_general (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
{
	guint32 *code32 = (guint32*)code;
	guint32 ins = *code32;
	guint32 prim = (ins >> 25) & 7;
	guint32 tval = GPOINTER_TO_UINT (target);

	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
	if (prim == 5) { /* 101b */
		/* the diff starts 8 bytes from the branch opcode */
		gint diff = target - code - 8;
		gint tbits;
		gint tmask = 0xffffffff;
		if (tval & 1) { /* entering thumb mode */
			diff = target - 1 - code - 8;
			g_assert (thumb_supported);
			tbits = 0xf << 28; /* bl->blx bit pattern */
			g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
			/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
			if (diff & 2) {
				tbits |= 1 << 24;
			}
			tmask = ~(1 << 24); /* clear the link bit */
			/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
		} else {
			tbits = 0;
		}
		if (diff >= 0) {
			if (diff <= 33554431) {
				/* word-align the displacement into the low 24 bits */
				diff >>= 2;
				ins = (ins & 0xff000000) | diff;
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		} else {
			/* diff between 0 and -33554432 */
			if (diff >= -33554432) {
				diff >>= 2;
				ins = (ins & 0xff000000) | (diff & ~0xff000000);
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		}

		/* target out of direct-branch range: go through a thunk */
		handle_thunk (cfg, domain, code, target);
		return;
	}

	/*
	 * The alternative call sequences looks like this:
	 *
	 * 	ldr ip, [pc] // loads the address constant
	 * 	b 1f         // jumps around the constant
	 * 	address constant embedded in the code
	 *   1f:
	 * 	mov lr, pc
	 * 	mov pc, ip
	 *
	 * There are two cases for patching:
	 * a) at the end of method emission: in this case code points to the start
	 *    of the call sequence
	 * b) during runtime patching of the call site: in this case code points
	 *    to the mov pc, ip instruction
	 *
	 * We have to handle also the thunk jump code sequence:
	 *
	 * 	ldr ip, [pc]
	 * 	mov pc, ip
	 * 	address constant // execution never reaches here
	 */
	if ((ins & 0x0ffffff0) == 0x12fff10) {
		/* Branch and exchange: the address is constructed in a reg
		 * We can patch BX when the code sequence is the following:
		 *  ldr     ip, [pc, #0]    ; 0x8
		 *  b       0xc
		 *  .word code_ptr
		 *  mov     lr, pc
		 *  bx      ips
		 * */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_BX (emit, ARMREG_IP);

		/*patching from magic trampoline*/
		if (ins == ccode [3]) {
			g_assert (code32 [-4] == ccode [0]);
			g_assert (code32 [-3] == ccode [1]);
			g_assert (code32 [-1] == ccode [2]);
			code32 [-2] = (guint32)(gsize)target;
			return;
		}
		/*patching from JIT*/
		if (ins == ccode [0]) {
			g_assert (code32 [1] == ccode [1]);
			g_assert (code32 [3] == ccode [2]);
			g_assert (code32 [4] == ccode [3]);
			code32 [2] = (guint32)(gsize)target;
			return;
		}
		g_assert_not_reached ();
	} else if ((ins & 0x0ffffff0) == 0x12fff30) {
		/*
		 * ldr ip, [pc, #0]
		 * b 0xc
		 * .word code_ptr
		 * blx ip
		 */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_BLX_REG (emit, ARMREG_IP);

		g_assert (code32 [-3] == ccode [0]);
		g_assert (code32 [-2] == ccode [1]);
		g_assert (code32 [0] == ccode [2]);

		code32 [-1] = (guint32)(gsize)target;
	} else {
		guint32 ccode [4];
		guint32 *tmp = ccode;
		guint8 *emit = (guint8*)tmp;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
		ARM_BX (emit, ARMREG_IP);
		if (ins == ccode [2]) {
			g_assert_not_reached (); // should be -2 ...
			code32 [-1] = (guint32)(gsize)target;
			return;
		}
		if (ins == ccode [0]) {
			/* handles both thunk jump code and the far call sequence */
			code32 [2] = (guint32)(gsize)target;
			return;
		}
		g_assert_not_reached ();
	}
//	g_print ("patched with 0x%08x\n", ins);
}
/* Patch a call/branch site without JIT-compile or domain context. */
void
arm_patch (guchar *code, const guchar *target)
{
	arm_patch_general (NULL, NULL, code, target);
}
4014 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
4015 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
4016 * to be used with the emit macros.
4017 * Return -1 otherwise.
4020 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
4022 guint32 res, i;
4023 for (i = 0; i < 31; i+= 2) {
4024 if (i == 0)
4025 res = val;
4026 else
4027 res = (val << (32 - i)) | (val >> i);
4028 if (res & ~0xff)
4029 continue;
4030 *rot_amount = i? 32 - i: 0;
4031 return res;
4033 return -1;
/*
 * Emits in code a sequence of instructions that load the value 'val'
 * into the dreg register. Uses at most 4 instructions.
 */
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
	int imm8, rot_amount;
#if 0
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* skip the constant pool */
	ARM_B (code, 0);
	*(int*)code = val;
	code += 4;
	return code;
#endif
	/* debug aid: force a fixed-size movw/movt pair so code size is stable */
	if (mini_get_debug_options()->single_imm_size && v7_supported) {
		ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
		ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
		return code;
	}

	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
		/* single MOV with a rotated 8-bit immediate */
		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
		/* the complement fits: use MVN */
		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
	} else {
		if (v7_supported) {
			ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
			if (val >> 16)
				ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
			return code;
		}
		/* pre-v7 fallback: build the value a byte at a time with MOV + ADDs */
		if (val & 0xFF) {
			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
			if (val & 0xFF00) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
			}
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF00) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF0000) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		}
		//g_assert_not_reached ();
	}
	return code;
}
/* Whether the CPU supports Thumb interworking (set during hwcap init). */
gboolean
mono_arm_thumb_supported (void)
{
	return thumb_supported;
}
/* Whether the runtime was configured for the ARM EABI. */
gboolean
mono_arm_eabi_supported (void)
{
	return eabi_supported;
}
/* Alignment required for 64-bit values on this target. */
gint
mono_arm_i8_align (void)
{
	return i8_align;
}
4117 #ifndef DISABLE_JIT
/*
 * Emit the code that moves a call's return value from its ABI location
 * into the destination of the call instruction `ins`:
 *  - structs returned in registers / HFAs are stored through the saved
 *    vret address (loaded into LR from cfg->arch.vret_addr_loc);
 *  - float (FCALL) and r4 (RCALL) results are moved out of VFP d0/s0 or,
 *    under the soft-float ABI, out of r0/r1.
 * Returns the updated code cursor.
 */
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
	CallInfo *cinfo;
	MonoCallInst *call;

	call = (MonoCallInst*)ins;
	cinfo = call->call_info;

	switch (cinfo->ret.storage) {
	case RegTypeStructByVal:
	case RegTypeHFA: {
		MonoInst *loc = cfg->arch.vret_addr_loc;
		int i;

		if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
			/* The JIT treats this as a normal call */
			break;
		}

		/* Load the destination address */
		g_assert (loc && loc->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (loc->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset);
			ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR);
		}

		if (cinfo->ret.storage == RegTypeStructByVal) {
			int rsize = cinfo->ret.struct_size;

			/* store return registers r0..rN, narrowing the last store
			 * to the struct's remaining size (1/2/4 bytes) */
			for (i = 0; i < cinfo->ret.nregs; ++i) {
				g_assert (rsize >= 0);
				switch (rsize) {
				case 0:
					break;
				case 1:
					ARM_STRB_IMM (code, i, ARMREG_LR, i * 4);
					break;
				case 2:
					ARM_STRH_IMM (code, i, ARMREG_LR, i * 4);
					break;
				default:
					ARM_STR_IMM (code, i, ARMREG_LR, i * 4);
					break;
				}
				rsize -= 4;
			}
		} else {
			/* HFA: store each float/double element from its VFP register */
			for (i = 0; i < cinfo->ret.nregs; ++i) {
				if (cinfo->ret.esize == 4)
					ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4);
				else
					ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8);
			}
		}
		return code;
	}
	default:
		break;
	}

	switch (ins->opcode) {
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
		if (IS_VFP) {
			MonoType *sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
			if (sig_ret->type == MONO_TYPE_R4) {
				if (IS_HARD_FLOAT) {
					ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
				} else {
					/* soft-float ABI returns the r4 in r0 */
					ARM_FMSR (code, ins->dreg, ARMREG_R0);
					ARM_CVTS (code, ins->dreg, ins->dreg);
				}
			} else {
				if (IS_HARD_FLOAT) {
					ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
				} else {
					/* soft-float ABI returns the double in r0:r1 */
					ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
				}
			}
		}
		break;
	case OP_RCALL:
	case OP_RCALL_REG:
	case OP_RCALL_MEMBASE: {
		MonoType *sig_ret;

		g_assert (IS_VFP);

		sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
		g_assert (sig_ret->type == MONO_TYPE_R4);
		if (IS_HARD_FLOAT) {
			ARM_CPYS (code, ins->dreg, ARM_VFP_F0);
		} else {
			ARM_FMSR (code, ins->dreg, ARMREG_R0);
			ARM_CPYS (code, ins->dreg, ins->dreg);
		}
		break;
	}
	default:
		break;
	}

	return code;
}
4229 void
4230 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4232 MonoInst *ins;
4233 MonoCallInst *call;
4234 guint8 *code = cfg->native_code + cfg->code_len;
4235 MonoInst *last_ins = NULL;
4236 int max_len, cpos;
4237 int imm8, rot_amount;
4239 /* we don't align basic blocks of loops on arm */
4241 if (cfg->verbose_level > 2)
4242 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4244 cpos = bb->max_offset;
4246 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4247 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4248 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
4249 code = emit_call_seq (cfg, code);
4252 MONO_BB_FOR_EACH_INS (bb, ins) {
4253 guint offset = code - cfg->native_code;
4254 set_code_cursor (cfg, code);
4255 max_len = ins_get_size (ins->opcode);
4256 code = realloc_code (cfg, max_len);
4257 // if (ins->cil_code)
4258 // g_print ("cil code\n");
4259 mono_debug_record_line_number (cfg, ins, offset);
4261 switch (ins->opcode) {
4262 case OP_MEMORY_BARRIER:
4263 if (v7_supported) {
4264 ARM_DMB (code, ARM_DMB_ISH);
4265 } else if (v6_supported) {
4266 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4267 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4269 break;
4270 case OP_TLS_GET:
4271 code = emit_tls_get (code, ins->dreg, ins->inst_offset);
4272 break;
4273 case OP_TLS_SET:
4274 code = emit_tls_set (code, ins->sreg1, ins->inst_offset);
4275 break;
4276 case OP_ATOMIC_EXCHANGE_I4:
4277 case OP_ATOMIC_CAS_I4:
4278 case OP_ATOMIC_ADD_I4: {
4279 int tmpreg;
4280 guint8 *buf [16];
4282 g_assert (v7_supported);
4284 /* Free up a reg */
4285 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4286 tmpreg = ARMREG_IP;
4287 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4288 tmpreg = ARMREG_R0;
4289 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4290 tmpreg = ARMREG_R1;
4291 else
4292 tmpreg = ARMREG_R2;
4293 g_assert (cfg->arch.atomic_tmp_offset != -1);
4294 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4296 switch (ins->opcode) {
4297 case OP_ATOMIC_EXCHANGE_I4:
4298 buf [0] = code;
4299 ARM_DMB (code, ARM_DMB_ISH);
4300 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4301 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4302 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4303 buf [1] = code;
4304 ARM_B_COND (code, ARMCOND_NE, 0);
4305 arm_patch (buf [1], buf [0]);
4306 break;
4307 case OP_ATOMIC_CAS_I4:
4308 ARM_DMB (code, ARM_DMB_ISH);
4309 buf [0] = code;
4310 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4311 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4312 buf [1] = code;
4313 ARM_B_COND (code, ARMCOND_NE, 0);
4314 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4315 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4316 buf [2] = code;
4317 ARM_B_COND (code, ARMCOND_NE, 0);
4318 arm_patch (buf [2], buf [0]);
4319 arm_patch (buf [1], code);
4320 break;
4321 case OP_ATOMIC_ADD_I4:
4322 buf [0] = code;
4323 ARM_DMB (code, ARM_DMB_ISH);
4324 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4325 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4326 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4327 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4328 buf [1] = code;
4329 ARM_B_COND (code, ARMCOND_NE, 0);
4330 arm_patch (buf [1], buf [0]);
4331 break;
4332 default:
4333 g_assert_not_reached ();
4336 ARM_DMB (code, ARM_DMB_ISH);
4337 if (tmpreg != ins->dreg)
4338 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4339 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4340 break;
4342 case OP_ATOMIC_LOAD_I1:
4343 case OP_ATOMIC_LOAD_U1:
4344 case OP_ATOMIC_LOAD_I2:
4345 case OP_ATOMIC_LOAD_U2:
4346 case OP_ATOMIC_LOAD_I4:
4347 case OP_ATOMIC_LOAD_U4:
4348 case OP_ATOMIC_LOAD_R4:
4349 case OP_ATOMIC_LOAD_R8: {
4350 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4351 ARM_DMB (code, ARM_DMB_ISH);
4353 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4355 switch (ins->opcode) {
4356 case OP_ATOMIC_LOAD_I1:
4357 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4358 break;
4359 case OP_ATOMIC_LOAD_U1:
4360 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4361 break;
4362 case OP_ATOMIC_LOAD_I2:
4363 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4364 break;
4365 case OP_ATOMIC_LOAD_U2:
4366 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4367 break;
4368 case OP_ATOMIC_LOAD_I4:
4369 case OP_ATOMIC_LOAD_U4:
4370 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4371 break;
4372 case OP_ATOMIC_LOAD_R4:
4373 if (cfg->r4fp) {
4374 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4375 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4376 } else {
4377 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4378 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4379 ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0);
4380 ARM_CVTS (code, ins->dreg, vfp_scratch1);
4381 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4383 break;
4384 case OP_ATOMIC_LOAD_R8:
4385 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4386 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4387 break;
4390 if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
4391 ARM_DMB (code, ARM_DMB_ISH);
4392 break;
4394 case OP_ATOMIC_STORE_I1:
4395 case OP_ATOMIC_STORE_U1:
4396 case OP_ATOMIC_STORE_I2:
4397 case OP_ATOMIC_STORE_U2:
4398 case OP_ATOMIC_STORE_I4:
4399 case OP_ATOMIC_STORE_U4:
4400 case OP_ATOMIC_STORE_R4:
4401 case OP_ATOMIC_STORE_R8: {
4402 if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
4403 ARM_DMB (code, ARM_DMB_ISH);
4405 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4407 switch (ins->opcode) {
4408 case OP_ATOMIC_STORE_I1:
4409 case OP_ATOMIC_STORE_U1:
4410 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4411 break;
4412 case OP_ATOMIC_STORE_I2:
4413 case OP_ATOMIC_STORE_U2:
4414 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4415 break;
4416 case OP_ATOMIC_STORE_I4:
4417 case OP_ATOMIC_STORE_U4:
4418 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4419 break;
4420 case OP_ATOMIC_STORE_R4:
4421 if (cfg->r4fp) {
4422 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4423 ARM_FSTS (code, ins->sreg1, ARMREG_LR, 0);
4424 } else {
4425 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4426 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4427 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4428 ARM_FSTS (code, vfp_scratch1, ARMREG_LR, 0);
4429 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4431 break;
4432 case OP_ATOMIC_STORE_R8:
4433 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4434 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4435 break;
4438 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4439 ARM_DMB (code, ARM_DMB_ISH);
4440 break;
4442 case OP_BIGMUL:
4443 ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
4444 break;
4445 case OP_BIGMUL_UN:
4446 ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
4447 break;
4448 case OP_STOREI1_MEMBASE_IMM:
4449 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4450 g_assert (arm_is_imm12 (ins->inst_offset));
4451 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4452 break;
4453 case OP_STOREI2_MEMBASE_IMM:
4454 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4455 g_assert (arm_is_imm8 (ins->inst_offset));
4456 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4457 break;
4458 case OP_STORE_MEMBASE_IMM:
4459 case OP_STOREI4_MEMBASE_IMM:
4460 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4461 g_assert (arm_is_imm12 (ins->inst_offset));
4462 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4463 break;
4464 case OP_STOREI1_MEMBASE_REG:
4465 g_assert (arm_is_imm12 (ins->inst_offset));
4466 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4467 break;
4468 case OP_STOREI2_MEMBASE_REG:
4469 g_assert (arm_is_imm8 (ins->inst_offset));
4470 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4471 break;
4472 case OP_STORE_MEMBASE_REG:
4473 case OP_STOREI4_MEMBASE_REG:
4474 /* this case is special, since it happens for spill code after lowering has been called */
4475 if (arm_is_imm12 (ins->inst_offset)) {
4476 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4477 } else {
4478 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4479 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4481 break;
4482 case OP_STOREI1_MEMINDEX:
4483 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4484 break;
4485 case OP_STOREI2_MEMINDEX:
4486 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4487 break;
4488 case OP_STORE_MEMINDEX:
4489 case OP_STOREI4_MEMINDEX:
4490 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4491 break;
4492 case OP_LOADU4_MEM:
4493 g_assert_not_reached ();
4494 break;
4495 case OP_LOAD_MEMINDEX:
4496 case OP_LOADI4_MEMINDEX:
4497 case OP_LOADU4_MEMINDEX:
4498 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4499 break;
4500 case OP_LOADI1_MEMINDEX:
4501 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4502 break;
4503 case OP_LOADU1_MEMINDEX:
4504 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4505 break;
4506 case OP_LOADI2_MEMINDEX:
4507 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4508 break;
4509 case OP_LOADU2_MEMINDEX:
4510 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4511 break;
4512 case OP_LOAD_MEMBASE:
4513 case OP_LOADI4_MEMBASE:
4514 case OP_LOADU4_MEMBASE:
4515 /* this case is special, since it happens for spill code after lowering has been called */
4516 if (arm_is_imm12 (ins->inst_offset)) {
4517 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4518 } else {
4519 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4520 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4522 break;
4523 case OP_LOADI1_MEMBASE:
4524 g_assert (arm_is_imm8 (ins->inst_offset));
4525 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4526 break;
4527 case OP_LOADU1_MEMBASE:
4528 g_assert (arm_is_imm12 (ins->inst_offset));
4529 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4530 break;
4531 case OP_LOADU2_MEMBASE:
4532 g_assert (arm_is_imm8 (ins->inst_offset));
4533 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4534 break;
4535 case OP_LOADI2_MEMBASE:
4536 g_assert (arm_is_imm8 (ins->inst_offset));
4537 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4538 break;
4539 case OP_ICONV_TO_I1:
4540 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4541 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4542 break;
4543 case OP_ICONV_TO_I2:
4544 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4545 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4546 break;
4547 case OP_ICONV_TO_U1:
4548 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4549 break;
4550 case OP_ICONV_TO_U2:
4551 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4552 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4553 break;
4554 case OP_COMPARE:
4555 case OP_ICOMPARE:
4556 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4557 break;
4558 case OP_COMPARE_IMM:
4559 case OP_ICOMPARE_IMM:
4560 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4561 g_assert (imm8 >= 0);
4562 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4563 break;
4564 case OP_BREAK:
4566 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4567 * So instead of emitting a trap, we emit a call a C function and place a
4568 * breakpoint there.
4570 //*(int*)code = 0xef9f0001;
4571 //code += 4;
4572 //ARM_DBRK (code);
4573 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4574 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
4575 code = emit_call_seq (cfg, code);
4576 break;
4577 case OP_RELAXED_NOP:
4578 ARM_NOP (code);
4579 break;
4580 case OP_NOP:
4581 case OP_DUMMY_USE:
4582 case OP_DUMMY_ICONST:
4583 case OP_DUMMY_R8CONST:
4584 case OP_DUMMY_R4CONST:
4585 case OP_NOT_REACHED:
4586 case OP_NOT_NULL:
4587 break;
4588 case OP_IL_SEQ_POINT:
4589 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4590 break;
4591 case OP_SEQ_POINT: {
4592 int i;
4593 MonoInst *info_var = cfg->arch.seq_point_info_var;
4594 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4595 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4596 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4597 MonoInst *var;
4598 int dreg = ARMREG_LR;
4600 #if 0
4601 if (cfg->soft_breakpoints) {
4602 g_assert (!cfg->compile_aot);
4604 #endif
4607 * For AOT, we use one got slot per method, which will point to a
4608 * SeqPointInfo structure, containing all the information required
4609 * by the code below.
4611 if (cfg->compile_aot) {
4612 g_assert (info_var);
4613 g_assert (info_var->opcode == OP_REGOFFSET);
4616 if (!cfg->soft_breakpoints && !cfg->compile_aot) {
4618 * Read from the single stepping trigger page. This will cause a
4619 * SIGSEGV when single stepping is enabled.
4620 * We do this _before_ the breakpoint, so single stepping after
4621 * a breakpoint is hit will step to the next IL offset.
4623 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4626 /* Single step check */
4627 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4628 if (cfg->soft_breakpoints) {
4629 /* Load the address of the sequence point method variable. */
4630 var = ss_method_var;
4631 g_assert (var);
4632 g_assert (var->opcode == OP_REGOFFSET);
4633 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4634 /* Read the value and check whether it is non-zero. */
4635 ARM_LDR_IMM (code, dreg, dreg, 0);
4636 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4637 /* Call it conditionally. */
4638 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4639 } else {
4640 if (cfg->compile_aot) {
4641 /* Load the trigger page addr from the variable initialized in the prolog */
4642 var = ss_trigger_page_var;
4643 g_assert (var);
4644 g_assert (var->opcode == OP_REGOFFSET);
4645 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4646 } else {
4647 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4648 ARM_B (code, 0);
4649 *(int*)code = (int)(gsize)ss_trigger_page;
4650 code += 4;
4652 ARM_LDR_IMM (code, dreg, dreg, 0);
4656 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4658 /* Breakpoint check */
4659 if (cfg->compile_aot) {
4660 const guint32 offset = code - cfg->native_code;
4661 guint32 val;
4663 var = info_var;
4664 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4665 /* Add the offset */
4666 val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4667 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4668 if (arm_is_imm12 ((int)val)) {
4669 ARM_LDR_IMM (code, dreg, dreg, val);
4670 } else {
4671 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4672 if (val & 0xFF00)
4673 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4674 if (val & 0xFF0000)
4675 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4676 g_assert (!(val & 0xFF000000));
4678 ARM_LDR_IMM (code, dreg, dreg, 0);
4680 /* What is faster, a branch or a load ? */
4681 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4682 /* The breakpoint instruction */
4683 if (cfg->soft_breakpoints)
4684 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4685 else
4686 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4687 } else if (cfg->soft_breakpoints) {
4688 /* Load the address of the breakpoint method into ip. */
4689 var = bp_method_var;
4690 g_assert (var);
4691 g_assert (var->opcode == OP_REGOFFSET);
4692 g_assert (arm_is_imm12 (var->inst_offset));
4693 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4696 * A placeholder for a possible breakpoint inserted by
4697 * mono_arch_set_breakpoint ().
4699 ARM_NOP (code);
4700 } else {
4702 * A placeholder for a possible breakpoint inserted by
4703 * mono_arch_set_breakpoint ().
4705 for (i = 0; i < 4; ++i)
4706 ARM_NOP (code);
4710 * Add an additional nop so skipping the bp doesn't cause the ip to point
4711 * to another IL offset.
4714 ARM_NOP (code);
4715 break;
4717 case OP_ADDCC:
4718 case OP_IADDCC:
4719 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4720 break;
4721 case OP_IADD:
4722 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4723 break;
4724 case OP_ADC:
4725 case OP_IADC:
4726 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4727 break;
4728 case OP_ADDCC_IMM:
4729 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4730 g_assert (imm8 >= 0);
4731 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4732 break;
4733 case OP_ADD_IMM:
4734 case OP_IADD_IMM:
4735 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4736 g_assert (imm8 >= 0);
4737 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4738 break;
4739 case OP_ADC_IMM:
4740 case OP_IADC_IMM:
4741 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4742 g_assert (imm8 >= 0);
4743 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4744 break;
4745 case OP_IADD_OVF:
4746 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4747 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4748 break;
4749 case OP_IADD_OVF_UN:
4750 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4751 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4752 break;
4753 case OP_ISUB_OVF:
4754 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4755 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4756 break;
4757 case OP_ISUB_OVF_UN:
4758 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4759 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4760 break;
4761 case OP_ADD_OVF_CARRY:
4762 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4763 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4764 break;
4765 case OP_ADD_OVF_UN_CARRY:
4766 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4767 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4768 break;
4769 case OP_SUB_OVF_CARRY:
4770 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4771 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4772 break;
4773 case OP_SUB_OVF_UN_CARRY:
4774 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4775 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4776 break;
4777 case OP_SUBCC:
4778 case OP_ISUBCC:
4779 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4780 break;
4781 case OP_SUBCC_IMM:
4782 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4783 g_assert (imm8 >= 0);
4784 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4785 break;
4786 case OP_ISUB:
4787 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4788 break;
4789 case OP_SBB:
4790 case OP_ISBB:
4791 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4792 break;
4793 case OP_SUB_IMM:
4794 case OP_ISUB_IMM:
4795 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4796 g_assert (imm8 >= 0);
4797 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4798 break;
4799 case OP_SBB_IMM:
4800 case OP_ISBB_IMM:
4801 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4802 g_assert (imm8 >= 0);
4803 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4804 break;
4805 case OP_ARM_RSBS_IMM:
4806 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4807 g_assert (imm8 >= 0);
4808 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4809 break;
4810 case OP_ARM_RSC_IMM:
4811 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4812 g_assert (imm8 >= 0);
4813 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4814 break;
4815 case OP_IAND:
4816 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4817 break;
4818 case OP_AND_IMM:
4819 case OP_IAND_IMM:
4820 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4821 g_assert (imm8 >= 0);
4822 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4823 break;
4824 case OP_IDIV:
4825 g_assert (v7s_supported || v7k_supported);
4826 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4827 break;
4828 case OP_IDIV_UN:
4829 g_assert (v7s_supported || v7k_supported);
4830 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4831 break;
4832 case OP_IREM:
4833 g_assert (v7s_supported || v7k_supported);
4834 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4835 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4836 break;
4837 case OP_IREM_UN:
4838 g_assert (v7s_supported || v7k_supported);
4839 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4840 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4841 break;
4842 case OP_DIV_IMM:
4843 case OP_REM_IMM:
4844 g_assert_not_reached ();
4845 case OP_IOR:
4846 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4847 break;
4848 case OP_OR_IMM:
4849 case OP_IOR_IMM:
4850 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4851 g_assert (imm8 >= 0);
4852 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4853 break;
4854 case OP_IXOR:
4855 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4856 break;
4857 case OP_XOR_IMM:
4858 case OP_IXOR_IMM:
4859 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4860 g_assert (imm8 >= 0);
4861 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4862 break;
4863 case OP_ISHL:
4864 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4865 break;
4866 case OP_SHL_IMM:
4867 case OP_ISHL_IMM:
4868 if (ins->inst_imm)
4869 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4870 else if (ins->dreg != ins->sreg1)
4871 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4872 break;
4873 case OP_ISHR:
4874 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4875 break;
4876 case OP_SHR_IMM:
4877 case OP_ISHR_IMM:
4878 if (ins->inst_imm)
4879 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4880 else if (ins->dreg != ins->sreg1)
4881 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4882 break;
4883 case OP_SHR_UN_IMM:
4884 case OP_ISHR_UN_IMM:
4885 if (ins->inst_imm)
4886 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4887 else if (ins->dreg != ins->sreg1)
4888 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4889 break;
4890 case OP_ISHR_UN:
4891 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4892 break;
4893 case OP_INOT:
4894 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4895 break;
4896 case OP_INEG:
4897 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4898 break;
4899 case OP_IMUL:
4900 if (ins->dreg == ins->sreg2)
4901 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4902 else
4903 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4904 break;
4905 case OP_MUL_IMM:
4906 g_assert_not_reached ();
4907 break;
4908 case OP_IMUL_OVF:
4909 /* FIXME: handle ovf/ sreg2 != dreg */
4910 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4911 /* FIXME: MUL doesn't set the C/O flags on ARM */
4912 break;
4913 case OP_IMUL_OVF_UN:
4914 /* FIXME: handle ovf/ sreg2 != dreg */
4915 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4916 /* FIXME: MUL doesn't set the C/O flags on ARM */
4917 break;
4918 case OP_ICONST:
4919 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4920 break;
4921 case OP_AOTCONST:
4922 /* Load the GOT offset */
4923 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
4924 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4925 ARM_B (code, 0);
4926 *(gpointer*)code = NULL;
4927 code += 4;
4928 /* Load the value from the GOT */
4929 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4930 break;
4931 case OP_OBJC_GET_SELECTOR:
4932 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4933 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4934 ARM_B (code, 0);
4935 *(gpointer*)code = NULL;
4936 code += 4;
4937 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4938 break;
4939 case OP_ICONV_TO_I4:
4940 case OP_ICONV_TO_U4:
4941 case OP_MOVE:
4942 if (ins->dreg != ins->sreg1)
4943 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4944 break;
4945 case OP_SETLRET: {
4946 int saved = ins->sreg2;
4947 if (ins->sreg2 == ARM_LSW_REG) {
4948 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4949 saved = ARMREG_LR;
4951 if (ins->sreg1 != ARM_LSW_REG)
4952 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4953 if (saved != ARM_MSW_REG)
4954 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4955 break;
4957 case OP_FMOVE:
4958 if (IS_VFP && ins->dreg != ins->sreg1)
4959 ARM_CPYD (code, ins->dreg, ins->sreg1);
4960 break;
4961 case OP_RMOVE:
4962 if (IS_VFP && ins->dreg != ins->sreg1)
4963 ARM_CPYS (code, ins->dreg, ins->sreg1);
4964 break;
4965 case OP_MOVE_F_TO_I4:
4966 if (cfg->r4fp) {
4967 ARM_FMRS (code, ins->dreg, ins->sreg1);
4968 } else {
4969 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4970 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4971 ARM_FMRS (code, ins->dreg, vfp_scratch1);
4972 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4974 break;
4975 case OP_MOVE_I4_TO_F:
4976 if (cfg->r4fp) {
4977 ARM_FMSR (code, ins->dreg, ins->sreg1);
4978 } else {
4979 ARM_FMSR (code, ins->dreg, ins->sreg1);
4980 ARM_CVTS (code, ins->dreg, ins->dreg);
4982 break;
4983 case OP_FCONV_TO_R4:
4984 if (IS_VFP) {
4985 if (cfg->r4fp) {
4986 ARM_CVTD (code, ins->dreg, ins->sreg1);
4987 } else {
4988 ARM_CVTD (code, ins->dreg, ins->sreg1);
4989 ARM_CVTS (code, ins->dreg, ins->dreg);
4992 break;
4994 case OP_TAILCALL_PARAMETER:
4995 // This opcode helps compute sizes, i.e.
4996 // of the subsequent OP_TAILCALL, but contributes no code.
4997 g_assert (ins->next);
4998 break;
5000 case OP_TAILCALL:
5001 case OP_TAILCALL_MEMBASE:
5002 case OP_TAILCALL_REG: {
5003 gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE;
5004 gboolean const tailcall_reg = ins->opcode == OP_TAILCALL_REG;
5005 MonoCallInst *call = (MonoCallInst*)ins;
5007 max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
5009 if (IS_HARD_FLOAT)
5010 code = emit_float_args (cfg, call, code, &max_len, &offset);
5012 code = realloc_code (cfg, max_len);
5014 // For reg and membase, get destination in IP.
5016 if (tailcall_reg) {
5017 g_assert (ins->sreg1 > -1);
5018 if (ins->sreg1 != ARMREG_IP)
5019 ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg1);
5020 } else if (tailcall_membase) {
5021 g_assert (ins->sreg1 > -1);
5022 if (!arm_is_imm12 (ins->inst_offset)) {
5023 g_assert (ins->sreg1 != ARMREG_IP); // temp in emit_big_add
5024 code = emit_big_add (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
5025 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
5026 } else {
5027 ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
5032 * The stack looks like the following:
5033 * <caller argument area>
5034 * <saved regs etc>
5035 * <rest of frame>
5036 * <callee argument area>
5037 * <optionally saved IP> (about to be)
5038 * Need to copy the arguments from the callee argument area to
5039 * the caller argument area, and pop the frame.
5041 if (call->stack_usage) {
5042 int i, prev_sp_offset = 0;
5044 // When we get here, the parameters to the tailcall are already formed,
5045 // in registers and at the bottom of the grow-down stack.
5047 // Our goal is generally preserve parameters, and trim the stack,
5048 // and, before trimming stack, move parameters from the bottom of the
5049 // frame to the bottom of the trimmed frame.
5051 // For the case of large frames, and presently therefore always,
5052 // IP is used as an adjusted frame_reg.
5053 // Be conservative and save IP around the movement
5054 // of parameters from the bottom of frame to top of the frame.
5055 const gboolean save_ip = tailcall_membase || tailcall_reg;
5056 if (save_ip)
5057 ARM_PUSH (code, 1 << ARMREG_IP);
5059 // When moving stacked parameters from the bottom
5060 // of the frame (sp) to the top of the frame (ip),
5061 // account, 0 or 4, for the conditional save of IP.
5062 const int offset_sp = save_ip ? 4 : 0;
5063 const int offset_ip = (save_ip && (cfg->frame_reg == ARMREG_SP)) ? 4 : 0;
5065 /* Compute size of saved registers restored below */
5066 if (iphone_abi)
5067 prev_sp_offset = 2 * 4;
5068 else
5069 prev_sp_offset = 1 * 4;
5070 for (i = 0; i < 16; ++i) {
5071 if (cfg->used_int_regs & (1 << i))
5072 prev_sp_offset += 4;
5075 // Point IP at the start of where the parameters will go after trimming stack.
5076 // After locals and saved registers.
5077 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
5079 /* Copy arguments on the stack to our argument area */
5080 // FIXME a fixed size memcpy is desirable here,
5081 // at least for larger values of stack_usage.
5083 // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
5084 // See https://github.com/mono/mono/pull/12079
5085 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
5086 for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
5087 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i + offset_sp);
5088 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i + offset_ip);
5091 if (save_ip)
5092 ARM_POP (code, 1 << ARMREG_IP);
5096 * Keep in sync with mono_arch_emit_epilog
5098 g_assert (!cfg->method->save_lmf);
5099 code = emit_big_add_temp (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage, ARMREG_LR);
5100 if (iphone_abi) {
5101 if (cfg->used_int_regs)
5102 ARM_POP (code, cfg->used_int_regs);
5103 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5104 } else {
5105 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
5108 if (tailcall_reg || tailcall_membase) {
5109 code = emit_jmp_reg (code, ARMREG_IP);
5110 } else {
5111 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
5113 if (cfg->compile_aot) {
5114 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5115 ARM_B (code, 0);
5116 *(gpointer*)code = NULL;
5117 code += 4;
5118 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
5119 } else {
5120 code = mono_arm_patchable_b (code, ARMCOND_AL);
5121 cfg->thunk_area += THUNK_SIZE;
5124 break;
5126 case OP_CHECK_THIS:
5127 /* ensure ins->sreg1 is not NULL */
5128 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
5129 break;
5130 case OP_ARGLIST: {
5131 g_assert (cfg->sig_cookie < 128);
5132 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5133 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
5134 break;
5136 case OP_FCALL:
5137 case OP_RCALL:
5138 case OP_LCALL:
5139 case OP_VCALL:
5140 case OP_VCALL2:
5141 case OP_VOIDCALL:
5142 case OP_CALL:
5143 call = (MonoCallInst*)ins;
5145 if (IS_HARD_FLOAT)
5146 code = emit_float_args (cfg, call, code, &max_len, &offset);
5148 mono_call_add_patch_info (cfg, call, code - cfg->native_code);
5150 code = emit_call_seq (cfg, code);
5151 ins->flags |= MONO_INST_GC_CALLSITE;
5152 ins->backend.pc_offset = code - cfg->native_code;
5153 code = emit_move_return_value (cfg, ins, code);
5154 break;
5155 case OP_FCALL_REG:
5156 case OP_RCALL_REG:
5157 case OP_LCALL_REG:
5158 case OP_VCALL_REG:
5159 case OP_VCALL2_REG:
5160 case OP_VOIDCALL_REG:
5161 case OP_CALL_REG:
5162 if (IS_HARD_FLOAT)
5163 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
5165 code = emit_call_reg (code, ins->sreg1);
5166 ins->flags |= MONO_INST_GC_CALLSITE;
5167 ins->backend.pc_offset = code - cfg->native_code;
5168 code = emit_move_return_value (cfg, ins, code);
5169 break;
5170 case OP_FCALL_MEMBASE:
5171 case OP_RCALL_MEMBASE:
5172 case OP_LCALL_MEMBASE:
5173 case OP_VCALL_MEMBASE:
5174 case OP_VCALL2_MEMBASE:
5175 case OP_VOIDCALL_MEMBASE:
5176 case OP_CALL_MEMBASE: {
5177 g_assert (ins->sreg1 != ARMREG_LR);
5178 call = (MonoCallInst*)ins;
5180 if (IS_HARD_FLOAT)
5181 code = emit_float_args (cfg, call, code, &max_len, &offset);
5182 if (!arm_is_imm12 (ins->inst_offset)) {
5183 /* sreg1 might be IP */
5184 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
5185 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
5186 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_LR);
5187 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5188 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
5189 } else {
5190 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5191 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
5193 ins->flags |= MONO_INST_GC_CALLSITE;
5194 ins->backend.pc_offset = code - cfg->native_code;
5195 code = emit_move_return_value (cfg, ins, code);
5196 break;
5198 case OP_GENERIC_CLASS_INIT: {
5199 int byte_offset;
5200 guint8 *jump;
5202 byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);
5204 g_assert (arm_is_imm8 (byte_offset));
5205 ARM_LDRSB_IMM (code, ARMREG_IP, ins->sreg1, byte_offset);
5206 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
5207 jump = code;
5208 ARM_B_COND (code, ARMCOND_NE, 0);
5210 /* Uninitialized case */
5211 g_assert (ins->sreg1 == ARMREG_R0);
5213 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5214 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
5215 code = emit_call_seq (cfg, code);
5217 /* Initialized case */
5218 arm_patch (jump, code);
5219 break;
5221 case OP_LOCALLOC: {
5222 /* round the size to 8 bytes */
5223 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
5224 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1));
5225 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
5226 /* memzero the area: dreg holds the size, sp is the pointer */
5227 if (ins->flags & MONO_INST_INIT) {
5228 guint8 *start_loop, *branch_to_cond;
5229 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
5230 branch_to_cond = code;
5231 ARM_B (code, 0);
5232 start_loop = code;
5233 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
5234 arm_patch (branch_to_cond, code);
5235 /* decrement by 4 and set flags */
5236 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (target_mgreg_t));
5237 ARM_B_COND (code, ARMCOND_GE, 0);
5238 arm_patch (code - 4, start_loop);
5240 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
5241 if (cfg->param_area)
5242 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
5243 break;
5245 case OP_DYN_CALL: {
5246 int i;
5247 MonoInst *var = cfg->dyn_call_var;
5248 guint8 *labels [16];
5250 g_assert (var->opcode == OP_REGOFFSET);
5251 g_assert (arm_is_imm12 (var->inst_offset));
5253 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5254 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
5255 /* ip = ftn */
5256 ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg2);
5258 /* Save args buffer */
5259 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5261 /* Set fp argument registers */
5262 if (IS_HARD_FLOAT) {
5263 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, has_fpregs));
5264 ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0);
5265 labels [0] = code;
5266 ARM_B_COND (code, ARMCOND_EQ, 0);
5267 for (i = 0; i < FP_PARAM_REGS; ++i) {
5268 const int offset = MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * sizeof (double));
5269 g_assert (arm_is_fpimm8 (offset));
5270 ARM_FLDD (code, i * 2, ARMREG_LR, offset);
5272 arm_patch (labels [0], code);
5275 /* Allocate callee area */
5276 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
5277 ARM_SHL_IMM (code, ARMREG_R1, ARMREG_R1, 2);
5278 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R1);
5280 /* Set stack args */
5281 /* R1 = limit */
5282 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
5283 /* R2 = pointer into regs */
5284 code = emit_big_add (code, ARMREG_R2, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (PARAM_REGS * sizeof (target_mgreg_t)));
5285 /* R3 = pointer to stack */
5286 ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP);
5287 /* Loop */
5288 labels [0] = code;
5289 ARM_B_COND (code, ARMCOND_AL, 0);
5290 labels [1] = code;
5291 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R2, 0);
5292 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R3, 0);
5293 ARM_ADD_REG_IMM (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t), 0);
5294 ARM_ADD_REG_IMM (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t), 0);
5295 ARM_SUB_REG_IMM (code, ARMREG_R1, ARMREG_R1, 1, 0);
5296 arm_patch (labels [0], code);
5297 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
5298 labels [2] = code;
5299 ARM_B_COND (code, ARMCOND_GT, 0);
5300 arm_patch (labels [2], labels [1]);
5302 /* Set argument registers */
5303 for (i = 0; i < PARAM_REGS; ++i)
5304 ARM_LDR_IMM (code, i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)));
5306 /* Make the call */
5307 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5308 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5310 /* Save result */
5311 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5312 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5313 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5314 if (IS_HARD_FLOAT)
5315 ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, fpregs));
5316 break;
5318 case OP_THROW: {
5319 if (ins->sreg1 != ARMREG_R0)
5320 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5321 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5322 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
5323 code = emit_call_seq (cfg, code);
5324 break;
5326 case OP_RETHROW: {
5327 if (ins->sreg1 != ARMREG_R0)
5328 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5329 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5330 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
5331 code = emit_call_seq (cfg, code);
5332 break;
5334 case OP_START_HANDLER: {
5335 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5336 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5337 int i, rot_amount;
5339 /* Reserve a param area, see filter-stack.exe */
5340 if (param_area) {
5341 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5342 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5343 } else {
5344 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5345 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5349 if (arm_is_imm12 (spvar->inst_offset)) {
5350 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5351 } else {
5352 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5353 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5355 break;
5357 case OP_ENDFILTER: {
5358 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5359 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5360 int i, rot_amount;
5362 /* Free the param area */
5363 if (param_area) {
5364 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5365 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5366 } else {
5367 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5368 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5372 if (ins->sreg1 != ARMREG_R0)
5373 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5374 if (arm_is_imm12 (spvar->inst_offset)) {
5375 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5376 } else {
5377 g_assert (ARMREG_IP != spvar->inst_basereg);
5378 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5379 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5381 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5382 break;
5384 case OP_ENDFINALLY: {
5385 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5386 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5387 int i, rot_amount;
5389 /* Free the param area */
5390 if (param_area) {
5391 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5392 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5393 } else {
5394 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5395 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5399 if (arm_is_imm12 (spvar->inst_offset)) {
5400 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5401 } else {
5402 g_assert (ARMREG_IP != spvar->inst_basereg);
5403 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5404 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5406 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5407 break;
5409 case OP_CALL_HANDLER:
5410 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5411 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5412 cfg->thunk_area += THUNK_SIZE;
5413 for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
5414 mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
5415 break;
5416 case OP_GET_EX_OBJ:
5417 if (ins->dreg != ARMREG_R0)
5418 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0);
5419 break;
5421 case OP_LABEL:
5422 ins->inst_c0 = code - cfg->native_code;
5423 break;
5424 case OP_BR:
5425 /*if (ins->inst_target_bb->native_offset) {
5426 ARM_B (code, 0);
5427 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5428 } else*/ {
5429 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5430 code = mono_arm_patchable_b (code, ARMCOND_AL);
5432 break;
5433 case OP_BR_REG:
5434 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5435 break;
5436 case OP_SWITCH:
5438 * In the normal case we have:
5439 * ldr pc, [pc, ins->sreg1 << 2]
5440 * nop
5441 * If aot, we have:
5442 * ldr lr, [pc, ins->sreg1 << 2]
5443 * add pc, pc, lr
5444 * After follows the data.
5445 * FIXME: add aot support.
5447 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5448 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5449 code = realloc_code (cfg, max_len);
5450 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5451 ARM_NOP (code);
5452 code += 4 * GPOINTER_TO_INT (ins->klass);
5453 break;
5454 case OP_CEQ:
5455 case OP_ICEQ:
5456 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5457 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5458 break;
5459 case OP_CLT:
5460 case OP_ICLT:
5461 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5462 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5463 break;
5464 case OP_CLT_UN:
5465 case OP_ICLT_UN:
5466 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5467 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5468 break;
5469 case OP_CGT:
5470 case OP_ICGT:
5471 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5472 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5473 break;
5474 case OP_CGT_UN:
5475 case OP_ICGT_UN:
5476 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5477 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5478 break;
5479 case OP_ICNEQ:
5480 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5481 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5482 break;
5483 case OP_ICGE:
5484 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5485 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5486 break;
5487 case OP_ICLE:
5488 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5489 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5490 break;
5491 case OP_ICGE_UN:
5492 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5493 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5494 break;
5495 case OP_ICLE_UN:
5496 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5497 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
5498 break;
5499 case OP_COND_EXC_EQ:
5500 case OP_COND_EXC_NE_UN:
5501 case OP_COND_EXC_LT:
5502 case OP_COND_EXC_LT_UN:
5503 case OP_COND_EXC_GT:
5504 case OP_COND_EXC_GT_UN:
5505 case OP_COND_EXC_GE:
5506 case OP_COND_EXC_GE_UN:
5507 case OP_COND_EXC_LE:
5508 case OP_COND_EXC_LE_UN:
5509 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5510 break;
5511 case OP_COND_EXC_IEQ:
5512 case OP_COND_EXC_INE_UN:
5513 case OP_COND_EXC_ILT:
5514 case OP_COND_EXC_ILT_UN:
5515 case OP_COND_EXC_IGT:
5516 case OP_COND_EXC_IGT_UN:
5517 case OP_COND_EXC_IGE:
5518 case OP_COND_EXC_IGE_UN:
5519 case OP_COND_EXC_ILE:
5520 case OP_COND_EXC_ILE_UN:
5521 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5522 break;
5523 case OP_COND_EXC_C:
5524 case OP_COND_EXC_IC:
5525 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5526 break;
5527 case OP_COND_EXC_OV:
5528 case OP_COND_EXC_IOV:
5529 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5530 break;
5531 case OP_COND_EXC_NC:
5532 case OP_COND_EXC_INC:
5533 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5534 break;
5535 case OP_COND_EXC_NO:
5536 case OP_COND_EXC_INO:
5537 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5538 break;
5539 case OP_IBEQ:
5540 case OP_IBNE_UN:
5541 case OP_IBLT:
5542 case OP_IBLT_UN:
5543 case OP_IBGT:
5544 case OP_IBGT_UN:
5545 case OP_IBGE:
5546 case OP_IBGE_UN:
5547 case OP_IBLE:
5548 case OP_IBLE_UN:
5549 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5550 break;
5552 /* floating point opcodes */
5553 case OP_R8CONST:
5554 if (cfg->compile_aot) {
5555 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5556 ARM_B (code, 1);
5557 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5558 code += 4;
5559 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5560 code += 4;
5561 } else {
5562 /* FIXME: we can optimize the imm load by dealing with part of
5563 * the displacement in LDFD (aligning to 512).
5565 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
5566 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5568 break;
5569 case OP_R4CONST:
5570 if (cfg->compile_aot) {
5571 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5572 ARM_B (code, 0);
5573 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5574 code += 4;
5575 if (!cfg->r4fp)
5576 ARM_CVTS (code, ins->dreg, ins->dreg);
5577 } else {
5578 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
5579 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5580 if (!cfg->r4fp)
5581 ARM_CVTS (code, ins->dreg, ins->dreg);
5583 break;
5584 case OP_STORER8_MEMBASE_REG:
5585 /* This is generated by the local regalloc pass which runs after the lowering pass */
5586 if (!arm_is_fpimm8 (ins->inst_offset)) {
5587 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5588 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5589 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5590 } else {
5591 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5593 break;
5594 case OP_LOADR8_MEMBASE:
5595 /* This is generated by the local regalloc pass which runs after the lowering pass */
5596 if (!arm_is_fpimm8 (ins->inst_offset)) {
5597 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5598 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5599 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5600 } else {
5601 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5603 break;
5604 case OP_STORER4_MEMBASE_REG:
5605 g_assert (arm_is_fpimm8 (ins->inst_offset));
5606 if (cfg->r4fp) {
5607 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5608 } else {
5609 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5610 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5611 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5612 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5614 break;
5615 case OP_LOADR4_MEMBASE:
5616 if (cfg->r4fp) {
5617 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5618 } else {
5619 g_assert (arm_is_fpimm8 (ins->inst_offset));
5620 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5621 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5622 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5623 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5625 break;
5626 case OP_ICONV_TO_R_UN: {
5627 g_assert_not_reached ();
5628 break;
5630 case OP_ICONV_TO_R4:
5631 if (cfg->r4fp) {
5632 ARM_FMSR (code, ins->dreg, ins->sreg1);
5633 ARM_FSITOS (code, ins->dreg, ins->dreg);
5634 } else {
5635 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5636 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5637 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5638 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5639 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5641 break;
5642 case OP_ICONV_TO_R8:
5643 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5644 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5645 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5646 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5647 break;
5649 case OP_SETFRET: {
5650 MonoType *sig_ret = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
5651 if (sig_ret->type == MONO_TYPE_R4) {
5652 if (cfg->r4fp) {
5653 if (IS_HARD_FLOAT) {
5654 if (ins->sreg1 != ARM_VFP_D0)
5655 ARM_CPYS (code, ARM_VFP_D0, ins->sreg1);
5656 } else {
5657 ARM_FMRS (code, ARMREG_R0, ins->sreg1);
5659 } else {
5660 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5662 if (!IS_HARD_FLOAT)
5663 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5665 } else {
5666 if (IS_HARD_FLOAT)
5667 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5668 else
5669 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5671 break;
5673 case OP_FCONV_TO_I1:
5674 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5675 break;
5676 case OP_FCONV_TO_U1:
5677 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5678 break;
5679 case OP_FCONV_TO_I2:
5680 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5681 break;
5682 case OP_FCONV_TO_U2:
5683 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5684 break;
5685 case OP_FCONV_TO_I4:
5686 case OP_FCONV_TO_I:
5687 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5688 break;
5689 case OP_FCONV_TO_U4:
5690 case OP_FCONV_TO_U:
5691 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5692 break;
5693 case OP_FCONV_TO_I8:
5694 case OP_FCONV_TO_U8:
5695 g_assert_not_reached ();
5696 /* Implemented as helper calls */
5697 break;
5698 case OP_LCONV_TO_R_UN:
5699 g_assert_not_reached ();
5700 /* Implemented as helper calls */
5701 break;
5702 case OP_LCONV_TO_OVF_I4_2: {
5703 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5705 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5708 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5709 high_bit_not_set = code;
5710 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5712 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5713 valid_negative = code;
5714 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5715 invalid_negative = code;
5716 ARM_B_COND (code, ARMCOND_AL, 0);
5718 arm_patch (high_bit_not_set, code);
5720 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5721 valid_positive = code;
5722 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5724 arm_patch (invalid_negative, code);
5725 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5727 arm_patch (valid_negative, code);
5728 arm_patch (valid_positive, code);
5730 if (ins->dreg != ins->sreg1)
5731 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5732 break;
5734 case OP_FADD:
5735 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5736 break;
5737 case OP_FSUB:
5738 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5739 break;
5740 case OP_FMUL:
5741 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5742 break;
5743 case OP_FDIV:
5744 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5745 break;
5746 case OP_FNEG:
5747 ARM_NEGD (code, ins->dreg, ins->sreg1);
5748 break;
5749 case OP_FREM:
5750 /* emulated */
5751 g_assert_not_reached ();
5752 break;
5753 case OP_FCOMPARE:
5754 if (IS_VFP) {
5755 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5756 ARM_FMSTAT (code);
5758 break;
5759 case OP_RCOMPARE:
5760 g_assert (IS_VFP);
5761 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5762 ARM_FMSTAT (code);
5763 break;
5764 case OP_FCEQ:
5765 if (IS_VFP) {
5766 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5767 ARM_FMSTAT (code);
5769 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5770 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5771 break;
5772 case OP_FCLT:
5773 if (IS_VFP) {
5774 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5775 ARM_FMSTAT (code);
5777 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5778 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5779 break;
5780 case OP_FCLT_UN:
5781 if (IS_VFP) {
5782 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5783 ARM_FMSTAT (code);
5785 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5786 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5787 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5788 break;
5789 case OP_FCGT:
5790 if (IS_VFP) {
5791 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5792 ARM_FMSTAT (code);
5794 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5795 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5796 break;
5797 case OP_FCGT_UN:
5798 if (IS_VFP) {
5799 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5800 ARM_FMSTAT (code);
5802 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5803 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5804 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5805 break;
5806 case OP_FCNEQ:
5807 if (IS_VFP) {
5808 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5809 ARM_FMSTAT (code);
5811 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5812 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5813 break;
5814 case OP_FCGE:
5815 if (IS_VFP) {
5816 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5817 ARM_FMSTAT (code);
5819 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5820 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5821 break;
5822 case OP_FCLE:
5823 if (IS_VFP) {
5824 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5825 ARM_FMSTAT (code);
5827 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5828 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5829 break;
5831 /* ARM FPA flags table:
5832 * N Less than ARMCOND_MI
5833 * Z Equal ARMCOND_EQ
5834 * C Greater Than or Equal ARMCOND_CS
5835 * V Unordered ARMCOND_VS
5837 case OP_FBEQ:
5838 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5839 break;
5840 case OP_FBNE_UN:
5841 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5842 break;
5843 case OP_FBLT:
5844 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5845 break;
5846 case OP_FBLT_UN:
5847 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5848 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5849 break;
5850 case OP_FBGT:
5851 case OP_FBGT_UN:
5852 case OP_FBLE:
5853 case OP_FBLE_UN:
5854 g_assert_not_reached ();
5855 break;
5856 case OP_FBGE:
5857 if (IS_VFP) {
5858 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5859 } else {
5860 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5861 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5862 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5864 break;
5865 case OP_FBGE_UN:
5866 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5867 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5868 break;
5870 case OP_CKFINITE: {
5871 if (IS_VFP) {
5872 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5873 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5875 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5876 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5877 ARM_B (code, 1);
5878 *(guint32*)code = 0xffffffff;
5879 code += 4;
5880 *(guint32*)code = 0x7fefffff;
5881 code += 4;
5882 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5883 ARM_FMSTAT (code);
5884 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "OverflowException");
5885 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5886 ARM_FMSTAT (code);
5887 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "OverflowException");
5888 ARM_CPYD (code, ins->dreg, ins->sreg1);
5890 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5891 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5893 break;
5896 case OP_RCONV_TO_I1:
5897 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5898 break;
5899 case OP_RCONV_TO_U1:
5900 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5901 break;
5902 case OP_RCONV_TO_I2:
5903 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5904 break;
5905 case OP_RCONV_TO_U2:
5906 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5907 break;
5908 case OP_RCONV_TO_I4:
5909 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5910 break;
5911 case OP_RCONV_TO_U4:
5912 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5913 break;
5914 case OP_RCONV_TO_R4:
5915 g_assert (IS_VFP);
5916 if (ins->dreg != ins->sreg1)
5917 ARM_CPYS (code, ins->dreg, ins->sreg1);
5918 break;
5919 case OP_RCONV_TO_R8:
5920 g_assert (IS_VFP);
5921 ARM_CVTS (code, ins->dreg, ins->sreg1);
5922 break;
5923 case OP_RADD:
5924 ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
5925 break;
5926 case OP_RSUB:
5927 ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
5928 break;
5929 case OP_RMUL:
5930 ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
5931 break;
5932 case OP_RDIV:
5933 ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
5934 break;
5935 case OP_RNEG:
5936 ARM_NEGS (code, ins->dreg, ins->sreg1);
5937 break;
5938 case OP_RCEQ:
5939 if (IS_VFP) {
5940 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5941 ARM_FMSTAT (code);
5943 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5944 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5945 break;
5946 case OP_RCLT:
5947 if (IS_VFP) {
5948 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5949 ARM_FMSTAT (code);
5951 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5952 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5953 break;
5954 case OP_RCLT_UN:
5955 if (IS_VFP) {
5956 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5957 ARM_FMSTAT (code);
5959 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5960 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5961 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5962 break;
5963 case OP_RCGT:
5964 if (IS_VFP) {
5965 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5966 ARM_FMSTAT (code);
5968 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5969 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5970 break;
5971 case OP_RCGT_UN:
5972 if (IS_VFP) {
5973 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5974 ARM_FMSTAT (code);
5976 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5977 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5978 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5979 break;
5980 case OP_RCNEQ:
5981 if (IS_VFP) {
5982 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5983 ARM_FMSTAT (code);
5985 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5986 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5987 break;
5988 case OP_RCGE:
5989 if (IS_VFP) {
5990 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5991 ARM_FMSTAT (code);
5993 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5994 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5995 break;
5996 case OP_RCLE:
5997 if (IS_VFP) {
5998 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5999 ARM_FMSTAT (code);
6001 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
6002 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
6003 break;
6005 case OP_GC_LIVENESS_DEF:
6006 case OP_GC_LIVENESS_USE:
6007 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
6008 ins->backend.pc_offset = code - cfg->native_code;
6009 break;
6010 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
6011 ins->backend.pc_offset = code - cfg->native_code;
6012 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
6013 break;
6014 case OP_LIVERANGE_START: {
6015 if (cfg->verbose_level > 1)
6016 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
6017 MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
6018 break;
6020 case OP_LIVERANGE_END: {
6021 if (cfg->verbose_level > 1)
6022 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
6023 MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
6024 break;
6026 case OP_GC_SAFE_POINT: {
6027 guint8 *buf [1];
6029 ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
6030 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
6031 buf [0] = code;
6032 ARM_B_COND (code, ARMCOND_EQ, 0);
6033 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
6034 code = emit_call_seq (cfg, code);
6035 arm_patch (buf [0], code);
6036 break;
6038 case OP_FILL_PROF_CALL_CTX:
6039 for (int i = 0; i < ARMREG_MAX; i++)
6040 if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
6041 ARM_STR_IMM (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
6042 break;
6043 default:
6044 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
6045 g_assert_not_reached ();
6048 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
6049 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
6050 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
6051 g_assert_not_reached ();
6054 cpos += max_len;
6056 last_ins = ins;
6059 set_code_cursor (cfg, code);
6062 #endif /* DISABLE_JIT */
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM specific low level exception/diagnostic helpers as JIT
 * icalls so emitted code can be patched to call them. Per the existing note,
 * the signature passed here is a placeholder: these entries are only looked
 * up by address, never invoked through the registered signature.
 */
void
mono_arch_register_lowlevel_calls (void)
{
	/* The signature doesn't matter */
	mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE);
	mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE);
	mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE);
}
/*
 * patch_lis_ori:
 *
 *   Patch a 'lis'/'ori' 32 bit constant-load pair at IP with VAL, writing the
 * high and low halfwords into the immediate fields of the two instructions.
 * NOTE(review): 'lis'/'ori' are PowerPC mnemonics, and within this file the
 * macro is only reached from g_assert_not_reached () branches in
 * mono_arch_patch_code_new () — this looks like dead code inherited from the
 * PPC backend; confirm before removing.
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip);	\
		__lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff;	\
		__lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff;	\
	} while (0)
/*
 * mono_arch_patch_code_new:
 *
 *   Apply the relocation described by JI to the native code buffer CODE,
 * pointing the patch site at TARGET. The patch site itself is at
 * CODE + JI->ip.i. Most patch types are delegated to arm_patch_general ()
 * in the default case; the cases guarded by g_assert_not_reached () are
 * never emitted on ARM (they appear to be leftovers from the backend this
 * file was derived from).
 */
void
mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
	unsigned char *ip = ji->ip.i + code;

	if (ji->type == MONO_PATCH_INFO_SWITCH) {
	}

	switch (ji->type) {
	case MONO_PATCH_INFO_SWITCH: {
		/* The OP_SWITCH implementation places the jump table inline,
		 * 8 bytes (2 instructions) after the patch address. */
		gpointer *jt = (gpointer*)(ip + 8);
		int i;
		/* jt is the inlined jump table, 2 instructions after ip
		 * In the normal case we store the absolute addresses,
		 * otherwise the displacements.
		 */
		for (i = 0; i < ji->data.table->table_size; i++)
			jt [i] = code + (int)(gsize)ji->data.table->table [i];
		break;
	}
	case MONO_PATCH_INFO_IP:
		/* Unreachable on ARM: patch_lis_ori is a PPC-style sequence */
		g_assert_not_reached ();
		patch_lis_ori (ip, ip);
		break;
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_VTABLE:
	case MONO_PATCH_INFO_IID:
	case MONO_PATCH_INFO_SFLDA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
		/* Unreachable on ARM: these are handled by arm_patch_general () below */
		g_assert_not_reached ();
		/* from OP_AOTCONST : lis + ori */
		patch_lis_ori (ip, target);
		break;
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8:
		/* Unreachable on ARM */
		g_assert_not_reached ();
		*((gconstpointer *)(ip + 2)) = target;
		break;
	case MONO_PATCH_INFO_EXC_NAME:
		/* Unreachable on ARM */
		g_assert_not_reached ();
		*((gconstpointer *)(ip + 1)) = target;
		break;
	case MONO_PATCH_INFO_NONE:
	case MONO_PATCH_INFO_BB_OVF:
	case MONO_PATCH_INFO_EXC_OVF:
		/* everything is dealt with at epilog output time */
		break;
	default:
		/* The common path: decode and rewrite the branch/load at ip */
		arm_patch_general (cfg, domain, ip, (const guchar*)target);
		break;
	}
}
/*
 * mono_arm_unaligned_stack:
 *
 *   Diagnostic helper registered as a JIT icall; the stack-alignment check
 * emitted in mono_arch_emit_prolog () calls it when SP is found to be
 * misaligned at method entry. Always aborts.
 */
void
mono_arm_unaligned_stack (MonoMethod *method)
{
	g_assert_not_reached ();
}
6143 #ifndef DISABLE_JIT
6146 * Stack frame layout:
6148 * ------------------- fp
6149 * MonoLMF structure or saved registers
6150 * -------------------
6151 * locals
6152 * -------------------
6153 * spilled regs
6154 * -------------------
6155 * param area size is cfg->param_area
6156 * ------------------- sp
6158 guint8 *
6159 mono_arch_emit_prolog (MonoCompile *cfg)
6161 MonoMethod *method = cfg->method;
6162 MonoBasicBlock *bb;
6163 MonoMethodSignature *sig;
6164 MonoInst *inst;
6165 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part;
6166 guint8 *code;
6167 CallInfo *cinfo;
6168 int lmf_offset = 0;
6169 int prev_sp_offset, reg_offset;
6171 sig = mono_method_signature_internal (method);
6172 cfg->code_size = 256 + sig->param_count * 64;
6173 code = cfg->native_code = g_malloc (cfg->code_size);
6175 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
6177 alloc_size = cfg->stack_offset;
6178 pos = 0;
6179 prev_sp_offset = 0;
6181 if (iphone_abi) {
6183 * The iphone uses R7 as the frame pointer, and it points at the saved
6184 * r7+lr:
6185 * <lr>
6186 * r7 -> <r7>
6187 * <rest of frame>
6188 * We can't use r7 as a frame pointer since it points into the middle of
6189 * the frame, so we keep using our own frame pointer.
6190 * FIXME: Optimize this.
6192 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
6193 prev_sp_offset += 8; /* r7 and lr */
6194 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6195 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
6196 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
6199 if (!method->save_lmf) {
6200 if (iphone_abi) {
6201 /* No need to push LR again */
6202 if (cfg->used_int_regs)
6203 ARM_PUSH (code, cfg->used_int_regs);
6204 } else {
6205 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
6206 prev_sp_offset += 4;
6208 for (i = 0; i < 16; ++i) {
6209 if (cfg->used_int_regs & (1 << i))
6210 prev_sp_offset += 4;
6212 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6213 reg_offset = 0;
6214 for (i = 0; i < 16; ++i) {
6215 if ((cfg->used_int_regs & (1 << i))) {
6216 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6217 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
6218 reg_offset += 4;
6221 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
6222 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
6223 } else {
6224 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
6225 ARM_PUSH (code, 0x5ff0);
6226 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
6227 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6228 reg_offset = 0;
6229 for (i = 0; i < 16; ++i) {
6230 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
6231 /* The original r7 is saved at the start */
6232 if (!(iphone_abi && i == ARMREG_R7))
6233 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6234 reg_offset += 4;
6237 g_assert (reg_offset == 4 * 10);
6238 pos += MONO_ABI_SIZEOF (MonoLMF) - (4 * 10);
6239 lmf_offset = pos;
6241 alloc_size += pos;
6242 orig_alloc_size = alloc_size;
6243 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6244 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
6245 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
6246 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
6249 /* the stack used in the pushed regs */
6250 alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
6251 cfg->stack_usage = alloc_size;
6252 if (alloc_size) {
6253 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
6254 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
6255 } else {
6256 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
6257 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
6259 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
6261 if (cfg->frame_reg != ARMREG_SP) {
6262 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
6263 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
6265 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6266 prev_sp_offset += alloc_size;
6268 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
6269 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
6271 /* compute max_offset in order to use short forward jumps
6272 * we could skip do it on arm because the immediate displacement
6273 * for jumps is large enough, it may be useful later for constant pools
6275 max_offset = 0;
6276 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
6277 MonoInst *ins = bb->code;
6278 bb->max_offset = max_offset;
6280 MONO_BB_FOR_EACH_INS (bb, ins)
6281 max_offset += ins_get_size (ins->opcode);
6284 /* stack alignment check */
6287 guint8 *buf [16];
6288 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6289 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6290 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6291 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6292 buf [0] = code;
6293 ARM_B_COND (code, ARMCOND_EQ, 0);
6294 if (cfg->compile_aot)
6295 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6296 else
6297 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6298 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
6299 code = emit_call_seq (cfg, code);
6300 arm_patch (buf [0], code);
6304 /* store runtime generic context */
6305 if (cfg->rgctx_var) {
6306 MonoInst *ins = cfg->rgctx_var;
6308 g_assert (ins->opcode == OP_REGOFFSET);
6310 if (arm_is_imm12 (ins->inst_offset)) {
6311 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
6312 } else {
6313 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6314 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
6318 /* load arguments allocated to register from the stack */
6319 pos = 0;
6321 cinfo = get_call_info (NULL, sig);
6323 if (cinfo->ret.storage == RegTypeStructByAddr) {
6324 ArgInfo *ainfo = &cinfo->ret;
6325 inst = cfg->vret_addr;
6326 g_assert (arm_is_imm12 (inst->inst_offset));
6327 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6330 if (sig->call_convention == MONO_CALL_VARARG) {
6331 ArgInfo *cookie = &cinfo->sig_cookie;
6333 /* Save the sig cookie address */
6334 g_assert (cookie->storage == RegTypeBase);
6336 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
6337 g_assert (arm_is_imm12 (cfg->sig_cookie));
6338 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
6339 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
6342 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6343 ArgInfo *ainfo = cinfo->args + i;
6344 inst = cfg->args [pos];
6346 if (cfg->verbose_level > 2)
6347 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
6349 if (inst->opcode == OP_REGVAR) {
6350 if (ainfo->storage == RegTypeGeneral)
6351 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
6352 else if (ainfo->storage == RegTypeFP) {
6353 g_assert_not_reached ();
6354 } else if (ainfo->storage == RegTypeBase) {
6355 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6356 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6357 } else {
6358 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6359 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
6361 } else
6362 g_assert_not_reached ();
6364 if (cfg->verbose_level > 2)
6365 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
6366 } else {
6367 switch (ainfo->storage) {
6368 case RegTypeHFA:
6369 for (part = 0; part < ainfo->nregs; part ++) {
6370 if (ainfo->esize == 4)
6371 ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
6372 else
6373 ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
6375 break;
6376 case RegTypeGeneral:
6377 case RegTypeIRegPair:
6378 case RegTypeGSharedVtInReg:
6379 case RegTypeStructByAddr:
6380 switch (ainfo->size) {
6381 case 1:
6382 if (arm_is_imm12 (inst->inst_offset))
6383 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6384 else {
6385 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6386 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6388 break;
6389 case 2:
6390 if (arm_is_imm8 (inst->inst_offset)) {
6391 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6392 } else {
6393 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6394 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6396 break;
6397 case 8:
6398 if (arm_is_imm12 (inst->inst_offset)) {
6399 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6400 } else {
6401 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6402 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6404 if (arm_is_imm12 (inst->inst_offset + 4)) {
6405 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
6406 } else {
6407 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6408 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
6410 break;
6411 default:
6412 if (arm_is_imm12 (inst->inst_offset)) {
6413 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6414 } else {
6415 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6416 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6418 break;
6420 break;
6421 case RegTypeBaseGen:
6422 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6423 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6424 } else {
6425 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6426 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6428 if (arm_is_imm12 (inst->inst_offset + 4)) {
6429 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6430 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6431 } else {
6432 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6433 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6434 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6435 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
6437 break;
6438 case RegTypeBase:
6439 case RegTypeGSharedVtOnStack:
6440 case RegTypeStructByAddrOnStack:
6441 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6442 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6443 } else {
6444 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6445 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6448 switch (ainfo->size) {
6449 case 1:
6450 if (arm_is_imm8 (inst->inst_offset)) {
6451 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6452 } else {
6453 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6454 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6456 break;
6457 case 2:
6458 if (arm_is_imm8 (inst->inst_offset)) {
6459 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6460 } else {
6461 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6462 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6464 break;
6465 case 8:
6466 if (arm_is_imm12 (inst->inst_offset)) {
6467 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6468 } else {
6469 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6470 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6472 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6473 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6474 } else {
6475 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6476 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6478 if (arm_is_imm12 (inst->inst_offset + 4)) {
6479 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6480 } else {
6481 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6482 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6484 break;
6485 default:
6486 if (arm_is_imm12 (inst->inst_offset)) {
6487 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6488 } else {
6489 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6490 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6492 break;
6494 break;
6495 case RegTypeFP: {
6496 int imm8, rot_amount;
6498 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6499 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6500 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6501 } else
6502 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6504 if (ainfo->size == 8)
6505 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6506 else
6507 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6508 break;
6510 case RegTypeStructByVal: {
6511 int doffset = inst->inst_offset;
6512 int soffset = 0;
6513 int cur_reg;
6514 int size = 0;
6515 size = mini_type_stack_size_full (inst->inst_vtype, NULL, sig->pinvoke);
6516 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6517 if (arm_is_imm12 (doffset)) {
6518 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6519 } else {
6520 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6521 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6523 soffset += sizeof (target_mgreg_t);
6524 doffset += sizeof (target_mgreg_t);
6526 if (ainfo->vtsize) {
6527 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6528 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6529 code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6531 break;
6533 default:
6534 g_assert_not_reached ();
6535 break;
6538 pos++;
6541 if (method->save_lmf)
6542 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6544 if (cfg->arch.seq_point_info_var) {
6545 MonoInst *ins = cfg->arch.seq_point_info_var;
6547 /* Initialize the variable from a GOT slot */
6548 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6549 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6550 ARM_B (code, 0);
6551 *(gpointer*)code = NULL;
6552 code += 4;
6553 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6555 g_assert (ins->opcode == OP_REGOFFSET);
6557 if (arm_is_imm12 (ins->inst_offset)) {
6558 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6559 } else {
6560 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6561 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6565 /* Initialize ss_trigger_page_var */
6566 if (!cfg->soft_breakpoints) {
6567 MonoInst *info_var = cfg->arch.seq_point_info_var;
6568 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6569 int dreg = ARMREG_LR;
6571 if (info_var) {
6572 g_assert (info_var->opcode == OP_REGOFFSET);
6574 code = emit_ldr_imm (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6575 /* Load the trigger page addr */
6576 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6577 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6581 if (cfg->arch.seq_point_ss_method_var) {
6582 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6583 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6585 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6586 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6588 if (cfg->compile_aot) {
6589 MonoInst *info_var = cfg->arch.seq_point_info_var;
6590 int dreg = ARMREG_LR;
6592 g_assert (info_var->opcode == OP_REGOFFSET);
6593 g_assert (arm_is_imm12 (info_var->inst_offset));
6595 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6596 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
6597 ARM_STR_IMM (code, dreg, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6598 } else {
6599 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6600 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6602 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6603 ARM_B (code, 1);
6604 *(gpointer*)code = &single_step_tramp;
6605 code += 4;
6606 *(gpointer*)code = breakpoint_tramp;
6607 code += 4;
6609 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6610 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6611 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6612 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6616 set_code_cursor (cfg, code);
6617 g_free (cinfo);
6619 return code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: load struct/HFA return values into their ABI
 * registers, tear down the LMF if one was pushed, restore the callee saved
 * registers and return by popping the saved LR into PC.  Unwind info is
 * emitted alongside so the out-of-line (exception) code can unwind through
 * the epilog.
 */
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	int max_epilog_size = 16 + 20*4;
	guint8 *code;
	CallInfo *cinfo;

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	code = realloc_code (cfg, max_epilog_size);

	/* Save the uwind state which is needed by the out-of-line code */
	mono_emit_unwind_op_remember_state (cfg, code);

	pos = 0;

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	switch (cinfo->ret.storage) {
	case RegTypeStructByVal: {
		MonoInst *ins = cfg->ret;

		if (cinfo->ret.nregs == 1) {
			if (arm_is_imm12 (ins->inst_offset)) {
				ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
			} else {
				code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
				ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
			}
		} else {
			/* Multi-register struct return: load into r0..r(nregs-1) */
			for (i = 0; i < cinfo->ret.nregs; ++i) {
				int offset = ins->inst_offset + (i * 4);
				if (arm_is_imm12 (offset)) {
					ARM_LDR_IMM (code, i, ins->inst_basereg, offset);
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_LR, offset);
					ARM_LDR_REG_REG (code, i, ins->inst_basereg, ARMREG_LR);
				}
			}
		}
		break;
	}
	case RegTypeHFA: {
		MonoInst *ins = cfg->ret;

		/* Homogeneous FP aggregate: load each member into a VFP register */
		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
			else
				ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
		}
		break;
	}
	default:
		break;
	}

	if (method->save_lmf) {
		int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0;
		/* all but r0-r3, sp and pc */
		pos += MONO_ABI_SIZEOF (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
		lmf_offset = pos;

		code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);

		/* This points to r4 inside MonoLMF->iregs */
		sp_adj = (MONO_ABI_SIZEOF (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
		reg = ARMREG_R4;
		regmask = 0x9ff0; /* restore lr to pc */
		/* Skip caller saved registers not used by the method */
		while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
			regmask &= ~(1 << reg);
			sp_adj += 4;
			reg ++;
		}
		if (iphone_abi)
			/* Restored later */
			regmask &= ~(1 << ARMREG_PC);
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
		for (i = 0; i < 16; i++) {
			if (regmask & (1 << i))
				nused_int_regs ++;
		}
		mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4);
		/* restore iregs */
		ARM_POP (code, regmask);
		if (iphone_abi) {
			for (i = 0; i < 16; i++) {
				if (regmask & (1 << i))
					mono_emit_unwind_op_same_value (cfg, code, i);
			}
			/* Restore saved r7, restore LR to PC */
			/* Skip lr from the lmf */
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4);
			ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (target_mgreg_t), 0);
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
			ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		}
	} else {
		int i, nused_int_regs = 0;

		for (i = 0; i < 16; i++) {
			if (cfg->used_int_regs & (1 << i))
				nused_int_regs ++;
		}

		/* Point sp back at the saved registers */
		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
		}

		if (cfg->frame_reg != ARMREG_SP) {
			mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP);
		}

		if (iphone_abi) {
			/* Restore saved gregs */
			if (cfg->used_int_regs) {
				mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4);
				ARM_POP (code, cfg->used_int_regs);
				for (i = 0; i < 16; i++) {
					if (cfg->used_int_regs & (1 << i))
						mono_emit_unwind_op_same_value (cfg, code, i);
				}
			}
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
			/* Restore saved r7, restore LR to PC */
			ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		} else {
			mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4);
			/* Pop the used registers and return by popping saved LR into PC */
			ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
		}
	}

	/* Restore the unwind state to be the same as before the epilog */
	mono_emit_unwind_op_restore_state (cfg, code);

	set_code_cursor (cfg, code);
}
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception throwing code at the end of the method.
 * Each distinct corlib exception class referenced by MONO_PATCH_INFO_EXC
 * patches gets one throw sequence; later patches for the same class are
 * redirected to the already emitted sequence.
 */
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;
	guint8 *code;
	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
	int max_epilog_size = 50;

	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
		exc_throw_pos [i] = NULL;
		exc_throw_found [i] = 0;
	}

	/* count the number of exception infos */

	/*
	 * make sure we have enough space for exceptions
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = mini_exception_id_by_name ((const char*)patch_info->data.target);
			if (!exc_throw_found [i]) {
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;
			}
		}
	}

	code = realloc_code (cfg, max_epilog_size);

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;

			i = mini_exception_id_by_name ((const char*)patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* Reuse the throw sequence already emitted for this class */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
				break;
			} else {
				exc_throw_pos [i] = code;
			}
			arm_patch (ip, code);

			exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);

			/* r1 = throw site, r0 = type token (loaded from the word after the BL) */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			/* Retarget the patch at the BL to the throw icall */
			patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
			patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
			patch_info->ip.i = code - cfg->native_code;
			ARM_BL (code, 0);
			cfg->thunk_area += THUNK_SIZE;
			*(guint32*)(gpointer)code = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF;
			code += 4;
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}

	set_code_cursor (cfg, code);
}
6840 #endif /* #ifndef DISABLE_JIT */
/* Arch specific late initialization hook: nothing to do on ARM. */
void
mono_arch_finish_init (void)
{
}
/* No arch specific JIT TLS data to free on ARM. */
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}
/*
 * Arch specific intrinsics hook: return a MonoInst implementing CMETHOD,
 * or NULL if there is no intrinsic.  Not implemented on ARM yet.
 */
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	/* FIXME: */
	return NULL;
}
6859 #ifndef DISABLE_JIT
6861 #endif
6863 guint32
6864 mono_arch_get_patch_offset (guint8 *code)
6866 /* OP_AOTCONST */
6867 return 8;
/* Register windows are a SPARC concept; nothing to do on ARM. */
void
mono_arch_flush_register_windows (void)
{
}
6875 MonoMethod*
6876 mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
6878 return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
/*
 * mono_arch_find_static_call_vtable:
 *
 *   Return the vtable passed in MONO_ARCH_RGCTX_REG; the (gsize) cast
 * truncates the host register value to pointer size first.
 */
MonoVTable*
mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*)(gsize)regs [MONO_ARCH_RGCTX_REG];
}
/*
 * mono_arch_get_cie_program:
 *
 *   Return the unwind ops describing the initial (CIE) unwind state:
 * the CFA is at sp with offset 0.
 */
GSList*
mono_arch_get_cie_program (void)
{
	GSList *l = NULL;

	mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);

	return l;
}
/* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Code size estimates (bytes) for the pieces of an IMT trampoline. */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
#define WMC_SIZE (8 * 4)
/* Byte distance from A to B, truncated to 32 bits (the target is 32 bit ARM) */
#define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
6906 static arminstr_t *
6907 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6909 guint32 delta = DISTANCE (target, code);
6910 delta -= 8;
6911 g_assert (delta >= 0 && delta <= 0xFFF);
6912 *target = *target | delta;
6913 *code = value;
6914 return code + 1;
#ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper: report an IMT entry mismatch and abort. */
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
	g_assert (0);
}
#endif
/*
 * mono_arch_build_imt_trampoline:
 *
 *   Build an IMT trampoline: a binary search over IMT_ENTRIES on the key in
 * MONO_ARCH_IMT_REG, dispatching through the vtable slot (or target code /
 * FAIL_TRAMP) of the matching entry.  Constants needed by PC-relative LDRs
 * are placed in per-entry constant pools after unconditional branches and
 * patched in with arm_emit_value_and_patch_ldr ().
 */
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int size, i;
	arminstr_t *code, *start;
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;
	arminstr_t *vtable_target = NULL;
	int extra_space = 0;
#ifdef ENABLE_WRONG_METHOD_CHECK
	char * cond;
#endif
	GSList *unwind_ops;

	size = BASE_SIZE;
	constant_pool_starts = g_new0 (guint32*, count);

	/* First pass: compute the code size of each chunk */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;
			}

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;
			} else {
#ifdef ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += WMC_SIZE;
#endif
			}
			if (fail_case) {
				item->chunk_size += 16;
				large_offsets = TRUE;
			}
			item->chunk_size += CALL_SIZE;
		} else {
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}

	if (large_offsets)
		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */

	if (fail_tramp)
		code = mono_method_alloc_generic_virtual_trampoline (domain, size);
	else
		code = mono_domain_code_reserve (domain, size);
	start = code;

	unwind_ops = mono_arch_get_cie_program ();

#ifdef DEBUG_IMT
	g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif

	/* Prologue: save scratch registers; with large offsets also save ip/pc
	 * so a computed target can be stored over the saved pc and popped. */
	if (large_offsets) {
		ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (target_mgreg_t));
	} else {
		ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
	}
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);

	/* Second pass: emit the chunks */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
		gint32 vtable_offset;

		item->code_target = (guint8*)code;

		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
					imt_method = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				}
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0);
			} else {
				/*Enable the commented code to assert on wrong method*/
#ifdef ENABLE_WRONG_METHOD_CHECK
				imt_method = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				cond = code;
				ARM_B_COND (code, ARMCOND_EQ, 0);

/* Define this if your system is so bad that gdb is failing. */
#ifdef BROKEN_DEV_ENV
				ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
				ARM_BL (code, 0);
				arm_patch (code - 1, mini_dump_bad_imt);
#else
				ARM_DBRK (code);
#endif
				arm_patch (cond, code);
#endif
			}

			if (item->has_target_code) {
				/* Load target address */
				target_code_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
			} else {
				vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
				if (!arm_is_imm12 (vtable_offset)) {
					/*
					 * We need to branch to a computed address but we don't have
					 * a free register to store it, since IP must contain the
					 * vtable address. So we push the two values to the stack, and
					 * load them both using LDM.
					 */
					/* Compute target address */
					vtable_offset_ins = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
					/* Save it to the fourth slot */
					ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
					/* Restore registers and branch */
					ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

					code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
				} else {
					ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
					if (large_offsets) {
						mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
						ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (target_mgreg_t));
					}
					mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
					ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
				}
			}

			if (fail_case) {
				arm_patch (item->jmp_code, (guchar*)code);

				target_code_ins = code;
				/* Load target address */
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
				item->jmp_code = NULL;
			}

			if (imt_method)
				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)(gsize)item->key);

			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)(gsize)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;
			}

			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
			if (extra_space) {
				code += extra_space;
				extra_space = 0;
			}
		} else {
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);

			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_HS, 0);
			++extra_space;
		}
	}

	/* Third pass: patch branches and fill the bsearch constant pools */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		}
		if (i > 0 && item->is_equals) {
			int j;
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)(gsize)imt_entries [j]->key);
			}
		}
	}

#ifdef DEBUG_IMT
	{
		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count);
		mono_disassemble_code (NULL, (guint8*)start, size, buff);
		g_free (buff);
	}
#endif

	g_free (constant_pool_starts);

	mono_arch_flush_icache ((guint8*)start, size);
	MONO_PROFILER_RAISE (jit_code_buffer, ((guint8*)start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
	UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);

	g_assert (DISTANCE (start, code) <= size);

	mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), domain);

	return start;
}
7162 host_mgreg_t
7163 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
7165 return ctx->regs [reg];
7168 void
7169 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
7171 ctx->regs [reg] = val;
/*
 * mono_arch_get_trampolines:
 *
 *   Return a list of MonoTrampInfo structures describing arch specific trampolines
 * for AOT.
 */
GSList *
mono_arch_get_trampolines (gboolean aot)
{
	/* On ARM these are only the exception trampolines */
	return mono_arm_get_exception_trampolines (aot);
}
7186 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;
	MonoDebugOptions *opt = mini_get_debug_options ();

	if (ji->from_aot) {
		/* AOT code is not patched; record the breakpoint in the bp_addrs table */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = (guint8*)(opt->soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
	} else if (opt->soft_breakpoints) {
		/* Patch in a call through LR (set up by the seq point sequence) */
		code += 4;
		ARM_BLX_REG (code, ARMREG_LR);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)(gssize)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	MonoDebugOptions *opt = mini_get_debug_options ();
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		/* AOT: remove the entry from the bp_addrs table */
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(opt->soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
		info->bp_addrs [native_offset / 4] = 0;
	} else if (opt->soft_breakpoints) {
		/* Replace the patched BLX with a NOP */
		code += 4;
		ARM_NOP (code);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		/* NOP out the 4 instruction trigger page load sequence */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
7271 * mono_arch_start_single_stepping:
7273 * Start single stepping.
7275 void
7276 mono_arch_start_single_stepping (void)
7278 if (ss_trigger_page)
7279 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7280 else
7281 single_step_tramp = mini_get_single_step_trampoline ();
7285 * mono_arch_stop_single_stepping:
7287 * Stop single stepping.
7289 void
7290 mono_arch_stop_single_stepping (void)
7292 if (ss_trigger_page)
7293 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7294 else
7295 single_step_tramp = NULL;
7298 #if __APPLE__
7299 #define DBG_SIGNAL SIGBUS
7300 #else
7301 #define DBG_SIGNAL SIGSEGV
7302 #endif
7305 * mono_arch_is_single_step_event:
7307 * Return whenever the machine state in SIGCTX corresponds to a single
7308 * step event.
7310 gboolean
7311 mono_arch_is_single_step_event (void *info, void *sigctx)
7313 siginfo_t *sinfo = (siginfo_t*)info;
7315 if (!ss_trigger_page)
7316 return FALSE;
7318 /* Sometimes the address is off by 4 */
7319 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7320 return TRUE;
7321 else
7322 return FALSE;
7326 * mono_arch_is_breakpoint_event:
7328 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7330 gboolean
7331 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7333 siginfo_t *sinfo = (siginfo_t*)info;
7335 if (!ss_trigger_page)
7336 return FALSE;
7338 if (sinfo->si_signo == DBG_SIGNAL) {
7339 /* Sometimes the address is off by 4 */
7340 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7341 return TRUE;
7342 else
7343 return FALSE;
7344 } else {
7345 return FALSE;
7350 * mono_arch_skip_breakpoint:
7352 * See mini-amd64.c for docs.
7354 void
7355 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7357 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7361 * mono_arch_skip_single_step:
7363 * See mini-amd64.c for docs.
7365 void
7366 mono_arch_skip_single_step (MonoContext *ctx)
7368 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
/*
 * mono_arch_get_seq_point_info:
 *
 *   Return the per-method SeqPointInfo for CODE, creating and caching it in
 * the domain's arch_seq_points hash table on first use.
 *
 *   See mini-amd64.c for docs.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;

	// FIXME: Add a free function

	mono_domain_lock (domain);
	info = (SeqPointInfo*)g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
											   code);
	mono_domain_unlock (domain);

	if (!info) {
		ji = mono_jit_info_table_find (domain, code);
		g_assert (ji);

		/* Trailing storage sized by code_size: one bp_addrs slot per 4-byte
		 * instruction, indexed as in mono_arch_set_breakpoint () */
		info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);

		info->ss_trigger_page = ss_trigger_page;
		info->bp_trigger_page = bp_trigger_page;
		info->ss_tramp_addr = &single_step_tramp;

		/* NOTE(review): lookup and insert happen under separate lock
		 * acquisitions, so two threads could race and each allocate an info
		 * for the same code; the loser's entry gets replaced in the table.
		 * Presumably benign here — verify against other callers. */
		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}
7408 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7411 * mono_arch_set_target:
7413 * Set the target architecture the JIT backend should generate code for, in the form
7414 * of a GNU target triplet. Only used in AOT mode.
7416 void
7417 mono_arch_set_target (char *mtriple)
7419 /* The GNU target triple format is not very well documented */
7420 if (strstr (mtriple, "armv7")) {
7421 v5_supported = TRUE;
7422 v6_supported = TRUE;
7423 v7_supported = TRUE;
7425 if (strstr (mtriple, "armv6")) {
7426 v5_supported = TRUE;
7427 v6_supported = TRUE;
7429 if (strstr (mtriple, "armv7s")) {
7430 v7s_supported = TRUE;
7432 if (strstr (mtriple, "armv7k")) {
7433 v7k_supported = TRUE;
7435 if (strstr (mtriple, "thumbv7s")) {
7436 v5_supported = TRUE;
7437 v6_supported = TRUE;
7438 v7_supported = TRUE;
7439 v7s_supported = TRUE;
7440 thumb_supported = TRUE;
7441 thumb2_supported = TRUE;
7443 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7444 v5_supported = TRUE;
7445 v6_supported = TRUE;
7446 thumb_supported = TRUE;
7447 iphone_abi = TRUE;
7449 if (strstr (mtriple, "gnueabi"))
7450 eabi_supported = TRUE;
/*
 * mono_arch_opcode_supported:
 *
 *   Return whether OPCODE can be emitted by this backend on the current target.
 */
gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	/* 32-bit-and-narrower integer atomics: only available from ARMv7 on */
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
		return v7_supported;
	/* Floating point atomics additionally require hardware FP (VFP) */
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return v7_supported && IS_VFP;
	default:
		return FALSE;
	}
}
7483 CallInfo*
7484 mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
7486 return get_call_info (mp, sig);
7489 gpointer
7490 mono_arch_get_get_tls_tramp (void)
7492 return NULL;
7495 static G_GNUC_UNUSED guint8*
7496 emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
7498 /* OP_AOTCONST */
7499 mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
7500 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7501 ARM_B (code, 0);
7502 *(gpointer*)code = NULL;
7503 code += 4;
7504 /* Load the value from the GOT */
7505 ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
7506 return code;
7509 guint8*
7510 mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
7512 MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;
7514 *ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
7515 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7516 ARM_B (code, 0);
7517 *(gpointer*)code = NULL;
7518 code += 4;
7519 ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
7520 return code;
/*
 * mono_arch_load_function:
 *
 *   Map JIT_ICALL_ID to the address of the corresponding ARM-specific icall
 * implementation. Returns NULL for ids not handled by this backend.
 */
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
	gpointer target = NULL;
	switch (jit_icall_id) {
#undef MONO_AOT_ICALL
	/* Each entry expands to: case MONO_JIT_ICALL_x: target = (gpointer)x; break; */
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
	MONO_AOT_ICALL (mono_arm_resume_unwind)
	MONO_AOT_ICALL (mono_arm_start_gsharedvt_call)
	MONO_AOT_ICALL (mono_arm_throw_exception)
	MONO_AOT_ICALL (mono_arm_throw_exception_by_token)
	MONO_AOT_ICALL (mono_arm_unaligned_stack)
	}
	return target;
}