[arm] account only 4 bytes on stack for single precision arguments
[mono-project.git] / mono / mini / mini-arm.c
blob62153919df03f9a074912cfeb1d3cb2347253101
1 /**
2 * \file
3 * ARM backend for the Mono code generator
5 * Authors:
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
10 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include "mini.h"
15 #include <string.h>
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/utils/mono-mmap.h>
22 #include <mono/utils/mono-hwcap.h>
23 #include <mono/utils/mono-memory-model.h>
24 #include <mono/utils/mono-threads-coop.h>
25 #include <mono/utils/unlocked.h>
27 #include "interp/interp.h"
29 #include "mini-arm.h"
30 #include "cpu-arm.h"
31 #include "ir-emit.h"
32 #include "debugger-agent.h"
33 #include "mini-gc.h"
34 #include "mini-runtime.h"
35 #include "aot-runtime.h"
36 #include "mono/arch/arm/arm-vfp-codegen.h"
38 /* Sanity check: This makes no sense */
39 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
40 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
41 #endif
44 * IS_SOFT_FLOAT: Is full software floating point used?
45 * IS_HARD_FLOAT: Is full hardware floating point used?
46 * IS_VFP: Is hardware floating point with software ABI used?
48 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
49 * IS_VFP may delegate to mono_arch_is_soft_float ().
52 #if defined(ARM_FPU_VFP_HARD)
53 #define IS_SOFT_FLOAT (FALSE)
54 #define IS_HARD_FLOAT (TRUE)
55 #define IS_VFP (TRUE)
56 #elif defined(ARM_FPU_NONE)
57 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
58 #define IS_HARD_FLOAT (FALSE)
59 #define IS_VFP (!mono_arch_is_soft_float ())
60 #else
61 #define IS_SOFT_FLOAT (FALSE)
62 #define IS_HARD_FLOAT (FALSE)
63 #define IS_VFP (TRUE)
64 #endif
66 #define THUNK_SIZE (3 * 4)
68 #if __APPLE__
69 G_BEGIN_DECLS
70 void sys_icache_invalidate (void *start, size_t len);
71 G_END_DECLS
72 #endif
74 /* This mutex protects architecture specific caches */
75 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
76 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
77 static mono_mutex_t mini_arch_mutex;
79 static gboolean v5_supported = FALSE;
80 static gboolean v6_supported = FALSE;
81 static gboolean v7_supported = FALSE;
82 static gboolean v7s_supported = FALSE;
83 static gboolean v7k_supported = FALSE;
84 static gboolean thumb_supported = FALSE;
85 static gboolean thumb2_supported = FALSE;
87 * Whenever to use the ARM EABI
89 static gboolean eabi_supported = FALSE;
91 /*
92 * Whenever to use the iphone ABI extensions:
93 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
94 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
95 * This is required for debugging/profiling tools to work, but it has some overhead so it should
96 * only be turned on in debug builds.
98 static gboolean iphone_abi = FALSE;
101 * The FPU we are generating code for. This is NOT runtime configurable right now,
102 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
104 static MonoArmFPU arm_fpu;
106 #if defined(ARM_FPU_VFP_HARD)
108 * On armhf, d0-d7 are used for argument passing and d8-d15
109 * must be preserved across calls, which leaves us no room
110 * for scratch registers. So we use d14-d15 but back up their
111 * previous contents to a stack slot before using them - see
112 * mono_arm_emit_vfp_scratch_save/_restore ().
114 static int vfp_scratch1 = ARM_VFP_D14;
115 static int vfp_scratch2 = ARM_VFP_D15;
116 #else
118 * On armel, d0-d7 do not need to be preserved, so we can
119 * freely make use of them as scratch registers.
121 static int vfp_scratch1 = ARM_VFP_D0;
122 static int vfp_scratch2 = ARM_VFP_D1;
123 #endif
125 static int i8_align;
127 static gpointer single_step_tramp, breakpoint_tramp;
130 * The code generated for sequence points reads from this location, which is
131 * made read-only when single stepping is enabled.
133 static gpointer ss_trigger_page;
135 /* Enabled breakpoints read from this trigger page */
136 static gpointer bp_trigger_page;
139 * TODO:
140 * floating point support: on ARM it is a mess, there are at least 3
141 * different setups, each of which binary incompat with the other.
142 * 1) FPA: old and ugly, but unfortunately what current distros use
143 * the double binary format has the two words swapped. 8 double registers.
144 * Implemented usually by kernel emulation.
145 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
146 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
147 * 3) VFP: the new and actually sensible and useful FP support. Implemented
148 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
150 * We do not care about FPA. We will support soft float and VFP.
152 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
153 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
154 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
156 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
157 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
158 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
160 //#define DEBUG_IMT 0
162 #ifndef DISABLE_JIT
163 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
164 #endif
166 static guint8*
167 emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data);
/*
 * mono_arch_regname:
 *
 *   Return the symbolic name of the ARM core register REG (0-15),
 * or "unknown" for any out-of-range index. The returned string is
 * statically allocated and must not be freed.
 */
const char*
mono_arch_regname (int reg)
{
	static const char *names [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};

	if (reg < 0 || reg >= (int)(sizeof (names) / sizeof (names [0])))
		return "unknown";
	return names [reg];
}
/*
 * mono_arch_fregname:
 *
 *   Return the symbolic name of the single-precision VFP register REG
 * (0-31), or "unknown" for any out-of-range index. The returned string
 * is statically allocated and must not be freed.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *fp_names [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};

	if (reg < 0 || reg >= (int)(sizeof (fp_names) / sizeof (fp_names [0])))
		return "unknown";
	return fp_names [reg];
}
201 #ifndef DISABLE_JIT
202 static guint8*
203 emit_big_add_temp (guint8 *code, int dreg, int sreg, int imm, int temp)
205 int imm8, rot_amount;
207 g_assert (temp == ARMREG_IP || temp == ARMREG_LR);
209 if (imm == 0) {
210 if (sreg != dreg)
211 ARM_MOV_REG_REG (code, dreg, sreg);
212 } else if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
213 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
214 return code;
216 if (dreg == sreg) {
217 code = mono_arm_emit_load_imm (code, temp, imm);
218 ARM_ADD_REG_REG (code, dreg, sreg, temp);
219 } else {
220 code = mono_arm_emit_load_imm (code, dreg, imm);
221 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
223 return code;
/*
 * emit_big_add:
 *
 *   Emit DREG = SREG + IMM using ip as the scratch register.
 * Thin wrapper around emit_big_add_temp ().
 */
static guint8*
emit_big_add (guint8 *code, int dreg, int sreg, int imm)
{
	return emit_big_add_temp (code, dreg, sreg, imm, ARMREG_IP);
}
/*
 * emit_ldr_imm:
 *
 *   Emit a load of [SREG + IMM] into DREG. When IMM does not fit the
 * 12-bit LDR offset, the address is first computed into DREG (which
 * therefore must differ from SREG in that case) and a zero-offset load
 * is emitted.
 */
static guint8*
emit_ldr_imm (guint8 *code, int dreg, int sreg, int imm)
{
	if (!arm_is_imm12 (imm)) {
		/* dreg doubles as the address register, so it cannot alias sreg */
		g_assert (dreg != sreg);
		code = emit_big_add (code, dreg, sreg, imm);
		ARM_LDR_IMM (code, dreg, dreg, 0);
	} else {
		ARM_LDR_IMM (code, dreg, sreg, imm);
	}
	return code;
}
/*
 * emit_sub_imm:
 *
 *   Emit DREG = SREG - IMM, falling back to a scratch register when IMM
 * cannot be encoded as a rotated imm8.
 * If dreg == sreg, this clobbers IP.
 */
static guint8*
emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
{
	int imm8, rot_amount;

	if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
		ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
		return code;
	}
	if (dreg == sreg) {
		code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
		ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
	} else {
		/* NOTE: materializes imm in dreg, then computes imm - sreg.
		 * This yields -(sreg - imm); callers appear to rely on the
		 * existing semantics — preserve statement order exactly. */
		code = mono_arm_emit_load_imm (code, dreg, imm);
		ARM_SUB_REG_REG (code, dreg, dreg, sreg);
	}
	return code;
}
/*
 * emit_memcpy:
 *
 *   Emit an inline word-by-word copy of SIZE bytes from SREG+SOFFSET to
 * DREG+DOFFSET. For copies larger than 4 machine words a counted loop is
 * emitted instead of unrolled loads/stores. SIZE must be a multiple of 4.
 * Clobbers r0-r3 and lr.
 */
static guint8*
emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
{
	/* we can use r0-r3, since this is called only for incoming args on the stack */
	if (size > sizeof (target_mgreg_t) * 4) {
		guint8 *start_loop;
		/* r0 = src, r1 = dst, r2 = remaining byte count */
		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
		start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
		ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
		ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
		ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
		ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
		ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
		/* loop back while the count has not reached zero */
		ARM_B_COND (code, ARMCOND_NE, 0);
		arm_patch (code - 4, start_loop);
		return code;
	}
	if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
			arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
		/* both offset ranges encode directly: unrolled copy via lr */
		while (size >= 4) {
			ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
			ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
			doffset += 4;
			soffset += 4;
			size -= 4;
		}
	} else if (size) {
		/* offsets too large: compute base addresses first, then copy */
		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
		doffset = soffset = 0;
		while (size >= 4) {
			ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
			ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
			doffset += 4;
			soffset += 4;
			size -= 4;
		}
	}
	g_assert (size == 0);
	return code;
}
/*
 * emit_jmp_reg:
 *
 *   Emit an indirect jump to the address in REG. Uses BX when thumb is
 * supported (so interworking works), otherwise a plain mov to pc.
 */
static guint8*
emit_jmp_reg (guint8 *code, int reg)
{
	if (thumb_supported)
		ARM_BX (code, reg);
	else
		ARM_MOV_REG_REG (code, ARMREG_PC, reg);
	return code;
}
/*
 * emit_call_reg:
 *
 *   Emit an indirect call to the address in REG. On ARMv5+ a single BLX
 * is used; on older cores lr is loaded manually before jumping.
 */
static guint8*
emit_call_reg (guint8 *code, int reg)
{
	if (v5_supported) {
		ARM_BLX_REG (code, reg);
	} else {
		/* pc reads as current insn + 8, so lr ends up pointing past
		 * the jump emitted by emit_jmp_reg () below */
		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
		return emit_jmp_reg (code, reg);
	}
	return code;
}
/*
 * emit_call_seq:
 *
 *   Emit a patchable call sequence. For dynamic methods the target is
 * loaded from an inline literal (patched later) and called indirectly;
 * otherwise a direct BL is emitted and a thunk slot is reserved in case
 * the final target is out of branch range.
 */
static guint8*
emit_call_seq (MonoCompile *cfg, guint8 *code)
{
	if (cfg->method->dynamic) {
		/* ldr ip, [pc]; b over the literal; <literal>; call ip */
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;
		code += 4;
		code = emit_call_reg (code, ARMREG_IP);
	} else {
		ARM_BL (code, 0);
	}
	/* reserve thunk space in case the branch displacement overflows */
	cfg->thunk_area += THUNK_SIZE;
	return code;
}
/*
 * mono_arm_patchable_b:
 *
 *   Emit a conditional branch with a zero displacement, to be fixed up
 * later via arm_patch ().
 */
guint8*
mono_arm_patchable_b (guint8 *code, int cond)
{
	ARM_B_COND (code, cond, 0);
	return code;
}
/*
 * mono_arm_patchable_bl:
 *
 *   Emit a conditional branch-and-link with a zero displacement, to be
 * fixed up later via arm_patch ().
 */
guint8*
mono_arm_patchable_bl (guint8 *code, int cond)
{
	ARM_BL_COND (code, cond, 0);
	return code;
}
359 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
360 #define HAVE_AEABI_READ_TP 1
361 #endif
363 #ifdef HAVE_AEABI_READ_TP
364 G_BEGIN_DECLS
365 gpointer __aeabi_read_tp (void);
366 G_END_DECLS
367 #endif
/*
 * mono_arch_have_fast_tls:
 *
 *   Return TRUE if TLS values can be read directly from the TPIDRURO
 * coprocessor register instead of going through __aeabi_read_tp ().
 * The result is computed once by comparing both mechanisms and cached.
 */
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef HAVE_AEABI_READ_TP
	static gboolean have_fast_tls = FALSE;
	static gboolean inited = FALSE;

	if (mini_debug_options.use_fallback_tls)
		return FALSE;

	if (inited)
		return have_fast_tls;

	if (v7_supported) {
		gpointer tp1, tp2;

		tp1 = __aeabi_read_tp ();
		/* read TPIDRURO directly and check it matches the libc answer */
		asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2));

		have_fast_tls = tp1 && tp1 == tp2;
	}
	inited = TRUE;
	return have_fast_tls;
#else
	return FALSE;
#endif
}
/*
 * emit_tls_get:
 *
 *   Emit a load of the TLS slot at TLS_OFFSET into DREG, reading the
 * thread pointer from TPIDRURO (requires ARMv7).
 */
static guint8*
emit_tls_get (guint8 *code, int dreg, int tls_offset)
{
	g_assert (v7_supported);
	ARM_MRC (code, 15, 0, dreg, 13, 0, 3);
	ARM_LDR_IMM (code, dreg, dreg, tls_offset);
	return code;
}
/*
 * emit_tls_set:
 *
 *   Emit a store of SREG into the TLS slot at TLS_OFFSET. The thread
 * pointer is read into whichever of r0/r1 does not alias SREG
 * (requires ARMv7). Clobbers that register.
 */
static guint8*
emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
	int tp_reg = (sreg != ARMREG_R0) ? ARMREG_R0 : ARMREG_R1;

	g_assert (v7_supported);
	ARM_MRC (code, 15, 0, tp_reg, 13, 0, 3);
	ARM_STR_IMM (code, sreg, tp_reg, tls_offset);
	return code;
}
/*
 * emit_save_lmf:
 *
 * Emit code to push an LMF structure on the LMF stack.
 * On arm, this is intermixed with the initialization of other fields of the structure.
 */
static guint8*
emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
	int i;

	/* obtain the lmf_addr into r0, via fast TLS if available */
	if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR) != -1) {
		code = emit_tls_get (code, ARMREG_R0, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR));
	} else {
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
							 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr));
		code = emit_call_seq (cfg, code);
	}
	/* we build the MonoLMF structure on the stack - see mini-arm.h */
	/* lmf_offset is the offset from the previous stack pointer,
	 * alloc_size is the total stack space allocated, so the offset
	 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
	 * The pointer to the struct is put in r1 (new_lmf).
	 * ip is used as scratch
	 * The callee-saved registers are already in the MonoLMF structure
	 */
	code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
	/* r0 is the result from mono_get_lmf_addr () */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* new_lmf->previous_lmf = *lmf_addr */
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* *(lmf_addr) = r1 */
	ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* Skip method (only needed for trampoline LMF frames) */
	ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
	ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
	/* save the current IP */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));

	/* the LMF area holds no managed references; tell the GC map */
	for (i = 0; i < MONO_ABI_SIZEOF (MonoLMF); i += sizeof (target_mgreg_t))
		mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);

	return code;
}
/* Pairing of a float argument's vreg with the hardware VFP register it
 * must be loaded into before a call — consumed by emit_float_args (). */
typedef struct {
	gint32 vreg;	/* virtual register holding the argument value */
	gint32 hreg;	/* destination hardware (single-precision) VFP register */
} FloatArgData;
/*
 * emit_float_args:
 *
 *   Emit FLDS loads moving each single-precision argument of the call INST
 * from its stack slot into the VFP register required by the hard-float ABI.
 * MAX_LEN and OFFSET are updated so the caller's code-buffer accounting
 * stays correct after the extra instructions are emitted.
 */
static guint8 *
emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
{
	GSList *list;

	set_code_cursor (cfg, code);

	for (list = inst->float_args; list; list = list->next) {
		FloatArgData *fad = (FloatArgData*)list->data;
		MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
		gboolean imm = arm_is_fpimm8 (var->inst_offset);

		/* 4+1 insns for emit_big_add () and 1 for FLDS. */
		if (!imm)
			*max_len += 20 + 4;

		*max_len += 4;

		code = realloc_code (cfg, *max_len);

		if (!imm) {
			/* offset does not fit the FLDS immediate: compute address in lr */
			code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
			ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
		} else
			ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);

		set_code_cursor (cfg, code);
		*offset = code - cfg->native_code;
	}

	return code;
}
/*
 * mono_arm_emit_vfp_scratch_save:
 *
 *   Spill the double-precision scratch register REG (vfp_scratch1/2) to
 * its pre-allocated stack slot. Only needed on armhf, where d14/d15 are
 * callee-saved and must be preserved around scratch use.
 */
static guint8 *
mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
{
	MonoInst *inst;

	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);

	inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];

	if (IS_HARD_FLOAT) {
		if (!arm_is_fpimm8 (inst->inst_offset)) {
			/* slot offset too large for FSTD: compute address in lr */
			code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
			ARM_FSTD (code, reg, ARMREG_LR, 0);
		} else
			ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
	}

	return code;
}
/*
 * mono_arm_emit_vfp_scratch_restore:
 *
 *   Reload the double-precision scratch register REG (vfp_scratch1/2)
 * from its stack slot; inverse of mono_arm_emit_vfp_scratch_save ().
 */
static guint8 *
mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
{
	MonoInst *inst;

	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);

	inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];

	if (IS_HARD_FLOAT) {
		if (!arm_is_fpimm8 (inst->inst_offset)) {
			/* slot offset too large for FLDD: compute address in lr */
			code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
			ARM_FLDD (code, reg, ARMREG_LR, 0);
		} else
			ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
	}

	return code;
}
/*
 * emit_restore_lmf:
 *
 * Emit code to pop an LMF structure from the LMF stack.
 */
static guint8*
emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
	int basereg, offset;

	if (lmf_offset < 32) {
		/* small offset: address the LMF directly off the frame register */
		basereg = cfg->frame_reg;
		offset = lmf_offset;
	} else {
		/* large offset: compute the LMF address into r2 first */
		basereg = ARMREG_R2;
		offset = 0;
		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
	}

	/* ip = previous_lmf */
	ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* lr = lmf_addr */
	ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* *(lmf_addr) = previous_lmf */
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));

	return code;
}
570 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k, frame_size = 0;
	guint32 size, align, pad;
	int offset = 8;	/* skip saved fp + lr */
	MonoType *t;

	t = mini_get_underlying_type (csig->ret);
	if (MONO_TYPE_ISSTRUCT (t)) {
		/* hidden struct-return pointer occupies one slot */
		frame_size += sizeof (target_mgreg_t);
		offset += 4;
	}

	arg_info [0].offset = offset;

	if (csig->hasthis) {
		frame_size += sizeof (target_mgreg_t);
		offset += 4;
	}

	arg_info [0].size = frame_size;

	for (k = 0; k < param_count; k++) {
		size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke);

		/* ignore alignment for now */
		align = 1;

		frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
		arg_info [k].pad = pad;
		frame_size += size;
		arg_info [k + 1].pad = 0;
		arg_info [k + 1].size = size;
		offset += pad;
		arg_info [k + 1].offset = offset;
		offset += size;
	}

	/* round the whole frame up to the ABI frame alignment */
	align = MONO_ARCH_FRAME_ALIGNMENT;
	frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
	arg_info [k].pad = pad;

	return frame_size;
}
629 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 *
 *   Generate the native thunk used to invoke a delegate. With HAS_TARGET,
 * the thunk replaces the this argument with delegate->target and jumps to
 * delegate->method_ptr; otherwise it slides the register arguments down by
 * one to drop the delegate argument.
 * Returns the start of the generated code; *INFO receives its MonoTrampInfo.
 *
 * NOTE(review): param_count is declared gboolean but used as a small int
 * (0..MAX_ARCH_DELEGATE_PARAMS); works because gboolean is gint, but the
 * type is misleading — consider changing to int. Also the profiler
 * jit_code_buffer event appears to be raised both inside each branch and
 * again at the end — confirm the duplicate raise is intentional.
 */
static guint8*
get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, gboolean param_count)
{
	guint8 *code, *start;
	GSList *unwind_ops = mono_arch_get_cie_program ();

	if (has_target) {
		start = code = mono_global_codeman_reserve (12);

		/* Replace the this argument with the target */
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);

		g_assert ((code - start) <= 12);

		mono_arch_flush_icache (start, 12);
		MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
	} else {
		int size, i;

		size = 8 + param_count * 4;
		start = code = mono_global_codeman_reserve (size);

		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
		/* slide down the arguments */
		for (i = 0; i < param_count; ++i) {
			ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
		}
		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);

		g_assert ((code - start) <= size);

		mono_arch_flush_icache (start, size);
		MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
	}

	if (has_target) {
		*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
	} else {
		char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
		*info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
		g_free (name);
	}

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));

	return start;
}
/*
 * mono_arch_get_delegate_invoke_impls:
 *
 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
 * trampolines.
 */
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
	GSList *res = NULL;
	MonoTrampInfo *info;
	int i;

	/* the single has-target variant... */
	get_delegate_invoke_impl (&info, TRUE, 0);
	res = g_slist_prepend (res, info);

	/* ...plus one no-target variant per supported arity (0..MAX) */
	for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
		get_delegate_invoke_impl (&info, FALSE, i);
		res = g_slist_prepend (res, info);
	}

	return res;
}
/*
 * mono_arch_get_delegate_invoke_impl:
 *
 *   Return (and cache) the delegate-invoke thunk matching SIG, either from
 * the AOT image or freshly generated. Returns NULL for signatures the fast
 * path cannot handle (struct returns, too many or non-regsize parameters).
 * The caches are guarded by the arch mutex.
 */
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	guint8 *code, *start;
	MonoType *sig_ret;

	/* FIXME: Support more cases */
	sig_ret = mini_get_underlying_type (sig->ret);
	if (MONO_TYPE_ISSTRUCT (sig_ret))
		return NULL;

	if (has_target) {
		static guint8* cached = NULL;

		mono_mini_arch_lock ();
		if (cached) {
			mono_mini_arch_unlock ();
			return cached;
		}

		if (mono_ee_features.use_aot_trampolines) {
			start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
		} else {
			MonoTrampInfo *info;
			start = get_delegate_invoke_impl (&info, TRUE, 0);
			mono_tramp_info_register (info, NULL);
		}
		cached = start;
		mono_mini_arch_unlock ();
		return cached;
	} else {
		/* one cached thunk per arity */
		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
		int i;

		if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
			return NULL;
		for (i = 0; i < sig->param_count; ++i)
			if (!mono_is_regsize_var (sig->params [i]))
				return NULL;

		mono_mini_arch_lock ();
		code = cache [sig->param_count];
		if (code) {
			mono_mini_arch_unlock ();
			return code;
		}

		if (mono_ee_features.use_aot_trampolines) {
			char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
			start = (guint8*)mono_aot_get_trampoline (name);
			g_free (name);
		} else {
			MonoTrampInfo *info;
			start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
			mono_tramp_info_register (info, NULL);
		}
		cache [sig->param_count] = start;
		mono_mini_arch_unlock ();
		return start;
	}

	return NULL;
}
/* Virtual delegate invoke thunks are not implemented on ARM. */
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
	return NULL;
}
/* The this pointer of a call is always passed in r0 on ARM. */
gpointer
mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
{
	return (gpointer)regs [ARMREG_R0];
}
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
	i8_align = MONO_ABI_ALIGNOF (gint64);
#ifdef MONO_CROSS_COMPILE
	/* Need to set the alignment of i8 since it can different on the target */
#ifdef TARGET_ANDROID
	/* linux gnueabi */
	mono_type_set_alignment (MONO_TYPE_I8, i8_align);
#endif
#endif
}
/*
 * Initialize architecture specific code.
 *
 * Sets up the arch mutex, the single-step/breakpoint trigger pages,
 * detects the FPU configuration and the supported ARM ISA level
 * (v5/v6/v7[sk], thumb[2]), honoring the MONO_CPU_ARCH and
 * MONO_ARM_FORCE_SOFT_FLOAT environment overrides.
 */
void
mono_arch_init (void)
{
	char *cpu_arch;

#ifdef TARGET_WATCHOS
	mini_debug_options.soft_breakpoints = TRUE;
#endif

	mono_os_mutex_init_recursive (&mini_arch_mutex);
	if (mini_debug_options.soft_breakpoints) {
		if (!mono_aot_only)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();
	} else {
		/* hardware-style breakpoints: reads from these pages fault when
		 * the pages are protected */
		ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
		bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
		mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
	}

#if defined(__ARM_EABI__)
	eabi_supported = TRUE;
#endif

#if defined(ARM_FPU_VFP_HARD)
	arm_fpu = MONO_ARM_FPU_VFP_HARD;
#else
	arm_fpu = MONO_ARM_FPU_VFP;

#if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
	/*
	 * If we're compiling with a soft float fallback and it
	 * turns out that no VFP unit is available, we need to
	 * switch to soft float. We don't do this for iOS, since
	 * iOS devices always have a VFP unit.
	 */
	if (!mono_hwcap_arm_has_vfp)
		arm_fpu = MONO_ARM_FPU_NONE;

	/*
	 * This environment variable can be useful in testing
	 * environments to make sure the soft float fallback
	 * works. Most ARM devices have VFP units these days, so
	 * normally soft float code would not be exercised much.
	 */
	char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");

	if (soft && !strncmp (soft, "1", 1))
		arm_fpu = MONO_ARM_FPU_NONE;
	g_free (soft);
#endif
#endif

	v5_supported = mono_hwcap_arm_is_v5;
	v6_supported = mono_hwcap_arm_is_v6;
	v7_supported = mono_hwcap_arm_is_v7;

	/*
	 * On weird devices, the hwcap code may fail to detect
	 * the ARM version. In that case, we can at least safely
	 * assume the version the runtime was compiled for.
	 */
#ifdef HAVE_ARMV5
	v5_supported = TRUE;
#endif
#ifdef HAVE_ARMV6
	v6_supported = TRUE;
#endif
#ifdef HAVE_ARMV7
	v7_supported = TRUE;
#endif

#if defined(TARGET_IOS)
	/* iOS is special-cased here because we don't yet
	   have a way to properly detect CPU features on it. */
	thumb_supported = TRUE;
	iphone_abi = TRUE;
#else
	thumb_supported = mono_hwcap_arm_has_thumb;
	thumb2_supported = mono_hwcap_arm_has_thumb2;
#endif

	/* Format: armv(5|6|7[s])[-thumb[2]] */
	cpu_arch = g_getenv ("MONO_CPU_ARCH");

	/* Do this here so it overrides any detection. */
	if (cpu_arch) {
		if (strncmp (cpu_arch, "armv", 4) == 0) {
			v5_supported = cpu_arch [4] >= '5';
			v6_supported = cpu_arch [4] >= '6';
			v7_supported = cpu_arch [4] >= '7';
			v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
			v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0;
		}

		thumb_supported = strstr (cpu_arch, "thumb") != NULL;
		thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
		g_free (cpu_arch);
	}
}
/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
	/* nothing to clean up on ARM */
}
/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
	/* no arm-specific optimizations yet */
	*exclude_mask = 0;
	return 0;
}
/*
 * This function tests for all SIMD functions supported.
 *
 * Returns a bitmask corresponding to all supported versions.
 */
guint32
mono_arch_cpu_enumerate_simd_versions (void)
{
	/* SIMD is currently unimplemented */
	return 0;
}
/* Return TRUE when using the hard-float (armhf) calling convention. */
gboolean
mono_arm_is_hard_float (void)
{
	return arm_fpu == MONO_ARM_FPU_VFP_HARD;
}
937 #ifndef DISABLE_JIT
/*
 * mono_arch_opcode_needs_emulation:
 *
 *   Return FALSE for integer divide/remainder on ARMv7s/v7k, which have
 * hardware sdiv/udiv; everything else listed in cpu-arm.md is emulated.
 */
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
	if (v7s_supported || v7k_supported) {
		switch (opcode) {
		case OP_IDIV:
		case OP_IREM:
		case OP_IDIV_UN:
		case OP_IREM_UN:
			return FALSE;
		default:
			break;
		}
	}
	return TRUE;
}
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Return TRUE when no VFP unit is available and FP must be emulated. */
gboolean
mono_arch_is_soft_float (void)
{
	return arm_fpu == MONO_ARM_FPU_NONE;
}
#endif
/*
 * is_regsize_var:
 *
 *   Return TRUE if values of type T fit in a single 32-bit core register
 * (pointers, native ints, 32-bit ints, references), i.e. the variable is a
 * candidate for global register allocation.
 */
static gboolean
is_regsize_var (MonoType *t)
{
	if (t->byref)
		return TRUE;
	t = mini_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return TRUE;
	case MONO_TYPE_OBJECT:
		return TRUE;
	case MONO_TYPE_GENERICINST:
		/* reference-type instantiations are regsize; valuetypes are not */
		if (!mono_type_generic_inst_is_valuetype (t))
			return TRUE;
		return FALSE;
	case MONO_TYPE_VALUETYPE:
		return FALSE;
	}
	return FALSE;
}
/*
 * mono_arch_get_allocatable_int_vars:
 *
 *   Return the list of variables in CFG eligible for allocation to integer
 * registers, sorted by liveness; skips dead, volatile, indirect and
 * non-regsize variables.
 */
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		/* we can only allocate 32 bit values */
		if (is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
		}
	}

	return vars;
}
/*
 * mono_arch_get_global_int_regs:
 *
 *   Return the list of callee-saved core registers available for global
 * register allocation, taking the iphone ABI frame pointer (r7) and the
 * reserved rgctx register (v5) into account.
 */
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;

	mono_arch_compute_omit_fp (cfg);

	/*
	 * FIXME: Interface calls might go through a static rgctx trampoline which
	 * sets V5, but it doesn't save it, so we need to save it ourselves, and
	 * avoid using it.
	 */
	if (cfg->flags & MONO_CFG_HAS_CALLS)
		cfg->uses_rgctx_reg = TRUE;

	if (cfg->arch.omit_fp)
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
	if (iphone_abi)
		/* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
	else
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
	if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
	/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
	/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/

	return regs;
}
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	/* FIXME: flat cost for now; no per-variable estimate */
	return 2;
}
1066 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_flush_icache:
 *
 *   Flush the instruction cache for [code, code + size) after emitting or
 * patching code, using the platform's cache-maintenance primitive.
 */
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#if defined(MONO_CROSS_COMPILE)
	/* nothing to do when not running on the target */
#elif __APPLE__
	sys_icache_invalidate (code, size);
#else
	__builtin___clear_cache ((char*)code, (char*)code + size);
#endif
}
1079 #define DEBUG(a)
/*
 * add_general:
 *
 *   Assign the next integer argument to a core register or a stack slot,
 * per the AAPCS. SIMPLE selects a 32-bit value; otherwise a 64-bit value
 * is placed in a register pair, split across r3 and the stack, or fully
 * on the (8-byte aligned, on EABI) stack. Updates *GR / *STACK_SIZE and
 * fills in AINFO.
 */
static void inline
add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
{
	if (simple) {
		if (*gr > ARMREG_R3) {
			/* r0-r3 exhausted: pass on the stack */
			ainfo->size = 4;
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBase;
			*stack_size += 4;
		} else {
			ainfo->storage = RegTypeGeneral;
			ainfo->reg = *gr;
		}
	} else {
		gboolean split;

		/* whether a 64-bit arg may be split between r3 and the stack */
		if (eabi_supported)
			split = i8_align == 4;
		else
			split = TRUE;

		ainfo->size = 8;
		if (*gr == ARMREG_R3 && split) {
			/* first word in r3 and the second on the stack */
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBaseGen;
			*stack_size += 4;
		} else if (*gr >= ARMREG_R3) {
			if (eabi_supported) {
				/* darwin aligns longs to 4 byte only */
				if (i8_align == 8) {
					*stack_size += 7;
					*stack_size &= ~7;
				}
			}
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBase;
			*stack_size += 8;
		} else {
			if (eabi_supported) {
				/* EABI requires 64-bit values in an even/odd register pair */
				if (i8_align == 8 && ((*gr) & 1))
					(*gr) ++;
			}
			ainfo->storage = RegTypeIRegPair;
			ainfo->reg = *gr;
			(*gr) ++;
		}
		(*gr) ++;
	}
}
/*
 * add_float:
 *
 *   Assign the next floating point argument to a VFP register or a stack
 * slot per the hard-float AAPCS (back-filling freed single-precision
 * registers via *FLOAT_SPARE). Stack doubles are 8-byte aligned; singles
 * take only 4 bytes of stack.
 */
static void inline
add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
{
	/*
	 * If we're calling a function like this:
	 *
	 * void foo(float a, double b, float c)
	 *
	 * We pass a in s0 and b in d1. That leaves us
	 * with s1 being unused. The armhf ABI recognizes
	 * this and requires register assignment to then
	 * use that for the next single-precision arg,
	 * i.e. c in this example. So float_spare either
	 * tells us which reg to use for the next single-
	 * precision arg, or it's -1, meaning use *fpr.
	 *
	 * Note that even though most of the JIT speaks
	 * double-precision, fpr represents single-
	 * precision registers.
	 *
	 * See parts 5.5 and 6.1.2 of the AAPCS for how
	 * this all works.
	 */

	if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
		ainfo->storage = RegTypeFP;

		if (is_double) {
			/*
			 * If we're passing a double-precision value
			 * and *fpr is odd (e.g. it's s1, s3, ...)
			 * we need to use the next even register. So
			 * we mark the current *fpr as a spare that
			 * can be used for the next single-precision
			 * value.
			 */
			if (*fpr % 2) {
				*float_spare = *fpr;
				(*fpr)++;
			}

			/*
			 * At this point, we have an even register
			 * so we assign that and move along.
			 */
			ainfo->reg = *fpr;
			*fpr += 2;
		} else if (*float_spare >= 0) {
			/*
			 * We're passing a single-precision value
			 * and it looks like a spare single-
			 * precision register is available. Let's
			 * use it.
			 */
			ainfo->reg = *float_spare;
			*float_spare = -1;
		} else {
			/*
			 * If we hit this branch, we're passing a
			 * single-precision value and we can simply
			 * use the next available register.
			 */
			ainfo->reg = *fpr;
			(*fpr)++;
		}
	} else {
		/*
		 * We've exhausted available floating point
		 * regs, so pass the rest on the stack.
		 */
		if (is_double) {
			/* 8-byte align the slot for doubles only */
			*stack_size += 7;
			*stack_size &= ~7;
		}

		ainfo->offset = *stack_size;
		ainfo->reg = ARMREG_SP;
		ainfo->storage = RegTypeBase;

		/* singles occupy only 4 bytes of stack */
		*stack_size += is_double ? 8 : 4;
	}
}
/*
 * is_hfa:
 * Return whether T is a homogeneous floating point aggregate as defined
 * by the AAPCS VFP variant: a struct whose 1 to 4 non-static fields are
 * all float or all double, possibly through nested structs. On success
 * *out_nfields receives the flattened field count and *out_esize the
 * element size (4 or 8).
 */
1221 static gboolean
1222 is_hfa (MonoType *t, int *out_nfields, int *out_esize)
1224 MonoClass *klass;
1225 gpointer iter;
1226 MonoClassField *field;
1227 MonoType *ftype, *prev_ftype = NULL;
1228 int nfields = 0;
1230 klass = mono_class_from_mono_type_internal (t);
1231 iter = NULL;
1232 while ((field = mono_class_get_fields_internal (klass, &iter))) {
1233 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
1234 continue;
1235 ftype = mono_field_get_type_internal (field);
1236 ftype = mini_get_underlying_type (ftype);
1238 if (MONO_TYPE_ISSTRUCT (ftype)) {
/* Flatten nested structs: they must themselves be HFAs and their
 * element type must match the fields seen so far. */
1239 int nested_nfields, nested_esize;
1241 if (!is_hfa (ftype, &nested_nfields, &nested_esize))
1242 return FALSE;
1243 if (nested_esize == 4)
1244 ftype = m_class_get_byval_arg (mono_defaults.single_class);
1245 else
1246 ftype = m_class_get_byval_arg (mono_defaults.double_class);
1247 if (prev_ftype && prev_ftype->type != ftype->type)
1248 return FALSE;
1249 prev_ftype = ftype;
1250 nfields += nested_nfields;
1251 } else {
/* Leaf fields must be (non-byref) R4 or R8, all of the same type. */
1252 if (!(!ftype->byref && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
1253 return FALSE;
1254 if (prev_ftype && prev_ftype->type != ftype->type)
1255 return FALSE;
1256 prev_ftype = ftype;
1257 nfields ++;
/* At most 4 elements fit the HFA register allocation rules. */
1260 if (nfields == 0 || nfields > 4)
1261 return FALSE;
1262 *out_nfields = nfields;
1263 *out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
1264 return TRUE;
/*
 * get_call_info:
 * Compute how the return value and every argument of SIG are passed
 * according to the arm calling conventions in effect (soft-float, VFP
 * soft ABI or hard-float; EABI vs. legacy), filling in a CallInfo.
 * The CallInfo is allocated from MP when given, otherwise from the heap
 * and the caller must g_free () it.
 */
1267 static CallInfo*
1268 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
1270 guint i, gr, fpr, pstart;
1271 gint float_spare;
1272 int n = sig->hasthis + sig->param_count;
1273 int nfields, esize;
1274 guint32 align;
1275 MonoType *t;
1276 guint32 stack_size = 0;
1277 CallInfo *cinfo;
1278 gboolean is_pinvoke = sig->pinvoke;
1279 gboolean vtype_retaddr = FALSE;
1281 if (mp)
1282 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1283 else
1284 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1286 cinfo->nargs = n;
/* Register cursors: integer args start at r0, FP args at s0. */
1287 gr = ARMREG_R0;
1288 fpr = ARM_VFP_F0;
1289 float_spare = -1;
/* First decide how the return value is passed. */
1291 t = mini_get_underlying_type (sig->ret);
1292 switch (t->type) {
1293 case MONO_TYPE_I1:
1294 case MONO_TYPE_U1:
1295 case MONO_TYPE_I2:
1296 case MONO_TYPE_U2:
1297 case MONO_TYPE_I4:
1298 case MONO_TYPE_U4:
1299 case MONO_TYPE_I:
1300 case MONO_TYPE_U:
1301 case MONO_TYPE_PTR:
1302 case MONO_TYPE_FNPTR:
1303 case MONO_TYPE_OBJECT:
1304 cinfo->ret.storage = RegTypeGeneral;
1305 cinfo->ret.reg = ARMREG_R0;
1306 break;
1307 case MONO_TYPE_U8:
1308 case MONO_TYPE_I8:
/* 64 bit results come back in the r0/r1 pair. */
1309 cinfo->ret.storage = RegTypeIRegPair;
1310 cinfo->ret.reg = ARMREG_R0;
1311 break;
1312 case MONO_TYPE_R4:
1313 case MONO_TYPE_R8:
1314 cinfo->ret.storage = RegTypeFP;
1316 if (t->type == MONO_TYPE_R4)
1317 cinfo->ret.size = 4;
1318 else
1319 cinfo->ret.size = 8;
/* Hard-float returns in s0/d0, soft ABIs return in core registers. */
1321 if (IS_HARD_FLOAT) {
1322 cinfo->ret.reg = ARM_VFP_F0;
1323 } else {
1324 cinfo->ret.reg = ARMREG_R0;
1326 break;
1327 case MONO_TYPE_GENERICINST:
1328 if (!mono_type_generic_inst_is_valuetype (t)) {
1329 cinfo->ret.storage = RegTypeGeneral;
1330 cinfo->ret.reg = ARMREG_R0;
1331 break;
1333 if (mini_is_gsharedvt_variable_type (t)) {
1334 cinfo->ret.storage = RegTypeStructByAddr;
1335 break;
1337 /* Fall through */
1338 case MONO_TYPE_VALUETYPE:
1339 case MONO_TYPE_TYPEDBYREF:
/* Homogeneous float aggregates come back in VFP registers on armhf. */
1340 if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
1341 cinfo->ret.storage = RegTypeHFA;
1342 cinfo->ret.reg = 0;
1343 cinfo->ret.nregs = nfields;
1344 cinfo->ret.esize = esize;
1345 } else {
1346 if (is_pinvoke) {
1347 int native_size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
1348 int max_size;
1350 #ifdef TARGET_WATCHOS
1351 max_size = 16;
1352 #else
1353 max_size = 4;
1354 #endif
1355 if (native_size <= max_size) {
1356 cinfo->ret.storage = RegTypeStructByVal;
1357 cinfo->ret.struct_size = native_size;
1358 cinfo->ret.nregs = ALIGN_TO (native_size, 4) / 4;
1359 } else {
1360 cinfo->ret.storage = RegTypeStructByAddr;
1362 } else {
1363 cinfo->ret.storage = RegTypeStructByAddr;
1366 break;
1367 case MONO_TYPE_VAR:
1368 case MONO_TYPE_MVAR:
1369 g_assert (mini_is_gsharedvt_type (t));
1370 cinfo->ret.storage = RegTypeStructByAddr;
1371 break;
1372 case MONO_TYPE_VOID:
1373 break;
1374 default:
1375 g_error ("Can't handle as return value 0x%x", sig->ret->type);
/* By-address vtype returns need a hidden pointer argument (vret arg). */
1378 vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr;
1380 pstart = 0;
1381 n = 0;
1383 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1384 * the first argument, allowing 'this' to be always passed in the first arg reg.
1385 * Also do this if the first argument is a reference type, since virtual calls
1386 * are sometimes made using calli without sig->hasthis set, like in the delegate
1387 * invoke wrappers.
1389 if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
1390 if (sig->hasthis) {
1391 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1392 } else {
1393 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1394 pstart = 1;
1396 n ++;
1397 cinfo->ret.reg = gr;
1398 gr ++;
1399 cinfo->vret_arg_index = 1;
1400 } else {
1401 /* this */
1402 if (sig->hasthis) {
1403 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1404 n ++;
1406 if (vtype_retaddr) {
1407 cinfo->ret.reg = gr;
1408 gr ++;
/* Now assign each formal parameter. */
1412 DEBUG(g_print("params: %d\n", sig->param_count));
1413 for (i = pstart; i < sig->param_count; ++i) {
1414 ArgInfo *ainfo = &cinfo->args [n];
1416 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1417 /* Prevent implicit arguments and sig_cookie from
1418 being passed in registers */
1419 gr = ARMREG_R3 + 1;
1420 fpr = ARM_VFP_F16;
1421 /* Emit the signature cookie just before the implicit arguments */
1422 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1424 DEBUG(g_print("param %d: ", i));
1425 if (sig->params [i]->byref) {
1426 DEBUG(g_print("byref\n"));
1427 add_general (&gr, &stack_size, ainfo, TRUE);
1428 n++;
1429 continue;
1431 t = mini_get_underlying_type (sig->params [i]);
1432 switch (t->type) {
1433 case MONO_TYPE_I1:
1434 cinfo->args [n].is_signed = 1;
/* fall through: shares size/assignment with the unsigned case */
1435 case MONO_TYPE_U1:
1436 cinfo->args [n].size = 1;
1437 add_general (&gr, &stack_size, ainfo, TRUE);
1438 break;
1439 case MONO_TYPE_I2:
1440 cinfo->args [n].is_signed = 1;
/* fall through: shares size/assignment with the unsigned case */
1441 case MONO_TYPE_U2:
1442 cinfo->args [n].size = 2;
1443 add_general (&gr, &stack_size, ainfo, TRUE);
1444 break;
1445 case MONO_TYPE_I4:
1446 case MONO_TYPE_U4:
1447 cinfo->args [n].size = 4;
1448 add_general (&gr, &stack_size, ainfo, TRUE);
1449 break;
1450 case MONO_TYPE_I:
1451 case MONO_TYPE_U:
1452 case MONO_TYPE_PTR:
1453 case MONO_TYPE_FNPTR:
1454 case MONO_TYPE_OBJECT:
1455 cinfo->args [n].size = sizeof (target_mgreg_t);
1456 add_general (&gr, &stack_size, ainfo, TRUE);
1457 break;
1458 case MONO_TYPE_GENERICINST:
1459 if (!mono_type_generic_inst_is_valuetype (t)) {
1460 cinfo->args [n].size = sizeof (target_mgreg_t);
1461 add_general (&gr, &stack_size, ainfo, TRUE);
1462 break;
1464 if (mini_is_gsharedvt_variable_type (t)) {
1465 /* gsharedvt arguments are passed by ref */
1466 g_assert (mini_is_gsharedvt_type (t));
1467 add_general (&gr, &stack_size, ainfo, TRUE);
1468 switch (ainfo->storage) {
1469 case RegTypeGeneral:
1470 ainfo->storage = RegTypeGSharedVtInReg;
1471 break;
1472 case RegTypeBase:
1473 ainfo->storage = RegTypeGSharedVtOnStack;
1474 break;
1475 default:
1476 g_assert_not_reached ();
1478 break;
1480 /* Fall through */
1481 case MONO_TYPE_TYPEDBYREF:
1482 case MONO_TYPE_VALUETYPE: {
1483 gint size;
1484 int align_size;
1485 int nwords, nfields, esize;
1486 guint32 align;
/* armhf: HFA parameters go entirely in VFP registers when they fit;
 * otherwise all remaining FP registers are considered used up. */
1488 if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
1489 if (fpr + nfields < ARM_VFP_F16) {
1490 ainfo->storage = RegTypeHFA;
1491 ainfo->reg = fpr;
1492 ainfo->nregs = nfields;
1493 ainfo->esize = esize;
1494 if (esize == 4)
1495 fpr += nfields;
1496 else
1497 fpr += nfields * 2;
1498 break;
1499 } else {
1500 fpr = ARM_VFP_F16;
1504 if (t->type == MONO_TYPE_TYPEDBYREF) {
1505 size = MONO_ABI_SIZEOF (MonoTypedRef);
1506 align = sizeof (target_mgreg_t);
1507 } else {
1508 MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]);
1509 if (is_pinvoke)
1510 size = mono_class_native_size (klass, &align);
1511 else
1512 size = mini_type_stack_size_full (t, &align, FALSE);
1514 DEBUG(g_print ("load %d bytes struct\n", size));
1516 #ifdef TARGET_WATCHOS
1517 /* Watchos pass large structures by ref */
1518 /* We only do this for pinvoke to make gsharedvt/dyncall simpler */
1519 if (sig->pinvoke && size > 16) {
1520 add_general (&gr, &stack_size, ainfo, TRUE);
1521 switch (ainfo->storage) {
1522 case RegTypeGeneral:
1523 ainfo->storage = RegTypeStructByAddr;
1524 break;
1525 case RegTypeBase:
1526 ainfo->storage = RegTypeStructByAddrOnStack;
1527 break;
1528 default:
1529 g_assert_not_reached ();
1530 break;
1532 break;
1534 #endif
/* Pass the leading words of the vtype in r0-r3 (size = regs used) and
 * the remaining vtsize words on the stack. */
1536 align_size = size;
1537 nwords = 0;
1538 align_size += (sizeof (target_mgreg_t) - 1);
1539 align_size &= ~(sizeof (target_mgreg_t) - 1);
1540 nwords = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t);
1541 ainfo->storage = RegTypeStructByVal;
1542 ainfo->struct_size = size;
1543 ainfo->align = align;
1545 if (eabi_supported) {
1546 if (align >= 8 && (gr & 1))
1547 gr ++;
1549 if (gr > ARMREG_R3) {
1550 ainfo->size = 0;
1551 ainfo->vtsize = nwords;
1552 } else {
1553 int rest = ARMREG_R3 - gr + 1;
1554 int n_in_regs = rest >= nwords? nwords: rest;
1556 ainfo->size = n_in_regs;
1557 ainfo->vtsize = nwords - n_in_regs;
1558 ainfo->reg = gr;
1559 gr += n_in_regs;
1560 nwords -= n_in_regs;
1562 stack_size = ALIGN_TO (stack_size, align);
1564 ainfo->offset = stack_size;
1565 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1566 stack_size += nwords * sizeof (target_mgreg_t);
1567 break;
1569 case MONO_TYPE_U8:
1570 case MONO_TYPE_I8:
1571 ainfo->size = 8;
1572 add_general (&gr, &stack_size, ainfo, FALSE);
1573 break;
1574 case MONO_TYPE_R4:
1575 ainfo->size = 4;
/* Soft ABIs pass floats in core registers instead of VFP ones. */
1577 if (IS_HARD_FLOAT)
1578 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1579 else
1580 add_general (&gr, &stack_size, ainfo, TRUE);
1581 break;
1582 case MONO_TYPE_R8:
1583 ainfo->size = 8;
1585 if (IS_HARD_FLOAT)
1586 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1587 else
1588 add_general (&gr, &stack_size, ainfo, FALSE);
1589 break;
1590 case MONO_TYPE_VAR:
1591 case MONO_TYPE_MVAR:
1592 /* gsharedvt arguments are passed by ref */
1593 g_assert (mini_is_gsharedvt_type (t));
1594 add_general (&gr, &stack_size, ainfo, TRUE);
1595 switch (ainfo->storage) {
1596 case RegTypeGeneral:
1597 ainfo->storage = RegTypeGSharedVtInReg;
1598 break;
1599 case RegTypeBase:
1600 ainfo->storage = RegTypeGSharedVtOnStack;
1601 break;
1602 default:
1603 g_assert_not_reached ();
1605 break;
1606 default:
1607 g_error ("Can't handle 0x%x", sig->params [i]->type);
1609 n ++;
1612 /* Handle the case where there are no implicit arguments */
1613 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1614 /* Prevent implicit arguments and sig_cookie from
1615 being passed in registers */
1616 gr = ARMREG_R3 + 1;
1617 fpr = ARM_VFP_F16;
1618 /* Emit the signature cookie just before the implicit arguments */
1619 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Round the outgoing area up to the required frame alignment. */
1622 DEBUG (g_print (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1623 stack_size = ALIGN_TO (stack_size, MONO_ARCH_FRAME_ALIGNMENT);
1625 cinfo->stack_usage = stack_size;
1626 return cinfo;
1630 * We need to create a temporary value if the argument is not stored in
1631 * a linear memory range in the ccontext (this normally happens for
1632 * value types if they are passed both by stack and regs).
1634 static int
1635 arg_need_temp (ArgInfo *ainfo)
1637 if (ainfo->storage == RegTypeStructByVal && ainfo->vtsize)
1638 return ainfo->struct_size;
1639 return 0;
/*
 * arg_get_storage:
 * Return the address inside CCONTEXT where the value described by AINFO
 * lives: a slot in the integer registers, the FP registers, or the stack
 * area. Only valid for arguments stored in one linear range (see
 * arg_need_temp ()).
 */
1642 static gpointer
1643 arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
1645 switch (ainfo->storage) {
1646 case RegTypeIRegPair:
1647 case RegTypeGeneral:
1648 case RegTypeStructByVal:
1649 return &ccontext->gregs [ainfo->reg];
1650 case RegTypeHFA:
1651 case RegTypeFP:
1652 return &ccontext->fregs [ainfo->reg];
1653 case RegTypeBase:
1654 return ccontext->stack + ainfo->offset;
1655 default:
1656 g_error ("Arg storage type not yet supported");
1660 static void
1661 arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
1663 int reg_size = ainfo->size * sizeof (host_mgreg_t);
1664 g_assert (arg_need_temp (ainfo));
1665 memcpy (dest, &ccontext->gregs [ainfo->reg], reg_size);
1666 memcpy ((host_mgreg_t*)dest + ainfo->size, ccontext->stack + ainfo->offset, ainfo->struct_size - reg_size);
1669 static void
1670 arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
1672 int reg_size = ainfo->size * sizeof (host_mgreg_t);
1673 g_assert (arg_need_temp (ainfo));
1674 memcpy (&ccontext->gregs [ainfo->reg], src, reg_size);
1675 memcpy (ccontext->stack + ainfo->offset, (host_mgreg_t*)src + ainfo->size, ainfo->struct_size - reg_size);
1678 /* Set arguments in the ccontext (for i2n entry) */
/*
 * Marshal the interpreter frame's arguments into a freshly zeroed
 * CallContext so native code can be invoked with them. The stack area is
 * heap allocated (freed by the caller of the native call sequence).
 */
1679 void
1680 mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1682 const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
1683 CallInfo *cinfo = get_call_info (NULL, sig);
1684 gpointer storage;
1685 ArgInfo *ainfo;
1687 memset (ccontext, 0, sizeof (CallContext));
1689 ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
1690 if (ccontext->stack_size)
1691 ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);
/* For byaddr vtype returns, pass the interp-provided return buffer
 * address in the hidden vret argument register. */
1693 if (sig->ret->type != MONO_TYPE_VOID) {
1694 ainfo = &cinfo->ret;
1695 if (ainfo->storage == RegTypeStructByAddr) {
1696 storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
1697 ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)(gsize)storage;
1701 g_assert (!sig->hasthis);
1703 for (int i = 0; i < sig->param_count; i++) {
1704 ainfo = &cinfo->args [i];
1705 int temp_size = arg_need_temp (ainfo);
/* Split args are built in a temp and then scattered into ccontext. */
1707 if (temp_size)
1708 storage = alloca (temp_size); // FIXME? alloca in a loop
1709 else
1710 storage = arg_get_storage (ccontext, ainfo);
1712 interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
1713 if (temp_size)
1714 arg_set_val (ccontext, ainfo, storage);
1717 g_free (cinfo);
1720 /* Set return value in the ccontext (for n2i return) */
/*
 * Store the interpreter frame's return value into CCONTEXT. For byaddr
 * vtype returns nothing is done here: the value was already written
 * through the buffer whose address was passed in the vret argument.
 */
1721 void
1722 mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1724 const MonoEECallbacks *interp_cb;
1725 CallInfo *cinfo;
1726 gpointer storage;
1727 ArgInfo *ainfo;
1729 if (sig->ret->type == MONO_TYPE_VOID)
1730 return;
1732 interp_cb = mini_get_interp_callbacks ();
1733 cinfo = get_call_info (NULL, sig);
1734 ainfo = &cinfo->ret;
1736 if (ainfo->storage != RegTypeStructByAddr) {
1737 g_assert (!arg_need_temp (ainfo));
/* storage points into ccontext; the context is cleared first so only
 * the return value slots end up populated. */
1738 storage = arg_get_storage (ccontext, ainfo);
1739 memset (ccontext, 0, sizeof (CallContext)); // FIXME
1740 interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
1743 g_free (cinfo);
1746 /* Gets the arguments from ccontext (for n2i entry) */
/*
 * Unmarshal the native arguments found in CCONTEXT into the interpreter
 * frame. Split register/stack args are first gathered into a linear
 * temporary buffer.
 */
1747 void
1748 mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1750 const MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
1751 CallInfo *cinfo = get_call_info (NULL, sig);
1752 gpointer storage;
1753 ArgInfo *ainfo;
/* Hand the caller-provided return buffer address to the interp. */
1755 if (sig->ret->type != MONO_TYPE_VOID) {
1756 ainfo = &cinfo->ret;
1757 if (ainfo->storage == RegTypeStructByAddr) {
1758 storage = (gpointer)(gsize)ccontext->gregs [cinfo->ret.reg];
1759 interp_cb->frame_arg_set_storage ((MonoInterpFrameHandle)frame, sig, -1, storage);
1763 for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
1764 ainfo = &cinfo->args [i];
1765 int temp_size = arg_need_temp (ainfo);
1767 if (temp_size) {
1768 storage = alloca (temp_size); // FIXME? alloca in a loop
1769 arg_get_val (ccontext, ainfo, storage);
1770 } else {
1771 storage = arg_get_storage (ccontext, ainfo);
1773 interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
1776 g_free (cinfo);
1779 /* Gets the return value from ccontext (for i2n exit) */
/*
 * Propagate the native call's return value from CCONTEXT back into the
 * interpreter frame. Byaddr vtype returns need no copy: the interp's own
 * buffer was written directly by the native callee.
 */
1780 void
1781 mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
1783 const MonoEECallbacks *interp_cb;
1784 CallInfo *cinfo;
1785 ArgInfo *ainfo;
1786 gpointer storage;
1788 if (sig->ret->type == MONO_TYPE_VOID)
1789 return;
1791 interp_cb = mini_get_interp_callbacks ();
1792 cinfo = get_call_info (NULL, sig);
1793 ainfo = &cinfo->ret;
1795 if (ainfo->storage != RegTypeStructByAddr) {
1796 g_assert (!arg_need_temp (ainfo));
1797 storage = arg_get_storage (ccontext, ainfo);
1798 interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
1801 g_free (cinfo);
1804 #ifndef DISABLE_JIT
/*
 * mono_arch_tailcall_supported:
 * Decide whether a call from CALLER_SIG to CALLEE_SIG can be compiled as
 * a tailcall on arm: the callee must not use more argument stack than the
 * caller (the excess would be left behind after the jump), must return
 * the same way, and both frames must fit the 12 bit immediate offsets
 * (minus 4 for TAILCALL_REG/MEMBASE) used when moving the parameters.
 */
1806 gboolean
1807 mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
1809 g_assert (caller_sig);
1810 g_assert (callee_sig);
1812 CallInfo *caller_info = get_call_info (NULL, caller_sig);
1813 CallInfo *callee_info = get_call_info (NULL, callee_sig);
1816 * Tailcalls with more callee stack usage than the caller cannot be supported, since
1817 * the extra stack space would be left on the stack after the tailcall.
1819 gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
1820 && IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);
1822 // FIXME The limit here is that moving the parameters requires addressing the parameters
1823 // with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
1824 res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (4096 - 4));
1825 res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (4096 - 4));
1827 g_free (caller_info);
1828 g_free (callee_info);
1830 return res;
/*
 * debug_omit_fp:
 * Debug hook for bisecting frame-pointer-omission problems; the counting
 * variant is compiled out, so FP omission is currently always permitted.
 */
1833 static gboolean
1834 debug_omit_fp (void)
1836 #if 0
1837 return mono_debug_count ();
1838 #else
1839 return TRUE;
1840 #endif
1844 * mono_arch_compute_omit_fp:
1845 * Determine whether the frame pointer can be eliminated.
/* Result is cached in cfg->arch.omit_fp / omit_fp_computed. */
1847 static void
1848 mono_arch_compute_omit_fp (MonoCompile *cfg)
1850 MonoMethodSignature *sig;
1851 MonoMethodHeader *header;
1852 int i, locals_size;
1853 CallInfo *cinfo;
1855 if (cfg->arch.omit_fp_computed)
1856 return;
1858 header = cfg->header;
1860 sig = mono_method_signature_internal (cfg->method);
1862 if (!cfg->arch.cinfo)
1863 cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
1864 cinfo = cfg->arch.cinfo;
1867 * FIXME: Remove some of the restrictions.
/* Start optimistic, then disable omission for every feature that needs
 * a stable frame pointer (LMF, alloca, EH clauses, varargs, tracing). */
1869 cfg->arch.omit_fp = TRUE;
1870 cfg->arch.omit_fp_computed = TRUE;
1872 if (cfg->disable_omit_fp)
1873 cfg->arch.omit_fp = FALSE;
1874 if (!debug_omit_fp ())
1875 cfg->arch.omit_fp = FALSE;
1877 if (cfg->method->save_lmf)
1878 cfg->arch.omit_fp = FALSE;
1880 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1881 cfg->arch.omit_fp = FALSE;
1882 if (header->num_clauses)
1883 cfg->arch.omit_fp = FALSE;
1884 if (cfg->param_area)
1885 cfg->arch.omit_fp = FALSE;
1886 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1887 cfg->arch.omit_fp = FALSE;
1888 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)))
1889 cfg->arch.omit_fp = FALSE;
1890 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1891 ArgInfo *ainfo = &cinfo->args [i];
1893 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1895 * The stack offset can only be determined when the frame
1896 * size is known.
1898 cfg->arch.omit_fp = FALSE;
/* NOTE(review): locals_size is computed below but not used in the
 * visible code — confirm against the full file whether it is dead. */
1902 locals_size = 0;
1903 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1904 MonoInst *ins = cfg->varinfo [i];
1905 int ialign;
1907 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1912 * Set var information according to the calling convention. arm version.
1913 * The locals var stuff should most likely be split in another method.
/* Lays out the whole stack frame: return value slot, sequence point and
 * scratch locals, user locals, 'this' and the formal parameters; sets
 * cfg->stack_offset to the final (8-byte aligned) frame size. */
1915 void
1916 mono_arch_allocate_vars (MonoCompile *cfg)
1918 MonoMethodSignature *sig;
1919 MonoMethodHeader *header;
1920 MonoInst *ins;
1921 MonoType *sig_ret;
1922 int i, offset, size, align, curinst;
1923 CallInfo *cinfo;
1924 ArgInfo *ainfo;
1925 guint32 ualign;
1927 sig = mono_method_signature_internal (cfg->method);
1929 if (!cfg->arch.cinfo)
1930 cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
1931 cinfo = cfg->arch.cinfo;
1932 sig_ret = mini_get_underlying_type (sig->ret);
1934 mono_arch_compute_omit_fp (cfg);
/* Address locals off SP when FP was omitted, off FP otherwise. */
1936 if (cfg->arch.omit_fp)
1937 cfg->frame_reg = ARMREG_SP;
1938 else
1939 cfg->frame_reg = ARMREG_FP;
/* On arm the stack grows down but local offsets grow up from the SP. */
1941 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1943 /* allow room for the vararg method args: void* and long/double */
1944 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1945 cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8);
1947 header = cfg->header;
1949 /* See mono_arch_get_global_int_regs () */
1950 if (cfg->flags & MONO_CFG_HAS_CALLS)
1951 cfg->uses_rgctx_reg = TRUE;
1953 if (cfg->frame_reg != ARMREG_SP)
1954 cfg->used_int_regs |= 1 << cfg->frame_reg;
1956 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1957 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1958 cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG);
1960 offset = 0;
1961 curinst = 0;
/* Scalar returns live in r0 (and r1) rather than a stack slot. */
1962 if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) {
1963 if (sig_ret->type != MONO_TYPE_VOID) {
1964 cfg->ret->opcode = OP_REGVAR;
1965 cfg->ret->inst_c0 = ARMREG_R0;
1968 /* local vars are at a positive offset from the stack pointer */
1970 * also note that if the function uses alloca, we use FP
1971 * to point at the local variables.
1973 offset = 0; /* linkage area */
1974 /* align the offset to 16 bytes: not sure this is needed here */
1975 //offset += 8 - 1;
1976 //offset &= ~(8 - 1);
1978 /* add parameter area size for called functions */
1979 offset += cfg->param_area;
1980 offset += 8 - 1;
1981 offset &= ~(8 - 1);
1982 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1983 offset += 8;
1985 /* allow room to save the return value */
1986 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1987 offset += 8;
1989 switch (cinfo->ret.storage) {
1990 case RegTypeStructByVal:
1991 case RegTypeHFA:
1992 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1993 offset = ALIGN_TO (offset, 8);
1994 cfg->ret->opcode = OP_REGOFFSET;
1995 cfg->ret->inst_basereg = cfg->frame_reg;
1996 cfg->ret->inst_offset = offset;
1997 if (cinfo->ret.storage == RegTypeStructByVal)
1998 offset += cinfo->ret.nregs * sizeof (target_mgreg_t);
1999 else
2000 offset += 32;
2001 break;
2002 case RegTypeStructByAddr:
/* The hidden vret address argument gets its own frame slot. */
2003 ins = cfg->vret_addr;
2004 offset += sizeof (target_mgreg_t) - 1;
2005 offset &= ~(sizeof (target_mgreg_t) - 1);
2006 ins->inst_offset = offset;
2007 ins->opcode = OP_REGOFFSET;
2008 ins->inst_basereg = cfg->frame_reg;
2009 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2010 g_print ("vret_addr =");
2011 mono_print_ins (cfg->vret_addr);
2013 offset += sizeof (target_mgreg_t);
2014 break;
2015 default:
2016 break;
2019 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
2020 if (cfg->arch.seq_point_info_var) {
2021 MonoInst *ins;
2023 ins = cfg->arch.seq_point_info_var;
2025 size = 4;
2026 align = 4;
2027 offset += align - 1;
2028 offset &= ~(align - 1);
2029 ins->opcode = OP_REGOFFSET;
2030 ins->inst_basereg = cfg->frame_reg;
2031 ins->inst_offset = offset;
2032 offset += size;
2034 if (cfg->arch.ss_trigger_page_var) {
2035 MonoInst *ins;
2037 ins = cfg->arch.ss_trigger_page_var;
2038 size = 4;
2039 align = 4;
2040 offset += align - 1;
2041 offset &= ~(align - 1);
2042 ins->opcode = OP_REGOFFSET;
2043 ins->inst_basereg = cfg->frame_reg;
2044 ins->inst_offset = offset;
2045 offset += size;
2048 if (cfg->arch.seq_point_ss_method_var) {
2049 MonoInst *ins;
2051 ins = cfg->arch.seq_point_ss_method_var;
2052 size = 4;
2053 align = 4;
2054 offset += align - 1;
2055 offset &= ~(align - 1);
2056 ins->opcode = OP_REGOFFSET;
2057 ins->inst_basereg = cfg->frame_reg;
2058 ins->inst_offset = offset;
2059 offset += size;
2061 if (cfg->arch.seq_point_bp_method_var) {
2062 MonoInst *ins;
2064 ins = cfg->arch.seq_point_bp_method_var;
2065 size = 4;
2066 align = 4;
2067 offset += align - 1;
2068 offset &= ~(align - 1);
2069 ins->opcode = OP_REGOFFSET;
2070 ins->inst_basereg = cfg->frame_reg;
2071 ins->inst_offset = offset;
2072 offset += size;
2075 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
2076 /* Allocate a temporary used by the atomic ops */
2077 size = 4;
2078 align = 4;
2080 /* Allocate a local slot to hold the sig cookie address */
2081 offset += align - 1;
2082 offset &= ~(align - 1);
2083 cfg->arch.atomic_tmp_offset = offset;
2084 offset += size;
2085 } else {
2086 cfg->arch.atomic_tmp_offset = -1;
/* Now the user-visible locals. */
2089 cfg->locals_min_stack_offset = offset;
2091 curinst = cfg->locals_start;
2092 for (i = curinst; i < cfg->num_varinfo; ++i) {
2093 MonoType *t;
2095 ins = cfg->varinfo [i];
2096 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2097 continue;
2099 t = ins->inst_vtype;
2100 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
2101 continue;
2103 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2104 * pinvoke wrappers when they call functions returning structure */
2105 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2106 size = mono_class_native_size (mono_class_from_mono_type_internal (t), &ualign);
2107 align = ualign;
2109 else
2110 size = mono_type_size (t, &align);
2112 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2113 * since it loads/stores misaligned words, which don't do the right thing.
2115 if (align < 4 && size >= 4)
2116 align = 4;
2117 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2118 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2119 offset += align - 1;
2120 offset &= ~(align - 1);
2121 ins->opcode = OP_REGOFFSET;
2122 ins->inst_offset = offset;
2123 ins->inst_basereg = cfg->frame_reg;
2124 offset += size;
2125 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2128 cfg->locals_max_stack_offset = offset;
/* Finally 'this' and the formal parameters, which the prolog saves to
 * these frame slots. */
2130 curinst = 0;
2131 if (sig->hasthis) {
2132 ins = cfg->args [curinst];
2133 if (ins->opcode != OP_REGVAR) {
2134 ins->opcode = OP_REGOFFSET;
2135 ins->inst_basereg = cfg->frame_reg;
2136 offset += sizeof (target_mgreg_t) - 1;
2137 offset &= ~(sizeof (target_mgreg_t) - 1);
2138 ins->inst_offset = offset;
2139 offset += sizeof (target_mgreg_t);
2141 curinst++;
2144 if (sig->call_convention == MONO_CALL_VARARG) {
2145 size = 4;
2146 align = 4;
2148 /* Allocate a local slot to hold the sig cookie address */
2149 offset += align - 1;
2150 offset &= ~(align - 1);
2151 cfg->sig_cookie = offset;
2152 offset += size;
2155 for (i = 0; i < sig->param_count; ++i) {
2156 ainfo = cinfo->args + i;
2158 ins = cfg->args [curinst];
2160 switch (ainfo->storage) {
2161 case RegTypeHFA:
2162 offset = ALIGN_TO (offset, 8);
2163 ins->opcode = OP_REGOFFSET;
2164 ins->inst_basereg = cfg->frame_reg;
2165 /* These arguments are saved to the stack in the prolog */
2166 ins->inst_offset = offset;
2167 if (cfg->verbose_level >= 2)
2168 g_print ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
2169 // FIXME:
2170 offset += 32;
2171 break;
2172 default:
2173 break;
2176 if (ins->opcode != OP_REGVAR) {
2177 ins->opcode = OP_REGOFFSET;
2178 ins->inst_basereg = cfg->frame_reg;
2179 size = mini_type_stack_size_full (sig->params [i], &ualign, sig->pinvoke);
2180 align = ualign;
2181 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2182 * since it loads/stores misaligned words, which don't do the right thing.
2184 if (align < 4 && size >= 4)
2185 align = 4;
2186 /* The code in the prolog () stores words when storing vtypes received in a register */
2187 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2188 align = 4;
2189 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2190 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2191 offset += align - 1;
2192 offset &= ~(align - 1);
2193 ins->inst_offset = offset;
2194 offset += size;
2196 curinst++;
2199 /* align the offset to 8 bytes */
2200 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2201 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2202 offset += 8 - 1;
2203 offset &= ~(8 - 1);
2205 /* change sign? */
2206 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create arch-specific compile-time variables: VFP scratch slots for
 * hard-float, the vret address argument for byaddr vtype returns, and the
 * bookkeeping variables used by sequence points / soft breakpoints.
 */
2209 void
2210 mono_arch_create_vars (MonoCompile *cfg)
2212 MonoMethodSignature *sig;
2213 CallInfo *cinfo;
2214 int i;
2216 sig = mono_method_signature_internal (cfg->method);
2218 if (!cfg->arch.cinfo)
2219 cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
2220 cinfo = cfg->arch.cinfo;
2222 if (IS_HARD_FLOAT) {
/* Two double-sized scratch slots used for moving values between core
 * and VFP registers. */
2223 for (i = 0; i < 2; i++) {
2224 MonoInst *inst = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL);
2225 inst->flags |= MONO_INST_VOLATILE;
2227 cfg->arch.vfp_scratch_slots [i] = inst;
2231 if (cinfo->ret.storage == RegTypeStructByVal)
2232 cfg->ret_var_is_local = TRUE;
2234 if (cinfo->ret.storage == RegTypeStructByAddr) {
2235 cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
2236 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2237 g_print ("vret_addr = ");
2238 mono_print_ins (cfg->vret_addr);
2242 if (cfg->gen_sdb_seq_points) {
2243 if (cfg->compile_aot) {
2244 MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2245 ins->flags |= MONO_INST_VOLATILE;
2246 cfg->arch.seq_point_info_var = ins;
2248 if (!cfg->soft_breakpoints) {
2249 /* Allocate a separate variable for this to save 1 load per seq point */
2250 ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2251 ins->flags |= MONO_INST_VOLATILE;
2252 cfg->arch.ss_trigger_page_var = ins;
2255 if (cfg->soft_breakpoints) {
2256 MonoInst *ins;
2258 ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2259 ins->flags |= MONO_INST_VOLATILE;
2260 cfg->arch.seq_point_ss_method_var = ins;
2262 ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2263 ins->flags |= MONO_INST_VOLATILE;
2264 cfg->arch.seq_point_bp_method_var = ins;
/*
 * emit_sig_cookie:
 *
 *   Emit the IR that stores the vararg signature cookie on the stack at the
 * offset computed by get_call_info (). Not supported for tailcalls.
 */
2269 static void
2270 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2272 MonoMethodSignature *tmp_sig;
2273 int sig_reg;
2275 if (MONO_IS_TAILCALL_OPCODE (call))
2276 NOT_IMPLEMENTED;
2278 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2281 * mono_ArgIterator_Setup assumes the signature cookie is
2282 * passed first and all the arguments which were before it are
2283 * passed on the stack after the signature. So compensate by
2284 * passing a different signature.
/* Build a trimmed copy of the signature containing only the post-sentinel params. */
2286 tmp_sig = mono_metadata_signature_dup (call->signature);
2287 tmp_sig->param_count -= call->signature->sentinelpos;
2288 tmp_sig->sentinelpos = 0;
2289 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2291 sig_reg = mono_alloc_ireg (cfg);
2292 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
/* Store the cookie at its assigned stack slot relative to SP. */
2294 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
2297 #ifdef ENABLE_LLVM
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate the ARM CallInfo computed by get_call_info () into the
 * LLVMCallInfo representation used by the LLVM backend. When a storage kind
 * has no LLVM mapping, sets cfg->disable_llvm with an explanatory message so
 * the method falls back to the non-LLVM JIT.
 */
2298 LLVMCallInfo*
2299 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2301 int i, n;
2302 CallInfo *cinfo;
2303 ArgInfo *ainfo;
2304 LLVMCallInfo *linfo;
2306 n = sig->param_count + sig->hasthis;
2308 cinfo = get_call_info (cfg->mempool, sig);
2310 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2313 * LLVM always uses the native ABI while we use our own ABI, the
2314 * only difference is the handling of vtypes:
2315 * - we only pass/receive them in registers in some cases, and only
2316 * in 1 or 2 integer registers.
/* Map the return value convention first. */
2318 switch (cinfo->ret.storage) {
2319 case RegTypeGeneral:
2320 case RegTypeNone:
2321 case RegTypeFP:
2322 case RegTypeIRegPair:
2323 break;
2324 case RegTypeStructByAddr:
2325 if (sig->pinvoke) {
2326 linfo->ret.storage = LLVMArgVtypeByRef;
2327 } else {
2328 /* Vtype returned using a hidden argument */
2329 linfo->ret.storage = LLVMArgVtypeRetAddr;
2330 linfo->vret_arg_index = cinfo->vret_arg_index;
2332 break;
2333 #if TARGET_WATCHOS
2334 case RegTypeStructByVal:
2335 /* LLVM models this by returning an int array */
2336 linfo->ret.storage = LLVMArgAsIArgs;
2337 linfo->ret.nslots = cinfo->ret.nregs;
2338 break;
2339 #endif
2340 case RegTypeHFA:
2341 linfo->ret.storage = LLVMArgFpStruct;
2342 linfo->ret.nslots = cinfo->ret.nregs;
2343 linfo->ret.esize = cinfo->ret.esize;
2344 break;
2345 default:
/* Unknown return convention: disable LLVM for this method. */
2346 cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage);
2347 cfg->disable_llvm = TRUE;
2348 return linfo;
/* Map each argument's storage to the corresponding LLVM argument kind. */
2351 for (i = 0; i < n; ++i) {
2352 LLVMArgInfo *lainfo = &linfo->args [i];
2353 ainfo = cinfo->args + i;
2355 lainfo->storage = LLVMArgNone;
2357 switch (ainfo->storage) {
2358 case RegTypeGeneral:
2359 case RegTypeIRegPair:
2360 case RegTypeBase:
2361 case RegTypeBaseGen:
2362 case RegTypeFP:
2363 lainfo->storage = LLVMArgNormal;
2364 break;
2365 case RegTypeStructByVal: {
/* Vtype passed by value: model it as an array of 4- or 8-byte int slots. */
2366 lainfo->storage = LLVMArgAsIArgs;
2367 int slotsize = eabi_supported && ainfo->align == 8 ? 8 : 4;
2368 lainfo->nslots = ALIGN_TO (ainfo->struct_size, slotsize) / slotsize;
2369 lainfo->esize = slotsize;
2370 break;
2372 case RegTypeStructByAddr:
2373 case RegTypeStructByAddrOnStack:
2374 lainfo->storage = LLVMArgVtypeByRef;
2375 break;
2376 case RegTypeHFA: {
2377 int j;
2379 lainfo->storage = LLVMArgAsFpArgs;
2380 lainfo->nslots = ainfo->nregs;
2381 lainfo->esize = ainfo->esize;
2382 for (j = 0; j < ainfo->nregs; ++j)
2383 lainfo->pair_storage [j] = LLVMArgInFPReg;
2384 break;
2386 default:
2387 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2388 cfg->disable_llvm = TRUE;
2389 break;
2393 return linfo;
2395 #endif
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR instructions that marshal the arguments of CALL into the
 * registers/stack slots dictated by the ARM calling convention computed by
 * get_call_info (). Also sets up the hidden vtype-return argument and the
 * vararg signature cookie where required, and records cinfo/stack usage on
 * the call instruction for the later codegen passes.
 */
2397 void
2398 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2400 MonoInst *in, *ins;
2401 MonoMethodSignature *sig;
2402 int i, n;
2403 CallInfo *cinfo;
2405 sig = call->signature;
2406 n = sig->param_count + sig->hasthis;
2408 cinfo = get_call_info (cfg->mempool, sig);
/* First handle the return value convention. */
2410 switch (cinfo->ret.storage) {
2411 case RegTypeStructByVal:
2412 case RegTypeHFA:
2413 if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
2414 /* The JIT will transform this into a normal call */
2415 call->vret_in_reg = TRUE;
2416 break;
2418 if (MONO_IS_TAILCALL_OPCODE (call))
2419 break;
2421 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2422 * the location pointed to by it after call in emit_move_return_value ().
2424 if (!cfg->arch.vret_addr_loc) {
2425 cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2426 /* Prevent it from being register allocated or optimized away */
2427 cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
2430 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
2431 break;
2432 case RegTypeStructByAddr: {
/* Pass the address of the return area as a hidden register argument. */
2433 MonoInst *vtarg;
2434 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2435 vtarg->sreg1 = call->vret_var->dreg;
2436 vtarg->dreg = mono_alloc_preg (cfg);
2437 MONO_ADD_INS (cfg->cbb, vtarg);
2439 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2440 break;
2442 default:
2443 break;
/* Now marshal each argument (including the implicit 'this'). */
2446 for (i = 0; i < n; ++i) {
2447 ArgInfo *ainfo = cinfo->args + i;
2448 MonoType *t;
2450 if (i >= sig->hasthis)
2451 t = sig->params [i - sig->hasthis];
2452 else
2453 t = mono_get_int_type ();
2454 t = mini_get_underlying_type (t);
2456 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2457 /* Emit the signature cookie just before the implicit arguments */
2458 emit_sig_cookie (cfg, call, cinfo);
2461 in = call->args [i];
2463 switch (ainfo->storage) {
2464 case RegTypeGeneral:
2465 case RegTypeIRegPair:
/* 64-bit integers go in a consecutive register pair (low word first). */
2466 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2467 MONO_INST_NEW (cfg, ins, OP_MOVE);
2468 ins->dreg = mono_alloc_ireg (cfg);
2469 ins->sreg1 = MONO_LVREG_LS (in->dreg);
2470 MONO_ADD_INS (cfg->cbb, ins);
2471 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2473 MONO_INST_NEW (cfg, ins, OP_MOVE);
2474 ins->dreg = mono_alloc_ireg (cfg);
2475 ins->sreg1 = MONO_LVREG_MS (in->dreg);
2476 MONO_ADD_INS (cfg->cbb, ins);
2477 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2478 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2479 if (ainfo->size == 4) {
2480 if (IS_SOFT_FLOAT) {
2481 /* mono_emit_call_args () have already done the r8->r4 conversion */
2482 /* The converted value is in an int vreg */
2483 MONO_INST_NEW (cfg, ins, OP_MOVE);
2484 ins->dreg = mono_alloc_ireg (cfg);
2485 ins->sreg1 = in->dreg;
2486 MONO_ADD_INS (cfg->cbb, ins);
2487 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2488 } else {
/* VFP with soft ABI: bounce the float through the param area to get it into an int reg. */
2489 int creg;
2491 cfg->param_area = MAX (cfg->param_area, 8);
2492 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2493 creg = mono_alloc_ireg (cfg);
2494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2495 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2497 } else {
2498 if (IS_SOFT_FLOAT) {
/* Soft float double: split the 64-bit value into two int registers. */
2499 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2500 ins->dreg = mono_alloc_ireg (cfg);
2501 ins->sreg1 = in->dreg;
2502 MONO_ADD_INS (cfg->cbb, ins);
2503 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2505 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2506 ins->dreg = mono_alloc_ireg (cfg);
2507 ins->sreg1 = in->dreg;
2508 MONO_ADD_INS (cfg->cbb, ins);
2509 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2510 } else {
/* VFP with soft ABI: bounce the double through the param area into two int regs. */
2511 int creg;
2513 cfg->param_area = MAX (cfg->param_area, 8);
2514 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2515 creg = mono_alloc_ireg (cfg);
2516 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2517 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2518 creg = mono_alloc_ireg (cfg);
2519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2520 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2523 cfg->flags |= MONO_CFG_HAS_FPOUT;
2524 } else {
/* Plain 32-bit value in a single register. */
2525 MONO_INST_NEW (cfg, ins, OP_MOVE);
2526 ins->dreg = mono_alloc_ireg (cfg);
2527 ins->sreg1 = in->dreg;
2528 MONO_ADD_INS (cfg->cbb, ins);
2530 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2532 break;
2533 case RegTypeStructByVal:
2534 case RegTypeGSharedVtInReg:
2535 case RegTypeGSharedVtOnStack:
2536 case RegTypeHFA:
2537 case RegTypeStructByAddr:
2538 case RegTypeStructByAddrOnStack:
/* All vtype-like cases are deferred to mono_arch_emit_outarg_vt () via OP_OUTARG_VT. */
2539 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2540 ins->opcode = OP_OUTARG_VT;
2541 ins->sreg1 = in->dreg;
2542 ins->klass = in->klass;
2543 ins->inst_p0 = call;
2544 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2545 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2546 mono_call_inst_add_outarg_vt (cfg, call, ins);
2547 MONO_ADD_INS (cfg->cbb, ins);
2548 break;
2549 case RegTypeBase:
/* Argument passed entirely on the stack at ainfo->offset from SP. */
2550 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2551 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2552 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2553 if (t->type == MONO_TYPE_R8) {
2554 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2555 } else {
2556 if (IS_SOFT_FLOAT)
2557 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2558 else
2559 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2561 } else {
2562 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2564 break;
2565 case RegTypeBaseGen:
/* 64-bit value split across the last register (r3) and the first stack slot. */
2566 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2567 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? MONO_LVREG_LS (in->dreg) : MONO_LVREG_MS (in->dreg));
2568 MONO_INST_NEW (cfg, ins, OP_MOVE);
2569 ins->dreg = mono_alloc_ireg (cfg);
2570 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? MONO_LVREG_MS (in->dreg) : MONO_LVREG_LS (in->dreg);
2571 MONO_ADD_INS (cfg->cbb, ins);
2572 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2573 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2574 int creg;
2576 /* This should work for soft-float as well */
2578 cfg->param_area = MAX (cfg->param_area, 8);
2579 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2580 creg = mono_alloc_ireg (cfg);
2581 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2582 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2583 creg = mono_alloc_ireg (cfg);
2584 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2585 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2586 cfg->flags |= MONO_CFG_HAS_FPOUT;
2587 } else {
2588 g_assert_not_reached ();
2590 break;
2591 case RegTypeFP: {
/* Hard-float ABI: floats/doubles go in VFP registers. */
2592 int fdreg = mono_alloc_freg (cfg);
2594 if (ainfo->size == 8) {
2595 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2596 ins->sreg1 = in->dreg;
2597 ins->dreg = fdreg;
2598 MONO_ADD_INS (cfg->cbb, ins);
2600 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2601 } else {
2602 FloatArgData *fad;
2605 * Mono's register allocator doesn't speak single-precision registers that
2606 * overlap double-precision registers (i.e. armhf). So we have to work around
2607 * the register allocator and load the value from memory manually.
2609 * So we create a variable for the float argument and an instruction to store
2610 * the argument into the variable. We then store the list of these arguments
2611 * in call->float_args. This list is then used by emit_float_args later to
2612 * pass the arguments in the various call opcodes.
2614 * This is not very nice, and we should really try to fix the allocator.
2617 MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
2619 /* Make sure the instruction isn't seen as pointless and removed.
2621 float_arg->flags |= MONO_INST_VOLATILE;
2623 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2625 /* We use the dreg to look up the instruction later. The hreg is used to
2626 * emit the instruction that loads the value into the FP reg.
2628 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2629 fad->vreg = float_arg->dreg;
2630 fad->hreg = ainfo->reg;
2632 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2635 call->used_iregs |= 1 << ainfo->reg;
2636 cfg->flags |= MONO_CFG_HAS_FPOUT;
2637 break;
2639 default:
2640 g_assert_not_reached ();
2644 /* Handle the case where there are no implicit arguments */
2645 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2646 emit_sig_cookie (cfg, call, cinfo);
2648 call->call_info = cinfo;
2649 call->stack_usage = cinfo->stack_usage;
2652 static void
2653 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
2655 MonoInst *ins;
2657 switch (storage) {
2658 case RegTypeFP:
2659 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2660 ins->dreg = mono_alloc_freg (cfg);
2661 ins->sreg1 = arg->dreg;
2662 MONO_ADD_INS (cfg->cbb, ins);
2663 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
2664 break;
2665 default:
2666 g_assert_not_reached ();
2667 break;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower an OP_OUTARG_VT instruction: emit the IR that passes the vtype in
 * SRC according to the ArgInfo stashed in ins->inst_p1 by
 * mono_arch_emit_call () (by address, in integer registers, in VFP registers
 * for HFAs, or partly in registers and partly on the stack).
 */
2671 void
2672 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2674 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2675 MonoInst *load;
2676 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
2677 int ovf_size = ainfo->vtsize;
2678 int doffset = ainfo->offset;
2679 int struct_size = ainfo->struct_size;
2680 int i, soffset, dreg, tmpreg;
2682 switch (ainfo->storage) {
2683 case RegTypeGSharedVtInReg:
2684 case RegTypeStructByAddr:
2685 /* Pass by addr */
2686 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2687 break;
2688 case RegTypeGSharedVtOnStack:
2689 case RegTypeStructByAddrOnStack:
2690 /* Pass by addr on stack */
2691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
2692 break;
2693 case RegTypeHFA:
/* Homogeneous float aggregate: load each element straight into a VFP register. */
2694 for (i = 0; i < ainfo->nregs; ++i) {
2695 if (ainfo->esize == 4)
2696 MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
2697 else
2698 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
2699 load->dreg = mono_alloc_freg (cfg);
2700 load->inst_basereg = src->dreg;
2701 load->inst_offset = i * ainfo->esize;
2702 MONO_ADD_INS (cfg->cbb, load);
2704 if (ainfo->esize == 4) {
2705 FloatArgData *fad;
2707 /* See RegTypeFP in mono_arch_emit_call () */
2708 MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
2709 float_arg->flags |= MONO_INST_VOLATILE;
2710 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg);
2712 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2713 fad->vreg = float_arg->dreg;
2714 fad->hreg = ainfo->reg + i;
2716 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2717 } else {
/* Doubles occupy two single-precision register slots each. */
2718 add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + (i * 2), load);
2721 break;
2722 default:
/* RegTypeStructByVal: the first ainfo->size words go in registers... */
2723 soffset = 0;
2724 for (i = 0; i < ainfo->size; ++i) {
2725 dreg = mono_alloc_ireg (cfg);
/* Use exact-width loads for sub-word structs to avoid reading past the object. */
2726 switch (struct_size) {
2727 case 1:
2728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2729 break;
2730 case 2:
2731 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2732 break;
2733 case 3:
/* 3 bytes: assemble the word from three byte loads. */
2734 tmpreg = mono_alloc_ireg (cfg);
2735 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2737 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2738 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2739 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2740 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2741 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2742 break;
2743 default:
2744 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2745 break;
2747 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2748 soffset += sizeof (target_mgreg_t);
2749 struct_size -= sizeof (target_mgreg_t);
/* ...and the overflow part (ovf_size words) is copied to the stack area. */
2751 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2752 if (ovf_size != 0)
2753 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (target_mgreg_t), struct_size), struct_size < 4 ? 1 : 4);
2754 break;
/*
 * mono_arch_emit_setret:
 *
 *   Emit the IR that moves VAL into the return location of METHOD.
 * I8/U8 use OP_SETLRET (register pair); floating point returns depend on the
 * configured FPU (soft float vs VFP); everything else is a plain OP_MOVE into
 * cfg->ret.
 */
2758 void
2759 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2761 MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
2763 if (!ret->byref) {
2764 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2765 MonoInst *ins;
2767 if (COMPILE_LLVM (cfg)) {
2768 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2769 } else {
/* 64-bit return: both halves of the long vreg pair go into the ret regs. */
2770 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2771 ins->sreg1 = MONO_LVREG_LS (val->dreg);
2772 ins->sreg2 = MONO_LVREG_MS (val->dreg);
2773 MONO_ADD_INS (cfg->cbb, ins);
2775 return;
2777 switch (arm_fpu) {
2778 case MONO_ARM_FPU_NONE:
2779 if (ret->type == MONO_TYPE_R8) {
2780 MonoInst *ins;
2782 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2783 ins->dreg = cfg->ret->dreg;
2784 ins->sreg1 = val->dreg;
2785 MONO_ADD_INS (cfg->cbb, ins);
2786 return;
2788 if (ret->type == MONO_TYPE_R4) {
2789 /* Already converted to an int in method_to_ir () */
2790 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2791 return;
2793 break;
2794 case MONO_ARM_FPU_VFP:
2795 case MONO_ARM_FPU_VFP_HARD:
2796 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2797 MonoInst *ins;
2799 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2800 ins->dreg = cfg->ret->dreg;
2801 ins->sreg1 = val->dreg;
2802 MONO_ADD_INS (cfg->cbb, ins);
2803 return;
2805 break;
2806 default:
2807 g_assert_not_reached ();
/* Fall through for all remaining (integer/pointer/byref) return types. */
2811 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2814 #endif /* #ifndef DISABLE_JIT */
2816 gboolean
2817 mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
2819 return TRUE;
/*
 * Precomputed state for the dynamic call (mono_arch_start_dyn_call /
 * mono_arch_finish_dyn_call) machinery, built by mono_arch_dyn_call_prepare ().
 */
2822 typedef struct {
2823 MonoMethodSignature *sig;
2824 CallInfo *cinfo;
/* Underlying (unwrapped) return type of sig. */
2825 MonoType *rtype;
/* Underlying types of each parameter; g_new0-allocated array of sig->param_count entries. */
2826 MonoType **param_types;
2827 } ArchDynCallInfo;
/*
 * dyn_call_supported:
 *
 *   Return whether the dynamic-call machinery can handle SIG with the given
 * CINFO: rejects unsupported return/argument storage kinds, FP returns
 * without VFP, and (since cinfo alone can't express them) soft-float R4/R8
 * and I8/U8 parameters.
 */
2829 static gboolean
2830 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2832 int i;
2834 switch (cinfo->ret.storage) {
2835 case RegTypeNone:
2836 case RegTypeGeneral:
2837 case RegTypeIRegPair:
2838 case RegTypeStructByAddr:
2839 break;
2840 case RegTypeFP:
2841 if (IS_VFP)
2842 break;
2843 else
2844 return FALSE;
2845 default:
2846 return FALSE;
2849 for (i = 0; i < cinfo->nargs; ++i) {
2850 ArgInfo *ainfo = &cinfo->args [i];
/* NOTE(review): last_slot is computed below but not used in this view — TODO confirm it is dead. */
2851 int last_slot;
2853 switch (ainfo->storage) {
2854 case RegTypeGeneral:
2855 case RegTypeIRegPair:
2856 case RegTypeBaseGen:
2857 case RegTypeFP:
2858 break;
2859 case RegTypeBase:
2860 break;
2861 case RegTypeStructByVal:
2862 if (ainfo->size == 0)
2863 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2864 else
2865 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2866 break;
2867 default:
2868 return FALSE;
2872 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2873 for (i = 0; i < sig->param_count; ++i) {
2874 MonoType *t = sig->params [i];
2876 if (t->byref)
2877 continue;
2879 t = mini_get_underlying_type (t);
2881 switch (t->type) {
2882 case MONO_TYPE_R4:
2883 case MONO_TYPE_R8:
2884 if (IS_SOFT_FLOAT)
2885 return FALSE;
2886 else
2887 break;
2889 case MONO_TYPE_I8:
2890 case MONO_TYPE_U8:
2891 return FALSE;
2893 default:
2894 break;
2898 return TRUE;
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Precompute the info needed to make dynamic calls with SIG, or NULL if the
 * signature is unsupported. The returned ArchDynCallInfo owns a malloc'd
 * CallInfo and a g_new0'd param_types array; free with
 * mono_arch_dyn_call_free ().
 */
2901 MonoDynCallInfo*
2902 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2904 ArchDynCallInfo *info;
2905 CallInfo *cinfo;
2906 int i;
2908 cinfo = get_call_info (NULL, sig);
2910 if (!dyn_call_supported (cinfo, sig)) {
2911 g_free (cinfo);
2912 return NULL;
2915 info = g_new0 (ArchDynCallInfo, 1);
2916 // FIXME: Preprocess the info to speed up start_dyn_call ()
2917 info->sig = sig;
2918 info->cinfo = cinfo;
2919 info->rtype = mini_get_underlying_type (sig->ret);
2920 info->param_types = g_new0 (MonoType*, sig->param_count);
2921 for (i = 0; i < sig->param_count; ++i)
2922 info->param_types [i] = mini_get_underlying_type (sig->params [i]);
2924 return (MonoDynCallInfo*)info;
2927 void
2928 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2930 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2932 g_free (ainfo->cinfo);
2933 g_free (ainfo);
/*
 * mono_arch_dyn_call_get_buf_size:
 *
 *   Size of the buffer the caller must allocate for mono_arch_start_dyn_call ():
 * the DynCallArgs header plus room for all stack arguments.
 * (NOTE(review): the return-type line appears to have been lost in extraction —
 * presumably `int`; verify against the original file.)
 */
2937 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
2939 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2941 g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
2942 return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage;
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS (an array of pointers to the actual argument values) into
 * the DynCallArgs structure in BUF, laying out register/stack slots exactly
 * as the ARM calling convention requires, so the dyn-call trampoline can
 * perform the call. RET is remembered so mono_arch_finish_dyn_call () can
 * store the result there.
 */
2945 void
2946 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
2948 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2949 CallInfo *cinfo = dinfo->cinfo;
2950 DynCallArgs *p = (DynCallArgs*)buf;
2951 int arg_index, greg, i, j, pindex;
2952 MonoMethodSignature *sig = dinfo->sig;
2954 p->res = 0;
2955 p->ret = ret;
2956 p->has_fpregs = 0;
2957 p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);
2959 arg_index = 0;
2960 greg = 0;
2961 pindex = 0;
/* 'this' (or a vret address that precedes it) goes in the first register. */
2963 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2964 p->regs [greg ++] = (host_mgreg_t)(gsize)*(args [arg_index ++]);
2965 if (!sig->hasthis)
2966 pindex = 1;
2969 if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
2970 p->regs [greg ++] = (host_mgreg_t)(gsize)ret;
2972 for (i = pindex; i < sig->param_count; i++) {
2973 MonoType *t = dinfo->param_types [i];
2974 gpointer *arg = args [arg_index ++];
2975 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
2976 int slot = -1;
/* Translate the arg's storage into an index into p->regs (registers first, then stack). */
2978 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
2979 slot = ainfo->reg;
2980 } else if (ainfo->storage == RegTypeFP) {
2981 } else if (ainfo->storage == RegTypeBase) {
2982 slot = PARAM_REGS + (ainfo->offset / 4);
2983 } else if (ainfo->storage == RegTypeBaseGen) {
2984 /* slot + 1 is the first stack slot, so the code below will work */
2985 slot = 3;
2986 } else {
2987 g_assert_not_reached ();
2990 if (t->byref) {
2991 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
2992 continue;
2995 switch (t->type) {
2996 case MONO_TYPE_OBJECT:
2997 case MONO_TYPE_PTR:
2998 case MONO_TYPE_I:
2999 case MONO_TYPE_U:
3000 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
3001 break;
3002 case MONO_TYPE_U1:
3003 p->regs [slot] = *(guint8*)arg;
3004 break;
3005 case MONO_TYPE_I1:
3006 p->regs [slot] = *(gint8*)arg;
3007 break;
3008 case MONO_TYPE_I2:
3009 p->regs [slot] = *(gint16*)arg;
3010 break;
3011 case MONO_TYPE_U2:
3012 p->regs [slot] = *(guint16*)arg;
3013 break;
3014 case MONO_TYPE_I4:
3015 p->regs [slot] = *(gint32*)arg;
3016 break;
3017 case MONO_TYPE_U4:
3018 p->regs [slot] = *(guint32*)arg;
3019 break;
3020 case MONO_TYPE_I8:
3021 case MONO_TYPE_U8:
/* 64-bit values occupy two consecutive slots. */
3022 p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
3023 p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
3024 break;
3025 case MONO_TYPE_R4:
3026 if (ainfo->storage == RegTypeFP) {
3027 float f = *(float*)arg;
3028 p->fpregs [ainfo->reg / 2] = *(double*)&f;
3029 p->has_fpregs = 1;
3030 } else {
3031 p->regs [slot] = *(host_mgreg_t*)arg;
3033 break;
3034 case MONO_TYPE_R8:
3035 if (ainfo->storage == RegTypeFP) {
3036 p->fpregs [ainfo->reg / 2] = *(double*)arg;
3037 p->has_fpregs = 1;
3038 } else {
3039 p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
3040 p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
3042 break;
3043 case MONO_TYPE_GENERICINST:
3044 if (MONO_TYPE_IS_REFERENCE (t)) {
3045 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
3046 break;
3047 } else {
/* Nullable: unbox into a stack buffer, then fall through to the vtype case. */
3048 if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
3049 MonoClass *klass = mono_class_from_mono_type_internal (t);
3050 guint8 *nullable_buf;
3051 int size;
3053 size = mono_class_value_size (klass, NULL);
3054 nullable_buf = g_alloca (size);
3055 g_assert (nullable_buf);
3057 /* The argument pointed to by arg is either a boxed vtype or null */
3058 mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
3060 arg = (gpointer*)nullable_buf;
3061 /* Fall though */
3062 } else {
3063 /* Fall though */
3066 case MONO_TYPE_VALUETYPE:
3067 g_assert (ainfo->storage == RegTypeStructByVal);
3069 if (ainfo->size == 0)
3070 slot = PARAM_REGS + (ainfo->offset / 4);
3071 else
3072 slot = ainfo->reg;
/* Copy the whole vtype word by word (register part + overflow part). */
3074 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
3075 p->regs [slot ++] = ((host_mgreg_t*)arg) [j];
3076 break;
3077 default:
3078 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   After the dyn-call trampoline has run, convert the raw result registers
 * captured in BUF (p->res/p->res2/p->fpregs) into the properly typed value
 * and store it through p->ret.
 */
3083 void
3084 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
3086 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
3087 DynCallArgs *p = (DynCallArgs*)buf;
3088 MonoType *ptype = ainfo->rtype;
3089 guint8 *ret = p->ret;
3090 host_mgreg_t res = p->res;
3091 host_mgreg_t res2 = p->res2;
3093 switch (ptype->type) {
3094 case MONO_TYPE_VOID:
3095 *(gpointer*)ret = NULL;
3096 break;
3097 case MONO_TYPE_OBJECT:
3098 case MONO_TYPE_I:
3099 case MONO_TYPE_U:
3100 case MONO_TYPE_PTR:
3101 *(gpointer*)ret = (gpointer)(gsize)res;
3102 break;
3103 case MONO_TYPE_I1:
3104 *(gint8*)ret = res;
3105 break;
3106 case MONO_TYPE_U1:
3107 *(guint8*)ret = res;
3108 break;
3109 case MONO_TYPE_I2:
3110 *(gint16*)ret = res;
3111 break;
3112 case MONO_TYPE_U2:
3113 *(guint16*)ret = res;
3114 break;
3115 case MONO_TYPE_I4:
3116 *(gint32*)ret = res;
3117 break;
3118 case MONO_TYPE_U4:
3119 *(guint32*)ret = res;
3120 break;
3121 case MONO_TYPE_I8:
3122 case MONO_TYPE_U8:
3123 /* This handles endianness as well */
3124 ((gint32*)ret) [0] = res;
3125 ((gint32*)ret) [1] = res2;
3126 break;
3127 case MONO_TYPE_GENERICINST:
3128 if (MONO_TYPE_IS_REFERENCE (ptype)) {
3129 *(gpointer*)ret = (gpointer)res;
3130 break;
3131 } else {
3132 /* Fall though */
3134 case MONO_TYPE_VALUETYPE:
/* Vtype result was written directly through the hidden return address. */
3135 g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr);
3136 /* Nothing to do */
3137 break;
3138 case MONO_TYPE_R4:
3139 g_assert (IS_VFP);
3140 if (IS_HARD_FLOAT)
3141 *(float*)ret = *(float*)&p->fpregs [0];
3142 else
3143 *(float*)ret = *(float*)&res;
3144 break;
3145 case MONO_TYPE_R8: {
/* Soft ABI: reassemble the double from the two integer result registers. */
3146 host_mgreg_t regs [2];
3148 g_assert (IS_VFP);
3149 if (IS_HARD_FLOAT) {
3150 *(double*)ret = p->fpregs [0];
3151 } else {
3152 regs [0] = res;
3153 regs [1] = res2;
3155 *(double*)ret = *(double*)&regs;
3157 break;
3159 default:
3160 g_assert_not_reached ();
3164 #ifndef DISABLE_JIT
/*
 * Conditional branch / implicit exception emission helpers.
 * EMIT_COND_BRANCH_FLAGS records a MONO_PATCH_INFO_BB patch and emits a
 * conditional B with a 0 displacement to be fixed up later; the direct-offset
 * branch is disabled ('0 &&').
 */
3167 * The immediate field for cond branches is big enough for all reasonable methods
3169 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3170 if (0 && ins->inst_true_bb->native_offset) { \
3171 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3172 } else { \
3173 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3174 ARM_B_COND (code, (condcode), 0); \
3177 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3179 /* emit an exception if condition is fail
3181 * We assign the extra code used to throw the implicit exceptions
3182 * to cfg->bb_exit as far as the big branch handling is concerned
/* NOTE(review): the trailing ';' after 'while (0)' below defeats the do/while(0)
 * idiom and would break an unbraced if/else around an invocation — verify call
 * sites before removing it. */
3184 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3185 do { \
3186 mono_add_patch_info (cfg, code - cfg->native_code, \
3187 MONO_PATCH_INFO_EXC, exc_name); \
3188 ARM_BL_COND (code, (condcode), 0); \
3189 } while (0);
3191 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3193 void
3194 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Second peephole pass over BB: eliminate redundant loads that follow a
 * store or load of the same [basereg + offset], turn them into moves or
 * sign/zero extensions of the stored value, and drop no-op / inverse moves.
 */
3198 void
3199 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3201 MonoInst *ins, *n;
3203 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3204 MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
3206 switch (ins->opcode) {
3207 case OP_MUL_IMM:
3208 case OP_IMUL_IMM:
3209 /* Already done by an arch-independent pass */
3210 break;
3211 case OP_LOAD_MEMBASE:
3212 case OP_LOADI4_MEMBASE:
3214 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3215 * OP_LOAD_MEMBASE offset(basereg), reg
/* Store followed by load of the same slot: reuse the stored register. */
3217 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3218 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3219 ins->inst_basereg == last_ins->inst_destbasereg &&
3220 ins->inst_offset == last_ins->inst_offset) {
3221 if (ins->dreg == last_ins->sreg1) {
3222 MONO_DELETE_INS (bb, ins);
3223 continue;
3224 } else {
3225 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3226 ins->opcode = OP_MOVE;
3227 ins->sreg1 = last_ins->sreg1;
3231 * Note: reg1 must be different from the basereg in the second load
3232 * OP_LOAD_MEMBASE offset(basereg), reg1
3233 * OP_LOAD_MEMBASE offset(basereg), reg2
3234 * -->
3235 * OP_LOAD_MEMBASE offset(basereg), reg1
3236 * OP_MOVE reg1, reg2
3238 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3239 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3240 ins->inst_basereg != last_ins->dreg &&
3241 ins->inst_basereg == last_ins->inst_basereg &&
3242 ins->inst_offset == last_ins->inst_offset) {
3244 if (ins->dreg == last_ins->dreg) {
3245 MONO_DELETE_INS (bb, ins);
3246 continue;
3247 } else {
3248 ins->opcode = OP_MOVE;
3249 ins->sreg1 = last_ins->dreg;
3252 //g_assert_not_reached ();
3254 #if 0
3256 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3257 * OP_LOAD_MEMBASE offset(basereg), reg
3258 * -->
3259 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3260 * OP_ICONST reg, imm
3262 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3263 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3264 ins->inst_basereg == last_ins->inst_destbasereg &&
3265 ins->inst_offset == last_ins->inst_offset) {
3266 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3267 ins->opcode = OP_ICONST;
3268 ins->inst_c0 = last_ins->inst_imm;
3269 g_assert_not_reached (); // check this rule
3270 #endif
3272 break;
3273 case OP_LOADU1_MEMBASE:
3274 case OP_LOADI1_MEMBASE:
/* Byte store + byte load of the same slot -> extend the stored register. */
3275 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3276 ins->inst_basereg == last_ins->inst_destbasereg &&
3277 ins->inst_offset == last_ins->inst_offset) {
3278 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3279 ins->sreg1 = last_ins->sreg1;
3281 break;
3282 case OP_LOADU2_MEMBASE:
3283 case OP_LOADI2_MEMBASE:
/* Same for 16-bit store/load pairs. */
3284 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3285 ins->inst_basereg == last_ins->inst_destbasereg &&
3286 ins->inst_offset == last_ins->inst_offset) {
3287 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3288 ins->sreg1 = last_ins->sreg1;
3290 break;
3291 case OP_MOVE:
3292 ins->opcode = OP_MOVE;
3294 * OP_MOVE reg, reg
3296 if (ins->dreg == ins->sreg1) {
3297 MONO_DELETE_INS (bb, ins);
3298 continue;
3301 * OP_MOVE sreg, dreg
3302 * OP_MOVE dreg, sreg
3304 if (last_ins && last_ins->opcode == OP_MOVE &&
3305 ins->sreg1 == last_ins->dreg &&
3306 ins->dreg == last_ins->sreg1) {
3307 MONO_DELETE_INS (bb, ins);
3308 continue;
3310 break;
3316 * the branch_cc_table should maintain the order of these
3317 * opcodes.
3318 case CEE_BEQ:
3319 case CEE_BGE:
3320 case CEE_BGT:
3321 case CEE_BLE:
3322 case CEE_BLT:
3323 case CEE_BNE_UN:
3324 case CEE_BGE_UN:
3325 case CEE_BGT_UN:
3326 case CEE_BLE_UN:
3327 case CEE_BLT_UN:
3329 static const guchar
3330 branch_cc_table [] = {
3331 ARMCOND_EQ,
3332 ARMCOND_GE,
3333 ARMCOND_GT,
3334 ARMCOND_LE,
3335 ARMCOND_LT,
3337 ARMCOND_NE,
3338 ARMCOND_HS,
3339 ARMCOND_HI,
3340 ARMCOND_LS,
3341 ARMCOND_LO
/* Allocate a fresh MonoInst with opcode 'op' into 'dest' and insert it into
 * the basic block 'bb' immediately before 'ins' (both 'bb' and 'ins' are
 * taken from the scope of the expansion site in mono_arch_lowering_pass). */
3344 #define ADD_NEW_INS(cfg,dest,op) do { \
3345 MONO_INST_NEW ((cfg), (dest), (op)); \
3346 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3347 } while (0)
/*
 * map_to_reg_reg_op:
 *
 *   Map an opcode whose second operand is an immediate (or a membase offset)
 * to the equivalent reg-reg / memindex variant, for use once the immediate
 * has been materialized in a register because it does not fit the ARM
 * instruction encoding.  *_MEMBASE_IMM stores map to their *_MEMBASE_REG
 * form (the offset may then still need a second lowering step).
 * Asserts on any opcode with no mapping.
 */
3349 static int
3350 map_to_reg_reg_op (int op)
3352 switch (op) {
3353 case OP_ADD_IMM:
3354 return OP_IADD;
3355 case OP_SUB_IMM:
3356 return OP_ISUB;
3357 case OP_AND_IMM:
3358 return OP_IAND;
3359 case OP_COMPARE_IMM:
3360 return OP_COMPARE;
3361 case OP_ICOMPARE_IMM:
3362 return OP_ICOMPARE;
3363 case OP_ADDCC_IMM:
3364 return OP_ADDCC;
3365 case OP_ADC_IMM:
3366 return OP_ADC;
3367 case OP_SUBCC_IMM:
3368 return OP_SUBCC;
3369 case OP_SBB_IMM:
3370 return OP_SBB;
3371 case OP_OR_IMM:
3372 return OP_IOR;
3373 case OP_XOR_IMM:
3374 return OP_IXOR;
3375 case OP_LOAD_MEMBASE:
3376 return OP_LOAD_MEMINDEX;
3377 case OP_LOADI4_MEMBASE:
3378 return OP_LOADI4_MEMINDEX;
3379 case OP_LOADU4_MEMBASE:
3380 return OP_LOADU4_MEMINDEX;
3381 case OP_LOADU1_MEMBASE:
3382 return OP_LOADU1_MEMINDEX;
3383 case OP_LOADI2_MEMBASE:
3384 return OP_LOADI2_MEMINDEX;
3385 case OP_LOADU2_MEMBASE:
3386 return OP_LOADU2_MEMINDEX;
3387 case OP_LOADI1_MEMBASE:
3388 return OP_LOADI1_MEMINDEX;
3389 case OP_STOREI1_MEMBASE_REG:
3390 return OP_STOREI1_MEMINDEX;
3391 case OP_STOREI2_MEMBASE_REG:
3392 return OP_STOREI2_MEMINDEX;
3393 case OP_STOREI4_MEMBASE_REG:
3394 return OP_STOREI4_MEMINDEX;
3395 case OP_STORE_MEMBASE_REG:
3396 return OP_STORE_MEMINDEX;
3397 case OP_STORER4_MEMBASE_REG:
3398 return OP_STORER4_MEMINDEX;
3399 case OP_STORER8_MEMBASE_REG:
3400 return OP_STORER8_MEMINDEX;
3401 case OP_STORE_MEMBASE_IMM:
3402 return OP_STORE_MEMBASE_REG;
3403 case OP_STOREI1_MEMBASE_IMM:
3404 return OP_STOREI1_MEMBASE_REG;
3405 case OP_STOREI2_MEMBASE_IMM:
3406 return OP_STOREI2_MEMBASE_REG;
3407 case OP_STOREI4_MEMBASE_IMM:
3408 return OP_STOREI4_MEMBASE_REG;
/* no mapping exists for this opcode */
3410 g_assert_not_reached ();
3414 * Remove from the instruction list the instructions that can't be
3415 * represented with very simple instructions with no register
3416 * requirements.
/* Lowering strategy: immediates that don't fit the ARM encodings are
 * materialized via an inserted OP_ICONST (or an OP_ADD_IMM base adjustment
 * for FP loads/stores), and the instruction is switched to its reg-reg /
 * memindex form via map_to_reg_reg_op(). */
3418 void
3419 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3421 MonoInst *ins, *temp, *last_ins = NULL;
3422 int rot_amount, imm8, low_imm;
3424 MONO_BB_FOR_EACH_INS (bb, ins) {
3425 loop_start:
3426 switch (ins->opcode) {
3427 case OP_ADD_IMM:
3428 case OP_SUB_IMM:
3429 case OP_AND_IMM:
3430 case OP_COMPARE_IMM:
3431 case OP_ICOMPARE_IMM:
3432 case OP_ADDCC_IMM:
3433 case OP_ADC_IMM:
3434 case OP_SUBCC_IMM:
3435 case OP_SBB_IMM:
3436 case OP_OR_IMM:
3437 case OP_XOR_IMM:
3438 case OP_IADD_IMM:
3439 case OP_ISUB_IMM:
3440 case OP_IAND_IMM:
3441 case OP_IADC_IMM:
3442 case OP_ISBB_IMM:
3443 case OP_IOR_IMM:
3444 case OP_IXOR_IMM:
/* ALU immediates must be expressible as a rotated 8-bit value;
 * otherwise load the constant into a register and use the reg-reg form. */
3445 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3446 int opcode2 = mono_op_imm_to_op (ins->opcode);
3447 ADD_NEW_INS (cfg, temp, OP_ICONST);
3448 temp->inst_c0 = ins->inst_imm;
3449 temp->dreg = mono_alloc_ireg (cfg);
3450 ins->sreg2 = temp->dreg;
3451 if (opcode2 == -1)
3452 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
3453 ins->opcode = opcode2;
/* the converted opcode may itself need the OP_SBB/OP_SUBCC rewrite below */
3455 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
3456 goto loop_start;
3457 else
3458 break;
3459 case OP_MUL_IMM:
3460 case OP_IMUL_IMM:
/* strength-reduce: *1 -> move, *0 -> const 0, *2^n -> shift */
3461 if (ins->inst_imm == 1) {
3462 ins->opcode = OP_MOVE;
3463 break;
3465 if (ins->inst_imm == 0) {
3466 ins->opcode = OP_ICONST;
3467 ins->inst_c0 = 0;
3468 break;
3470 imm8 = mono_is_power_of_two (ins->inst_imm);
3471 if (imm8 > 0) {
3472 ins->opcode = OP_SHL_IMM;
3473 ins->inst_imm = imm8;
3474 break;
3476 ADD_NEW_INS (cfg, temp, OP_ICONST);
3477 temp->inst_c0 = ins->inst_imm;
3478 temp->dreg = mono_alloc_ireg (cfg);
3479 ins->sreg2 = temp->dreg;
3480 ins->opcode = OP_IMUL;
3481 break;
3482 case OP_SBB:
3483 case OP_ISBB:
3484 case OP_SUBCC:
3485 case OP_ISUBCC: {
3486 int try_count = 2;
3487 MonoInst *current = ins;
3489 /* may require a look-ahead of a couple instructions due to spilling */
3490 while (try_count-- && current->next) {
3491 if (current->next->opcode == OP_COND_EXC_C || current->next->opcode == OP_COND_EXC_IC) {
3492 /* ARM sets the C flag to 1 if there was _no_ overflow */
3493 current->next->opcode = OP_COND_EXC_NC;
3494 break;
3496 current = current->next;
3498 break;
3500 case OP_IDIV_IMM:
3501 case OP_IDIV_UN_IMM:
3502 case OP_IREM_IMM:
3503 case OP_IREM_UN_IMM: {
/* div/rem by immediate: always go through a register operand */
3504 int opcode2 = mono_op_imm_to_op (ins->opcode);
3505 ADD_NEW_INS (cfg, temp, OP_ICONST);
3506 temp->inst_c0 = ins->inst_imm;
3507 temp->dreg = mono_alloc_ireg (cfg);
3508 ins->sreg2 = temp->dreg;
3509 if (opcode2 == -1)
3510 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
3511 ins->opcode = opcode2;
3512 break;
3514 case OP_LOCALLOC_IMM:
3515 ADD_NEW_INS (cfg, temp, OP_ICONST);
3516 temp->inst_c0 = ins->inst_imm;
3517 temp->dreg = mono_alloc_ireg (cfg);
3518 ins->sreg1 = temp->dreg;
3519 ins->opcode = OP_LOCALLOC;
3520 break;
3521 case OP_LOAD_MEMBASE:
3522 case OP_LOADI4_MEMBASE:
3523 case OP_LOADU4_MEMBASE:
3524 case OP_LOADU1_MEMBASE:
3525 /* we can do two things: load the immed in a register
3526 * and use an indexed load, or see if the immed can be
3527 * represented as an ad_imm + a load with a smaller offset
3528 * that fits. We just do the first for now, optimize later.
3530 if (arm_is_imm12 (ins->inst_offset))
3531 break;
3532 ADD_NEW_INS (cfg, temp, OP_ICONST);
3533 temp->inst_c0 = ins->inst_offset;
3534 temp->dreg = mono_alloc_ireg (cfg);
3535 ins->sreg2 = temp->dreg;
3536 ins->opcode = map_to_reg_reg_op (ins->opcode);
3537 break;
3538 case OP_LOADI2_MEMBASE:
3539 case OP_LOADU2_MEMBASE:
3540 case OP_LOADI1_MEMBASE:
/* halfword/signed-byte loads only take an 8-bit offset */
3541 if (arm_is_imm8 (ins->inst_offset))
3542 break;
3543 ADD_NEW_INS (cfg, temp, OP_ICONST);
3544 temp->inst_c0 = ins->inst_offset;
3545 temp->dreg = mono_alloc_ireg (cfg);
3546 ins->sreg2 = temp->dreg;
3547 ins->opcode = map_to_reg_reg_op (ins->opcode);
3548 break;
3549 case OP_LOADR4_MEMBASE:
3550 case OP_LOADR8_MEMBASE:
3551 if (arm_is_fpimm8 (ins->inst_offset))
3552 break;
/* FP loads have no reg-indexed form: either fold the high bits of the
 * offset into an ADD_IMM on the base, or compute the full address. */
3553 low_imm = ins->inst_offset & 0x1ff;
3554 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3555 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3556 temp->inst_imm = ins->inst_offset & ~0x1ff;
3557 temp->sreg1 = ins->inst_basereg;
3558 temp->dreg = mono_alloc_ireg (cfg);
3559 ins->inst_basereg = temp->dreg;
3560 ins->inst_offset = low_imm;
3561 } else {
3562 MonoInst *add_ins;
3564 ADD_NEW_INS (cfg, temp, OP_ICONST);
3565 temp->inst_c0 = ins->inst_offset;
3566 temp->dreg = mono_alloc_ireg (cfg);
3568 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3569 add_ins->sreg1 = ins->inst_basereg;
3570 add_ins->sreg2 = temp->dreg;
3571 add_ins->dreg = mono_alloc_ireg (cfg);
3573 ins->inst_basereg = add_ins->dreg;
3574 ins->inst_offset = 0;
3576 break;
3577 case OP_STORE_MEMBASE_REG:
3578 case OP_STOREI4_MEMBASE_REG:
3579 case OP_STOREI1_MEMBASE_REG:
3580 if (arm_is_imm12 (ins->inst_offset))
3581 break;
3582 ADD_NEW_INS (cfg, temp, OP_ICONST);
3583 temp->inst_c0 = ins->inst_offset;
3584 temp->dreg = mono_alloc_ireg (cfg);
3585 ins->sreg2 = temp->dreg;
3586 ins->opcode = map_to_reg_reg_op (ins->opcode);
3587 break;
3588 case OP_STOREI2_MEMBASE_REG:
3589 if (arm_is_imm8 (ins->inst_offset))
3590 break;
3591 ADD_NEW_INS (cfg, temp, OP_ICONST);
3592 temp->inst_c0 = ins->inst_offset;
3593 temp->dreg = mono_alloc_ireg (cfg);
3594 ins->sreg2 = temp->dreg;
3595 ins->opcode = map_to_reg_reg_op (ins->opcode);
3596 break;
3597 case OP_STORER4_MEMBASE_REG:
3598 case OP_STORER8_MEMBASE_REG:
3599 if (arm_is_fpimm8 (ins->inst_offset))
3600 break;
/* same split as OP_LOADR4/R8_MEMBASE above, on the destination base */
3601 low_imm = ins->inst_offset & 0x1ff;
3602 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3603 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3604 temp->inst_imm = ins->inst_offset & ~0x1ff;
3605 temp->sreg1 = ins->inst_destbasereg;
3606 temp->dreg = mono_alloc_ireg (cfg);
3607 ins->inst_destbasereg = temp->dreg;
3608 ins->inst_offset = low_imm;
3609 } else {
3610 MonoInst *add_ins;
3612 ADD_NEW_INS (cfg, temp, OP_ICONST);
3613 temp->inst_c0 = ins->inst_offset;
3614 temp->dreg = mono_alloc_ireg (cfg);
3616 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3617 add_ins->sreg1 = ins->inst_destbasereg;
3618 add_ins->sreg2 = temp->dreg;
3619 add_ins->dreg = mono_alloc_ireg (cfg);
3621 ins->inst_destbasereg = add_ins->dreg;
3622 ins->inst_offset = 0;
3624 break;
3625 case OP_STORE_MEMBASE_IMM:
3626 case OP_STOREI1_MEMBASE_IMM:
3627 case OP_STOREI2_MEMBASE_IMM:
3628 case OP_STOREI4_MEMBASE_IMM:
3629 ADD_NEW_INS (cfg, temp, OP_ICONST);
3630 temp->inst_c0 = ins->inst_imm;
3631 temp->dreg = mono_alloc_ireg (cfg);
3632 ins->sreg1 = temp->dreg;
3633 ins->opcode = map_to_reg_reg_op (ins->opcode);
3634 last_ins = temp;
3635 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3636 case OP_FCOMPARE:
3637 case OP_RCOMPARE: {
3638 gboolean swap = FALSE;
3639 int reg;
3641 if (!ins->next) {
3642 /* Optimized away */
3643 NULLIFY_INS (ins);
3644 break;
3647 /* Some fp compares require swapped operands */
3648 switch (ins->next->opcode) {
3649 case OP_FBGT:
3650 ins->next->opcode = OP_FBLT;
3651 swap = TRUE;
3652 break;
3653 case OP_FBGT_UN:
3654 ins->next->opcode = OP_FBLT_UN;
3655 swap = TRUE;
3656 break;
3657 case OP_FBLE:
3658 ins->next->opcode = OP_FBGE;
3659 swap = TRUE;
3660 break;
3661 case OP_FBLE_UN:
3662 ins->next->opcode = OP_FBGE_UN;
3663 swap = TRUE;
3664 break;
3665 default:
3666 break;
3668 if (swap) {
3669 reg = ins->sreg1;
3670 ins->sreg1 = ins->sreg2;
3671 ins->sreg2 = reg;
3673 break;
3677 last_ins = ins;
3679 bb->last_ins = last_ins;
3680 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit opcodes into 32-bit pairs.  Only OP_LNEG is handled
 * here: it becomes RSBS #0 on the low word followed by RSC #0 on the high
 * word (negate with borrow propagation); the original ins is then nullified.
 * All other long opcodes are left for the generic decomposition.
 */
3683 void
3684 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3686 MonoInst *ins;
3688 if (long_ins->opcode == OP_LNEG) {
3689 ins = long_ins;
3690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0);
3691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0);
3692 NULLIFY_INS (ins);
/*
 * emit_float_to_int:
 *
 *   Emit code converting the double in VFP reg 'sreg' to an integer in core
 * reg 'dreg', truncated to 'size' bytes (1/2) with sign or zero extension as
 * requested; a VFP scratch register is saved/restored around the conversion.
 * Returns the updated code pointer.
 */
3696 static guchar*
3697 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3699 /* sreg is a float, dreg is an integer reg */
3700 if (IS_VFP) {
3701 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3702 if (is_signed)
3703 ARM_TOSIZD (code, vfp_scratch1, sreg);
3704 else
3705 ARM_TOUIZD (code, vfp_scratch1, sreg);
3706 ARM_FMRS (code, dreg, vfp_scratch1);
3707 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* narrow the 32-bit result: mask/shift-pair for zero-extension,
 * shift-left + arithmetic-shift-right for sign-extension */
3709 if (!is_signed) {
3710 if (size == 1)
3711 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3712 else if (size == 2) {
3713 ARM_SHL_IMM (code, dreg, dreg, 16);
3714 ARM_SHR_IMM (code, dreg, dreg, 16);
3716 } else {
3717 if (size == 1) {
3718 ARM_SHL_IMM (code, dreg, dreg, 24);
3719 ARM_SAR_IMM (code, dreg, dreg, 24);
3720 } else if (size == 2) {
3721 ARM_SHL_IMM (code, dreg, dreg, 16);
3722 ARM_SAR_IMM (code, dreg, dreg, 16);
3725 return code;
/*
 * emit_r4_to_int:
 *
 *   Single-precision counterpart of emit_float_to_int: convert the float in
 * VFP reg 'sreg' to an integer in core reg 'dreg' (TOSIZS/TOUIZS), then
 * narrow to 'size' bytes with sign or zero extension.  VFP-only (asserted).
 */
3728 static guchar*
3729 emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3731 /* sreg is a float, dreg is an integer reg */
3732 g_assert (IS_VFP);
3733 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3734 if (is_signed)
3735 ARM_TOSIZS (code, vfp_scratch1, sreg);
3736 else
3737 ARM_TOUIZS (code, vfp_scratch1, sreg);
3738 ARM_FMRS (code, dreg, vfp_scratch1);
3739 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* same narrowing scheme as emit_float_to_int */
3741 if (!is_signed) {
3742 if (size == 1)
3743 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3744 else if (size == 2) {
3745 ARM_SHL_IMM (code, dreg, dreg, 16);
3746 ARM_SHR_IMM (code, dreg, dreg, 16);
3748 } else {
3749 if (size == 1) {
3750 ARM_SHL_IMM (code, dreg, dreg, 24);
3751 ARM_SAR_IMM (code, dreg, dreg, 24);
3752 } else if (size == 2) {
3753 ARM_SHL_IMM (code, dreg, dreg, 16);
3754 ARM_SAR_IMM (code, dreg, dreg, 16);
3757 return code;
3760 #endif /* #ifndef DISABLE_JIT */
/* True when 'diff' (a signed byte displacement) fits in the +/-32MB
 * (signed 26-bit) range reachable by an ARM branch/branch-and-link. */
3762 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * emit_thunk:
 *
 *   Write a jump thunk at 'code': load the absolute 'target' address
 * (stored as a literal word right after the two instructions) into IP and
 * jump to it — via BX when Thumb interworking is available, otherwise by
 * moving IP into PC.  Flushes the icache for the emitted bytes.
 */
3764 static void
3765 emit_thunk (guint8 *code, gconstpointer target)
3767 guint8 *p = code;
3769 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3770 if (thumb_supported)
3771 ARM_BX (code, ARMREG_IP);
3772 else
3773 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3774 *(guint32*)code = (guint32)(gsize)target;
3775 code += 4;
3776 mono_arch_flush_icache (p, code - p);
/*
 * handle_thunk:
 *
 *   Redirect the call at 'code' to 'target' through a thunk when the target
 * is out of direct-branch range.  During JITting (cfg != NULL) a fresh slot
 * from the method's reserved thunk area is consumed; at runtime the thunk
 * area is located via the method's MonoJitInfo and scanned (under the arch
 * lock) for a free slot or one that already points to 'target'.
 * Aborts (g_assert_not_reached) if no thunk space is left.
 */
3779 static void
3780 handle_thunk (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
3782 MonoJitInfo *ji = NULL;
3783 MonoThunkJitInfo *info;
3784 guint8 *thunks, *p;
3785 int thunks_size;
3786 guint8 *orig_target;
3787 guint8 *target_thunk;
3789 if (!domain)
3790 domain = mono_domain_get ();
3792 if (cfg) {
3794 * This can be called multiple times during JITting,
3795 * save the current position in cfg->arch to avoid
3796 * doing a O(n^2) search.
3798 if (!cfg->arch.thunks) {
3799 cfg->arch.thunks = cfg->thunks;
3800 cfg->arch.thunks_size = cfg->thunk_area;
3802 thunks = cfg->arch.thunks;
3803 thunks_size = cfg->arch.thunks_size;
3804 if (!thunks_size) {
3805 g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
3806 g_assert_not_reached ();
/* a free slot starts with a zero word (see the runtime-side scan below) */
3809 g_assert (*(guint32*)thunks == 0);
3810 emit_thunk (thunks, target);
3811 arm_patch (code, thunks);
3813 cfg->arch.thunks += THUNK_SIZE;
3814 cfg->arch.thunks_size -= THUNK_SIZE;
3815 } else {
3816 ji = mini_jit_info_table_find (domain, (char*)code, NULL);
3817 g_assert (ji);
3818 info = mono_jit_info_get_thunk_info (ji);
3819 g_assert (info);
3821 thunks = (guint8*)ji->code_start + info->thunks_offset;
3822 thunks_size = info->thunks_size;
3824 orig_target = mono_arch_get_call_target (code + 4);
3826 mono_mini_arch_lock ();
3828 target_thunk = NULL;
3829 if (orig_target >= thunks && orig_target < thunks + thunks_size) {
3830 /* The call already points to a thunk, because of trampolines etc. */
3831 target_thunk = orig_target;
3832 } else {
3833 for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
3834 if (((guint32*)p) [0] == 0) {
3835 /* Free entry */
3836 target_thunk = p;
3837 break;
3838 } else if (((guint32*)p) [2] == (guint32)(gsize)target) {
3839 /* Thunk already points to target */
3840 target_thunk = p;
3841 break;
3846 //g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
3848 if (!target_thunk) {
3849 mono_mini_arch_unlock ();
3850 g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
3851 g_assert_not_reached ();
3854 emit_thunk (target_thunk, target);
3855 arm_patch (code, target_thunk);
3856 mono_arch_flush_icache (code, 4);
3858 mono_mini_arch_unlock ();
/*
 * arm_patch_general:
 *
 *   Patch the call/branch site at 'code' to transfer to 'target'.  The
 * instruction at 'code' is decoded: a B/BL (primary opcode group 101b) gets
 * its 24-bit displacement rewritten in place (becoming BLX with the
 * half-word bit when the target has its low bit set, i.e. Thumb code), or is
 * redirected through a thunk via handle_thunk() when out of the +/-32MB
 * range.  Otherwise the site must be one of the known indirect-call
 * sequences (ldr ip, [pc] / ... / bx|blx|mov pc, ip), whose shape is
 * verified by re-emitting the expected instructions into 'ccode' and
 * asserting on them, and only the embedded address constant is rewritten.
 */
3862 static void
3863 arm_patch_general (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
3865 guint32 *code32 = (guint32*)code;
3866 guint32 ins = *code32;
3867 guint32 prim = (ins >> 25) & 7;
3868 guint32 tval = GPOINTER_TO_UINT (target);
3870 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3871 if (prim == 5) { /* 101b */
3872 /* the diff starts 8 bytes from the branch opcode */
3873 gint diff = target - code - 8;
3874 gint tbits;
3875 gint tmask = 0xffffffff;
3876 if (tval & 1) { /* entering thumb mode */
3877 diff = target - 1 - code - 8;
3878 g_assert (thumb_supported);
3879 tbits = 0xf << 28; /* bl->blx bit pattern */
3880 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3881 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3882 if (diff & 2) {
3883 tbits |= 1 << 24;
3885 tmask = ~(1 << 24); /* clear the link bit */
3886 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3887 } else {
3888 tbits = 0;
3890 if (diff >= 0) {
3891 if (diff <= 33554431) {
3892 diff >>= 2;
3893 ins = (ins & 0xff000000) | diff;
3894 ins &= tmask;
3895 *code32 = ins | tbits;
3896 return;
3898 } else {
3899 /* diff between 0 and -33554432 */
3900 if (diff >= -33554432) {
3901 diff >>= 2;
3902 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3903 ins &= tmask;
3904 *code32 = ins | tbits;
3905 return;
/* displacement out of branch range: go through a thunk */
3909 handle_thunk (cfg, domain, code, target);
3910 return;
3914 * The alternative call sequences looks like this:
3916 * ldr ip, [pc] // loads the address constant
3917 * b 1f // jumps around the constant
3918 * address constant embedded in the code
3919 * 1f:
3920 * mov lr, pc
3921 * mov pc, ip
3923 * There are two cases for patching:
3924 * a) at the end of method emission: in this case code points to the start
3925 * of the call sequence
3926 * b) during runtime patching of the call site: in this case code points
3927 * to the mov pc, ip instruction
3929 * We have to handle also the thunk jump code sequence:
3931 * ldr ip, [pc]
3932 * mov pc, ip
3933 * address constant // execution never reaches here
3935 if ((ins & 0x0ffffff0) == 0x12fff10) {
3936 /* Branch and exchange: the address is constructed in a reg
3937 * We can patch BX when the code sequence is the following:
3938 * ldr ip, [pc, #0] ; 0x8
3939 * b 0xc
3940 * .word code_ptr
3941 * mov lr, pc
3942 * bx ips
3943 * */
3944 guint32 ccode [4];
3945 guint8 *emit = (guint8*)ccode;
3946 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3947 ARM_B (emit, 0);
3948 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3949 ARM_BX (emit, ARMREG_IP);
3951 /*patching from magic trampoline*/
3952 if (ins == ccode [3]) {
3953 g_assert (code32 [-4] == ccode [0]);
3954 g_assert (code32 [-3] == ccode [1]);
3955 g_assert (code32 [-1] == ccode [2]);
3956 code32 [-2] = (guint32)(gsize)target;
3957 return;
3959 /*patching from JIT*/
3960 if (ins == ccode [0]) {
3961 g_assert (code32 [1] == ccode [1]);
3962 g_assert (code32 [3] == ccode [2]);
3963 g_assert (code32 [4] == ccode [3]);
3964 code32 [2] = (guint32)(gsize)target;
3965 return;
3967 g_assert_not_reached ();
3968 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3970 * ldr ip, [pc, #0]
3971 * b 0xc
3972 * .word code_ptr
3973 * blx ip
3975 guint32 ccode [4];
3976 guint8 *emit = (guint8*)ccode;
3977 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3978 ARM_B (emit, 0);
3979 ARM_BLX_REG (emit, ARMREG_IP);
3981 g_assert (code32 [-3] == ccode [0]);
3982 g_assert (code32 [-2] == ccode [1]);
3983 g_assert (code32 [0] == ccode [2]);
3985 code32 [-1] = (guint32)(gsize)target;
3986 } else {
3987 guint32 ccode [4];
3988 guint32 *tmp = ccode;
3989 guint8 *emit = (guint8*)tmp;
3990 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3991 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3992 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3993 ARM_BX (emit, ARMREG_IP);
3994 if (ins == ccode [2]) {
3995 g_assert_not_reached (); // should be -2 ...
3996 code32 [-1] = (guint32)(gsize)target;
3997 return;
3999 if (ins == ccode [0]) {
4000 /* handles both thunk jump code and the far call sequence */
4001 code32 [2] = (guint32)(gsize)target;
4002 return;
4004 g_assert_not_reached ();
4006 // g_print ("patched with 0x%08x\n", ins);
/* Convenience wrapper over arm_patch_general() with no MonoCompile/domain
 * context (used for runtime patching outside of JIT compilation). */
4009 void
4010 arm_patch (guchar *code, const guchar *target)
4012 arm_patch_general (NULL, NULL, code, target);
4016 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
4017 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
4018 * to be used with the emit macros.
4019 * Return -1 otherwise.
/* Tries every even rotation 0,2,...,30; ARM data-processing immediates are
 * an 8-bit value rotated right by an even amount. */
4022 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
4024 guint32 res, i;
4025 for (i = 0; i < 31; i+= 2) {
4026 if (i == 0)
4027 res = val;
4028 else
/* rotate val left by i (== rotate right by 32-i) */
4029 res = (val << (32 - i)) | (val >> i);
4030 if (res & ~0xff)
4031 continue;
4032 *rot_amount = i? 32 - i: 0;
4033 return res;
4035 return -1;
4039 * Emits in code a sequence of instructions that load the value 'val'
4040 * into the dreg register. Uses at most 4 instructions.
/* Preference order: MOVW/MOVT when forced by the single_imm_size debug
 * option; a single MOV (rotated imm8) or MVN (of the complement); MOVW/MOVT
 * on ARMv7; otherwise a MOV of the lowest non-zero byte followed by up to
 * three rotated-immediate ADDs for the remaining bytes. */
4042 guint8*
4043 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
4045 int imm8, rot_amount;
4046 #if 0
4047 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4048 /* skip the constant pool */
4049 ARM_B (code, 0);
4050 *(int*)code = val;
4051 code += 4;
4052 return code;
4053 #endif
4054 if (mini_debug_options.single_imm_size && v7_supported) {
4055 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
4056 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
4057 return code;
4060 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
4061 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
4062 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
4063 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
4064 } else {
4065 if (v7_supported) {
4066 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
4067 if (val >> 16)
4068 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
4069 return code;
4071 if (val & 0xFF) {
4072 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
4073 if (val & 0xFF00) {
4074 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4076 if (val & 0xFF0000) {
4077 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4079 if (val & 0xFF000000) {
4080 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
4082 } else if (val & 0xFF00) {
4083 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
4084 if (val & 0xFF0000) {
4085 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4087 if (val & 0xFF000000) {
4088 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
4090 } else if (val & 0xFF0000) {
4091 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
4092 if (val & 0xFF000000) {
4093 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
4096 //g_assert_not_reached ();
4098 return code;
/* Returns the file-global 'thumb_supported' flag (Thumb interworking
 * availability, detected elsewhere at startup). */
4101 gboolean
4102 mono_arm_thumb_supported (void)
4104 return thumb_supported;
/* Returns the file-global 'eabi_supported' flag. */
4107 gboolean
4108 mono_arm_eabi_supported (void)
4110 return eabi_supported;
/* Returns the file-global 'i8_align' value (platform alignment of 64-bit
 * integers, determined elsewhere). */
4114 mono_arm_i8_align (void)
4116 return i8_align;
4119 #ifndef DISABLE_JIT
/*
 * emit_move_return_value:
 *
 *   Emit the code that moves a call's return value from its ABI location
 * into the call instruction's dreg.  Handles multi-register struct returns
 * (RegTypeStructByVal) and homogeneous FP aggregates (RegTypeHFA) by storing
 * the return registers through the saved vret address (loaded into LR), and
 * converts float (FCALL/RCALL) results from the soft-float r0/r1 pair or the
 * hard-float d0/s0 register into the VFP dreg.  Returns the updated code
 * pointer.
 */
4121 static guint8*
4122 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
4124 CallInfo *cinfo;
4125 MonoCallInst *call;
4127 call = (MonoCallInst*)ins;
4128 cinfo = call->call_info;
4130 switch (cinfo->ret.storage) {
4131 case RegTypeStructByVal:
4132 case RegTypeHFA: {
4133 MonoInst *loc = cfg->arch.vret_addr_loc;
4134 int i;
4136 if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
4137 /* The JIT treats this as a normal call */
4138 break;
4141 /* Load the destination address */
4142 g_assert (loc && loc->opcode == OP_REGOFFSET);
4144 if (arm_is_imm12 (loc->inst_offset)) {
4145 ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
4146 } else {
4147 code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset);
4148 ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR);
4151 if (cinfo->ret.storage == RegTypeStructByVal) {
4152 int rsize = cinfo->ret.struct_size;
/* store each return register r<i>, narrowing the last store to the
 * struct's remaining byte count (1, 2, or a full word) */
4154 for (i = 0; i < cinfo->ret.nregs; ++i) {
4155 g_assert (rsize >= 0);
4156 switch (rsize) {
4157 case 0:
4158 break;
4159 case 1:
4160 ARM_STRB_IMM (code, i, ARMREG_LR, i * 4);
4161 break;
4162 case 2:
4163 ARM_STRH_IMM (code, i, ARMREG_LR, i * 4);
4164 break;
4165 default:
4166 ARM_STR_IMM (code, i, ARMREG_LR, i * 4);
4167 break;
4169 rsize -= 4;
4171 } else {
/* HFA: store each float (esize 4) or double element from the VFP
 * return registers into the destination */
4172 for (i = 0; i < cinfo->ret.nregs; ++i) {
4173 if (cinfo->ret.esize == 4)
4174 ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4);
4175 else
4176 ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8);
4179 return code;
4181 default:
4182 break;
4185 switch (ins->opcode) {
4186 case OP_FCALL:
4187 case OP_FCALL_REG:
4188 case OP_FCALL_MEMBASE:
4189 if (IS_VFP) {
4190 MonoType *sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
4191 if (sig_ret->type == MONO_TYPE_R4) {
4192 if (IS_HARD_FLOAT) {
4193 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4194 } else {
4195 ARM_FMSR (code, ins->dreg, ARMREG_R0);
4196 ARM_CVTS (code, ins->dreg, ins->dreg);
4198 } else {
4199 if (IS_HARD_FLOAT) {
4200 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
4201 } else {
4202 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
4206 break;
4207 case OP_RCALL:
4208 case OP_RCALL_REG:
4209 case OP_RCALL_MEMBASE: {
4210 MonoType *sig_ret;
4212 g_assert (IS_VFP);
4214 sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
4215 g_assert (sig_ret->type == MONO_TYPE_R4);
4216 if (IS_HARD_FLOAT) {
4217 ARM_CPYS (code, ins->dreg, ARM_VFP_F0);
4218 } else {
4219 ARM_FMSR (code, ins->dreg, ARMREG_R0);
4220 ARM_CPYS (code, ins->dreg, ins->dreg);
4222 break;
4224 default:
4225 break;
4228 return code;
4231 void
4232 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4234 MonoInst *ins;
4235 MonoCallInst *call;
4236 guint8 *code = cfg->native_code + cfg->code_len;
4237 MonoInst *last_ins = NULL;
4238 int max_len, cpos;
4239 int imm8, rot_amount;
4241 /* we don't align basic blocks of loops on arm */
4243 if (cfg->verbose_level > 2)
4244 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4246 cpos = bb->max_offset;
4248 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4249 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4250 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
4251 code = emit_call_seq (cfg, code);
4254 MONO_BB_FOR_EACH_INS (bb, ins) {
4255 guint offset = code - cfg->native_code;
4256 set_code_cursor (cfg, code);
4257 max_len = ins_get_size (ins->opcode);
4258 code = realloc_code (cfg, max_len);
4259 // if (ins->cil_code)
4260 // g_print ("cil code\n");
4261 mono_debug_record_line_number (cfg, ins, offset);
4263 switch (ins->opcode) {
4264 case OP_MEMORY_BARRIER:
4265 if (v7_supported) {
4266 ARM_DMB (code, ARM_DMB_ISH);
4267 } else if (v6_supported) {
4268 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4269 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4271 break;
4272 case OP_TLS_GET:
4273 code = emit_tls_get (code, ins->dreg, ins->inst_offset);
4274 break;
4275 case OP_TLS_SET:
4276 code = emit_tls_set (code, ins->sreg1, ins->inst_offset);
4277 break;
4278 case OP_ATOMIC_EXCHANGE_I4:
4279 case OP_ATOMIC_CAS_I4:
4280 case OP_ATOMIC_ADD_I4: {
4281 int tmpreg;
4282 guint8 *buf [16];
4284 g_assert (v7_supported);
4286 /* Free up a reg */
4287 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4288 tmpreg = ARMREG_IP;
4289 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4290 tmpreg = ARMREG_R0;
4291 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4292 tmpreg = ARMREG_R1;
4293 else
4294 tmpreg = ARMREG_R2;
4295 g_assert (cfg->arch.atomic_tmp_offset != -1);
4296 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4298 switch (ins->opcode) {
4299 case OP_ATOMIC_EXCHANGE_I4:
4300 buf [0] = code;
4301 ARM_DMB (code, ARM_DMB_ISH);
4302 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4303 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4304 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4305 buf [1] = code;
4306 ARM_B_COND (code, ARMCOND_NE, 0);
4307 arm_patch (buf [1], buf [0]);
4308 break;
4309 case OP_ATOMIC_CAS_I4:
4310 ARM_DMB (code, ARM_DMB_ISH);
4311 buf [0] = code;
4312 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4313 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4314 buf [1] = code;
4315 ARM_B_COND (code, ARMCOND_NE, 0);
4316 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4317 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4318 buf [2] = code;
4319 ARM_B_COND (code, ARMCOND_NE, 0);
4320 arm_patch (buf [2], buf [0]);
4321 arm_patch (buf [1], code);
4322 break;
4323 case OP_ATOMIC_ADD_I4:
4324 buf [0] = code;
4325 ARM_DMB (code, ARM_DMB_ISH);
4326 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4327 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4328 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4329 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4330 buf [1] = code;
4331 ARM_B_COND (code, ARMCOND_NE, 0);
4332 arm_patch (buf [1], buf [0]);
4333 break;
4334 default:
4335 g_assert_not_reached ();
4338 ARM_DMB (code, ARM_DMB_ISH);
4339 if (tmpreg != ins->dreg)
4340 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4341 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4342 break;
4344 case OP_ATOMIC_LOAD_I1:
4345 case OP_ATOMIC_LOAD_U1:
4346 case OP_ATOMIC_LOAD_I2:
4347 case OP_ATOMIC_LOAD_U2:
4348 case OP_ATOMIC_LOAD_I4:
4349 case OP_ATOMIC_LOAD_U4:
4350 case OP_ATOMIC_LOAD_R4:
4351 case OP_ATOMIC_LOAD_R8: {
4352 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4353 ARM_DMB (code, ARM_DMB_ISH);
4355 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4357 switch (ins->opcode) {
4358 case OP_ATOMIC_LOAD_I1:
4359 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4360 break;
4361 case OP_ATOMIC_LOAD_U1:
4362 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4363 break;
4364 case OP_ATOMIC_LOAD_I2:
4365 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4366 break;
4367 case OP_ATOMIC_LOAD_U2:
4368 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4369 break;
4370 case OP_ATOMIC_LOAD_I4:
4371 case OP_ATOMIC_LOAD_U4:
4372 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4373 break;
4374 case OP_ATOMIC_LOAD_R4:
4375 if (cfg->r4fp) {
4376 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4377 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4378 } else {
4379 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4380 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4381 ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0);
4382 ARM_CVTS (code, ins->dreg, vfp_scratch1);
4383 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4385 break;
4386 case OP_ATOMIC_LOAD_R8:
4387 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4388 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4389 break;
4392 if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
4393 ARM_DMB (code, ARM_DMB_ISH);
4394 break;
4396 case OP_ATOMIC_STORE_I1:
4397 case OP_ATOMIC_STORE_U1:
4398 case OP_ATOMIC_STORE_I2:
4399 case OP_ATOMIC_STORE_U2:
4400 case OP_ATOMIC_STORE_I4:
4401 case OP_ATOMIC_STORE_U4:
4402 case OP_ATOMIC_STORE_R4:
4403 case OP_ATOMIC_STORE_R8: {
4404 if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
4405 ARM_DMB (code, ARM_DMB_ISH);
4407 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4409 switch (ins->opcode) {
4410 case OP_ATOMIC_STORE_I1:
4411 case OP_ATOMIC_STORE_U1:
4412 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4413 break;
4414 case OP_ATOMIC_STORE_I2:
4415 case OP_ATOMIC_STORE_U2:
4416 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4417 break;
4418 case OP_ATOMIC_STORE_I4:
4419 case OP_ATOMIC_STORE_U4:
4420 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4421 break;
4422 case OP_ATOMIC_STORE_R4:
4423 if (cfg->r4fp) {
4424 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4425 ARM_FSTS (code, ins->sreg1, ARMREG_LR, 0);
4426 } else {
4427 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4428 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4429 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4430 ARM_FSTS (code, vfp_scratch1, ARMREG_LR, 0);
4431 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4433 break;
4434 case OP_ATOMIC_STORE_R8:
4435 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4436 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4437 break;
4440 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4441 ARM_DMB (code, ARM_DMB_ISH);
4442 break;
4444 case OP_BIGMUL:
4445 ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
4446 break;
4447 case OP_BIGMUL_UN:
4448 ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
4449 break;
4450 case OP_STOREI1_MEMBASE_IMM:
4451 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4452 g_assert (arm_is_imm12 (ins->inst_offset));
4453 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4454 break;
4455 case OP_STOREI2_MEMBASE_IMM:
4456 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4457 g_assert (arm_is_imm8 (ins->inst_offset));
4458 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4459 break;
4460 case OP_STORE_MEMBASE_IMM:
4461 case OP_STOREI4_MEMBASE_IMM:
4462 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4463 g_assert (arm_is_imm12 (ins->inst_offset));
4464 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4465 break;
4466 case OP_STOREI1_MEMBASE_REG:
4467 g_assert (arm_is_imm12 (ins->inst_offset));
4468 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4469 break;
4470 case OP_STOREI2_MEMBASE_REG:
4471 g_assert (arm_is_imm8 (ins->inst_offset));
4472 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4473 break;
4474 case OP_STORE_MEMBASE_REG:
4475 case OP_STOREI4_MEMBASE_REG:
4476 /* this case is special, since it happens for spill code after lowering has been called */
4477 if (arm_is_imm12 (ins->inst_offset)) {
4478 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4479 } else {
4480 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4481 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4483 break;
4484 case OP_STOREI1_MEMINDEX:
4485 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4486 break;
4487 case OP_STOREI2_MEMINDEX:
4488 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4489 break;
4490 case OP_STORE_MEMINDEX:
4491 case OP_STOREI4_MEMINDEX:
4492 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4493 break;
4494 case OP_LOADU4_MEM:
4495 g_assert_not_reached ();
4496 break;
4497 case OP_LOAD_MEMINDEX:
4498 case OP_LOADI4_MEMINDEX:
4499 case OP_LOADU4_MEMINDEX:
4500 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4501 break;
4502 case OP_LOADI1_MEMINDEX:
4503 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4504 break;
4505 case OP_LOADU1_MEMINDEX:
4506 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4507 break;
4508 case OP_LOADI2_MEMINDEX:
4509 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4510 break;
4511 case OP_LOADU2_MEMINDEX:
4512 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4513 break;
4514 case OP_LOAD_MEMBASE:
4515 case OP_LOADI4_MEMBASE:
4516 case OP_LOADU4_MEMBASE:
4517 /* this case is special, since it happens for spill code after lowering has been called */
4518 if (arm_is_imm12 (ins->inst_offset)) {
4519 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4520 } else {
4521 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4522 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4524 break;
4525 case OP_LOADI1_MEMBASE:
4526 g_assert (arm_is_imm8 (ins->inst_offset));
4527 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4528 break;
4529 case OP_LOADU1_MEMBASE:
4530 g_assert (arm_is_imm12 (ins->inst_offset));
4531 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4532 break;
4533 case OP_LOADU2_MEMBASE:
4534 g_assert (arm_is_imm8 (ins->inst_offset));
4535 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4536 break;
4537 case OP_LOADI2_MEMBASE:
4538 g_assert (arm_is_imm8 (ins->inst_offset));
4539 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4540 break;
4541 case OP_ICONV_TO_I1:
4542 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4543 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4544 break;
4545 case OP_ICONV_TO_I2:
4546 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4547 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4548 break;
4549 case OP_ICONV_TO_U1:
4550 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4551 break;
4552 case OP_ICONV_TO_U2:
4553 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4554 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4555 break;
4556 case OP_COMPARE:
4557 case OP_ICOMPARE:
4558 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4559 break;
4560 case OP_COMPARE_IMM:
4561 case OP_ICOMPARE_IMM:
4562 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4563 g_assert (imm8 >= 0);
4564 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4565 break;
4566 case OP_BREAK:
4568 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4569 * So instead of emitting a trap, we emit a call a C function and place a
4570 * breakpoint there.
4572 //*(int*)code = 0xef9f0001;
4573 //code += 4;
4574 //ARM_DBRK (code);
4575 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4576 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
4577 code = emit_call_seq (cfg, code);
4578 break;
4579 case OP_RELAXED_NOP:
4580 ARM_NOP (code);
4581 break;
4582 case OP_NOP:
4583 case OP_DUMMY_USE:
4584 case OP_DUMMY_ICONST:
4585 case OP_DUMMY_R8CONST:
4586 case OP_DUMMY_R4CONST:
4587 case OP_NOT_REACHED:
4588 case OP_NOT_NULL:
4589 break;
4590 case OP_IL_SEQ_POINT:
4591 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4592 break;
4593 case OP_SEQ_POINT: {
4594 int i;
4595 MonoInst *info_var = cfg->arch.seq_point_info_var;
4596 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4597 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4598 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4599 MonoInst *var;
4600 int dreg = ARMREG_LR;
4602 #if 0
4603 if (cfg->soft_breakpoints) {
4604 g_assert (!cfg->compile_aot);
4606 #endif
4609 * For AOT, we use one got slot per method, which will point to a
4610 * SeqPointInfo structure, containing all the information required
4611 * by the code below.
4613 if (cfg->compile_aot) {
4614 g_assert (info_var);
4615 g_assert (info_var->opcode == OP_REGOFFSET);
4618 if (!cfg->soft_breakpoints && !cfg->compile_aot) {
4620 * Read from the single stepping trigger page. This will cause a
4621 * SIGSEGV when single stepping is enabled.
4622 * We do this _before_ the breakpoint, so single stepping after
4623 * a breakpoint is hit will step to the next IL offset.
4625 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4628 /* Single step check */
4629 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4630 if (cfg->soft_breakpoints) {
4631 /* Load the address of the sequence point method variable. */
4632 var = ss_method_var;
4633 g_assert (var);
4634 g_assert (var->opcode == OP_REGOFFSET);
4635 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4636 /* Read the value and check whether it is non-zero. */
4637 ARM_LDR_IMM (code, dreg, dreg, 0);
4638 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4639 /* Call it conditionally. */
4640 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4641 } else {
4642 if (cfg->compile_aot) {
4643 /* Load the trigger page addr from the variable initialized in the prolog */
4644 var = ss_trigger_page_var;
4645 g_assert (var);
4646 g_assert (var->opcode == OP_REGOFFSET);
4647 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4648 } else {
4649 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4650 ARM_B (code, 0);
4651 *(int*)code = (int)(gsize)ss_trigger_page;
4652 code += 4;
4654 ARM_LDR_IMM (code, dreg, dreg, 0);
4658 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4660 /* Breakpoint check */
4661 if (cfg->compile_aot) {
4662 const guint32 offset = code - cfg->native_code;
4663 guint32 val;
4665 var = info_var;
4666 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4667 /* Add the offset */
4668 val = ((offset / 4) * sizeof (target_mgreg_t)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4669 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4670 if (arm_is_imm12 ((int)val)) {
4671 ARM_LDR_IMM (code, dreg, dreg, val);
4672 } else {
4673 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4674 if (val & 0xFF00)
4675 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4676 if (val & 0xFF0000)
4677 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4678 g_assert (!(val & 0xFF000000));
4680 ARM_LDR_IMM (code, dreg, dreg, 0);
4682 /* What is faster, a branch or a load ? */
4683 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4684 /* The breakpoint instruction */
4685 if (cfg->soft_breakpoints)
4686 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4687 else
4688 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4689 } else if (cfg->soft_breakpoints) {
4690 /* Load the address of the breakpoint method into ip. */
4691 var = bp_method_var;
4692 g_assert (var);
4693 g_assert (var->opcode == OP_REGOFFSET);
4694 g_assert (arm_is_imm12 (var->inst_offset));
4695 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4698 * A placeholder for a possible breakpoint inserted by
4699 * mono_arch_set_breakpoint ().
4701 ARM_NOP (code);
4702 } else {
4704 * A placeholder for a possible breakpoint inserted by
4705 * mono_arch_set_breakpoint ().
4707 for (i = 0; i < 4; ++i)
4708 ARM_NOP (code);
4712 * Add an additional nop so skipping the bp doesn't cause the ip to point
4713 * to another IL offset.
4716 ARM_NOP (code);
4717 break;
4719 case OP_ADDCC:
4720 case OP_IADDCC:
4721 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4722 break;
4723 case OP_IADD:
4724 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4725 break;
4726 case OP_ADC:
4727 case OP_IADC:
4728 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4729 break;
4730 case OP_ADDCC_IMM:
4731 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4732 g_assert (imm8 >= 0);
4733 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4734 break;
4735 case OP_ADD_IMM:
4736 case OP_IADD_IMM:
4737 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4738 g_assert (imm8 >= 0);
4739 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4740 break;
4741 case OP_ADC_IMM:
4742 case OP_IADC_IMM:
4743 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4744 g_assert (imm8 >= 0);
4745 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4746 break;
4747 case OP_IADD_OVF:
4748 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4749 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4750 break;
4751 case OP_IADD_OVF_UN:
4752 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4753 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4754 break;
4755 case OP_ISUB_OVF:
4756 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4757 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4758 break;
4759 case OP_ISUB_OVF_UN:
4760 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4761 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4762 break;
4763 case OP_ADD_OVF_CARRY:
4764 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4765 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4766 break;
4767 case OP_ADD_OVF_UN_CARRY:
4768 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4769 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4770 break;
4771 case OP_SUB_OVF_CARRY:
4772 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4773 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4774 break;
4775 case OP_SUB_OVF_UN_CARRY:
4776 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4777 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4778 break;
4779 case OP_SUBCC:
4780 case OP_ISUBCC:
4781 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4782 break;
4783 case OP_SUBCC_IMM:
4784 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4785 g_assert (imm8 >= 0);
4786 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4787 break;
4788 case OP_ISUB:
4789 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4790 break;
4791 case OP_SBB:
4792 case OP_ISBB:
4793 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4794 break;
4795 case OP_SUB_IMM:
4796 case OP_ISUB_IMM:
4797 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4798 g_assert (imm8 >= 0);
4799 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4800 break;
4801 case OP_SBB_IMM:
4802 case OP_ISBB_IMM:
4803 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4804 g_assert (imm8 >= 0);
4805 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4806 break;
4807 case OP_ARM_RSBS_IMM:
4808 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4809 g_assert (imm8 >= 0);
4810 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4811 break;
4812 case OP_ARM_RSC_IMM:
4813 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4814 g_assert (imm8 >= 0);
4815 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4816 break;
4817 case OP_IAND:
4818 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4819 break;
4820 case OP_AND_IMM:
4821 case OP_IAND_IMM:
4822 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4823 g_assert (imm8 >= 0);
4824 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4825 break;
4826 case OP_IDIV:
4827 g_assert (v7s_supported || v7k_supported);
4828 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4829 break;
4830 case OP_IDIV_UN:
4831 g_assert (v7s_supported || v7k_supported);
4832 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4833 break;
4834 case OP_IREM:
4835 g_assert (v7s_supported || v7k_supported);
4836 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4837 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4838 break;
4839 case OP_IREM_UN:
4840 g_assert (v7s_supported || v7k_supported);
4841 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4842 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4843 break;
4844 case OP_DIV_IMM:
4845 case OP_REM_IMM:
4846 g_assert_not_reached ();
4847 case OP_IOR:
4848 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4849 break;
4850 case OP_OR_IMM:
4851 case OP_IOR_IMM:
4852 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4853 g_assert (imm8 >= 0);
4854 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4855 break;
4856 case OP_IXOR:
4857 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4858 break;
4859 case OP_XOR_IMM:
4860 case OP_IXOR_IMM:
4861 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4862 g_assert (imm8 >= 0);
4863 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4864 break;
4865 case OP_ISHL:
4866 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4867 break;
4868 case OP_SHL_IMM:
4869 case OP_ISHL_IMM:
4870 if (ins->inst_imm)
4871 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4872 else if (ins->dreg != ins->sreg1)
4873 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4874 break;
4875 case OP_ISHR:
4876 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4877 break;
4878 case OP_SHR_IMM:
4879 case OP_ISHR_IMM:
4880 if (ins->inst_imm)
4881 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4882 else if (ins->dreg != ins->sreg1)
4883 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4884 break;
4885 case OP_SHR_UN_IMM:
4886 case OP_ISHR_UN_IMM:
4887 if (ins->inst_imm)
4888 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4889 else if (ins->dreg != ins->sreg1)
4890 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4891 break;
4892 case OP_ISHR_UN:
4893 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4894 break;
4895 case OP_INOT:
4896 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4897 break;
4898 case OP_INEG:
4899 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4900 break;
4901 case OP_IMUL:
4902 if (ins->dreg == ins->sreg2)
4903 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4904 else
4905 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4906 break;
4907 case OP_MUL_IMM:
4908 g_assert_not_reached ();
4909 break;
4910 case OP_IMUL_OVF:
4911 /* FIXME: handle ovf/ sreg2 != dreg */
4912 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4913 /* FIXME: MUL doesn't set the C/O flags on ARM */
4914 break;
4915 case OP_IMUL_OVF_UN:
4916 /* FIXME: handle ovf/ sreg2 != dreg */
4917 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4918 /* FIXME: MUL doesn't set the C/O flags on ARM */
4919 break;
4920 case OP_ICONST:
4921 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4922 break;
4923 case OP_AOTCONST:
4924 /* Load the GOT offset */
4925 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
4926 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4927 ARM_B (code, 0);
4928 *(gpointer*)code = NULL;
4929 code += 4;
4930 /* Load the value from the GOT */
4931 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4932 break;
4933 case OP_OBJC_GET_SELECTOR:
4934 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4935 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4936 ARM_B (code, 0);
4937 *(gpointer*)code = NULL;
4938 code += 4;
4939 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4940 break;
4941 case OP_ICONV_TO_I4:
4942 case OP_ICONV_TO_U4:
4943 case OP_MOVE:
4944 if (ins->dreg != ins->sreg1)
4945 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4946 break;
4947 case OP_SETLRET: {
4948 int saved = ins->sreg2;
4949 if (ins->sreg2 == ARM_LSW_REG) {
4950 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4951 saved = ARMREG_LR;
4953 if (ins->sreg1 != ARM_LSW_REG)
4954 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4955 if (saved != ARM_MSW_REG)
4956 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4957 break;
4959 case OP_FMOVE:
4960 if (IS_VFP && ins->dreg != ins->sreg1)
4961 ARM_CPYD (code, ins->dreg, ins->sreg1);
4962 break;
4963 case OP_RMOVE:
4964 if (IS_VFP && ins->dreg != ins->sreg1)
4965 ARM_CPYS (code, ins->dreg, ins->sreg1);
4966 break;
4967 case OP_MOVE_F_TO_I4:
4968 if (cfg->r4fp) {
4969 ARM_FMRS (code, ins->dreg, ins->sreg1);
4970 } else {
4971 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4972 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4973 ARM_FMRS (code, ins->dreg, vfp_scratch1);
4974 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4976 break;
4977 case OP_MOVE_I4_TO_F:
4978 if (cfg->r4fp) {
4979 ARM_FMSR (code, ins->dreg, ins->sreg1);
4980 } else {
4981 ARM_FMSR (code, ins->dreg, ins->sreg1);
4982 ARM_CVTS (code, ins->dreg, ins->dreg);
4984 break;
4985 case OP_FCONV_TO_R4:
4986 if (IS_VFP) {
4987 if (cfg->r4fp) {
4988 ARM_CVTD (code, ins->dreg, ins->sreg1);
4989 } else {
4990 ARM_CVTD (code, ins->dreg, ins->sreg1);
4991 ARM_CVTS (code, ins->dreg, ins->dreg);
4994 break;
4996 case OP_TAILCALL_PARAMETER:
4997 // This opcode helps compute sizes, i.e.
4998 // of the subsequent OP_TAILCALL, but contributes no code.
4999 g_assert (ins->next);
5000 break;
5002 case OP_TAILCALL:
5003 case OP_TAILCALL_MEMBASE:
5004 case OP_TAILCALL_REG: {
5005 gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE;
5006 gboolean const tailcall_reg = ins->opcode == OP_TAILCALL_REG;
5007 MonoCallInst *call = (MonoCallInst*)ins;
5009 max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
5011 if (IS_HARD_FLOAT)
5012 code = emit_float_args (cfg, call, code, &max_len, &offset);
5014 code = realloc_code (cfg, max_len);
5016 // For reg and membase, get destination in IP.
5018 if (tailcall_reg) {
5019 g_assert (ins->sreg1 > -1);
5020 if (ins->sreg1 != ARMREG_IP)
5021 ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg1);
5022 } else if (tailcall_membase) {
5023 g_assert (ins->sreg1 > -1);
5024 if (!arm_is_imm12 (ins->inst_offset)) {
5025 g_assert (ins->sreg1 != ARMREG_IP); // temp in emit_big_add
5026 code = emit_big_add (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
5027 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
5028 } else {
5029 ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
5034 * The stack looks like the following:
5035 * <caller argument area>
5036 * <saved regs etc>
5037 * <rest of frame>
5038 * <callee argument area>
5039 * <optionally saved IP> (about to be)
5040 * Need to copy the arguments from the callee argument area to
5041 * the caller argument area, and pop the frame.
5043 if (call->stack_usage) {
5044 int i, prev_sp_offset = 0;
5046 // When we get here, the parameters to the tailcall are already formed,
5047 // in registers and at the bottom of the grow-down stack.
5049 // Our goal is generally preserve parameters, and trim the stack,
5050 // and, before trimming stack, move parameters from the bottom of the
5051 // frame to the bottom of the trimmed frame.
5053 // For the case of large frames, and presently therefore always,
5054 // IP is used as an adjusted frame_reg.
5055 // Be conservative and save IP around the movement
5056 // of parameters from the bottom of frame to top of the frame.
5057 const gboolean save_ip = tailcall_membase || tailcall_reg;
5058 if (save_ip)
5059 ARM_PUSH (code, 1 << ARMREG_IP);
5061 // When moving stacked parameters from the bottom
5062 // of the frame (sp) to the top of the frame (ip),
5063 // account, 0 or 4, for the conditional save of IP.
5064 const int offset_sp = save_ip ? 4 : 0;
5065 const int offset_ip = (save_ip && (cfg->frame_reg == ARMREG_SP)) ? 4 : 0;
5067 /* Compute size of saved registers restored below */
5068 if (iphone_abi)
5069 prev_sp_offset = 2 * 4;
5070 else
5071 prev_sp_offset = 1 * 4;
5072 for (i = 0; i < 16; ++i) {
5073 if (cfg->used_int_regs & (1 << i))
5074 prev_sp_offset += 4;
5077 // Point IP at the start of where the parameters will go after trimming stack.
5078 // After locals and saved registers.
5079 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
5081 /* Copy arguments on the stack to our argument area */
5082 // FIXME a fixed size memcpy is desirable here,
5083 // at least for larger values of stack_usage.
5085 // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
5086 // See https://github.com/mono/mono/pull/12079
5087 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
5088 for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
5089 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i + offset_sp);
5090 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i + offset_ip);
5093 if (save_ip)
5094 ARM_POP (code, 1 << ARMREG_IP);
5098 * Keep in sync with mono_arch_emit_epilog
5100 g_assert (!cfg->method->save_lmf);
5101 code = emit_big_add_temp (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage, ARMREG_LR);
5102 if (iphone_abi) {
5103 if (cfg->used_int_regs)
5104 ARM_POP (code, cfg->used_int_regs);
5105 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5106 } else {
5107 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
5110 if (tailcall_reg || tailcall_membase) {
5111 code = emit_jmp_reg (code, ARMREG_IP);
5112 } else {
5113 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
5115 if (cfg->compile_aot) {
5116 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5117 ARM_B (code, 0);
5118 *(gpointer*)code = NULL;
5119 code += 4;
5120 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
5121 } else {
5122 code = mono_arm_patchable_b (code, ARMCOND_AL);
5123 cfg->thunk_area += THUNK_SIZE;
5126 break;
5128 case OP_CHECK_THIS:
5129 /* ensure ins->sreg1 is not NULL */
5130 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
5131 break;
5132 case OP_ARGLIST: {
5133 g_assert (cfg->sig_cookie < 128);
5134 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5135 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
5136 break;
5138 case OP_FCALL:
5139 case OP_RCALL:
5140 case OP_LCALL:
5141 case OP_VCALL:
5142 case OP_VCALL2:
5143 case OP_VOIDCALL:
5144 case OP_CALL:
5145 call = (MonoCallInst*)ins;
5147 if (IS_HARD_FLOAT)
5148 code = emit_float_args (cfg, call, code, &max_len, &offset);
5150 mono_call_add_patch_info (cfg, call, code - cfg->native_code);
5152 code = emit_call_seq (cfg, code);
5153 ins->flags |= MONO_INST_GC_CALLSITE;
5154 ins->backend.pc_offset = code - cfg->native_code;
5155 code = emit_move_return_value (cfg, ins, code);
5156 break;
5157 case OP_FCALL_REG:
5158 case OP_RCALL_REG:
5159 case OP_LCALL_REG:
5160 case OP_VCALL_REG:
5161 case OP_VCALL2_REG:
5162 case OP_VOIDCALL_REG:
5163 case OP_CALL_REG:
5164 if (IS_HARD_FLOAT)
5165 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
5167 code = emit_call_reg (code, ins->sreg1);
5168 ins->flags |= MONO_INST_GC_CALLSITE;
5169 ins->backend.pc_offset = code - cfg->native_code;
5170 code = emit_move_return_value (cfg, ins, code);
5171 break;
5172 case OP_FCALL_MEMBASE:
5173 case OP_RCALL_MEMBASE:
5174 case OP_LCALL_MEMBASE:
5175 case OP_VCALL_MEMBASE:
5176 case OP_VCALL2_MEMBASE:
5177 case OP_VOIDCALL_MEMBASE:
5178 case OP_CALL_MEMBASE: {
5179 g_assert (ins->sreg1 != ARMREG_LR);
5180 call = (MonoCallInst*)ins;
5182 if (IS_HARD_FLOAT)
5183 code = emit_float_args (cfg, call, code, &max_len, &offset);
5184 if (!arm_is_imm12 (ins->inst_offset)) {
5185 /* sreg1 might be IP */
5186 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
5187 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
5188 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_LR);
5189 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5190 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
5191 } else {
5192 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5193 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
5195 ins->flags |= MONO_INST_GC_CALLSITE;
5196 ins->backend.pc_offset = code - cfg->native_code;
5197 code = emit_move_return_value (cfg, ins, code);
5198 break;
5200 case OP_GENERIC_CLASS_INIT: {
5201 int byte_offset;
5202 guint8 *jump;
5204 byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);
5206 g_assert (arm_is_imm8 (byte_offset));
5207 ARM_LDRSB_IMM (code, ARMREG_IP, ins->sreg1, byte_offset);
5208 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
5209 jump = code;
5210 ARM_B_COND (code, ARMCOND_NE, 0);
5212 /* Uninitialized case */
5213 g_assert (ins->sreg1 == ARMREG_R0);
5215 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5216 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
5217 code = emit_call_seq (cfg, code);
5219 /* Initialized case */
5220 arm_patch (jump, code);
5221 break;
5223 case OP_LOCALLOC: {
5224 /* round the size to 8 bytes */
5225 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
5226 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1));
5227 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
5228 /* memzero the area: dreg holds the size, sp is the pointer */
5229 if (ins->flags & MONO_INST_INIT) {
5230 guint8 *start_loop, *branch_to_cond;
5231 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
5232 branch_to_cond = code;
5233 ARM_B (code, 0);
5234 start_loop = code;
5235 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
5236 arm_patch (branch_to_cond, code);
5237 /* decrement by 4 and set flags */
5238 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (target_mgreg_t));
5239 ARM_B_COND (code, ARMCOND_GE, 0);
5240 arm_patch (code - 4, start_loop);
5242 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
5243 if (cfg->param_area)
5244 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
5245 break;
5247 case OP_DYN_CALL: {
5248 int i;
5249 MonoInst *var = cfg->dyn_call_var;
5250 guint8 *labels [16];
5252 g_assert (var->opcode == OP_REGOFFSET);
5253 g_assert (arm_is_imm12 (var->inst_offset));
5255 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5256 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
5257 /* ip = ftn */
5258 ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg2);
5260 /* Save args buffer */
5261 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5263 /* Set fp argument registers */
5264 if (IS_HARD_FLOAT) {
5265 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, has_fpregs));
5266 ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0);
5267 labels [0] = code;
5268 ARM_B_COND (code, ARMCOND_EQ, 0);
5269 for (i = 0; i < FP_PARAM_REGS; ++i) {
5270 const int offset = MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * sizeof (double));
5271 g_assert (arm_is_fpimm8 (offset));
5272 ARM_FLDD (code, i * 2, ARMREG_LR, offset);
5274 arm_patch (labels [0], code);
5277 /* Allocate callee area */
5278 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
5279 ARM_SHL_IMM (code, ARMREG_R1, ARMREG_R1, 2);
5280 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R1);
5282 /* Set stack args */
5283 /* R1 = limit */
5284 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
5285 /* R2 = pointer into regs */
5286 code = emit_big_add (code, ARMREG_R2, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (PARAM_REGS * sizeof (target_mgreg_t)));
5287 /* R3 = pointer to stack */
5288 ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP);
5289 /* Loop */
5290 labels [0] = code;
5291 ARM_B_COND (code, ARMCOND_AL, 0);
5292 labels [1] = code;
5293 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R2, 0);
5294 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R3, 0);
5295 ARM_ADD_REG_IMM (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t), 0);
5296 ARM_ADD_REG_IMM (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t), 0);
5297 ARM_SUB_REG_IMM (code, ARMREG_R1, ARMREG_R1, 1, 0);
5298 arm_patch (labels [0], code);
5299 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
5300 labels [2] = code;
5301 ARM_B_COND (code, ARMCOND_GT, 0);
5302 arm_patch (labels [2], labels [1]);
5304 /* Set argument registers */
5305 for (i = 0; i < PARAM_REGS; ++i)
5306 ARM_LDR_IMM (code, i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)));
5308 /* Make the call */
5309 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5310 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5312 /* Save result */
5313 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5314 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5315 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5316 if (IS_HARD_FLOAT)
5317 ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, fpregs));
5318 break;
5320 case OP_THROW: {
5321 if (ins->sreg1 != ARMREG_R0)
5322 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5323 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5324 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
5325 code = emit_call_seq (cfg, code);
5326 break;
5328 case OP_RETHROW: {
5329 if (ins->sreg1 != ARMREG_R0)
5330 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5331 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5332 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
5333 code = emit_call_seq (cfg, code);
5334 break;
5336 case OP_START_HANDLER: {
5337 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5338 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5339 int i, rot_amount;
5341 /* Reserve a param area, see filter-stack.exe */
5342 if (param_area) {
5343 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5344 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5345 } else {
5346 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5347 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5351 if (arm_is_imm12 (spvar->inst_offset)) {
5352 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5353 } else {
5354 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5355 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5357 break;
5359 case OP_ENDFILTER: {
5360 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5361 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5362 int i, rot_amount;
5364 /* Free the param area */
5365 if (param_area) {
5366 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5367 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5368 } else {
5369 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5370 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5374 if (ins->sreg1 != ARMREG_R0)
5375 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5376 if (arm_is_imm12 (spvar->inst_offset)) {
5377 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5378 } else {
5379 g_assert (ARMREG_IP != spvar->inst_basereg);
5380 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5381 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5383 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5384 break;
5386 case OP_ENDFINALLY: {
5387 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5388 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5389 int i, rot_amount;
5391 /* Free the param area */
5392 if (param_area) {
5393 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5394 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5395 } else {
5396 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5397 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5401 if (arm_is_imm12 (spvar->inst_offset)) {
5402 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5403 } else {
5404 g_assert (ARMREG_IP != spvar->inst_basereg);
5405 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5406 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5408 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5409 break;
5411 case OP_CALL_HANDLER:
5412 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5413 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5414 cfg->thunk_area += THUNK_SIZE;
5415 for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
5416 mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
5417 break;
5418 case OP_GET_EX_OBJ:
5419 if (ins->dreg != ARMREG_R0)
5420 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0);
5421 break;
5423 case OP_LABEL:
5424 ins->inst_c0 = code - cfg->native_code;
5425 break;
5426 case OP_BR:
5427 /*if (ins->inst_target_bb->native_offset) {
5428 ARM_B (code, 0);
5429 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5430 } else*/ {
5431 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5432 code = mono_arm_patchable_b (code, ARMCOND_AL);
5434 break;
5435 case OP_BR_REG:
5436 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5437 break;
5438 case OP_SWITCH:
5440 * In the normal case we have:
5441 * ldr pc, [pc, ins->sreg1 << 2]
5442 * nop
5443 * If aot, we have:
5444 * ldr lr, [pc, ins->sreg1 << 2]
5445 * add pc, pc, lr
5446 * After follows the data.
5447 * FIXME: add aot support.
5449 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5450 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5451 code = realloc_code (cfg, max_len);
5452 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5453 ARM_NOP (code);
5454 code += 4 * GPOINTER_TO_INT (ins->klass);
5455 break;
5456 case OP_CEQ:
5457 case OP_ICEQ:
5458 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5459 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5460 break;
5461 case OP_CLT:
5462 case OP_ICLT:
5463 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5464 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5465 break;
5466 case OP_CLT_UN:
5467 case OP_ICLT_UN:
5468 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5469 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5470 break;
5471 case OP_CGT:
5472 case OP_ICGT:
5473 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5474 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5475 break;
5476 case OP_CGT_UN:
5477 case OP_ICGT_UN:
5478 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5479 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5480 break;
5481 case OP_ICNEQ:
5482 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5483 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5484 break;
5485 case OP_ICGE:
5486 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5487 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5488 break;
5489 case OP_ICLE:
5490 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5491 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5492 break;
5493 case OP_ICGE_UN:
5494 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5495 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5496 break;
5497 case OP_ICLE_UN:
5498 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5499 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
5500 break;
5501 case OP_COND_EXC_EQ:
5502 case OP_COND_EXC_NE_UN:
5503 case OP_COND_EXC_LT:
5504 case OP_COND_EXC_LT_UN:
5505 case OP_COND_EXC_GT:
5506 case OP_COND_EXC_GT_UN:
5507 case OP_COND_EXC_GE:
5508 case OP_COND_EXC_GE_UN:
5509 case OP_COND_EXC_LE:
5510 case OP_COND_EXC_LE_UN:
5511 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5512 break;
5513 case OP_COND_EXC_IEQ:
5514 case OP_COND_EXC_INE_UN:
5515 case OP_COND_EXC_ILT:
5516 case OP_COND_EXC_ILT_UN:
5517 case OP_COND_EXC_IGT:
5518 case OP_COND_EXC_IGT_UN:
5519 case OP_COND_EXC_IGE:
5520 case OP_COND_EXC_IGE_UN:
5521 case OP_COND_EXC_ILE:
5522 case OP_COND_EXC_ILE_UN:
5523 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5524 break;
5525 case OP_COND_EXC_C:
5526 case OP_COND_EXC_IC:
5527 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5528 break;
5529 case OP_COND_EXC_OV:
5530 case OP_COND_EXC_IOV:
5531 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5532 break;
5533 case OP_COND_EXC_NC:
5534 case OP_COND_EXC_INC:
5535 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5536 break;
5537 case OP_COND_EXC_NO:
5538 case OP_COND_EXC_INO:
5539 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5540 break;
5541 case OP_IBEQ:
5542 case OP_IBNE_UN:
5543 case OP_IBLT:
5544 case OP_IBLT_UN:
5545 case OP_IBGT:
5546 case OP_IBGT_UN:
5547 case OP_IBGE:
5548 case OP_IBGE_UN:
5549 case OP_IBLE:
5550 case OP_IBLE_UN:
5551 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5552 break;
5554 /* floating point opcodes */
5555 case OP_R8CONST:
5556 if (cfg->compile_aot) {
5557 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5558 ARM_B (code, 1);
5559 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5560 code += 4;
5561 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5562 code += 4;
5563 } else {
5564 /* FIXME: we can optimize the imm load by dealing with part of
5565 * the displacement in LDFD (aligning to 512).
5567 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
5568 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5570 break;
5571 case OP_R4CONST:
5572 if (cfg->compile_aot) {
5573 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5574 ARM_B (code, 0);
5575 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5576 code += 4;
5577 if (!cfg->r4fp)
5578 ARM_CVTS (code, ins->dreg, ins->dreg);
5579 } else {
5580 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
5581 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5582 if (!cfg->r4fp)
5583 ARM_CVTS (code, ins->dreg, ins->dreg);
5585 break;
5586 case OP_STORER8_MEMBASE_REG:
5587 /* This is generated by the local regalloc pass which runs after the lowering pass */
5588 if (!arm_is_fpimm8 (ins->inst_offset)) {
5589 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5590 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5591 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5592 } else {
5593 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5595 break;
5596 case OP_LOADR8_MEMBASE:
5597 /* This is generated by the local regalloc pass which runs after the lowering pass */
5598 if (!arm_is_fpimm8 (ins->inst_offset)) {
5599 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5600 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5601 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5602 } else {
5603 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5605 break;
5606 case OP_STORER4_MEMBASE_REG:
5607 g_assert (arm_is_fpimm8 (ins->inst_offset));
5608 if (cfg->r4fp) {
5609 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5610 } else {
5611 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5612 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5613 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5614 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5616 break;
5617 case OP_LOADR4_MEMBASE:
5618 if (cfg->r4fp) {
5619 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5620 } else {
5621 g_assert (arm_is_fpimm8 (ins->inst_offset));
5622 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5623 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5624 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5625 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5627 break;
5628 case OP_ICONV_TO_R_UN: {
5629 g_assert_not_reached ();
5630 break;
5632 case OP_ICONV_TO_R4:
5633 if (cfg->r4fp) {
5634 ARM_FMSR (code, ins->dreg, ins->sreg1);
5635 ARM_FSITOS (code, ins->dreg, ins->dreg);
5636 } else {
5637 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5638 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5639 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5640 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5641 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5643 break;
5644 case OP_ICONV_TO_R8:
5645 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5646 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5647 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5648 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5649 break;
5651 case OP_SETFRET: {
5652 MonoType *sig_ret = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
5653 if (sig_ret->type == MONO_TYPE_R4) {
5654 if (cfg->r4fp) {
5655 if (IS_HARD_FLOAT) {
5656 if (ins->sreg1 != ARM_VFP_D0)
5657 ARM_CPYS (code, ARM_VFP_D0, ins->sreg1);
5658 } else {
5659 ARM_FMRS (code, ARMREG_R0, ins->sreg1);
5661 } else {
5662 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5664 if (!IS_HARD_FLOAT)
5665 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5667 } else {
5668 if (IS_HARD_FLOAT)
5669 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5670 else
5671 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5673 break;
5675 case OP_FCONV_TO_I1:
5676 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5677 break;
5678 case OP_FCONV_TO_U1:
5679 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5680 break;
5681 case OP_FCONV_TO_I2:
5682 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5683 break;
5684 case OP_FCONV_TO_U2:
5685 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5686 break;
5687 case OP_FCONV_TO_I4:
5688 case OP_FCONV_TO_I:
5689 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5690 break;
5691 case OP_FCONV_TO_U4:
5692 case OP_FCONV_TO_U:
5693 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5694 break;
5695 case OP_FCONV_TO_I8:
5696 case OP_FCONV_TO_U8:
5697 g_assert_not_reached ();
5698 /* Implemented as helper calls */
5699 break;
5700 case OP_LCONV_TO_R_UN:
5701 g_assert_not_reached ();
5702 /* Implemented as helper calls */
5703 break;
5704 case OP_LCONV_TO_OVF_I4_2: {
5705 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5707 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5710 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5711 high_bit_not_set = code;
5712 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5714 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5715 valid_negative = code;
5716 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5717 invalid_negative = code;
5718 ARM_B_COND (code, ARMCOND_AL, 0);
5720 arm_patch (high_bit_not_set, code);
5722 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5723 valid_positive = code;
5724 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5726 arm_patch (invalid_negative, code);
5727 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5729 arm_patch (valid_negative, code);
5730 arm_patch (valid_positive, code);
5732 if (ins->dreg != ins->sreg1)
5733 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5734 break;
5736 case OP_FADD:
5737 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5738 break;
5739 case OP_FSUB:
5740 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5741 break;
5742 case OP_FMUL:
5743 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5744 break;
5745 case OP_FDIV:
5746 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5747 break;
5748 case OP_FNEG:
5749 ARM_NEGD (code, ins->dreg, ins->sreg1);
5750 break;
5751 case OP_FREM:
5752 /* emulated */
5753 g_assert_not_reached ();
5754 break;
5755 case OP_FCOMPARE:
5756 if (IS_VFP) {
5757 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5758 ARM_FMSTAT (code);
5760 break;
5761 case OP_RCOMPARE:
5762 g_assert (IS_VFP);
5763 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5764 ARM_FMSTAT (code);
5765 break;
5766 case OP_FCEQ:
5767 if (IS_VFP) {
5768 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5769 ARM_FMSTAT (code);
5771 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5772 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5773 break;
5774 case OP_FCLT:
5775 if (IS_VFP) {
5776 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5777 ARM_FMSTAT (code);
5779 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5780 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5781 break;
5782 case OP_FCLT_UN:
5783 if (IS_VFP) {
5784 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5785 ARM_FMSTAT (code);
5787 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5788 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5789 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5790 break;
5791 case OP_FCGT:
5792 if (IS_VFP) {
5793 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5794 ARM_FMSTAT (code);
5796 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5797 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5798 break;
5799 case OP_FCGT_UN:
5800 if (IS_VFP) {
5801 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5802 ARM_FMSTAT (code);
5804 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5805 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5806 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5807 break;
5808 case OP_FCNEQ:
5809 if (IS_VFP) {
5810 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5811 ARM_FMSTAT (code);
5813 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5814 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5815 break;
5816 case OP_FCGE:
5817 if (IS_VFP) {
5818 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5819 ARM_FMSTAT (code);
5821 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5822 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5823 break;
5824 case OP_FCLE:
5825 if (IS_VFP) {
5826 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5827 ARM_FMSTAT (code);
5829 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5830 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5831 break;
5833 /* ARM FPA flags table:
5834 * N Less than ARMCOND_MI
5835 * Z Equal ARMCOND_EQ
5836 * C Greater Than or Equal ARMCOND_CS
5837 * V Unordered ARMCOND_VS
5839 case OP_FBEQ:
5840 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5841 break;
5842 case OP_FBNE_UN:
5843 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5844 break;
5845 case OP_FBLT:
5846 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5847 break;
5848 case OP_FBLT_UN:
5849 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5850 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5851 break;
5852 case OP_FBGT:
5853 case OP_FBGT_UN:
5854 case OP_FBLE:
5855 case OP_FBLE_UN:
5856 g_assert_not_reached ();
5857 break;
5858 case OP_FBGE:
5859 if (IS_VFP) {
5860 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5861 } else {
5862 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5863 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5864 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5866 break;
5867 case OP_FBGE_UN:
5868 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5869 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5870 break;
5872 case OP_CKFINITE: {
5873 if (IS_VFP) {
5874 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5875 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5877 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5878 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5879 ARM_B (code, 1);
5880 *(guint32*)code = 0xffffffff;
5881 code += 4;
5882 *(guint32*)code = 0x7fefffff;
5883 code += 4;
5884 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5885 ARM_FMSTAT (code);
5886 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "OverflowException");
5887 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5888 ARM_FMSTAT (code);
5889 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "OverflowException");
5890 ARM_CPYD (code, ins->dreg, ins->sreg1);
5892 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5893 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5895 break;
5898 case OP_RCONV_TO_I1:
5899 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5900 break;
5901 case OP_RCONV_TO_U1:
5902 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5903 break;
5904 case OP_RCONV_TO_I2:
5905 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5906 break;
5907 case OP_RCONV_TO_U2:
5908 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5909 break;
5910 case OP_RCONV_TO_I4:
5911 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5912 break;
5913 case OP_RCONV_TO_U4:
5914 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5915 break;
5916 case OP_RCONV_TO_R4:
5917 g_assert (IS_VFP);
5918 if (ins->dreg != ins->sreg1)
5919 ARM_CPYS (code, ins->dreg, ins->sreg1);
5920 break;
5921 case OP_RCONV_TO_R8:
5922 g_assert (IS_VFP);
5923 ARM_CVTS (code, ins->dreg, ins->sreg1);
5924 break;
5925 case OP_RADD:
5926 ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
5927 break;
5928 case OP_RSUB:
5929 ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
5930 break;
5931 case OP_RMUL:
5932 ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
5933 break;
5934 case OP_RDIV:
5935 ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
5936 break;
5937 case OP_RNEG:
5938 ARM_NEGS (code, ins->dreg, ins->sreg1);
5939 break;
5940 case OP_RCEQ:
5941 if (IS_VFP) {
5942 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5943 ARM_FMSTAT (code);
5945 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5946 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5947 break;
5948 case OP_RCLT:
5949 if (IS_VFP) {
5950 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5951 ARM_FMSTAT (code);
5953 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5954 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5955 break;
5956 case OP_RCLT_UN:
5957 if (IS_VFP) {
5958 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5959 ARM_FMSTAT (code);
5961 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5962 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5963 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5964 break;
5965 case OP_RCGT:
5966 if (IS_VFP) {
5967 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5968 ARM_FMSTAT (code);
5970 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5971 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5972 break;
5973 case OP_RCGT_UN:
5974 if (IS_VFP) {
5975 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5976 ARM_FMSTAT (code);
5978 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5979 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5980 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5981 break;
5982 case OP_RCNEQ:
5983 if (IS_VFP) {
5984 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5985 ARM_FMSTAT (code);
5987 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5988 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5989 break;
5990 case OP_RCGE:
5991 if (IS_VFP) {
5992 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5993 ARM_FMSTAT (code);
5995 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5996 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5997 break;
5998 case OP_RCLE:
5999 if (IS_VFP) {
6000 ARM_CMPS (code, ins->sreg2, ins->sreg1);
6001 ARM_FMSTAT (code);
6003 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
6004 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
6005 break;
6007 case OP_GC_LIVENESS_DEF:
6008 case OP_GC_LIVENESS_USE:
6009 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
6010 ins->backend.pc_offset = code - cfg->native_code;
6011 break;
6012 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
6013 ins->backend.pc_offset = code - cfg->native_code;
6014 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
6015 break;
6016 case OP_LIVERANGE_START: {
6017 if (cfg->verbose_level > 1)
6018 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
6019 MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
6020 break;
6022 case OP_LIVERANGE_END: {
6023 if (cfg->verbose_level > 1)
6024 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
6025 MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
6026 break;
6028 case OP_GC_SAFE_POINT: {
6029 guint8 *buf [1];
6031 ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
6032 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
6033 buf [0] = code;
6034 ARM_B_COND (code, ARMCOND_EQ, 0);
6035 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
6036 code = emit_call_seq (cfg, code);
6037 arm_patch (buf [0], code);
6038 break;
6040 case OP_FILL_PROF_CALL_CTX:
6041 for (int i = 0; i < ARMREG_MAX; i++)
6042 if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
6043 ARM_STR_IMM (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
6044 break;
6045 default:
6046 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
6047 g_assert_not_reached ();
6050 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
6051 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
6052 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
6053 g_assert_not_reached ();
6056 cpos += max_len;
6058 last_ins = ins;
6061 set_code_cursor (cfg, code);
6064 #endif /* DISABLE_JIT */
/*
 * mono_arch_register_lowlevel_calls:
 * Register the ARM low-level helpers as JIT icalls so emitted code can
 * reference them through their MONO_JIT_ICALL_* ids (e.g. the prolog's
 * stack-alignment check calls mono_arm_unaligned_stack this way).
 */
void
mono_arch_register_lowlevel_calls (void)
{
	/* The signature doesn't matter */
	mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE);
	mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE);
	mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE);
}
/*
 * patch_lis_ori:
 * Patch the two 16-bit immediate halves of a lis/ori instruction pair at IP
 * with the value VAL (high half into halfword 1, low half into halfword 3).
 * NOTE(review): lis/ori are PowerPC mnemonics, and every use of this macro in
 * this file sits behind g_assert_not_reached () — this appears to be dead
 * code inherited from another backend; confirm before relying on it.
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip); \
		__lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \
		__lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff; \
	} while (0)
/*
 * mono_arch_patch_code_new:
 * Apply the relocation described by JI to the native code buffer CODE,
 * resolving it to TARGET.  Only the inline switch jump table is filled in
 * here; most patch kinds are delegated to arm_patch_general () in the
 * default case.  Several cases are unreachable leftovers (see notes below).
 */
void
mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
	unsigned char *ip = ji->ip.i + code;

	/* NOTE(review): empty branch — looks like a debugging hook leftover. */
	if (ji->type == MONO_PATCH_INFO_SWITCH) {
	}

	switch (ji->type) {
	case MONO_PATCH_INFO_SWITCH: {
		gpointer *jt = (gpointer*)(ip + 8);
		int i;
		/* jt is the inlined jump table, 2 instructions after ip
		 * In the normal case we store the absolute addresses,
		 * otherwise the displacements.
		 */
		for (i = 0; i < ji->data.table->table_size; i++)
			jt [i] = code + (int)(gsize)ji->data.table->table [i];
		break;
	}
	case MONO_PATCH_INFO_IP:
		/* NOTE(review): unreachable — lis/ori is PPC-style patching. */
		g_assert_not_reached ();
		patch_lis_ori (ip, ip);
		break;
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_VTABLE:
	case MONO_PATCH_INFO_IID:
	case MONO_PATCH_INFO_SFLDA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
		/* NOTE(review): unreachable — lis/ori is PPC-style patching. */
		g_assert_not_reached ();
		/* from OP_AOTCONST : lis + ori */
		patch_lis_ori (ip, target);
		break;
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8:
		/* NOTE(review): unreachable leftover. */
		g_assert_not_reached ();
		*((gconstpointer *)(ip + 2)) = target;
		break;
	case MONO_PATCH_INFO_EXC_NAME:
		/* NOTE(review): unreachable leftover. */
		g_assert_not_reached ();
		*((gconstpointer *)(ip + 1)) = target;
		break;
	case MONO_PATCH_INFO_NONE:
	case MONO_PATCH_INFO_BB_OVF:
	case MONO_PATCH_INFO_EXC_OVF:
		/* everything is dealt with at epilog output time */
		break;
	default:
		/* All remaining patch kinds go through the generic ARM patcher. */
		arm_patch_general (cfg, domain, ip, (const guchar*)target);
		break;
	}
}
/*
 * mono_arm_unaligned_stack:
 * JIT icall invoked by the stack-alignment check emitted in the method
 * prolog when SP is found not to be MONO_ARCH_FRAME_ALIGNMENT-aligned.
 * A misaligned stack is a JIT bug, so this simply asserts (aborts).
 */
void
mono_arm_unaligned_stack (MonoMethod *method)
{
	g_assert_not_reached ();
}
6145 #ifndef DISABLE_JIT
6148 * Stack frame layout:
6150 * ------------------- fp
6151 * MonoLMF structure or saved registers
6152 * -------------------
6153 * locals
6154 * -------------------
6155 * spilled regs
6156 * -------------------
6157 * param area size is cfg->param_area
6158 * ------------------- sp
6160 guint8 *
6161 mono_arch_emit_prolog (MonoCompile *cfg)
6163 MonoMethod *method = cfg->method;
6164 MonoBasicBlock *bb;
6165 MonoMethodSignature *sig;
6166 MonoInst *inst;
6167 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part;
6168 guint8 *code;
6169 CallInfo *cinfo;
6170 int lmf_offset = 0;
6171 int prev_sp_offset, reg_offset;
6173 sig = mono_method_signature_internal (method);
6174 cfg->code_size = 256 + sig->param_count * 64;
6175 code = cfg->native_code = g_malloc (cfg->code_size);
6177 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
6179 alloc_size = cfg->stack_offset;
6180 pos = 0;
6181 prev_sp_offset = 0;
6183 if (iphone_abi) {
6185 * The iphone uses R7 as the frame pointer, and it points at the saved
6186 * r7+lr:
6187 * <lr>
6188 * r7 -> <r7>
6189 * <rest of frame>
6190 * We can't use r7 as a frame pointer since it points into the middle of
6191 * the frame, so we keep using our own frame pointer.
6192 * FIXME: Optimize this.
6194 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
6195 prev_sp_offset += 8; /* r7 and lr */
6196 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6197 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
6198 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
6201 if (!method->save_lmf) {
6202 if (iphone_abi) {
6203 /* No need to push LR again */
6204 if (cfg->used_int_regs)
6205 ARM_PUSH (code, cfg->used_int_regs);
6206 } else {
6207 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
6208 prev_sp_offset += 4;
6210 for (i = 0; i < 16; ++i) {
6211 if (cfg->used_int_regs & (1 << i))
6212 prev_sp_offset += 4;
6214 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6215 reg_offset = 0;
6216 for (i = 0; i < 16; ++i) {
6217 if ((cfg->used_int_regs & (1 << i))) {
6218 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6219 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
6220 reg_offset += 4;
6223 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
6224 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
6225 } else {
6226 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
6227 ARM_PUSH (code, 0x5ff0);
6228 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
6229 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6230 reg_offset = 0;
6231 for (i = 0; i < 16; ++i) {
6232 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
6233 /* The original r7 is saved at the start */
6234 if (!(iphone_abi && i == ARMREG_R7))
6235 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6236 reg_offset += 4;
6239 g_assert (reg_offset == 4 * 10);
6240 pos += MONO_ABI_SIZEOF (MonoLMF) - (4 * 10);
6241 lmf_offset = pos;
6243 alloc_size += pos;
6244 orig_alloc_size = alloc_size;
6245 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6246 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
6247 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
6248 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
6251 /* the stack used in the pushed regs */
6252 alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
6253 cfg->stack_usage = alloc_size;
6254 if (alloc_size) {
6255 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
6256 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
6257 } else {
6258 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
6259 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
6261 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
6263 if (cfg->frame_reg != ARMREG_SP) {
6264 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
6265 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
6267 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6268 prev_sp_offset += alloc_size;
6270 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
6271 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
6273 /* compute max_offset in order to use short forward jumps
6274 * we could skip do it on arm because the immediate displacement
6275 * for jumps is large enough, it may be useful later for constant pools
6277 max_offset = 0;
6278 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
6279 MonoInst *ins = bb->code;
6280 bb->max_offset = max_offset;
6282 MONO_BB_FOR_EACH_INS (bb, ins)
6283 max_offset += ins_get_size (ins->opcode);
6286 /* stack alignment check */
6289 guint8 *buf [16];
6290 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6291 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6292 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6293 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6294 buf [0] = code;
6295 ARM_B_COND (code, ARMCOND_EQ, 0);
6296 if (cfg->compile_aot)
6297 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6298 else
6299 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6300 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
6301 code = emit_call_seq (cfg, code);
6302 arm_patch (buf [0], code);
6306 /* store runtime generic context */
6307 if (cfg->rgctx_var) {
6308 MonoInst *ins = cfg->rgctx_var;
6310 g_assert (ins->opcode == OP_REGOFFSET);
6312 if (arm_is_imm12 (ins->inst_offset)) {
6313 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
6314 } else {
6315 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6316 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
6319 mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
6320 mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
6323 /* load arguments allocated to register from the stack */
6324 cinfo = get_call_info (NULL, sig);
6326 if (cinfo->ret.storage == RegTypeStructByAddr) {
6327 ArgInfo *ainfo = &cinfo->ret;
6328 inst = cfg->vret_addr;
6329 g_assert (arm_is_imm12 (inst->inst_offset));
6330 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6333 if (sig->call_convention == MONO_CALL_VARARG) {
6334 ArgInfo *cookie = &cinfo->sig_cookie;
6336 /* Save the sig cookie address */
6337 g_assert (cookie->storage == RegTypeBase);
6339 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
6340 g_assert (arm_is_imm12 (cfg->sig_cookie));
6341 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
6342 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
6345 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6346 ArgInfo *ainfo = cinfo->args + i;
6347 inst = cfg->args [i];
6349 if (cfg->verbose_level > 2)
6350 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
6352 if (inst->opcode == OP_REGVAR) {
6353 if (ainfo->storage == RegTypeGeneral)
6354 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
6355 else if (ainfo->storage == RegTypeFP) {
6356 g_assert_not_reached ();
6357 } else if (ainfo->storage == RegTypeBase) {
6358 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6359 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6360 } else {
6361 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6362 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
6364 } else
6365 g_assert_not_reached ();
6367 if (i == 0 && sig->hasthis) {
6368 g_assert (ainfo->storage == RegTypeGeneral);
6369 mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
6370 mono_add_var_location (cfg, inst, TRUE, inst->dreg, 0, code - cfg->native_code, 0);
6373 if (cfg->verbose_level > 2)
6374 g_print ("Argument %d assigned to register %s\n", i, mono_arch_regname (inst->dreg));
6375 } else {
6376 switch (ainfo->storage) {
6377 case RegTypeHFA:
6378 for (part = 0; part < ainfo->nregs; part ++) {
6379 if (ainfo->esize == 4)
6380 ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
6381 else
6382 ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
6384 break;
6385 case RegTypeGeneral:
6386 case RegTypeIRegPair:
6387 case RegTypeGSharedVtInReg:
6388 case RegTypeStructByAddr:
6389 switch (ainfo->size) {
6390 case 1:
6391 if (arm_is_imm12 (inst->inst_offset))
6392 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6393 else {
6394 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6395 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6397 break;
6398 case 2:
6399 if (arm_is_imm8 (inst->inst_offset)) {
6400 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6401 } else {
6402 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6403 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6405 break;
6406 case 8:
6407 if (arm_is_imm12 (inst->inst_offset)) {
6408 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6409 } else {
6410 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6411 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6413 if (arm_is_imm12 (inst->inst_offset + 4)) {
6414 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
6415 } else {
6416 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6417 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
6419 break;
6420 default:
6421 if (arm_is_imm12 (inst->inst_offset)) {
6422 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6423 } else {
6424 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6425 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6427 break;
6429 if (i == 0 && sig->hasthis) {
6430 g_assert (ainfo->storage == RegTypeGeneral);
6431 mono_add_var_location (cfg, inst, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
6432 mono_add_var_location (cfg, inst, FALSE, inst->inst_basereg, inst->inst_offset, code - cfg->native_code, 0);
6434 break;
6435 case RegTypeBaseGen:
6436 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6437 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6438 } else {
6439 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6440 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6442 if (arm_is_imm12 (inst->inst_offset + 4)) {
6443 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6444 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6445 } else {
6446 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6447 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6448 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6449 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
6451 break;
6452 case RegTypeBase:
6453 case RegTypeGSharedVtOnStack:
6454 case RegTypeStructByAddrOnStack:
6455 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6456 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6457 } else {
6458 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6459 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6462 switch (ainfo->size) {
6463 case 1:
6464 if (arm_is_imm8 (inst->inst_offset)) {
6465 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6466 } else {
6467 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6468 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6470 break;
6471 case 2:
6472 if (arm_is_imm8 (inst->inst_offset)) {
6473 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6474 } else {
6475 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6476 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6478 break;
6479 case 8:
6480 if (arm_is_imm12 (inst->inst_offset)) {
6481 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6482 } else {
6483 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6484 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6486 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6487 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6488 } else {
6489 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6490 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6492 if (arm_is_imm12 (inst->inst_offset + 4)) {
6493 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6494 } else {
6495 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6496 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6498 break;
6499 default:
6500 if (arm_is_imm12 (inst->inst_offset)) {
6501 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6502 } else {
6503 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6504 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6506 break;
6508 break;
6509 case RegTypeFP: {
6510 int imm8, rot_amount;
6512 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6513 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6514 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6515 } else
6516 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6518 if (ainfo->size == 8)
6519 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6520 else
6521 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6522 break;
6524 case RegTypeStructByVal: {
6525 int doffset = inst->inst_offset;
6526 int soffset = 0;
6527 int cur_reg;
6528 int size = 0;
6529 size = mini_type_stack_size_full (inst->inst_vtype, NULL, sig->pinvoke);
6530 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6531 if (arm_is_imm12 (doffset)) {
6532 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6533 } else {
6534 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6535 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6537 soffset += sizeof (target_mgreg_t);
6538 doffset += sizeof (target_mgreg_t);
6540 if (ainfo->vtsize) {
6541 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6542 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6543 code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6545 break;
6547 default:
6548 g_assert_not_reached ();
6549 break;
6554 if (method->save_lmf)
6555 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6557 if (cfg->arch.seq_point_info_var) {
6558 MonoInst *ins = cfg->arch.seq_point_info_var;
6560 /* Initialize the variable from a GOT slot */
6561 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6562 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6563 ARM_B (code, 0);
6564 *(gpointer*)code = NULL;
6565 code += 4;
6566 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6568 g_assert (ins->opcode == OP_REGOFFSET);
6570 if (arm_is_imm12 (ins->inst_offset)) {
6571 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6572 } else {
6573 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6574 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6578 /* Initialize ss_trigger_page_var */
6579 if (!cfg->soft_breakpoints) {
6580 MonoInst *info_var = cfg->arch.seq_point_info_var;
6581 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6582 int dreg = ARMREG_LR;
6584 if (info_var) {
6585 g_assert (info_var->opcode == OP_REGOFFSET);
6587 code = emit_ldr_imm (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6588 /* Load the trigger page addr */
6589 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6590 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6594 if (cfg->arch.seq_point_ss_method_var) {
6595 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6596 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6598 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6599 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6601 if (cfg->compile_aot) {
6602 MonoInst *info_var = cfg->arch.seq_point_info_var;
6603 int dreg = ARMREG_LR;
6605 g_assert (info_var->opcode == OP_REGOFFSET);
6606 g_assert (arm_is_imm12 (info_var->inst_offset));
6608 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6609 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
6610 ARM_STR_IMM (code, dreg, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6611 } else {
6612 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6613 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6615 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6616 ARM_B (code, 1);
6617 *(gpointer*)code = &single_step_tramp;
6618 code += 4;
6619 *(gpointer*)code = breakpoint_tramp;
6620 code += 4;
6622 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6623 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6624 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6625 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6629 set_code_cursor (cfg, code);
6630 g_free (cinfo);
6632 return code;
6635 void
6636 mono_arch_emit_epilog (MonoCompile *cfg)
6638 MonoMethod *method = cfg->method;
6639 int pos, i, rot_amount;
6640 int max_epilog_size = 16 + 20*4;
6641 guint8 *code;
6642 CallInfo *cinfo;
6644 if (cfg->method->save_lmf)
6645 max_epilog_size += 128;
6647 code = realloc_code (cfg, max_epilog_size);
6649 /* Save the uwind state which is needed by the out-of-line code */
6650 mono_emit_unwind_op_remember_state (cfg, code);
6652 pos = 0;
6654 /* Load returned vtypes into registers if needed */
6655 cinfo = cfg->arch.cinfo;
6656 switch (cinfo->ret.storage) {
6657 case RegTypeStructByVal: {
6658 MonoInst *ins = cfg->ret;
6660 if (cinfo->ret.nregs == 1) {
6661 if (arm_is_imm12 (ins->inst_offset)) {
6662 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6663 } else {
6664 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6665 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6667 } else {
6668 for (i = 0; i < cinfo->ret.nregs; ++i) {
6669 int offset = ins->inst_offset + (i * 4);
6670 if (arm_is_imm12 (offset)) {
6671 ARM_LDR_IMM (code, i, ins->inst_basereg, offset);
6672 } else {
6673 code = mono_arm_emit_load_imm (code, ARMREG_LR, offset);
6674 ARM_LDR_REG_REG (code, i, ins->inst_basereg, ARMREG_LR);
6678 break;
6680 case RegTypeHFA: {
6681 MonoInst *ins = cfg->ret;
6683 for (i = 0; i < cinfo->ret.nregs; ++i) {
6684 if (cinfo->ret.esize == 4)
6685 ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
6686 else
6687 ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
6689 break;
6691 default:
6692 break;
6695 if (method->save_lmf) {
6696 int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0;
6697 /* all but r0-r3, sp and pc */
6698 pos += MONO_ABI_SIZEOF (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
6699 lmf_offset = pos;
6701 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6703 /* This points to r4 inside MonoLMF->iregs */
6704 sp_adj = (MONO_ABI_SIZEOF (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
6705 reg = ARMREG_R4;
6706 regmask = 0x9ff0; /* restore lr to pc */
6707 /* Skip caller saved registers not used by the method */
6708 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6709 regmask &= ~(1 << reg);
6710 sp_adj += 4;
6711 reg ++;
6713 if (iphone_abi)
6714 /* Restored later */
6715 regmask &= ~(1 << ARMREG_PC);
6716 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6717 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6718 for (i = 0; i < 16; i++) {
6719 if (regmask & (1 << i))
6720 nused_int_regs ++;
6722 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4);
6723 /* restore iregs */
6724 ARM_POP (code, regmask);
6725 if (iphone_abi) {
6726 for (i = 0; i < 16; i++) {
6727 if (regmask & (1 << i))
6728 mono_emit_unwind_op_same_value (cfg, code, i);
6730 /* Restore saved r7, restore LR to PC */
6731 /* Skip lr from the lmf */
6732 mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4);
6733 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (target_mgreg_t), 0);
6734 mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
6735 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6737 } else {
6738 int i, nused_int_regs = 0;
6740 for (i = 0; i < 16; i++) {
6741 if (cfg->used_int_regs & (1 << i))
6742 nused_int_regs ++;
6745 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6746 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6747 } else {
6748 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6749 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6752 if (cfg->frame_reg != ARMREG_SP) {
6753 mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP);
6756 if (iphone_abi) {
6757 /* Restore saved gregs */
6758 if (cfg->used_int_regs) {
6759 mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4);
6760 ARM_POP (code, cfg->used_int_regs);
6761 for (i = 0; i < 16; i++) {
6762 if (cfg->used_int_regs & (1 << i))
6763 mono_emit_unwind_op_same_value (cfg, code, i);
6766 mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
6767 /* Restore saved r7, restore LR to PC */
6768 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6769 } else {
6770 mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4);
6771 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6775 /* Restore the unwind state to be the same as before the epilog */
6776 mono_emit_unwind_op_restore_state (cfg, code);
6778 set_code_cursor (cfg, code);
6782 void
6783 mono_arch_emit_exceptions (MonoCompile *cfg)
6785 MonoJumpInfo *patch_info;
6786 int i;
6787 guint8 *code;
6788 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6789 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6790 int max_epilog_size = 50;
6792 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6793 exc_throw_pos [i] = NULL;
6794 exc_throw_found [i] = 0;
6797 /* count the number of exception infos */
6800 * make sure we have enough space for exceptions
6802 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6803 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6804 i = mini_exception_id_by_name ((const char*)patch_info->data.target);
6805 if (!exc_throw_found [i]) {
6806 max_epilog_size += 32;
6807 exc_throw_found [i] = TRUE;
6812 code = realloc_code (cfg, max_epilog_size);
6814 /* add code to raise exceptions */
6815 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6816 switch (patch_info->type) {
6817 case MONO_PATCH_INFO_EXC: {
6818 MonoClass *exc_class;
6819 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6821 i = mini_exception_id_by_name ((const char*)patch_info->data.target);
6822 if (exc_throw_pos [i]) {
6823 arm_patch (ip, exc_throw_pos [i]);
6824 patch_info->type = MONO_PATCH_INFO_NONE;
6825 break;
6826 } else {
6827 exc_throw_pos [i] = code;
6829 arm_patch (ip, code);
6831 exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6833 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6834 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6835 patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
6836 patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
6837 patch_info->ip.i = code - cfg->native_code;
6838 ARM_BL (code, 0);
6839 cfg->thunk_area += THUNK_SIZE;
6840 *(guint32*)(gpointer)code = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF;
6841 code += 4;
6842 break;
6844 default:
6845 /* do nothing */
6846 break;
6850 set_code_cursor (cfg, code);
6853 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_finish_init:
 *
 *   Finish arch specific initialization; nothing is required on ARM.
 */
void
mono_arch_finish_init (void)
{
}
6860 MonoInst*
6861 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6863 /* FIXME: */
6864 return NULL;
6867 #ifndef DISABLE_JIT
6869 #endif
6871 guint32
6872 mono_arch_get_patch_offset (guint8 *code)
6874 /* OP_AOTCONST */
6875 return 8;
/* ARM has no register windows; this arch interface entry point is a no-op. */
void
mono_arch_flush_register_windows (void)
{
}
6883 MonoMethod*
6884 mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
6886 return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
6889 MonoVTable*
6890 mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
6892 return (MonoVTable*)(gsize)regs [MONO_ARCH_RGCTX_REG];
6895 GSList*
6896 mono_arch_get_cie_program (void)
6898 GSList *l = NULL;
6900 mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);
6902 return l;
6905 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6906 #define BASE_SIZE (6 * 4)
6907 #define BSEARCH_ENTRY_SIZE (4 * 4)
6908 #define CMP_SIZE (3 * 4)
6909 #define BRANCH_SIZE (1 * 4)
6910 #define CALL_SIZE (2 * 4)
6911 #define WMC_SIZE (8 * 4)
6912 #define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
6914 static arminstr_t *
6915 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6917 guint32 delta = DISTANCE (target, code);
6918 delta -= 8;
6919 g_assert (delta >= 0 && delta <= 0xFFF);
6920 *target = *target | delta;
6921 *code = value;
6922 return code + 1;
6925 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug aid: abort with diagnostics when the IMT thunk compares against an
 * unexpected method. Only compiled under ENABLE_WRONG_METHOD_CHECK. */
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
	g_assert (0);
}
6932 #endif
6934 gpointer
6935 mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6936 gpointer fail_tramp)
6938 int size, i;
6939 arminstr_t *code, *start;
6940 gboolean large_offsets = FALSE;
6941 guint32 **constant_pool_starts;
6942 arminstr_t *vtable_target = NULL;
6943 int extra_space = 0;
6944 #ifdef ENABLE_WRONG_METHOD_CHECK
6945 char * cond;
6946 #endif
6947 GSList *unwind_ops;
6949 size = BASE_SIZE;
6950 constant_pool_starts = g_new0 (guint32*, count);
6952 for (i = 0; i < count; ++i) {
6953 MonoIMTCheckItem *item = imt_entries [i];
6954 if (item->is_equals) {
6955 gboolean fail_case = !item->check_target_idx && fail_tramp;
6957 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6958 item->chunk_size += 32;
6959 large_offsets = TRUE;
6962 if (item->check_target_idx || fail_case) {
6963 if (!item->compare_done || fail_case)
6964 item->chunk_size += CMP_SIZE;
6965 item->chunk_size += BRANCH_SIZE;
6966 } else {
6967 #ifdef ENABLE_WRONG_METHOD_CHECK
6968 item->chunk_size += WMC_SIZE;
6969 #endif
6971 if (fail_case) {
6972 item->chunk_size += 16;
6973 large_offsets = TRUE;
6975 item->chunk_size += CALL_SIZE;
6976 } else {
6977 item->chunk_size += BSEARCH_ENTRY_SIZE;
6978 imt_entries [item->check_target_idx]->compare_done = TRUE;
6980 size += item->chunk_size;
6983 if (large_offsets)
6984 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
6986 if (fail_tramp)
6987 code = mono_method_alloc_generic_virtual_trampoline (domain, size);
6988 else
6989 code = mono_domain_code_reserve (domain, size);
6990 start = code;
6992 unwind_ops = mono_arch_get_cie_program ();
6994 #ifdef DEBUG_IMT
6995 g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6996 for (i = 0; i < count; ++i) {
6997 MonoIMTCheckItem *item = imt_entries [i];
6998 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
7000 #endif
7002 if (large_offsets) {
7003 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7004 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (target_mgreg_t));
7005 } else {
7006 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
7007 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
7009 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
7010 vtable_target = code;
7011 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
7012 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
7014 for (i = 0; i < count; ++i) {
7015 MonoIMTCheckItem *item = imt_entries [i];
7016 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
7017 gint32 vtable_offset;
7019 item->code_target = (guint8*)code;
7021 if (item->is_equals) {
7022 gboolean fail_case = !item->check_target_idx && fail_tramp;
7024 if (item->check_target_idx || fail_case) {
7025 if (!item->compare_done || fail_case) {
7026 imt_method = code;
7027 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7028 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7030 item->jmp_code = (guint8*)code;
7031 ARM_B_COND (code, ARMCOND_NE, 0);
7032 } else {
7033 /*Enable the commented code to assert on wrong method*/
7034 #ifdef ENABLE_WRONG_METHOD_CHECK
7035 imt_method = code;
7036 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7037 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7038 cond = code;
7039 ARM_B_COND (code, ARMCOND_EQ, 0);
7041 /* Define this if your system is so bad that gdb is failing. */
7042 #ifdef BROKEN_DEV_ENV
7043 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
7044 ARM_BL (code, 0);
7045 arm_patch (code - 1, mini_dump_bad_imt);
7046 #else
7047 ARM_DBRK (code);
7048 #endif
7049 arm_patch (cond, code);
7050 #endif
7053 if (item->has_target_code) {
7054 /* Load target address */
7055 target_code_ins = code;
7056 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7057 /* Save it to the fourth slot */
7058 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
7059 /* Restore registers and branch */
7060 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7062 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
7063 } else {
7064 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
7065 if (!arm_is_imm12 (vtable_offset)) {
7067 * We need to branch to a computed address but we don't have
7068 * a free register to store it, since IP must contain the
7069 * vtable address. So we push the two values to the stack, and
7070 * load them both using LDM.
7072 /* Compute target address */
7073 vtable_offset_ins = code;
7074 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7075 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
7076 /* Save it to the fourth slot */
7077 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
7078 /* Restore registers and branch */
7079 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7081 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
7082 } else {
7083 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
7084 if (large_offsets) {
7085 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
7086 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (target_mgreg_t));
7088 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
7089 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
7093 if (fail_case) {
7094 arm_patch (item->jmp_code, (guchar*)code);
7096 target_code_ins = code;
7097 /* Load target address */
7098 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7099 /* Save it to the fourth slot */
7100 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
7101 /* Restore registers and branch */
7102 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7104 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
7105 item->jmp_code = NULL;
7108 if (imt_method)
7109 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)(gsize)item->key);
7111 /*must emit after unconditional branch*/
7112 if (vtable_target) {
7113 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)(gsize)vtable);
7114 item->chunk_size += 4;
7115 vtable_target = NULL;
7118 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
7119 constant_pool_starts [i] = code;
7120 if (extra_space) {
7121 code += extra_space;
7122 extra_space = 0;
7124 } else {
7125 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7126 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7128 item->jmp_code = (guint8*)code;
7129 ARM_B_COND (code, ARMCOND_HS, 0);
7130 ++extra_space;
7134 for (i = 0; i < count; ++i) {
7135 MonoIMTCheckItem *item = imt_entries [i];
7136 if (item->jmp_code) {
7137 if (item->check_target_idx)
7138 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
7140 if (i > 0 && item->is_equals) {
7141 int j;
7142 arminstr_t *space_start = constant_pool_starts [i];
7143 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
7144 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)(gsize)imt_entries [j]->key);
7149 #ifdef DEBUG_IMT
7151 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count);
7152 mono_disassemble_code (NULL, (guint8*)start, size, buff);
7153 g_free (buff);
7155 #endif
7157 g_free (constant_pool_starts);
7159 mono_arch_flush_icache ((guint8*)start, size);
7160 MONO_PROFILER_RAISE (jit_code_buffer, ((guint8*)start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
7161 UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);
7163 g_assert (DISTANCE (start, code) <= size);
7165 mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), domain);
7167 return start;
7170 host_mgreg_t
7171 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
7173 return ctx->regs [reg];
7176 void
7177 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
7179 ctx->regs [reg] = val;
7183 * mono_arch_get_trampolines:
7185 * Return a list of MonoTrampInfo structures describing arch specific trampolines
7186 * for AOT.
7188 GSList *
7189 mono_arch_get_trampolines (gboolean aot)
7191 return mono_arm_get_exception_trampolines (aot);
7194 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
7196 * mono_arch_set_breakpoint:
7198 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
7199 * The location should contain code emitted by OP_SEQ_POINT.
7201 void
7202 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
7204 guint8 *code = ip;
7205 guint32 native_offset = ip - (guint8*)ji->code_start;
7207 if (ji->from_aot) {
7208 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);
7210 if (!breakpoint_tramp)
7211 breakpoint_tramp = mini_get_breakpoint_trampoline ();
7213 g_assert (native_offset % 4 == 0);
7214 g_assert (info->bp_addrs [native_offset / 4] == 0);
7215 info->bp_addrs [native_offset / 4] = (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
7216 } else if (mini_debug_options.soft_breakpoints) {
7217 code += 4;
7218 ARM_BLX_REG (code, ARMREG_LR);
7219 mono_arch_flush_icache (code - 4, 4);
7220 } else {
7221 int dreg = ARMREG_LR;
7223 /* Read from another trigger page */
7224 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7225 ARM_B (code, 0);
7226 *(int*)code = (int)(gssize)bp_trigger_page;
7227 code += 4;
7228 ARM_LDR_IMM (code, dreg, dreg, 0);
7230 mono_arch_flush_icache (code - 16, 16);
7232 #if 0
7233 /* This is currently implemented by emitting an SWI instruction, which
7234 * qemu/linux seems to convert to a SIGILL.
7236 *(int*)code = (0xef << 24) | 8;
7237 code += 4;
7238 mono_arch_flush_icache (code - 4, 4);
7239 #endif
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP, undoing mono_arch_set_breakpoint.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		/* AOT: just reset the runtime-consulted table entry back to 0 */
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		/* The entry must hold exactly what set_breakpoint stored */
		g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
		info->bp_addrs [native_offset / 4] = 0;
	} else if (mini_debug_options.soft_breakpoints) {
		/* Replace the patched blx with a nop (mirrors the code += 4 offset
		 * used when the breakpoint was set) */
		code += 4;
		ARM_NOP (code);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		/* Overwrite the 4-instruction (16-byte) trigger-page sequence with nops */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
7277 * mono_arch_start_single_stepping:
7279 * Start single stepping.
7281 void
7282 mono_arch_start_single_stepping (void)
7284 if (ss_trigger_page)
7285 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7286 else
7287 single_step_tramp = mini_get_single_step_trampoline ();
7291 * mono_arch_stop_single_stepping:
7293 * Stop single stepping.
7295 void
7296 mono_arch_stop_single_stepping (void)
7298 if (ss_trigger_page)
7299 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7300 else
7301 single_step_tramp = NULL;
7304 #if __APPLE__
7305 #define DBG_SIGNAL SIGBUS
7306 #else
7307 #define DBG_SIGNAL SIGSEGV
7308 #endif
7311 * mono_arch_is_single_step_event:
7313 * Return whenever the machine state in SIGCTX corresponds to a single
7314 * step event.
7316 gboolean
7317 mono_arch_is_single_step_event (void *info, void *sigctx)
7319 siginfo_t *sinfo = (siginfo_t*)info;
7321 if (!ss_trigger_page)
7322 return FALSE;
7324 /* Sometimes the address is off by 4 */
7325 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7326 return TRUE;
7327 else
7328 return FALSE;
7332 * mono_arch_is_breakpoint_event:
7334 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7336 gboolean
7337 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7339 siginfo_t *sinfo = (siginfo_t*)info;
7341 if (!ss_trigger_page)
7342 return FALSE;
7344 if (sinfo->si_signo == DBG_SIGNAL) {
7345 /* Sometimes the address is off by 4 */
7346 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7347 return TRUE;
7348 else
7349 return FALSE;
7350 } else {
7351 return FALSE;
/*
 * mono_arch_skip_breakpoint:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	/* Step over the faulting 4-byte ARM instruction */
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
 * mono_arch_skip_single_step:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
	/* Step over the faulting 4-byte ARM instruction */
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
7378 * mono_arch_get_seq_point_info:
7380 * See mini-amd64.c for docs.
7382 SeqPointInfo*
7383 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7385 SeqPointInfo *info;
7386 MonoJitInfo *ji;
7388 // FIXME: Add a free function
7390 mono_domain_lock (domain);
7391 info = (SeqPointInfo*)g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7392 code);
7393 mono_domain_unlock (domain);
7395 if (!info) {
7396 ji = mono_jit_info_table_find (domain, code);
7397 g_assert (ji);
7399 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7401 info->ss_trigger_page = ss_trigger_page;
7402 info->bp_trigger_page = bp_trigger_page;
7403 info->ss_tramp_addr = &single_step_tramp;
7405 mono_domain_lock (domain);
7406 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7407 code, info);
7408 mono_domain_unlock (domain);
7411 return info;
7414 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
 * mono_arch_set_target:
 *
 *   Set the target architecture the JIT backend should generate code for, in the form
 * of a GNU target triplet. Only used in AOT mode.
 */
void
mono_arch_set_target (char *mtriple)
{
	/* The GNU target triple format is not very well documented */
	/* The checks below are cumulative substring matches and only ever set
	 * flags to TRUE: e.g. "armv7s" also contains "armv7", so both blocks
	 * fire and the v7s configuration is a superset of the v7 one. */
	if (strstr (mtriple, "armv7")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
	}
	if (strstr (mtriple, "armv6")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
	}
	if (strstr (mtriple, "armv7s")) {
		v7s_supported = TRUE;
	}
	if (strstr (mtriple, "armv7k")) {
		v7k_supported = TRUE;
	}
	if (strstr (mtriple, "thumbv7s")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
		v7s_supported = TRUE;
		thumb_supported = TRUE;
		thumb2_supported = TRUE;
	}
	if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		thumb_supported = TRUE;
		iphone_abi = TRUE;
	}
	if (strstr (mtriple, "gnueabi"))
		eabi_supported = TRUE;
}
/*
 * mono_arch_opcode_supported:
 *
 *   Return whether OPCODE can be implemented natively by this backend.
 * Atomic integer ops require ARMv7; atomic float ops additionally
 * require hardware VFP.
 */
gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
		return v7_supported;
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		/* Floating point atomics also need the VFP unit */
		return v7_supported && IS_VFP;
	default:
		return FALSE;
	}
}
/*
 * mono_arch_get_call_info:
 *
 *   Compute the ARM calling convention layout for SIG, allocated from MP.
 * Thin public wrapper over the file-local get_call_info ().
 */
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	return get_call_info (mp, sig);
}
/*
 * mono_arch_get_get_tls_tramp:
 *
 *   No TLS-getter trampoline is provided on this backend; always returns NULL.
 */
gpointer
mono_arch_get_get_tls_tramp (void)
{
	return NULL;
}
/*
 * emit_aotconst:
 *
 *   Emit code loading an AOT constant of type PATCH_TYPE/DATA into DREG,
 * recording a patch at the current offset.  Returns the updated code pointer.
 */
static G_GNUC_UNUSED guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
{
	/* OP_AOTCONST */
	mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
	/* Load the inline data word (to be patched) into dreg, branching over it */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
/*
 * mono_arm_emit_aotconst:
 *
 *   Trampoline-code variant of emit_aotconst: prepend the patch to the
 * JI_LIST instead of adding it to a MonoCompile, with offsets relative
 * to BUF.  Emits the same load-over-inline-word + GOT-load sequence.
 */
guint8*
mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
{
	MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;

	*ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
	/* Load the inline data word (to be patched) into dreg, branching over it */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
7529 gpointer
7530 mono_arch_load_function (MonoJitICallId jit_icall_id)
7532 gpointer target = NULL;
7533 switch (jit_icall_id) {
7534 #undef MONO_AOT_ICALL
7535 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
7536 MONO_AOT_ICALL (mono_arm_resume_unwind)
7537 MONO_AOT_ICALL (mono_arm_start_gsharedvt_call)
7538 MONO_AOT_ICALL (mono_arm_throw_exception)
7539 MONO_AOT_ICALL (mono_arm_throw_exception_by_token)
7540 MONO_AOT_ICALL (mono_arm_unaligned_stack)
7542 return target;