Remove MONO_PATCH_INFO_METHOD_REL. (#14643)
[mono-project.git] / mono / mini / mini-arm.c
blobc7ebb0d9e5d26f7bc2b4833ce63f341bc987ce8d
1 /**
2 * \file
3 * ARM backend for the Mono code generator
5 * Authors:
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
10 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include "mini.h"
15 #include <string.h>
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/utils/mono-mmap.h>
22 #include <mono/utils/mono-hwcap.h>
23 #include <mono/utils/mono-memory-model.h>
24 #include <mono/utils/mono-threads-coop.h>
25 #include <mono/utils/unlocked.h>
27 #include "interp/interp.h"
29 #include "mini-arm.h"
30 #include "cpu-arm.h"
31 #include "ir-emit.h"
32 #include "debugger-agent.h"
33 #include "mini-gc.h"
34 #include "mini-runtime.h"
35 #include "aot-runtime.h"
36 #include "mono/arch/arm/arm-vfp-codegen.h"
/* Sanity check: This makes no sense */
#if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
#error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
#endif

/*
 * IS_SOFT_FLOAT: Is full software floating point used?
 * IS_HARD_FLOAT: Is full hardware floating point used?
 * IS_VFP: Is hardware floating point with software ABI used?
 *
 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
 * IS_VFP may delegate to mono_arch_is_soft_float ().
 */
#if defined(ARM_FPU_VFP_HARD)
#define IS_SOFT_FLOAT (FALSE)
#define IS_HARD_FLOAT (TRUE)
#define IS_VFP (TRUE)
#elif defined(ARM_FPU_NONE)
/* Soft-float fallback build: the decision is made at runtime (hwcap/env). */
#define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
#define IS_HARD_FLOAT (FALSE)
#define IS_VFP (!mono_arch_is_soft_float ())
#else
#define IS_SOFT_FLOAT (FALSE)
#define IS_HARD_FLOAT (FALSE)
#define IS_VFP (TRUE)
#endif
66 #define THUNK_SIZE (3 * 4)
68 #if __APPLE__
69 G_BEGIN_DECLS
70 void sys_icache_invalidate (void *start, size_t len);
71 G_END_DECLS
72 #endif
74 /* This mutex protects architecture specific caches */
75 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
76 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
77 static mono_mutex_t mini_arch_mutex;
79 static gboolean v5_supported = FALSE;
80 static gboolean v6_supported = FALSE;
81 static gboolean v7_supported = FALSE;
82 static gboolean v7s_supported = FALSE;
83 static gboolean v7k_supported = FALSE;
84 static gboolean thumb_supported = FALSE;
85 static gboolean thumb2_supported = FALSE;
87 * Whenever to use the ARM EABI
89 static gboolean eabi_supported = FALSE;
91 /*
92 * Whenever to use the iphone ABI extensions:
93 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
94 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
95 * This is required for debugging/profiling tools to work, but it has some overhead so it should
96 * only be turned on in debug builds.
98 static gboolean iphone_abi = FALSE;
101 * The FPU we are generating code for. This is NOT runtime configurable right now,
102 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
104 static MonoArmFPU arm_fpu;
106 #if defined(ARM_FPU_VFP_HARD)
108 * On armhf, d0-d7 are used for argument passing and d8-d15
109 * must be preserved across calls, which leaves us no room
110 * for scratch registers. So we use d14-d15 but back up their
111 * previous contents to a stack slot before using them - see
112 * mono_arm_emit_vfp_scratch_save/_restore ().
114 static int vfp_scratch1 = ARM_VFP_D14;
115 static int vfp_scratch2 = ARM_VFP_D15;
116 #else
118 * On armel, d0-d7 do not need to be preserved, so we can
119 * freely make use of them as scratch registers.
121 static int vfp_scratch1 = ARM_VFP_D0;
122 static int vfp_scratch2 = ARM_VFP_D1;
123 #endif
125 static int i8_align;
127 static gpointer single_step_tramp, breakpoint_tramp;
130 * The code generated for sequence points reads from this location, which is
131 * made read-only when single stepping is enabled.
133 static gpointer ss_trigger_page;
135 /* Enabled breakpoints read from this trigger page */
136 static gpointer bp_trigger_page;
139 * TODO:
140 * floating point support: on ARM it is a mess, there are at least 3
141 * different setups, each of which binary incompat with the other.
142 * 1) FPA: old and ugly, but unfortunately what current distros use
143 * the double binary format has the two words swapped. 8 double registers.
144 * Implemented usually by kernel emulation.
145 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
146 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
147 * 3) VFP: the new and actually sensible and useful FP support. Implemented
148 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
150 * We do not care about FPA. We will support soft float and VFP.
152 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
153 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
154 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
156 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
157 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
158 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
160 //#define DEBUG_IMT 0
162 #ifndef DISABLE_JIT
163 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
164 #endif
166 static guint8*
167 emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data);
/*
 * mono_arch_regname:
 *
 * Return a human-readable name for integer register number REG
 * (r0-r3, v1-v7, fp, ip, sp, lr, pc), or "unknown" if REG is out of range.
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 * Return a human-readable name for (single-precision) FP register number REG
 * (f0-f31), or "unknown" if REG is out of range.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
201 #ifndef DISABLE_JIT
202 static guint8*
203 emit_big_add_temp (guint8 *code, int dreg, int sreg, int imm, int temp)
205 int imm8, rot_amount;
207 g_assert (temp == ARMREG_IP || temp == ARMREG_LR);
209 if (imm == 0) {
210 if (sreg != dreg)
211 ARM_MOV_REG_REG (code, dreg, sreg);
212 } else if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
213 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
214 return code;
216 if (dreg == sreg) {
217 code = mono_arm_emit_load_imm (code, temp, imm);
218 ARM_ADD_REG_REG (code, dreg, sreg, temp);
219 } else {
220 code = mono_arm_emit_load_imm (code, dreg, imm);
221 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
223 return code;
226 static guint8*
227 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
229 return emit_big_add_temp (code, dreg, sreg, imm, ARMREG_IP);
232 static guint8*
233 emit_ldr_imm (guint8 *code, int dreg, int sreg, int imm)
235 if (!arm_is_imm12 (imm)) {
236 g_assert (dreg != sreg);
237 code = emit_big_add (code, dreg, sreg, imm);
238 ARM_LDR_IMM (code, dreg, dreg, 0);
239 } else {
240 ARM_LDR_IMM (code, dreg, sreg, imm);
242 return code;
245 /* If dreg == sreg, this clobbers IP */
246 static guint8*
247 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
249 int imm8, rot_amount;
250 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
251 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
252 return code;
254 if (dreg == sreg) {
255 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
256 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
257 } else {
258 code = mono_arm_emit_load_imm (code, dreg, imm);
259 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
261 return code;
264 static guint8*
265 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
267 /* we can use r0-r3, since this is called only for incoming args on the stack */
268 if (size > sizeof (target_mgreg_t) * 4) {
269 guint8 *start_loop;
270 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
271 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
272 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
273 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
274 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
275 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
276 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
277 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
278 ARM_B_COND (code, ARMCOND_NE, 0);
279 arm_patch (code - 4, start_loop);
280 return code;
282 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
283 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
284 while (size >= 4) {
285 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
286 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
287 doffset += 4;
288 soffset += 4;
289 size -= 4;
291 } else if (size) {
292 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
293 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
294 doffset = soffset = 0;
295 while (size >= 4) {
296 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
297 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
298 doffset += 4;
299 soffset += 4;
300 size -= 4;
303 g_assert (size == 0);
304 return code;
307 static guint8*
308 emit_jmp_reg (guint8 *code, int reg)
310 if (thumb_supported)
311 ARM_BX (code, reg);
312 else
313 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
314 return code;
317 static guint8*
318 emit_call_reg (guint8 *code, int reg)
320 if (v5_supported) {
321 ARM_BLX_REG (code, reg);
322 } else {
323 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
324 return emit_jmp_reg (code, reg);
326 return code;
329 static guint8*
330 emit_call_seq (MonoCompile *cfg, guint8 *code)
332 if (cfg->method->dynamic) {
333 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
334 ARM_B (code, 0);
335 *(gpointer*)code = NULL;
336 code += 4;
337 code = emit_call_reg (code, ARMREG_IP);
338 } else {
339 ARM_BL (code, 0);
341 cfg->thunk_area += THUNK_SIZE;
342 return code;
345 guint8*
346 mono_arm_patchable_b (guint8 *code, int cond)
348 ARM_B_COND (code, cond, 0);
349 return code;
352 guint8*
353 mono_arm_patchable_bl (guint8 *code, int cond)
355 ARM_BL_COND (code, cond, 0);
356 return code;
/* __aeabi_read_tp is only usable on native EABI Linux (not Android, not cross). */
#if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
#define HAVE_AEABI_READ_TP 1
#endif

#ifdef HAVE_AEABI_READ_TP
G_BEGIN_DECLS
gpointer __aeabi_read_tp (void);
G_END_DECLS
#endif
369 gboolean
370 mono_arch_have_fast_tls (void)
372 #ifdef HAVE_AEABI_READ_TP
373 static gboolean have_fast_tls = FALSE;
374 static gboolean inited = FALSE;
376 if (mini_get_debug_options ()->use_fallback_tls)
377 return FALSE;
379 if (inited)
380 return have_fast_tls;
382 if (v7_supported) {
383 gpointer tp1, tp2;
385 tp1 = __aeabi_read_tp ();
386 asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2));
388 have_fast_tls = tp1 && tp1 == tp2;
390 inited = TRUE;
391 return have_fast_tls;
392 #else
393 return FALSE;
394 #endif
397 static guint8*
398 emit_tls_get (guint8 *code, int dreg, int tls_offset)
400 g_assert (v7_supported);
401 ARM_MRC (code, 15, 0, dreg, 13, 0, 3);
402 ARM_LDR_IMM (code, dreg, dreg, tls_offset);
403 return code;
406 static guint8*
407 emit_tls_set (guint8 *code, int sreg, int tls_offset)
409 int tp_reg = (sreg != ARMREG_R0) ? ARMREG_R0 : ARMREG_R1;
410 g_assert (v7_supported);
411 ARM_MRC (code, 15, 0, tp_reg, 13, 0, 3);
412 ARM_STR_IMM (code, sreg, tp_reg, tls_offset);
413 return code;
417 * emit_save_lmf:
419 * Emit code to push an LMF structure on the LMF stack.
420 * On arm, this is intermixed with the initialization of other fields of the structure.
422 static guint8*
423 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
425 int i;
427 if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR) != -1) {
428 code = emit_tls_get (code, ARMREG_R0, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR));
429 } else {
430 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
431 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr));
432 code = emit_call_seq (cfg, code);
434 /* we build the MonoLMF structure on the stack - see mini-arm.h */
435 /* lmf_offset is the offset from the previous stack pointer,
436 * alloc_size is the total stack space allocated, so the offset
437 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
438 * The pointer to the struct is put in r1 (new_lmf).
439 * ip is used as scratch
440 * The callee-saved registers are already in the MonoLMF structure
442 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
443 /* r0 is the result from mono_get_lmf_addr () */
444 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
445 /* new_lmf->previous_lmf = *lmf_addr */
446 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
447 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
448 /* *(lmf_addr) = r1 */
449 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
450 /* Skip method (only needed for trampoline LMF frames) */
451 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
452 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
453 /* save the current IP */
454 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
455 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
457 for (i = 0; i < MONO_ABI_SIZEOF (MonoLMF); i += sizeof (target_mgreg_t))
458 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
460 return code;
463 typedef struct {
464 gint32 vreg;
465 gint32 hreg;
466 } FloatArgData;
468 static guint8 *
469 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
471 GSList *list;
473 set_code_cursor (cfg, code);
475 for (list = inst->float_args; list; list = list->next) {
476 FloatArgData *fad = (FloatArgData*)list->data;
477 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
478 gboolean imm = arm_is_fpimm8 (var->inst_offset);
480 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
481 if (!imm)
482 *max_len += 20 + 4;
484 *max_len += 4;
486 code = realloc_code (cfg, *max_len);
488 if (!imm) {
489 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
490 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
491 } else
492 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
494 set_code_cursor (cfg, code);
495 *offset = code - cfg->native_code;
498 return code;
501 static guint8 *
502 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
504 MonoInst *inst;
506 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
508 inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
510 if (IS_HARD_FLOAT) {
511 if (!arm_is_fpimm8 (inst->inst_offset)) {
512 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
513 ARM_FSTD (code, reg, ARMREG_LR, 0);
514 } else
515 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
518 return code;
521 static guint8 *
522 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
524 MonoInst *inst;
526 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
528 inst = cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
530 if (IS_HARD_FLOAT) {
531 if (!arm_is_fpimm8 (inst->inst_offset)) {
532 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
533 ARM_FLDD (code, reg, ARMREG_LR, 0);
534 } else
535 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
538 return code;
542 * emit_restore_lmf:
544 * Emit code to pop an LMF structure from the LMF stack.
546 static guint8*
547 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
549 int basereg, offset;
551 if (lmf_offset < 32) {
552 basereg = cfg->frame_reg;
553 offset = lmf_offset;
554 } else {
555 basereg = ARMREG_R2;
556 offset = 0;
557 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
560 /* ip = previous_lmf */
561 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
562 /* lr = lmf_addr */
563 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
564 /* *(lmf_addr) = previous_lmf */
565 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
567 return code;
570 #endif /* #ifndef DISABLE_JIT */
573 * mono_arch_get_argument_info:
574 * @csig: a method signature
575 * @param_count: the number of parameters to consider
576 * @arg_info: an array to store the result infos
578 * Gathers information on parameters such as size, alignment and
579 * padding. arg_info should be large enought to hold param_count + 1 entries.
581 * Returns the size of the activation frame.
584 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
586 int k, frame_size = 0;
587 guint32 size, align, pad;
588 int offset = 8;
589 MonoType *t;
591 t = mini_get_underlying_type (csig->ret);
592 if (MONO_TYPE_ISSTRUCT (t)) {
593 frame_size += sizeof (target_mgreg_t);
594 offset += 4;
597 arg_info [0].offset = offset;
599 if (csig->hasthis) {
600 frame_size += sizeof (target_mgreg_t);
601 offset += 4;
604 arg_info [0].size = frame_size;
606 for (k = 0; k < param_count; k++) {
607 size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke);
609 /* ignore alignment for now */
610 align = 1;
612 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
613 arg_info [k].pad = pad;
614 frame_size += size;
615 arg_info [k + 1].pad = 0;
616 arg_info [k + 1].size = size;
617 offset += pad;
618 arg_info [k + 1].offset = offset;
619 offset += size;
622 align = MONO_ARCH_FRAME_ALIGNMENT;
623 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
624 arg_info [k].pad = pad;
626 return frame_size;
629 #define MAX_ARCH_DELEGATE_PARAMS 3
631 static guint8*
632 get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, gboolean param_count)
634 guint8 *code, *start;
635 GSList *unwind_ops = mono_arch_get_cie_program ();
637 if (has_target) {
638 start = code = mono_global_codeman_reserve (12);
640 /* Replace the this argument with the target */
641 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
642 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
643 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
645 g_assert ((code - start) <= 12);
647 mono_arch_flush_icache (start, 12);
648 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
649 } else {
650 int size, i;
652 size = 8 + param_count * 4;
653 start = code = mono_global_codeman_reserve (size);
655 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
656 /* slide down the arguments */
657 for (i = 0; i < param_count; ++i) {
658 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
660 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
662 g_assert ((code - start) <= size);
664 mono_arch_flush_icache (start, size);
665 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
668 if (has_target) {
669 *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
670 } else {
671 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
672 *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
673 g_free (name);
676 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL));
678 return start;
682 * mono_arch_get_delegate_invoke_impls:
684 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
685 * trampolines.
687 GSList*
688 mono_arch_get_delegate_invoke_impls (void)
690 GSList *res = NULL;
691 MonoTrampInfo *info;
692 int i;
694 get_delegate_invoke_impl (&info, TRUE, 0);
695 res = g_slist_prepend (res, info);
697 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
698 get_delegate_invoke_impl (&info, FALSE, i);
699 res = g_slist_prepend (res, info);
702 return res;
705 gpointer
706 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
708 guint8 *code, *start;
709 MonoType *sig_ret;
711 /* FIXME: Support more cases */
712 sig_ret = mini_get_underlying_type (sig->ret);
713 if (MONO_TYPE_ISSTRUCT (sig_ret))
714 return NULL;
716 if (has_target) {
717 static guint8* cached = NULL;
718 mono_mini_arch_lock ();
719 if (cached) {
720 mono_mini_arch_unlock ();
721 return cached;
724 if (mono_ee_features.use_aot_trampolines) {
725 start = (guint8*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
726 } else {
727 MonoTrampInfo *info;
728 start = get_delegate_invoke_impl (&info, TRUE, 0);
729 mono_tramp_info_register (info, NULL);
731 cached = start;
732 mono_mini_arch_unlock ();
733 return cached;
734 } else {
735 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
736 int i;
738 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
739 return NULL;
740 for (i = 0; i < sig->param_count; ++i)
741 if (!mono_is_regsize_var (sig->params [i]))
742 return NULL;
744 mono_mini_arch_lock ();
745 code = cache [sig->param_count];
746 if (code) {
747 mono_mini_arch_unlock ();
748 return code;
751 if (mono_ee_features.use_aot_trampolines) {
752 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
753 start = (guint8*)mono_aot_get_trampoline (name);
754 g_free (name);
755 } else {
756 MonoTrampInfo *info;
757 start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
758 mono_tramp_info_register (info, NULL);
760 cache [sig->param_count] = start;
761 mono_mini_arch_unlock ();
762 return start;
765 return NULL;
768 gpointer
769 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
771 return NULL;
774 gpointer
775 mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
777 return (gpointer)regs [ARMREG_R0];
781 * Initialize the cpu to execute managed code.
783 void
784 mono_arch_cpu_init (void)
786 i8_align = MONO_ABI_ALIGNOF (gint64);
787 #ifdef MONO_CROSS_COMPILE
788 /* Need to set the alignment of i8 since it can different on the target */
789 #ifdef TARGET_ANDROID
790 /* linux gnueabi */
791 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
792 #endif
793 #endif
797 * Initialize architecture specific code.
799 void
800 mono_arch_init (void)
802 char *cpu_arch;
804 #ifdef TARGET_WATCHOS
805 mini_get_debug_options ()->soft_breakpoints = TRUE;
806 #endif
808 mono_os_mutex_init_recursive (&mini_arch_mutex);
809 if (mini_get_debug_options ()->soft_breakpoints) {
810 if (!mono_aot_only)
811 breakpoint_tramp = mini_get_breakpoint_trampoline ();
812 } else {
813 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
814 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT, MONO_MEM_ACCOUNT_OTHER);
815 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
818 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
819 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
820 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
821 #if defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
822 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
823 #endif
824 mono_aot_register_jit_icall ("mono_arm_unaligned_stack", mono_arm_unaligned_stack);
825 #if defined(__ARM_EABI__)
826 eabi_supported = TRUE;
827 #endif
829 #if defined(ARM_FPU_VFP_HARD)
830 arm_fpu = MONO_ARM_FPU_VFP_HARD;
831 #else
832 arm_fpu = MONO_ARM_FPU_VFP;
834 #if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
836 * If we're compiling with a soft float fallback and it
837 * turns out that no VFP unit is available, we need to
838 * switch to soft float. We don't do this for iOS, since
839 * iOS devices always have a VFP unit.
841 if (!mono_hwcap_arm_has_vfp)
842 arm_fpu = MONO_ARM_FPU_NONE;
845 * This environment variable can be useful in testing
846 * environments to make sure the soft float fallback
847 * works. Most ARM devices have VFP units these days, so
848 * normally soft float code would not be exercised much.
850 char *soft = g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
852 if (soft && !strncmp (soft, "1", 1))
853 arm_fpu = MONO_ARM_FPU_NONE;
854 g_free (soft);
855 #endif
856 #endif
858 v5_supported = mono_hwcap_arm_is_v5;
859 v6_supported = mono_hwcap_arm_is_v6;
860 v7_supported = mono_hwcap_arm_is_v7;
863 * On weird devices, the hwcap code may fail to detect
864 * the ARM version. In that case, we can at least safely
865 * assume the version the runtime was compiled for.
867 #ifdef HAVE_ARMV5
868 v5_supported = TRUE;
869 #endif
870 #ifdef HAVE_ARMV6
871 v6_supported = TRUE;
872 #endif
873 #ifdef HAVE_ARMV7
874 v7_supported = TRUE;
875 #endif
877 #if defined(TARGET_IOS)
878 /* iOS is special-cased here because we don't yet
879 have a way to properly detect CPU features on it. */
880 thumb_supported = TRUE;
881 iphone_abi = TRUE;
882 #else
883 thumb_supported = mono_hwcap_arm_has_thumb;
884 thumb2_supported = mono_hwcap_arm_has_thumb2;
885 #endif
887 /* Format: armv(5|6|7[s])[-thumb[2]] */
888 cpu_arch = g_getenv ("MONO_CPU_ARCH");
890 /* Do this here so it overrides any detection. */
891 if (cpu_arch) {
892 if (strncmp (cpu_arch, "armv", 4) == 0) {
893 v5_supported = cpu_arch [4] >= '5';
894 v6_supported = cpu_arch [4] >= '6';
895 v7_supported = cpu_arch [4] >= '7';
896 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
897 v7k_supported = strncmp (cpu_arch, "armv7k", 6) == 0;
900 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
901 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
902 g_free (cpu_arch);
/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}
915 * This function returns the optimizations supported on this cpu.
917 guint32
918 mono_arch_cpu_optimizations (guint32 *exclude_mask)
920 /* no arm-specific optimizations yet */
921 *exclude_mask = 0;
922 return 0;
926 * This function test for all SIMD functions supported.
928 * Returns a bitmask corresponding to all supported versions.
931 guint32
932 mono_arch_cpu_enumerate_simd_versions (void)
934 /* SIMD is currently unimplemented */
935 return 0;
938 gboolean
939 mono_arm_is_hard_float (void)
941 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
944 #ifndef DISABLE_JIT
946 gboolean
947 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
949 if (v7s_supported || v7k_supported) {
950 switch (opcode) {
951 case OP_IDIV:
952 case OP_IREM:
953 case OP_IDIV_UN:
954 case OP_IREM_UN:
955 return FALSE;
956 default:
957 break;
960 return TRUE;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Whether the runtime fell back to full software floating point. */
gboolean
mono_arch_is_soft_float (void)
{
	return arm_fpu == MONO_ARM_FPU_NONE;
}
#endif
971 static gboolean
972 is_regsize_var (MonoType *t)
974 if (t->byref)
975 return TRUE;
976 t = mini_get_underlying_type (t);
977 switch (t->type) {
978 case MONO_TYPE_I4:
979 case MONO_TYPE_U4:
980 case MONO_TYPE_I:
981 case MONO_TYPE_U:
982 case MONO_TYPE_PTR:
983 case MONO_TYPE_FNPTR:
984 return TRUE;
985 case MONO_TYPE_OBJECT:
986 return TRUE;
987 case MONO_TYPE_GENERICINST:
988 if (!mono_type_generic_inst_is_valuetype (t))
989 return TRUE;
990 return FALSE;
991 case MONO_TYPE_VALUETYPE:
992 return FALSE;
994 return FALSE;
997 GList *
998 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1000 GList *vars = NULL;
1001 int i;
1003 for (i = 0; i < cfg->num_varinfo; i++) {
1004 MonoInst *ins = cfg->varinfo [i];
1005 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
1007 /* unused vars */
1008 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1009 continue;
1011 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1012 continue;
1014 /* we can only allocate 32 bit values */
1015 if (is_regsize_var (ins->inst_vtype)) {
1016 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1017 g_assert (i == vmv->idx);
1018 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1022 return vars;
1025 GList *
1026 mono_arch_get_global_int_regs (MonoCompile *cfg)
1028 GList *regs = NULL;
1030 mono_arch_compute_omit_fp (cfg);
1033 * FIXME: Interface calls might go through a static rgctx trampoline which
1034 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1035 * avoid using it.
1037 if (cfg->flags & MONO_CFG_HAS_CALLS)
1038 cfg->uses_rgctx_reg = TRUE;
1040 if (cfg->arch.omit_fp)
1041 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1042 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1043 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1044 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1045 if (iphone_abi)
1046 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1047 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1048 else
1049 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1050 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1051 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1052 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1053 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1054 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1056 return regs;
1060 * mono_arch_regalloc_cost:
1062 * Return the cost, in number of memory references, of the action of
1063 * allocating the variable VMV into a register during global register
1064 * allocation.
1066 guint32
1067 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1069 /* FIXME: */
1070 return 2;
1073 #endif /* #ifndef DISABLE_JIT */
1075 void
1076 mono_arch_flush_icache (guint8 *code, gint size)
1078 #if defined(MONO_CROSS_COMPILE)
1079 #elif __APPLE__
1080 sys_icache_invalidate (code, size);
1081 #else
1082 __builtin___clear_cache ((char*)code, (char*)code + size);
1083 #endif
1086 #define DEBUG(a)
/*
 * add_general:
 *
 *   Assign the next integer argument to a core register or to the stack,
 * recording the decision in AINFO. GR is the next free core register
 * (ARMREG_R0..R3) and STACK_SIZE the running stack-argument size; both are
 * advanced as a side effect. SIMPLE selects a 4-byte argument, otherwise an
 * 8-byte (register-pair) argument is assigned, honoring the EABI 8-byte
 * alignment rules when eabi_supported/i8_align say so.
 */
static void inline
add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
{
	if (simple) {
		if (*gr > ARMREG_R3) {
			/* Core registers exhausted: pass on the stack. */
			ainfo->size = 4;
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBase;
			*stack_size += 4;
		} else {
			ainfo->storage = RegTypeGeneral;
			ainfo->reg = *gr;
		}
	} else {
		gboolean split;

		if (eabi_supported)
			split = i8_align == 4;
		else
			split = TRUE;

		ainfo->size = 8;
		if (*gr == ARMREG_R3 && split) {
			/* first word in r3 and the second on the stack */
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBaseGen;
			*stack_size += 4;
		} else if (*gr >= ARMREG_R3) {
			/* No register room at all: whole 8 bytes on the stack. */
			if (eabi_supported) {
				/* darwin aligns longs to 4 byte only */
				if (i8_align == 8) {
					*stack_size += 7;
					*stack_size &= ~7;
				}
			}
			ainfo->offset = *stack_size;
			ainfo->reg = ARMREG_SP; /* in the caller */
			ainfo->storage = RegTypeBase;
			*stack_size += 8;
		} else {
			if (eabi_supported) {
				/* EABI wants 64-bit values in an even/odd register pair. */
				if (i8_align == 8 && ((*gr) & 1))
					(*gr) ++;
			}
			ainfo->storage = RegTypeIRegPair;
			ainfo->reg = *gr;
		}
		/* Second word of the 8-byte argument consumes one more slot. */
		(*gr) ++;
	}
	(*gr) ++;
}
/*
 * add_float:
 *
 *   Assign the next floating point argument to a VFP register (hard-float
 * AAPCS) or to the stack. FPR counts single-precision registers s0..s15;
 * FLOAT_SPARE back-fills the single-precision slot left behind by an
 * odd-aligned double. AINFO receives the decision; FPR/STACK_SIZE/
 * FLOAT_SPARE are updated as a side effect.
 */
static void inline
add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
{
	/*
	 * If we're calling a function like this:
	 *
	 * void foo(float a, double b, float c)
	 *
	 * We pass a in s0 and b in d1. That leaves us
	 * with s1 being unused. The armhf ABI recognizes
	 * this and requires register assignment to then
	 * use that for the next single-precision arg,
	 * i.e. c in this example. So float_spare either
	 * tells us which reg to use for the next single-
	 * precision arg, or it's -1, meaning use *fpr.
	 *
	 * Note that even though most of the JIT speaks
	 * double-precision, fpr represents single-
	 * precision registers.
	 *
	 * See parts 5.5 and 6.1.2 of the AAPCS for how
	 * this all works.
	 */

	if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
		ainfo->storage = RegTypeFP;

		if (is_double) {
			/*
			 * If we're passing a double-precision value
			 * and *fpr is odd (e.g. it's s1, s3, ...)
			 * we need to use the next even register. So
			 * we mark the current *fpr as a spare that
			 * can be used for the next single-precision
			 * value.
			 */
			if (*fpr % 2) {
				*float_spare = *fpr;
				(*fpr)++;
			}

			/*
			 * At this point, we have an even register
			 * so we assign that and move along.
			 */
			ainfo->reg = *fpr;
			*fpr += 2;
		} else if (*float_spare >= 0) {
			/*
			 * We're passing a single-precision value
			 * and it looks like a spare single-
			 * precision register is available. Let's
			 * use it.
			 */
			ainfo->reg = *float_spare;
			*float_spare = -1;
		} else {
			/*
			 * If we hit this branch, we're passing a
			 * single-precision value and we can simply
			 * use the next available register.
			 */
			ainfo->reg = *fpr;
			(*fpr)++;
		}
	} else {
		/*
		 * We've exhausted available floating point
		 * regs, so pass the rest on the stack.
		 */
		if (is_double) {
			/* 8-byte align the stack slot for doubles. */
			*stack_size += 7;
			*stack_size &= ~7;
		}

		ainfo->offset = *stack_size;
		ainfo->reg = ARMREG_SP;
		ainfo->storage = RegTypeBase;

		*stack_size += 8;
	}
}
/*
 * is_hfa:
 *
 *   Return TRUE if T is a homogeneous floating-point aggregate: a struct
 * (possibly with nested structs) whose non-static fields are all R4 or all
 * R8, with 1-4 scalar fields in total. On success *OUT_NFIELDS is the flat
 * field count and *OUT_ESIZE the element size (4 or 8). HFAs are returned/
 * passed in VFP registers under the hard-float ABI.
 */
static gboolean
is_hfa (MonoType *t, int *out_nfields, int *out_esize)
{
	MonoClass *klass;
	gpointer iter;
	MonoClassField *field;
	MonoType *ftype, *prev_ftype = NULL;
	int nfields = 0;

	klass = mono_class_from_mono_type_internal (t);
	iter = NULL;
	while ((field = mono_class_get_fields_internal (klass, &iter))) {
		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		ftype = mono_field_get_type_internal (field);
		ftype = mini_get_underlying_type (ftype);

		if (MONO_TYPE_ISSTRUCT (ftype)) {
			int nested_nfields, nested_esize;

			/* Nested struct: must itself be an HFA of the same element type. */
			if (!is_hfa (ftype, &nested_nfields, &nested_esize))
				return FALSE;
			if (nested_esize == 4)
				ftype = m_class_get_byval_arg (mono_defaults.single_class);
			else
				ftype = m_class_get_byval_arg (mono_defaults.double_class);
			if (prev_ftype && prev_ftype->type != ftype->type)
				return FALSE;
			prev_ftype = ftype;
			nfields += nested_nfields;
		} else {
			/* Scalar field: must be a by-value R4 or R8, matching the others. */
			if (!(!ftype->byref && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
				return FALSE;
			if (prev_ftype && prev_ftype->type != ftype->type)
				return FALSE;
			prev_ftype = ftype;
			nfields ++;
		}
	}
	if (nfields == 0 || nfields > 4)
		return FALSE;
	*out_nfields = nfields;
	*out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
	return TRUE;
}
/*
 * get_call_info:
 *
 *   Classify every argument and the return value of SIG according to the
 * ARM calling convention in use (soft-float/VFP/hard-float, EABI rules,
 * watchOS variants), producing a CallInfo describing where each value
 * lives (core register, register pair, VFP register(s), stack, or by
 * address). Allocated from MP when given, otherwise with g_malloc0 (the
 * caller then owns it and must g_free it).
 */
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	guint i, gr, fpr, pstart;
	gint float_spare;
	int n = sig->hasthis + sig->param_count;
	int nfields, esize;
	guint32 align;
	MonoType *t;
	guint32 stack_size = 0;
	CallInfo *cinfo;
	gboolean is_pinvoke = sig->pinvoke;
	gboolean vtype_retaddr = FALSE;

	/* CallInfo has a trailing variable-length ArgInfo array. */
	if (mp)
		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	else
		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	cinfo->nargs = n;
	gr = ARMREG_R0;
	fpr = ARM_VFP_F0;
	float_spare = -1;

	/* Classify the return value first. */
	t = mini_get_underlying_type (sig->ret);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_OBJECT:
		cinfo->ret.storage = RegTypeGeneral;
		cinfo->ret.reg = ARMREG_R0;
		break;
	case MONO_TYPE_U8:
	case MONO_TYPE_I8:
		cinfo->ret.storage = RegTypeIRegPair;
		cinfo->ret.reg = ARMREG_R0;
		break;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		cinfo->ret.storage = RegTypeFP;

		if (t->type == MONO_TYPE_R4)
			cinfo->ret.size = 4;
		else
			cinfo->ret.size = 8;

		/* Hard-float returns in d0/s0, soft ABIs in r0(/r1). */
		if (IS_HARD_FLOAT) {
			cinfo->ret.reg = ARM_VFP_F0;
		} else {
			cinfo->ret.reg = ARMREG_R0;
		}
		break;
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (t)) {
			cinfo->ret.storage = RegTypeGeneral;
			cinfo->ret.reg = ARMREG_R0;
			break;
		}
		if (mini_is_gsharedvt_variable_type (t)) {
			cinfo->ret.storage = RegTypeStructByAddr;
			break;
		}
		/* Fall through */
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
			/* HFAs are returned in VFP registers. */
			cinfo->ret.storage = RegTypeHFA;
			cinfo->ret.reg = 0;
			cinfo->ret.nregs = nfields;
			cinfo->ret.esize = esize;
		} else {
			if (is_pinvoke) {
				int native_size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
				int max_size;

#ifdef TARGET_WATCHOS
				max_size = 16;
#else
				max_size = 4;
#endif
				if (native_size <= max_size) {
					cinfo->ret.storage = RegTypeStructByVal;
					cinfo->ret.struct_size = native_size;
					cinfo->ret.nregs = ALIGN_TO (native_size, 4) / 4;
				} else {
					cinfo->ret.storage = RegTypeStructByAddr;
				}
			} else {
				cinfo->ret.storage = RegTypeStructByAddr;
			}
		}
		break;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (mini_is_gsharedvt_type (t));
		cinfo->ret.storage = RegTypeStructByAddr;
		break;
	case MONO_TYPE_VOID:
		break;
	default:
		g_error ("Can't handle as return value 0x%x", sig->ret->type);
	}

	vtype_retaddr = cinfo->ret.storage == RegTypeStructByAddr;

	pstart = 0;
	n = 0;
	/*
	 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
	 * the first argument, allowing 'this' to be always passed in the first arg reg.
	 * Also do this if the first argument is a reference type, since virtual calls
	 * are sometimes made using calli without sig->hasthis set, like in the delegate
	 * invoke wrappers.
	 */
	if (vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
		if (sig->hasthis) {
			add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
		} else {
			add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
			pstart = 1;
		}
		n ++;
		cinfo->ret.reg = gr;
		gr ++;
		cinfo->vret_arg_index = 1;
	} else {
		/* this */
		if (sig->hasthis) {
			add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
			n ++;
		}
		if (vtype_retaddr) {
			cinfo->ret.reg = gr;
			gr ++;
		}
	}

	DEBUG(g_print("params: %d\n", sig->param_count));
	for (i = pstart; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [n];

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Prevent implicit arguments and sig_cookie from
			   being passed in registers */
			gr = ARMREG_R3 + 1;
			fpr = ARM_VFP_F16;
			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
		}
		DEBUG(g_print("param %d: ", i));
		if (sig->params [i]->byref) {
			DEBUG(g_print("byref\n"));
			add_general (&gr, &stack_size, ainfo, TRUE);
			n++;
			continue;
		}
		t = mini_get_underlying_type (sig->params [i]);
		switch (t->type) {
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			cinfo->args [n].size = 1;
			add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			cinfo->args [n].size = 2;
			add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			cinfo->args [n].size = 4;
			add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_OBJECT:
			cinfo->args [n].size = sizeof (target_mgreg_t);
			add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (t)) {
				cinfo->args [n].size = sizeof (target_mgreg_t);
				add_general (&gr, &stack_size, ainfo, TRUE);
				break;
			}
			if (mini_is_gsharedvt_variable_type (t)) {
				/* gsharedvt arguments are passed by ref */
				g_assert (mini_is_gsharedvt_type (t));
				add_general (&gr, &stack_size, ainfo, TRUE);
				switch (ainfo->storage) {
				case RegTypeGeneral:
					ainfo->storage = RegTypeGSharedVtInReg;
					break;
				case RegTypeBase:
					ainfo->storage = RegTypeGSharedVtOnStack;
					break;
				default:
					g_assert_not_reached ();
				}
				break;
			}
			/* Fall through */
		case MONO_TYPE_TYPEDBYREF:
		case MONO_TYPE_VALUETYPE: {
			gint size;
			int align_size;
			int nwords, nfields, esize;
			guint32 align;

			if (IS_HARD_FLOAT && sig->pinvoke && is_hfa (t, &nfields, &esize)) {
				if (fpr + nfields < ARM_VFP_F16) {
					ainfo->storage = RegTypeHFA;
					ainfo->reg = fpr;
					ainfo->nregs = nfields;
					ainfo->esize = esize;
					if (esize == 4)
						fpr += nfields;
					else
						fpr += nfields * 2;
					break;
				} else {
					/* VFP regs exhausted: fall back to the generic struct path. */
					fpr = ARM_VFP_F16;
				}
			}

			if (t->type == MONO_TYPE_TYPEDBYREF) {
				size = MONO_ABI_SIZEOF (MonoTypedRef);
				align = sizeof (target_mgreg_t);
			} else {
				MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]);
				if (is_pinvoke)
					size = mono_class_native_size (klass, &align);
				else
					size = mini_type_stack_size_full (t, &align, FALSE);
			}
			DEBUG(g_print ("load %d bytes struct\n", size));

#ifdef TARGET_WATCHOS
			/* Watchos pass large structures by ref */
			/* We only do this for pinvoke to make gsharedvt/dyncall simpler */
			if (sig->pinvoke && size > 16) {
				add_general (&gr, &stack_size, ainfo, TRUE);
				switch (ainfo->storage) {
				case RegTypeGeneral:
					ainfo->storage = RegTypeStructByAddr;
					break;
				case RegTypeBase:
					ainfo->storage = RegTypeStructByAddrOnStack;
					break;
				default:
					g_assert_not_reached ();
					break;
				}
				break;
			}
#endif

			/* Round the struct up to whole machine words; it may be split
			 * between registers (ainfo->size words) and the stack
			 * (ainfo->vtsize words). */
			align_size = size;
			nwords = 0;
			align_size += (sizeof (target_mgreg_t) - 1);
			align_size &= ~(sizeof (target_mgreg_t) - 1);
			nwords = (align_size + sizeof (target_mgreg_t) -1 ) / sizeof (target_mgreg_t);
			ainfo->storage = RegTypeStructByVal;
			ainfo->struct_size = size;
			ainfo->align = align;

			if (eabi_supported) {
				if (align >= 8 && (gr & 1))
					gr ++;
			}
			if (gr > ARMREG_R3) {
				ainfo->size = 0;
				ainfo->vtsize = nwords;
			} else {
				int rest = ARMREG_R3 - gr + 1;
				int n_in_regs = rest >= nwords? nwords: rest;

				ainfo->size = n_in_regs;
				ainfo->vtsize = nwords - n_in_regs;
				ainfo->reg = gr;
				gr += n_in_regs;
				nwords -= n_in_regs;
			}
			stack_size = ALIGN_TO (stack_size, align);

			ainfo->offset = stack_size;
			/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
			stack_size += nwords * sizeof (target_mgreg_t);
			break;
		}
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			ainfo->size = 8;
			add_general (&gr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_R4:
			ainfo->size = 4;

			if (IS_HARD_FLOAT)
				add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
			else
				add_general (&gr, &stack_size, ainfo, TRUE);
			break;
		case MONO_TYPE_R8:
			ainfo->size = 8;

			if (IS_HARD_FLOAT)
				add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
			else
				add_general (&gr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_VAR:
		case MONO_TYPE_MVAR:
			/* gsharedvt arguments are passed by ref */
			g_assert (mini_is_gsharedvt_type (t));
			add_general (&gr, &stack_size, ainfo, TRUE);
			switch (ainfo->storage) {
			case RegTypeGeneral:
				ainfo->storage = RegTypeGSharedVtInReg;
				break;
			case RegTypeBase:
				ainfo->storage = RegTypeGSharedVtOnStack;
				break;
			default:
				g_assert_not_reached ();
			}
			break;
		default:
			g_error ("Can't handle 0x%x", sig->params [i]->type);
		}
		n ++;
	}

	/* Handle the case where there are no implicit arguments */
	if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
		/* Prevent implicit arguments and sig_cookie from
		   being passed in registers */
		gr = ARMREG_R3 + 1;
		fpr = ARM_VFP_F16;
		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
	}

	DEBUG (g_print ("  stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
	stack_size = ALIGN_TO (stack_size, MONO_ARCH_FRAME_ALIGNMENT);

	cinfo->stack_usage = stack_size;
	return cinfo;
}
1635 * We need to create a temporary value if the argument is not stored in
1636 * a linear memory range in the ccontext (this normally happens for
1637 * value types if they are passed both by stack and regs).
1639 static int
1640 arg_need_temp (ArgInfo *ainfo)
1642 if (ainfo->storage == RegTypeStructByVal && ainfo->vtsize)
1643 return ainfo->struct_size;
1644 return 0;
/*
 * arg_get_storage:
 *
 *   Return a pointer into CCONTEXT where the value described by AINFO
 * lives: a slot in the general registers, the FP registers, or the stack
 * area. Only valid for storage kinds that occupy a single linear range
 * (see arg_need_temp () for the split case); other kinds abort.
 */
static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
	switch (ainfo->storage) {
		case RegTypeIRegPair:
		case RegTypeGeneral:
		case RegTypeStructByVal:
			return &ccontext->gregs [ainfo->reg];
		case RegTypeHFA:
		case RegTypeFP:
			return &ccontext->fregs [ainfo->reg];
		case RegTypeBase:
			return ccontext->stack + ainfo->offset;
		default:
			g_error ("Arg storage type not yet supported");
	}
}
/*
 * arg_get_val:
 *
 *   Copy a register/stack-split value type out of CCONTEXT into the linear
 * buffer DEST: first the ainfo->size words held in registers, then the
 * remainder from the stack area. Only valid when arg_need_temp () is
 * non-zero for AINFO.
 */
static void
arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
	int reg_size = ainfo->size * sizeof (host_mgreg_t);
	g_assert (arg_need_temp (ainfo));
	memcpy (dest, &ccontext->gregs [ainfo->reg], reg_size);
	memcpy ((host_mgreg_t*)dest + ainfo->size, ccontext->stack + ainfo->offset, ainfo->struct_size - reg_size);
}
/*
 * arg_set_val:
 *
 *   Inverse of arg_get_val (): scatter the linear buffer SRC into
 * CCONTEXT, writing the first ainfo->size words into registers and the
 * remainder into the stack area. Only valid when arg_need_temp () is
 * non-zero for AINFO.
 */
static void
arg_set_val (CallContext *ccontext, ArgInfo *ainfo, gpointer src)
{
	int reg_size = ainfo->size * sizeof (host_mgreg_t);
	g_assert (arg_need_temp (ainfo));
	memcpy (&ccontext->gregs [ainfo->reg], src, reg_size);
	memcpy (ccontext->stack + ainfo->offset, (host_mgreg_t*)src + ainfo->size, ainfo->struct_size - reg_size);
}
/* Set arguments in the ccontext (for i2n entry) */
void
mono_arch_set_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
	CallInfo *cinfo = get_call_info (NULL, sig);
	gpointer storage;
	ArgInfo *ainfo;

	memset (ccontext, 0, sizeof (CallContext));

	/* The native stack argument area is heap-allocated here; the caller of
	 * this context is expected to release it. */
	ccontext->stack_size = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);
	if (ccontext->stack_size)
		ccontext->stack = (guint8*)g_calloc (1, ccontext->stack_size);

	if (sig->ret->type != MONO_TYPE_VOID) {
		ainfo = &cinfo->ret;
		if (ainfo->storage == RegTypeStructByAddr) {
			/* Pass the hidden return-buffer address in its register. */
			storage = interp_cb->frame_arg_to_storage ((MonoInterpFrameHandle)frame, sig, -1);
			ccontext->gregs [cinfo->ret.reg] = (host_mgreg_t)(gsize)storage;
		}
	}

	g_assert (!sig->hasthis);

	for (int i = 0; i < sig->param_count; i++) {
		ainfo = &cinfo->args [i];
		int temp_size = arg_need_temp (ainfo);

		if (temp_size)
			storage = alloca (temp_size); // FIXME? alloca in a loop
		else
			storage = arg_get_storage (ccontext, ainfo);

		interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, i, storage);
		if (temp_size)
			/* Split register/stack argument: scatter the temp into place. */
			arg_set_val (ccontext, ainfo, storage);
	}

	g_free (cinfo);
}
/* Set return value in the ccontext (for n2i return) */
void
mono_arch_set_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	MonoEECallbacks *interp_cb;
	CallInfo *cinfo;
	gpointer storage;
	ArgInfo *ainfo;

	if (sig->ret->type == MONO_TYPE_VOID)
		return;

	interp_cb = mini_get_interp_callbacks ();
	cinfo = get_call_info (NULL, sig);
	ainfo = &cinfo->ret;

	/* RegTypeStructByAddr needs no work here: the interp already wrote the
	 * result through the hidden return buffer. */
	if (ainfo->storage != RegTypeStructByAddr) {
		g_assert (!arg_need_temp (ainfo));
		storage = arg_get_storage (ccontext, ainfo);
		/* NOTE: clears the whole context before the callback fills the
		 * return slot; storage points into ccontext, so order matters. */
		memset (ccontext, 0, sizeof (CallContext)); // FIXME
		interp_cb->frame_arg_to_data ((MonoInterpFrameHandle)frame, sig, -1, storage);
	}

	g_free (cinfo);
}
/* Gets the arguments from ccontext (for n2i entry) */
void
mono_arch_get_native_call_context_args (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	MonoEECallbacks *interp_cb = mini_get_interp_callbacks ();
	CallInfo *cinfo = get_call_info (NULL, sig);
	gpointer storage;
	ArgInfo *ainfo;

	if (sig->ret->type != MONO_TYPE_VOID) {
		ainfo = &cinfo->ret;
		if (ainfo->storage == RegTypeStructByAddr) {
			/* Hand the hidden return-buffer address to the interp frame. */
			storage = (gpointer)(gsize)ccontext->gregs [cinfo->ret.reg];
			interp_cb->frame_arg_set_storage ((MonoInterpFrameHandle)frame, sig, -1, storage);
		}
	}

	for (int i = 0; i < sig->param_count + sig->hasthis; i++) {
		ainfo = &cinfo->args [i];
		int temp_size = arg_need_temp (ainfo);

		if (temp_size) {
			/* Split register/stack argument: gather it into a temp first. */
			storage = alloca (temp_size); // FIXME? alloca in a loop
			arg_get_val (ccontext, ainfo, storage);
		} else {
			storage = arg_get_storage (ccontext, ainfo);
		}
		interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, i, storage);
	}

	g_free (cinfo);
}
/* Gets the return value from ccontext (for i2n exit) */
void
mono_arch_get_native_call_context_ret (CallContext *ccontext, gpointer frame, MonoMethodSignature *sig)
{
	MonoEECallbacks *interp_cb;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	gpointer storage;

	if (sig->ret->type == MONO_TYPE_VOID)
		return;

	interp_cb = mini_get_interp_callbacks ();
	cinfo = get_call_info (NULL, sig);
	ainfo = &cinfo->ret;

	/* RegTypeStructByAddr results were already written through the hidden
	 * return buffer, so only register/stack returns need copying here. */
	if (ainfo->storage != RegTypeStructByAddr) {
		g_assert (!arg_need_temp (ainfo));
		storage = arg_get_storage (ccontext, ainfo);
		interp_cb->data_to_frame_arg ((MonoInterpFrameHandle)frame, sig, -1, storage);
	}

	g_free (cinfo);
}
1809 #ifndef DISABLE_JIT
/*
 * mono_arch_tailcall_supported:
 *
 *   Decide whether a call with CALLEE_SIG can be compiled as a tailcall
 * from a method with CALLER_SIG on ARM, based purely on the two calling
 * conventions (stack usage and return storage). VIRTUAL_ is currently
 * unused in the checks below.
 */
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
	g_assert (caller_sig);
	g_assert (callee_sig);

	CallInfo *caller_info = get_call_info (NULL, caller_sig);
	CallInfo *callee_info = get_call_info (NULL, callee_sig);

	/*
	 * Tailcalls with more callee stack usage than the caller cannot be supported, since
	 * the extra stack space would be left on the stack after the tailcall.
	 */
	gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
		&& IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);

	// FIXME The limit here is that moving the parameters requires addressing the parameters
	// with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
	res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (4096 - 4));
	res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (4096 - 4));

	g_free (caller_info);
	g_free (callee_info);

	return res;
}
/*
 * debug_omit_fp:
 *
 *   Debug hook for bisecting frame-pointer-omission problems: flip the
 * #if to 0/1 to gate omission through mono_debug_count (). In normal
 * builds it unconditionally allows omitting the frame pointer.
 */
static gboolean
debug_omit_fp (void)
{
#if 0
	return mono_debug_count ();
#else
	return TRUE;
#endif
}
/*
 * mono_arch_compute_omit_fp:
 * Determine whether the frame pointer can be eliminated.
 *
 * Sets cfg->arch.omit_fp (and omit_fp_computed, so the work is done only
 * once). The frame pointer must be kept whenever something needs a stable
 * frame base: LMF saving, alloca, exception clauses, a param area, vararg
 * calls, call tracing, or stack-relative arguments whose offsets are only
 * known once the final frame size is.
 */
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i, locals_size;
	CallInfo *cinfo;

	if (cfg->arch.omit_fp_computed)
		return;

	header = cfg->header;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	/*
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;
	cfg->arch.omit_fp_computed = TRUE;

	if (cfg->disable_omit_fp)
		cfg->arch.omit_fp = FALSE;
	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;

	if (cfg->method->save_lmf)
		cfg->arch.omit_fp = FALSE;

	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
			/*
			 * The stack offset can only be determined when the frame
			 * size is known.
			 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	/* NOTE(review): locals_size is accumulated but not used by any code
	 * visible here — apparently leftover from an older heuristic. */
	locals_size = 0;
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		int ialign;

		locals_size += mono_type_size (ins->inst_vtype, &ialign);
	}
}
/*
 * Set var information according to the calling convention. arm version.
 * The locals var stuff should most likely be split in another method.
 *
 * Assigns every return value, local and incoming argument either a
 * register (OP_REGVAR) or a frame slot (OP_REGOFFSET relative to
 * cfg->frame_reg). Offsets grow upwards from the stack pointer
 * (MONO_CFG_HAS_SPILLUP); the final aligned size ends up in
 * cfg->stack_offset.
 */
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *ins;
	MonoType *sig_ret;
	int i, offset, size, align, curinst;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	guint32 ualign;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;
	sig_ret = mini_get_underlying_type (sig->ret);

	mono_arch_compute_omit_fp (cfg);

	if (cfg->arch.omit_fp)
		cfg->frame_reg = ARMREG_SP;
	else
		cfg->frame_reg = ARMREG_FP;

	cfg->flags |= MONO_CFG_HAS_SPILLUP;

	/* allow room for the vararg method args: void* and long/double */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		cfg->param_area = MAX (cfg->param_area, sizeof (target_mgreg_t)*8);

	header = cfg->header;

	/* See mono_arch_get_global_int_regs () */
	if (cfg->flags & MONO_CFG_HAS_CALLS)
		cfg->uses_rgctx_reg = TRUE;

	if (cfg->frame_reg != ARMREG_SP)
		cfg->used_int_regs |= 1 << cfg->frame_reg;

	if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		cfg->used_int_regs |= (1 << MONO_ARCH_IMT_REG);

	offset = 0;
	curinst = 0;
	/* Scalar returns live directly in r0. */
	if (!MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage != RegTypeStructByAddr) {
		if (sig_ret->type != MONO_TYPE_VOID) {
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = ARMREG_R0;
		}
	}
	/* local vars are at a positive offset from the stack pointer */
	/*
	 * also note that if the function uses alloca, we use FP
	 * to point at the local variables.
	 */
	offset = 0; /* linkage area */
	/* align the offset to 16 bytes: not sure this is needed here */
	//offset += 8 - 1;
	//offset &= ~(8 - 1);

	/* add parameter area size for called functions */
	offset += cfg->param_area;
	offset += 8 - 1;
	offset &= ~(8 - 1);
	if (cfg->flags & MONO_CFG_HAS_FPOUT)
		offset += 8;

	/* allow room to save the return value */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		offset += 8;

	switch (cinfo->ret.storage) {
	case RegTypeStructByVal:
	case RegTypeHFA:
		/* Allocate a local to hold the result, the epilog will copy it to the correct place */
		offset = ALIGN_TO (offset, 8);
		cfg->ret->opcode = OP_REGOFFSET;
		cfg->ret->inst_basereg = cfg->frame_reg;
		cfg->ret->inst_offset = offset;
		if (cinfo->ret.storage == RegTypeStructByVal)
			offset += cinfo->ret.nregs * sizeof (target_mgreg_t);
		else
			offset += 32;
		break;
	case RegTypeStructByAddr:
		/* Frame slot holding the caller-provided return buffer address. */
		ins = cfg->vret_addr;
		offset += sizeof (target_mgreg_t) - 1;
		offset &= ~(sizeof (target_mgreg_t) - 1);
		ins->inst_offset = offset;
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			g_print ("vret_addr =");
			mono_print_ins (cfg->vret_addr);
		}
		offset += sizeof (target_mgreg_t);
		break;
	default:
		break;
	}

	/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_info_var;

		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	if (cfg->arch.ss_trigger_page_var) {
		MonoInst *ins;

		ins = cfg->arch.ss_trigger_page_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	if (cfg->arch.seq_point_ss_method_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_ss_method_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	if (cfg->arch.seq_point_bp_method_var) {
		MonoInst *ins;

		ins = cfg->arch.seq_point_bp_method_var;
		size = 4;
		align = 4;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
		/* Allocate a temporary used by the atomic ops */
		size = 4;
		align = 4;

		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->arch.atomic_tmp_offset = offset;
		offset += size;
	} else {
		cfg->arch.atomic_tmp_offset = -1;
	}

	cfg->locals_min_stack_offset = offset;

	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		MonoType *t;

		ins = cfg->varinfo [i];
		if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
			continue;

		t = ins->inst_vtype;
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		* pinvoke wrappers when they call functions returning structure */
		if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type_internal (t), &ualign);
			align = ualign;
		}
		else
			size = mono_type_size (t, &align);

		/* FIXME: if a structure is misaligned, our memcpy doesn't work,
		 * since it loads/stores misaligned words, which don't do the right thing.
		 */
		if (align < 4 && size >= 4)
			align = 4;
		if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
			mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_offset = offset;
		ins->inst_basereg = cfg->frame_reg;
		offset += size;
		//g_print ("allocating local %d to %d\n", i, inst->inst_offset);
	}

	cfg->locals_max_stack_offset = offset;

	curinst = 0;
	if (sig->hasthis) {
		ins = cfg->args [curinst];
		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			offset += sizeof (target_mgreg_t) - 1;
			offset &= ~(sizeof (target_mgreg_t) - 1);
			ins->inst_offset = offset;
			offset += sizeof (target_mgreg_t);
		}
		curinst++;
	}

	if (sig->call_convention == MONO_CALL_VARARG) {
		size = 4;
		align = 4;

		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->sig_cookie = offset;
		offset += size;
	}

	for (i = 0; i < sig->param_count; ++i) {
		ainfo = cinfo->args + i;

		ins = cfg->args [curinst];

		switch (ainfo->storage) {
		case RegTypeHFA:
			offset = ALIGN_TO (offset, 8);
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			/* These arguments are saved to the stack in the prolog */
			ins->inst_offset = offset;
			if (cfg->verbose_level >= 2)
				g_print ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
			// FIXME:
			offset += 32;
			break;
		default:
			break;
		}

		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			size = mini_type_stack_size_full (sig->params [i], &ualign, sig->pinvoke);
			align = ualign;
			/* FIXME: if a structure is misaligned, our memcpy doesn't work,
			 * since it loads/stores misaligned words, which don't do the right thing.
			 */
			if (align < 4 && size >= 4)
				align = 4;
			/* The code in the prolog () stores words when storing vtypes received in a register */
			if (MONO_TYPE_ISSTRUCT (sig->params [i]))
				align = 4;
			if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
				mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
			offset += align - 1;
			offset &= ~(align - 1);
			ins->inst_offset = offset;
			offset += size;
		}
		curinst++;
	}

	/* align the offset to 8 bytes */
	if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
		mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
	offset += 8 - 1;
	offset &= ~(8 - 1);

	/* change sign? */
	cfg->stack_offset = offset;
}
/*
 * mono_arch_create_vars:
 *
 *   Create the arch-specific compile-time variables: two VFP scratch
 * slots under the hard-float ABI, the hidden vret_addr argument for
 * by-address struct returns, and the sequence-point variables used by
 * the soft debugger (AOT info var, single-step trigger page, and the
 * soft-breakpoint method pointers).
 */
void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;
	int i;

	sig = mono_method_signature_internal (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	if (IS_HARD_FLOAT) {
		for (i = 0; i < 2; i++) {
			MonoInst *inst = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL);
			inst->flags |= MONO_INST_VOLATILE;

			cfg->arch.vfp_scratch_slots [i] = inst;
		}
	}

	if (cinfo->ret.storage == RegTypeStructByVal)
		cfg->ret_var_is_local = TRUE;

	if (cinfo->ret.storage == RegTypeStructByAddr) {
		cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			g_print ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}

	if (cfg->gen_sdb_seq_points) {
		if (cfg->compile_aot) {
			MonoInst *ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_info_var = ins;

			if (!cfg->soft_breakpoints) {
				/* Allocate a separate variable for this to save 1 load per seq point */
				ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
				ins->flags |= MONO_INST_VOLATILE;
				cfg->arch.ss_trigger_page_var = ins;
			}
		}
		if (cfg->soft_breakpoints) {
			MonoInst *ins;

			ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_ss_method_var = ins;

			ins = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_bp_method_var = ins;
		}
	}
}
/*
 * emit_sig_cookie:
 *
 *   Emit the signature cookie for a vararg call: a duplicated signature
 * describing only the arguments after the sentinel is stored at the stack
 * offset computed by get_call_info (). Not supported for tailcalls.
 */
2274 static void
2275 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2277 MonoMethodSignature *tmp_sig;
2278 int sig_reg;
2280 if (MONO_IS_TAILCALL_OPCODE (call))
2281 NOT_IMPLEMENTED;
2283 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2286 * mono_ArgIterator_Setup assumes the signature cookie is
2287 * passed first and all the arguments which were before it are
2288 * passed on the stack after the signature. So compensate by
2289 * passing a different signature.
2291 tmp_sig = mono_metadata_signature_dup (call->signature);
2292 tmp_sig->param_count -= call->signature->sentinelpos;
2293 tmp_sig->sentinelpos = 0;
2294 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2296 sig_reg = mono_alloc_ireg (cfg);
2297 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
/* The cookie is always passed on the stack (RegTypeBase asserted above) */
2299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
2302 #ifdef ENABLE_LLVM
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate the CallInfo computed by get_call_info () into the
 * LLVMCallInfo representation used by the LLVM backend. Storage kinds
 * without a mapping disable LLVM compilation for this method by setting
 * cfg->disable_llvm and cfg->exception_message.
 */
2303 LLVMCallInfo*
2304 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2306 int i, n;
2307 CallInfo *cinfo;
2308 ArgInfo *ainfo;
2309 LLVMCallInfo *linfo;
2311 n = sig->param_count + sig->hasthis;
2313 cinfo = get_call_info (cfg->mempool, sig);
2315 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2318 * LLVM always uses the native ABI while we use our own ABI, the
2319 * only difference is the handling of vtypes:
2320 * - we only pass/receive them in registers in some cases, and only
2321 * in 1 or 2 integer registers.
/* Map the return value storage first */
2323 switch (cinfo->ret.storage) {
2324 case RegTypeGeneral:
2325 case RegTypeNone:
2326 case RegTypeFP:
2327 case RegTypeIRegPair:
2328 break;
2329 case RegTypeStructByAddr:
2330 if (sig->pinvoke) {
2331 linfo->ret.storage = LLVMArgVtypeByRef;
2332 } else {
2333 /* Vtype returned using a hidden argument */
2334 linfo->ret.storage = LLVMArgVtypeRetAddr;
2335 linfo->vret_arg_index = cinfo->vret_arg_index;
2337 break;
2338 #if TARGET_WATCHOS
2339 case RegTypeStructByVal:
2340 /* LLVM models this by returning an int array */
2341 linfo->ret.storage = LLVMArgAsIArgs;
2342 linfo->ret.nslots = cinfo->ret.nregs;
2343 break;
2344 #endif
2345 case RegTypeHFA:
2346 linfo->ret.storage = LLVMArgFpStruct;
2347 linfo->ret.nslots = cinfo->ret.nregs;
2348 linfo->ret.esize = cinfo->ret.esize;
2349 break;
2350 default:
2351 cfg->exception_message = g_strdup_printf ("unknown ret conv (%d)", cinfo->ret.storage);
2352 cfg->disable_llvm = TRUE;
2353 return linfo;
/* Now map every argument, including the implicit this */
2356 for (i = 0; i < n; ++i) {
2357 LLVMArgInfo *lainfo = &linfo->args [i];
2358 ainfo = cinfo->args + i;
2360 lainfo->storage = LLVMArgNone;
2362 switch (ainfo->storage) {
2363 case RegTypeGeneral:
2364 case RegTypeIRegPair:
2365 case RegTypeBase:
2366 case RegTypeBaseGen:
2367 case RegTypeFP:
2368 lainfo->storage = LLVMArgNormal;
2369 break;
2370 case RegTypeStructByVal: {
2371 lainfo->storage = LLVMArgAsIArgs;
/* With EABI, 8-byte aligned vtypes are split into 8 byte slots, otherwise 4 */
2372 int slotsize = eabi_supported && ainfo->align == 8 ? 8 : 4;
2373 lainfo->nslots = ALIGN_TO (ainfo->struct_size, slotsize) / slotsize;
2374 lainfo->esize = slotsize;
2375 break;
2377 case RegTypeStructByAddr:
2378 case RegTypeStructByAddrOnStack:
2379 lainfo->storage = LLVMArgVtypeByRef;
2380 break;
2381 case RegTypeHFA: {
2382 int j;
2384 lainfo->storage = LLVMArgAsFpArgs;
2385 lainfo->nslots = ainfo->nregs;
2386 lainfo->esize = ainfo->esize;
2387 for (j = 0; j < ainfo->nregs; ++j)
2388 lainfo->pair_storage [j] = LLVMArgInFPReg;
2389 break;
2391 default:
2392 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2393 cfg->disable_llvm = TRUE;
2394 break;
2398 return linfo;
2400 #endif
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR needed to pass the arguments of CALL according to the
 * ARM calling convention computed by get_call_info (): moves into the
 * argument registers, stores into the outgoing stack area, vtype
 * marshalling through OP_OUTARG_VT, and the vararg signature cookie.
 */
2402 void
2403 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2405 MonoInst *in, *ins;
2406 MonoMethodSignature *sig;
2407 int i, n;
2408 CallInfo *cinfo;
2410 sig = call->signature;
2411 n = sig->param_count + sig->hasthis;
2413 cinfo = get_call_info (cfg->mempool, sig);
/* First handle how the return value is received */
2415 switch (cinfo->ret.storage) {
2416 case RegTypeStructByVal:
2417 case RegTypeHFA:
2418 if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
2419 /* The JIT will transform this into a normal call */
2420 call->vret_in_reg = TRUE;
2421 break;
2423 if (MONO_IS_TAILCALL_OPCODE (call))
2424 break;
2426 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2427 * the location pointed to by it after call in emit_move_return_value ().
2429 if (!cfg->arch.vret_addr_loc) {
2430 cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2431 /* Prevent it from being register allocated or optimized away */
2432 cfg->arch.vret_addr_loc->flags |= MONO_INST_VOLATILE;
2435 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->arch.vret_addr_loc->dreg, call->vret_var->dreg);
2436 break;
2437 case RegTypeStructByAddr: {
/* Pass the address of the return area as a hidden argument register */
2438 MonoInst *vtarg;
2439 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2440 vtarg->sreg1 = call->vret_var->dreg;
2441 vtarg->dreg = mono_alloc_preg (cfg);
2442 MONO_ADD_INS (cfg->cbb, vtarg);
2444 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2445 break;
2447 default:
2448 break;
/* Now emit each argument (i counts the implicit this as well) */
2451 for (i = 0; i < n; ++i) {
2452 ArgInfo *ainfo = cinfo->args + i;
2453 MonoType *t;
2455 if (i >= sig->hasthis)
2456 t = sig->params [i - sig->hasthis];
2457 else
2458 t = mono_get_int_type ();
2459 t = mini_get_underlying_type (t);
2461 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2462 /* Emit the signature cookie just before the implicit arguments */
2463 emit_sig_cookie (cfg, call, cinfo);
2466 in = call->args [i];
2468 switch (ainfo->storage) {
2469 case RegTypeGeneral:
2470 case RegTypeIRegPair:
/* Longs occupy a register pair: move both halves separately */
2471 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2472 MONO_INST_NEW (cfg, ins, OP_MOVE);
2473 ins->dreg = mono_alloc_ireg (cfg);
2474 ins->sreg1 = MONO_LVREG_LS (in->dreg);
2475 MONO_ADD_INS (cfg->cbb, ins);
2476 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2478 MONO_INST_NEW (cfg, ins, OP_MOVE);
2479 ins->dreg = mono_alloc_ireg (cfg);
2480 ins->sreg1 = MONO_LVREG_MS (in->dreg);
2481 MONO_ADD_INS (cfg->cbb, ins);
2482 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2483 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2484 if (ainfo->size == 4) {
2485 if (IS_SOFT_FLOAT) {
2486 /* mono_emit_call_args () have already done the r8->r4 conversion */
2487 /* The converted value is in an int vreg */
2488 MONO_INST_NEW (cfg, ins, OP_MOVE);
2489 ins->dreg = mono_alloc_ireg (cfg);
2490 ins->sreg1 = in->dreg;
2491 MONO_ADD_INS (cfg->cbb, ins);
2492 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2493 } else {
/* Bounce the fp value through the param area to get it into an int reg */
2494 int creg;
2496 cfg->param_area = MAX (cfg->param_area, 8);
2497 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2498 creg = mono_alloc_ireg (cfg);
2499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2500 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2502 } else {
2503 if (IS_SOFT_FLOAT) {
2504 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2505 ins->dreg = mono_alloc_ireg (cfg);
2506 ins->sreg1 = in->dreg;
2507 MONO_ADD_INS (cfg->cbb, ins);
2508 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2510 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2511 ins->dreg = mono_alloc_ireg (cfg);
2512 ins->sreg1 = in->dreg;
2513 MONO_ADD_INS (cfg->cbb, ins);
2514 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2515 } else {
/* Double passed in two int regs: store it, then reload both words */
2516 int creg;
2518 cfg->param_area = MAX (cfg->param_area, 8);
2519 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2520 creg = mono_alloc_ireg (cfg);
2521 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2522 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2523 creg = mono_alloc_ireg (cfg);
2524 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2525 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2528 cfg->flags |= MONO_CFG_HAS_FPOUT;
2529 } else {
2530 MONO_INST_NEW (cfg, ins, OP_MOVE);
2531 ins->dreg = mono_alloc_ireg (cfg);
2532 ins->sreg1 = in->dreg;
2533 MONO_ADD_INS (cfg->cbb, ins);
2535 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2537 break;
2538 case RegTypeStructByVal:
2539 case RegTypeGSharedVtInReg:
2540 case RegTypeGSharedVtOnStack:
2541 case RegTypeHFA:
2542 case RegTypeStructByAddr:
2543 case RegTypeStructByAddrOnStack:
/* Vtypes are handled later in mono_arch_emit_outarg_vt (); the ArgInfo
 * is copied since cinfo lives in the compile mempool. */
2544 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2545 ins->opcode = OP_OUTARG_VT;
2546 ins->sreg1 = in->dreg;
2547 ins->klass = in->klass;
2548 ins->inst_p0 = call;
2549 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2550 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2551 mono_call_inst_add_outarg_vt (cfg, call, ins);
2552 MONO_ADD_INS (cfg->cbb, ins);
2553 break;
2554 case RegTypeBase:
/* Argument lives entirely in the outgoing stack area */
2555 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2556 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2557 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2558 if (t->type == MONO_TYPE_R8) {
2559 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2560 } else {
2561 if (IS_SOFT_FLOAT)
2562 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2563 else
2564 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2566 } else {
2567 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2569 break;
2570 case RegTypeBaseGen:
/* Argument split between the last register (r3) and the stack */
2571 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2572 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? MONO_LVREG_LS (in->dreg) : MONO_LVREG_MS (in->dreg));
2573 MONO_INST_NEW (cfg, ins, OP_MOVE);
2574 ins->dreg = mono_alloc_ireg (cfg);
2575 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? MONO_LVREG_MS (in->dreg) : MONO_LVREG_LS (in->dreg);
2576 MONO_ADD_INS (cfg->cbb, ins);
2577 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2578 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2579 int creg;
2581 /* This should work for soft-float as well */
2583 cfg->param_area = MAX (cfg->param_area, 8);
2584 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2585 creg = mono_alloc_ireg (cfg);
2586 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2588 creg = mono_alloc_ireg (cfg);
2589 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2590 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2591 cfg->flags |= MONO_CFG_HAS_FPOUT;
2592 } else {
2593 g_assert_not_reached ();
2595 break;
2596 case RegTypeFP: {
2597 int fdreg = mono_alloc_freg (cfg);
2599 if (ainfo->size == 8) {
2600 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2601 ins->sreg1 = in->dreg;
2602 ins->dreg = fdreg;
2603 MONO_ADD_INS (cfg->cbb, ins);
2605 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2606 } else {
2607 FloatArgData *fad;
2610 * Mono's register allocator doesn't speak single-precision registers that
2611 * overlap double-precision registers (i.e. armhf). So we have to work around
2612 * the register allocator and load the value from memory manually.
2614 * So we create a variable for the float argument and an instruction to store
2615 * the argument into the variable. We then store the list of these arguments
2616 * in call->float_args. This list is then used by emit_float_args later to
2617 * pass the arguments in the various call opcodes.
2619 * This is not very nice, and we should really try to fix the allocator.
2622 MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
2624 /* Make sure the instruction isn't seen as pointless and removed.
2626 float_arg->flags |= MONO_INST_VOLATILE;
2628 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2630 /* We use the dreg to look up the instruction later. The hreg is used to
2631 * emit the instruction that loads the value into the FP reg.
2633 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2634 fad->vreg = float_arg->dreg;
2635 fad->hreg = ainfo->reg;
2637 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2640 call->used_iregs |= 1 << ainfo->reg;
2641 cfg->flags |= MONO_CFG_HAS_FPOUT;
2642 break;
2644 default:
2645 g_assert_not_reached ();
2649 /* Handle the case where there are no implicit arguments */
2650 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2651 emit_sig_cookie (cfg, call, cinfo);
2653 call->call_info = cinfo;
2654 call->stack_usage = cinfo->stack_usage;
2657 static void
2658 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
2660 MonoInst *ins;
2662 switch (storage) {
2663 case RegTypeFP:
2664 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2665 ins->dreg = mono_alloc_freg (cfg);
2666 ins->sreg1 = arg->dreg;
2667 MONO_ADD_INS (cfg->cbb, ins);
2668 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
2669 break;
2670 default:
2671 g_assert_not_reached ();
2672 break;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Emit the IR to pass the vtype in SRC as the argument described by the
 * ArgInfo stored in INS->INST_P1: by address (in a reg or on the stack),
 * as an HFA in fp registers, or word-by-word in integer registers with
 * the remainder memcpy'd to the stack overflow area.
 */
2676 void
2677 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2679 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2680 MonoInst *load;
2681 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
2682 int ovf_size = ainfo->vtsize;
2683 int doffset = ainfo->offset;
2684 int struct_size = ainfo->struct_size;
2685 int i, soffset, dreg, tmpreg;
2687 switch (ainfo->storage) {
2688 case RegTypeGSharedVtInReg:
2689 case RegTypeStructByAddr:
2690 /* Pass by addr */
2691 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2692 break;
2693 case RegTypeGSharedVtOnStack:
2694 case RegTypeStructByAddrOnStack:
2695 /* Pass by addr on stack */
2696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
2697 break;
2698 case RegTypeHFA:
/* Load each HFA element from memory into an fp register */
2699 for (i = 0; i < ainfo->nregs; ++i) {
2700 if (ainfo->esize == 4)
2701 MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
2702 else
2703 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
2704 load->dreg = mono_alloc_freg (cfg);
2705 load->inst_basereg = src->dreg;
2706 load->inst_offset = i * ainfo->esize;
2707 MONO_ADD_INS (cfg->cbb, load);
2709 if (ainfo->esize == 4) {
2710 FloatArgData *fad;
2712 /* See RegTypeFP in mono_arch_emit_call () */
2713 MonoInst *float_arg = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.single_class), OP_LOCAL);
2714 float_arg->flags |= MONO_INST_VOLATILE;
2715 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, load->dreg);
2717 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2718 fad->vreg = float_arg->dreg;
2719 fad->hreg = ainfo->reg + i;
2721 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2722 } else {
2723 add_outarg_reg (cfg, call, RegTypeFP, ainfo->reg + (i * 2), load);
2726 break;
2727 default:
/* Pass the register-resident part of the vtype word by word */
2728 soffset = 0;
2729 for (i = 0; i < ainfo->size; ++i) {
2730 dreg = mono_alloc_ireg (cfg);
2731 switch (struct_size) {
2732 case 1:
2733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2734 break;
2735 case 2:
2736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2737 break;
2738 case 3:
/* Assemble 3 bytes into one word with shifts and ors */
2739 tmpreg = mono_alloc_ireg (cfg);
2740 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2743 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2745 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2746 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2747 break;
2748 default:
2749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2750 break;
2752 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2753 soffset += sizeof (target_mgreg_t);
2754 struct_size -= sizeof (target_mgreg_t);
2756 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy the remainder of the vtype into the stack overflow area */
2757 if (ovf_size != 0)
2758 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (target_mgreg_t), struct_size), struct_size < 4 ? 1 : 4);
2759 break;
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR to move VAL into the return register(s) of METHOD, handling
 * long returns and the different ARM fpu configurations.
 */
2763 void
2764 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2766 MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
2768 if (!ret->byref) {
2769 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2770 MonoInst *ins;
2772 if (COMPILE_LLVM (cfg)) {
2773 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2774 } else {
/* Longs are returned as two halves, set both with OP_SETLRET */
2775 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2776 ins->sreg1 = MONO_LVREG_LS (val->dreg);
2777 ins->sreg2 = MONO_LVREG_MS (val->dreg);
2778 MONO_ADD_INS (cfg->cbb, ins);
2780 return;
2782 switch (arm_fpu) {
2783 case MONO_ARM_FPU_NONE:
2784 if (ret->type == MONO_TYPE_R8) {
2785 MonoInst *ins;
2787 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2788 ins->dreg = cfg->ret->dreg;
2789 ins->sreg1 = val->dreg;
2790 MONO_ADD_INS (cfg->cbb, ins);
2791 return;
2793 if (ret->type == MONO_TYPE_R4) {
2794 /* Already converted to an int in method_to_ir () */
2795 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2796 return;
2798 break;
2799 case MONO_ARM_FPU_VFP:
2800 case MONO_ARM_FPU_VFP_HARD:
2801 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2802 MonoInst *ins;
2804 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2805 ins->dreg = cfg->ret->dreg;
2806 ins->sreg1 = val->dreg;
2807 MONO_ADD_INS (cfg->cbb, ins);
2808 return;
2810 break;
2811 default:
2812 g_assert_not_reached ();
/* Everything else is returned with a plain move into the return vreg */
2816 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2819 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_is_inst_imm:
 *
 *   Return whether IMM is acceptable as the immediate operand of
 * IMM_OPCODE. Every immediate is accepted here; out-of-range values are
 * presumably handled by the backend's lowering pass — TODO confirm.
 */
2821 gboolean
2822 mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
2824 return TRUE;
/* Per-signature data precomputed by mono_arch_dyn_call_prepare () */
2827 typedef struct {
2828 MonoMethodSignature *sig;
/* Result of get_call_info () for SIG; freed in mono_arch_dyn_call_free () */
2829 CallInfo *cinfo;
/* Underlying return type of SIG */
2830 MonoType *rtype;
/* Underlying parameter types, allocated with g_new0 () */
2831 MonoType **param_types;
2832 } ArchDynCallInfo;
/*
 * dyn_call_supported:
 *
 *   Return whether the dyn call code (mono_arch_start_dyn_call () etc.)
 * can handle calls with signature SIG / call info CINFO. Long arguments,
 * soft-float fp arguments and unknown storage kinds are rejected.
 */
2834 static gboolean
2835 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2837 int i;
2839 switch (cinfo->ret.storage) {
2840 case RegTypeNone:
2841 case RegTypeGeneral:
2842 case RegTypeIRegPair:
2843 case RegTypeStructByAddr:
2844 break;
2845 case RegTypeFP:
2846 if (IS_VFP)
2847 break;
2848 else
2849 return FALSE;
2850 default:
2851 return FALSE;
2854 for (i = 0; i < cinfo->nargs; ++i) {
2855 ArgInfo *ainfo = &cinfo->args [i];
2856 int last_slot;
2858 switch (ainfo->storage) {
2859 case RegTypeGeneral:
2860 case RegTypeIRegPair:
2861 case RegTypeBaseGen:
2862 case RegTypeFP:
2863 break;
2864 case RegTypeBase:
2865 break;
2866 case RegTypeStructByVal:
2867 if (ainfo->size == 0)
2868 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2869 else
2870 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2871 break;
2872 default:
2873 return FALSE;
2877 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2878 for (i = 0; i < sig->param_count; ++i) {
2879 MonoType *t = sig->params [i];
2881 if (t->byref)
2882 continue;
2884 t = mini_get_underlying_type (t);
2886 switch (t->type) {
2887 case MONO_TYPE_R4:
2888 case MONO_TYPE_R8:
2889 if (IS_SOFT_FLOAT)
2890 return FALSE;
2891 else
2892 break;
2894 case MONO_TYPE_I8:
2895 case MONO_TYPE_U8:
/* 64 bit arguments are not supported by the dyn call code on ARM */
2896 return FALSE;
2898 default:
2899 break;
2903 return TRUE;
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Precompute call info for SIG so repeated dyn calls are cheap.
 * Returns NULL if SIG is not supported by the dyn call code. The result
 * must be freed with mono_arch_dyn_call_free ().
 */
2906 MonoDynCallInfo*
2907 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2909 ArchDynCallInfo *info;
2910 CallInfo *cinfo;
2911 int i;
2913 cinfo = get_call_info (NULL, sig);
2915 if (!dyn_call_supported (cinfo, sig)) {
2916 g_free (cinfo);
2917 return NULL;
2920 info = g_new0 (ArchDynCallInfo, 1);
2921 // FIXME: Preprocess the info to speed up start_dyn_call ()
2922 info->sig = sig;
2923 info->cinfo = cinfo;
2924 info->rtype = mini_get_underlying_type (sig->ret);
/* Cache the underlying param types so start_dyn_call () doesn't recompute them */
2925 info->param_types = g_new0 (MonoType*, sig->param_count);
2926 for (i = 0; i < sig->param_count; ++i)
2927 info->param_types [i] = mini_get_underlying_type (sig->params [i]);
2929 return (MonoDynCallInfo*)info;
2932 void
2933 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2935 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2937 g_free (ainfo->cinfo);
2938 g_free (ainfo);
/*
 * mono_arch_dyn_call_get_buf_size:
 *
 *   Return the size of the buffer which must be passed to
 * mono_arch_start_dyn_call (): the DynCallArgs header plus the stack
 * area used by the call.
 */
2942 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info)
2944 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2946 g_assert (ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0);
2947 return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage;
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal the argument array ARGS into the DynCallArgs structure in BUF
 * according to the precomputed INFO, ready for the dyn call trampoline.
 * RET is the buffer the return value will be stored into by
 * mono_arch_finish_dyn_call ().
 */
2950 void
2951 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf)
2953 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2954 CallInfo *cinfo = dinfo->cinfo;
2955 DynCallArgs *p = (DynCallArgs*)buf;
2956 int arg_index, greg, i, j, pindex;
2957 MonoMethodSignature *sig = dinfo->sig;
2959 p->res = 0;
2960 p->ret = ret;
2961 p->has_fpregs = 0;
2962 p->n_stackargs = cinfo->stack_usage / sizeof (host_mgreg_t);
2964 arg_index = 0;
2965 greg = 0;
2966 pindex = 0;
/* The this argument (or the first arg when the vret address comes first) uses the first reg */
2968 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2969 p->regs [greg ++] = (host_mgreg_t)(gsize)*(args [arg_index ++]);
2970 if (!sig->hasthis)
2971 pindex = 1;
2974 if (dinfo->cinfo->ret.storage == RegTypeStructByAddr)
2975 p->regs [greg ++] = (host_mgreg_t)(gsize)ret;
2977 for (i = pindex; i < sig->param_count; i++) {
2978 MonoType *t = dinfo->param_types [i];
2979 gpointer *arg = args [arg_index ++];
2980 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
2981 int slot = -1;
/* Compute which p->regs slot receives this argument; stack slots follow
 * the PARAM_REGS register slots. RegTypeFP stores into p->fpregs below. */
2983 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal) {
2984 slot = ainfo->reg;
2985 } else if (ainfo->storage == RegTypeFP) {
2986 } else if (ainfo->storage == RegTypeBase) {
2987 slot = PARAM_REGS + (ainfo->offset / 4);
2988 } else if (ainfo->storage == RegTypeBaseGen) {
2989 /* slot + 1 is the first stack slot, so the code below will work */
2990 slot = 3;
2991 } else {
2992 g_assert_not_reached ();
2995 if (t->byref) {
2996 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
2997 continue;
3000 switch (t->type) {
3001 case MONO_TYPE_OBJECT:
3002 case MONO_TYPE_PTR:
3003 case MONO_TYPE_I:
3004 case MONO_TYPE_U:
3005 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
3006 break;
3007 case MONO_TYPE_U1:
3008 p->regs [slot] = *(guint8*)arg;
3009 break;
3010 case MONO_TYPE_I1:
3011 p->regs [slot] = *(gint8*)arg;
3012 break;
3013 case MONO_TYPE_I2:
3014 p->regs [slot] = *(gint16*)arg;
3015 break;
3016 case MONO_TYPE_U2:
3017 p->regs [slot] = *(guint16*)arg;
3018 break;
3019 case MONO_TYPE_I4:
3020 p->regs [slot] = *(gint32*)arg;
3021 break;
3022 case MONO_TYPE_U4:
3023 p->regs [slot] = *(guint32*)arg;
3024 break;
3025 case MONO_TYPE_I8:
3026 case MONO_TYPE_U8:
/* 64 bit values occupy two consecutive slots */
3027 p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
3028 p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
3029 break;
3030 case MONO_TYPE_R4:
3031 if (ainfo->storage == RegTypeFP) {
3032 float f = *(float*)arg;
3033 p->fpregs [ainfo->reg / 2] = *(double*)&f;
3034 p->has_fpregs = 1;
3035 } else {
3036 p->regs [slot] = *(host_mgreg_t*)arg;
3038 break;
3039 case MONO_TYPE_R8:
3040 if (ainfo->storage == RegTypeFP) {
3041 p->fpregs [ainfo->reg / 2] = *(double*)arg;
3042 p->has_fpregs = 1;
3043 } else {
3044 p->regs [slot ++] = (host_mgreg_t)(gsize)arg [0];
3045 p->regs [slot] = (host_mgreg_t)(gsize)arg [1];
3047 break;
3048 case MONO_TYPE_GENERICINST:
3049 if (MONO_TYPE_IS_REFERENCE (t)) {
3050 p->regs [slot] = (host_mgreg_t)(gsize)*arg;
3051 break;
3052 } else {
/* Nullable<T> is unboxed into a stack buffer first, then falls
 * through to the VALUETYPE case below. */
3053 if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type_internal (t))) {
3054 MonoClass *klass = mono_class_from_mono_type_internal (t);
3055 guint8 *nullable_buf;
3056 int size;
3058 size = mono_class_value_size (klass, NULL);
3059 nullable_buf = g_alloca (size);
3060 g_assert (nullable_buf);
3062 /* The argument pointed to by arg is either a boxed vtype or null */
3063 mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);
3065 arg = (gpointer*)nullable_buf;
3066 /* Fall though */
3067 } else {
3068 /* Fall though */
3071 case MONO_TYPE_VALUETYPE:
3072 g_assert (ainfo->storage == RegTypeStructByVal);
3074 if (ainfo->size == 0)
3075 slot = PARAM_REGS + (ainfo->offset / 4);
3076 else
3077 slot = ainfo->reg;
3079 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
3080 p->regs [slot ++] = ((host_mgreg_t*)arg) [j];
3081 break;
3082 default:
3083 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Read the result of a dyn call from the DynCallArgs in BUF and store
 * it into the return buffer registered by mono_arch_start_dyn_call ().
 */
3088 void
3089 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
3091 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
3092 DynCallArgs *p = (DynCallArgs*)buf;
3093 MonoType *ptype = ainfo->rtype;
3094 guint8 *ret = p->ret;
3095 host_mgreg_t res = p->res;
3096 host_mgreg_t res2 = p->res2;
3098 switch (ptype->type) {
3099 case MONO_TYPE_VOID:
3100 *(gpointer*)ret = NULL;
3101 break;
3102 case MONO_TYPE_OBJECT:
3103 case MONO_TYPE_I:
3104 case MONO_TYPE_U:
3105 case MONO_TYPE_PTR:
3106 *(gpointer*)ret = (gpointer)(gsize)res;
3107 break;
3108 case MONO_TYPE_I1:
3109 *(gint8*)ret = res;
3110 break;
3111 case MONO_TYPE_U1:
3112 *(guint8*)ret = res;
3113 break;
3114 case MONO_TYPE_I2:
3115 *(gint16*)ret = res;
3116 break;
3117 case MONO_TYPE_U2:
3118 *(guint16*)ret = res;
3119 break;
3120 case MONO_TYPE_I4:
3121 *(gint32*)ret = res;
3122 break;
3123 case MONO_TYPE_U4:
3124 *(guint32*)ret = res;
3125 break;
3126 case MONO_TYPE_I8:
3127 case MONO_TYPE_U8:
3128 /* This handles endianness as well */
3129 ((gint32*)ret) [0] = res;
3130 ((gint32*)ret) [1] = res2;
3131 break;
3132 case MONO_TYPE_GENERICINST:
3133 if (MONO_TYPE_IS_REFERENCE (ptype)) {
3134 *(gpointer*)ret = (gpointer)res;
3135 break;
3136 } else {
3137 /* Fall though */
3139 case MONO_TYPE_VALUETYPE:
/* Vtype result was written through the hidden return address argument */
3140 g_assert (ainfo->cinfo->ret.storage == RegTypeStructByAddr);
3141 /* Nothing to do */
3142 break;
3143 case MONO_TYPE_R4:
3144 g_assert (IS_VFP);
/* Hard float: result in fpregs [0]; soft fp ABI: result in the core reg */
3145 if (IS_HARD_FLOAT)
3146 *(float*)ret = *(float*)&p->fpregs [0];
3147 else
3148 *(float*)ret = *(float*)&res;
3149 break;
3150 case MONO_TYPE_R8: {
3151 host_mgreg_t regs [2];
3153 g_assert (IS_VFP);
3154 if (IS_HARD_FLOAT) {
3155 *(double*)ret = p->fpregs [0];
3156 } else {
3157 regs [0] = res;
3158 regs [1] = res2;
3160 *(double*)ret = *(double*)&regs;
3162 break;
3164 default:
3165 g_assert_not_reached ();
3169 #ifndef DISABLE_JIT
3172 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to INS->INST_TRUE_BB; the target is resolved later
 * through a MONO_PATCH_INFO_BB patch. NOTE(review): this expands to a bare
 * if/else, not a do-while — confirm no brace-less if caller depends on that. */
3174 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3175 if (0 && ins->inst_true_bb->native_offset) { \
3176 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3177 } else { \
3178 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3179 ARM_B_COND (code, (condcode), 0); \
3182 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3184 /* emit an exception if condition is fail
3186 * We assign the extra code used to throw the implicit exceptions
3187 * to cfg->bb_exit as far as the big branch handling is concerned
3189 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3190 do { \
3191 mono_add_patch_info (cfg, code - cfg->native_code, \
3192 MONO_PATCH_INFO_EXC, exc_name); \
3193 ARM_BL_COND (code, (condcode), 0); \
3194 } while (0);
/* NOTE(review): the trailing ';' after 'while (0)' above makes the macro expand to
 * two statements; confirm no brace-less if/else caller depends on it before changing. */
3196 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* No arch specific peephole optimizations are performed in pass 1 on ARM */
3198 void
3199 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Arch specific peephole optimizations over BB: redundant load
 * elimination after matching stores/loads, and dead/reciprocal move
 * removal.
 */
3203 void
3204 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3206 MonoInst *ins, *n;
3208 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3209 MonoInst *last_ins = mono_inst_prev (ins, FILTER_IL_SEQ_POINT);
3211 switch (ins->opcode) {
3212 case OP_MUL_IMM:
3213 case OP_IMUL_IMM:
3214 /* Already done by an arch-independent pass */
3215 break;
3216 case OP_LOAD_MEMBASE:
3217 case OP_LOADI4_MEMBASE:
3219 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3220 * OP_LOAD_MEMBASE offset(basereg), reg
3222 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3223 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3224 ins->inst_basereg == last_ins->inst_destbasereg &&
3225 ins->inst_offset == last_ins->inst_offset) {
3226 if (ins->dreg == last_ins->sreg1) {
/* The value is still live in the stored register: drop the load */
3227 MONO_DELETE_INS (bb, ins);
3228 continue;
3229 } else {
3230 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3231 ins->opcode = OP_MOVE;
3232 ins->sreg1 = last_ins->sreg1;
3236 * Note: reg1 must be different from the basereg in the second load
3237 * OP_LOAD_MEMBASE offset(basereg), reg1
3238 * OP_LOAD_MEMBASE offset(basereg), reg2
3239 * -->
3240 * OP_LOAD_MEMBASE offset(basereg), reg1
3241 * OP_MOVE reg1, reg2
3243 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3244 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3245 ins->inst_basereg != last_ins->dreg &&
3246 ins->inst_basereg == last_ins->inst_basereg &&
3247 ins->inst_offset == last_ins->inst_offset) {
3249 if (ins->dreg == last_ins->dreg) {
3250 MONO_DELETE_INS (bb, ins);
3251 continue;
3252 } else {
3253 ins->opcode = OP_MOVE;
3254 ins->sreg1 = last_ins->dreg;
3257 //g_assert_not_reached ();
3259 #if 0
3261 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3262 * OP_LOAD_MEMBASE offset(basereg), reg
3263 * -->
3264 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3265 * OP_ICONST reg, imm
3267 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3268 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3269 ins->inst_basereg == last_ins->inst_destbasereg &&
3270 ins->inst_offset == last_ins->inst_offset) {
3271 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3272 ins->opcode = OP_ICONST;
3273 ins->inst_c0 = last_ins->inst_imm;
3274 g_assert_not_reached (); // check this rule
3275 #endif
3277 break;
3278 case OP_LOADU1_MEMBASE:
3279 case OP_LOADI1_MEMBASE:
3280 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3281 ins->inst_basereg == last_ins->inst_destbasereg &&
3282 ins->inst_offset == last_ins->inst_offset) {
/* Replace the load with a sign/zero extension of the stored register */
3283 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3284 ins->sreg1 = last_ins->sreg1;
3286 break;
3287 case OP_LOADU2_MEMBASE:
3288 case OP_LOADI2_MEMBASE:
3289 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3290 ins->inst_basereg == last_ins->inst_destbasereg &&
3291 ins->inst_offset == last_ins->inst_offset) {
3292 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3293 ins->sreg1 = last_ins->sreg1;
3295 break;
3296 case OP_MOVE:
3297 ins->opcode = OP_MOVE;
3299 * OP_MOVE reg, reg
3301 if (ins->dreg == ins->sreg1) {
3302 MONO_DELETE_INS (bb, ins);
3303 continue;
3306 * OP_MOVE sreg, dreg
3307 * OP_MOVE dreg, sreg
3309 if (last_ins && last_ins->opcode == OP_MOVE &&
3310 ins->sreg1 == last_ins->dreg &&
3311 ins->dreg == last_ins->sreg1) {
3312 MONO_DELETE_INS (bb, ins);
3313 continue;
3315 break;
3321 * the branch_cc_table should maintain the order of these
3322 * opcodes.
3323 case CEE_BEQ:
3324 case CEE_BGE:
3325 case CEE_BGT:
3326 case CEE_BLE:
3327 case CEE_BLT:
3328 case CEE_BNE_UN:
3329 case CEE_BGE_UN:
3330 case CEE_BGT_UN:
3331 case CEE_BLE_UN:
3332 case CEE_BLT_UN:
/*
 * ARM condition codes for the conditional branch opcodes: signed
 * EQ/GE/GT/LE/LT first, then the unsigned variants.  Kept in the same
 * order as the CEE_B* opcodes listed in the comment above so the table
 * can be indexed from the opcode value.
 */
3334 static const guchar
3335 branch_cc_table [] = {
3336 ARMCOND_EQ,
3337 ARMCOND_GE,
3338 ARMCOND_GT,
3339 ARMCOND_LE,
3340 ARMCOND_LT,
3342 ARMCOND_NE,
3343 ARMCOND_HS,
3344 ARMCOND_HI,
3345 ARMCOND_LS,
3346 ARMCOND_LO
/*
 * Allocate a new MonoInst with opcode OP into DEST and insert it into
 * the basic block just before the instruction currently being lowered.
 * NOTE: relies on 'bb' and 'ins' being in scope at the expansion site
 * (the lowering pass loop below).
 */
3349 #define ADD_NEW_INS(cfg,dest,op) do { \
3350 MONO_INST_NEW ((cfg), (dest), (op)); \
3351 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3352 } while (0)
3354 static int
3355 map_to_reg_reg_op (int op)
3357 switch (op) {
3358 case OP_ADD_IMM:
3359 return OP_IADD;
3360 case OP_SUB_IMM:
3361 return OP_ISUB;
3362 case OP_AND_IMM:
3363 return OP_IAND;
3364 case OP_COMPARE_IMM:
3365 return OP_COMPARE;
3366 case OP_ICOMPARE_IMM:
3367 return OP_ICOMPARE;
3368 case OP_ADDCC_IMM:
3369 return OP_ADDCC;
3370 case OP_ADC_IMM:
3371 return OP_ADC;
3372 case OP_SUBCC_IMM:
3373 return OP_SUBCC;
3374 case OP_SBB_IMM:
3375 return OP_SBB;
3376 case OP_OR_IMM:
3377 return OP_IOR;
3378 case OP_XOR_IMM:
3379 return OP_IXOR;
3380 case OP_LOAD_MEMBASE:
3381 return OP_LOAD_MEMINDEX;
3382 case OP_LOADI4_MEMBASE:
3383 return OP_LOADI4_MEMINDEX;
3384 case OP_LOADU4_MEMBASE:
3385 return OP_LOADU4_MEMINDEX;
3386 case OP_LOADU1_MEMBASE:
3387 return OP_LOADU1_MEMINDEX;
3388 case OP_LOADI2_MEMBASE:
3389 return OP_LOADI2_MEMINDEX;
3390 case OP_LOADU2_MEMBASE:
3391 return OP_LOADU2_MEMINDEX;
3392 case OP_LOADI1_MEMBASE:
3393 return OP_LOADI1_MEMINDEX;
3394 case OP_STOREI1_MEMBASE_REG:
3395 return OP_STOREI1_MEMINDEX;
3396 case OP_STOREI2_MEMBASE_REG:
3397 return OP_STOREI2_MEMINDEX;
3398 case OP_STOREI4_MEMBASE_REG:
3399 return OP_STOREI4_MEMINDEX;
3400 case OP_STORE_MEMBASE_REG:
3401 return OP_STORE_MEMINDEX;
3402 case OP_STORER4_MEMBASE_REG:
3403 return OP_STORER4_MEMINDEX;
3404 case OP_STORER8_MEMBASE_REG:
3405 return OP_STORER8_MEMINDEX;
3406 case OP_STORE_MEMBASE_IMM:
3407 return OP_STORE_MEMBASE_REG;
3408 case OP_STOREI1_MEMBASE_IMM:
3409 return OP_STOREI1_MEMBASE_REG;
3410 case OP_STOREI2_MEMBASE_IMM:
3411 return OP_STOREI2_MEMBASE_REG;
3412 case OP_STOREI4_MEMBASE_IMM:
3413 return OP_STOREI4_MEMBASE_REG;
3415 g_assert_not_reached ();
3419 * Remove from the instruction list the instructions that can't be
3420 * represented with very simple instructions with no register
3421 * requirements.
/*
 * mono_arch_lowering_pass:
 *
 *   Rewrite instructions in BB that can't be encoded directly on ARM:
 * immediates that don't fit the rotated-imm8 encoding are materialized
 * into a vreg with OP_ICONST, membase loads/stores with out-of-range
 * offsets are turned into reg+reg forms (see map_to_reg_reg_op), and a
 * few opcodes are strength-reduced or adjusted for ARM flag semantics.
 */
3423 void
3424 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3426 MonoInst *ins, *temp, *last_ins = NULL;
3427 int rot_amount, imm8, low_imm;
3429 MONO_BB_FOR_EACH_INS (bb, ins) {
3430 loop_start:
3431 switch (ins->opcode) {
/* ALU ops with an immediate operand: keep the immediate form only if
 * the value encodes as an ARM rotated imm8, otherwise load it into a
 * register and switch to the reg-reg opcode. */
3432 case OP_ADD_IMM:
3433 case OP_SUB_IMM:
3434 case OP_AND_IMM:
3435 case OP_COMPARE_IMM:
3436 case OP_ICOMPARE_IMM:
3437 case OP_ADDCC_IMM:
3438 case OP_ADC_IMM:
3439 case OP_SUBCC_IMM:
3440 case OP_SBB_IMM:
3441 case OP_OR_IMM:
3442 case OP_XOR_IMM:
3443 case OP_IADD_IMM:
3444 case OP_ISUB_IMM:
3445 case OP_IAND_IMM:
3446 case OP_IADC_IMM:
3447 case OP_ISBB_IMM:
3448 case OP_IOR_IMM:
3449 case OP_IXOR_IMM:
3450 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3451 int opcode2 = mono_op_imm_to_op (ins->opcode);
3452 ADD_NEW_INS (cfg, temp, OP_ICONST);
3453 temp->inst_c0 = ins->inst_imm;
3454 temp->dreg = mono_alloc_ireg (cfg);
3455 ins->sreg2 = temp->dreg;
3456 if (opcode2 == -1)
3457 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
3458 ins->opcode = opcode2;
/* the reg-reg subtract-with-borrow forms need the flag fixup below too */
3460 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
3461 goto loop_start;
3462 else
3463 break;
/* Multiply by immediate: strength-reduce *1, *0 and powers of two. */
3464 case OP_MUL_IMM:
3465 case OP_IMUL_IMM:
3466 if (ins->inst_imm == 1) {
3467 ins->opcode = OP_MOVE;
3468 break;
3470 if (ins->inst_imm == 0) {
3471 ins->opcode = OP_ICONST;
3472 ins->inst_c0 = 0;
3473 break;
3475 imm8 = mono_is_power_of_two (ins->inst_imm);
3476 if (imm8 > 0) {
3477 ins->opcode = OP_SHL_IMM;
3478 ins->inst_imm = imm8;
3479 break;
3481 ADD_NEW_INS (cfg, temp, OP_ICONST);
3482 temp->inst_c0 = ins->inst_imm;
3483 temp->dreg = mono_alloc_ireg (cfg);
3484 ins->sreg2 = temp->dreg;
3485 ins->opcode = OP_IMUL;
3486 break;
/* After a subtract, ARM's C flag is set when there was NO borrow, so
 * invert the condition of a following overflow-exception check. */
3487 case OP_SBB:
3488 case OP_ISBB:
3489 case OP_SUBCC:
3490 case OP_ISUBCC: {
3491 int try_count = 2;
3492 MonoInst *current = ins;
3494 /* may require a look-ahead of a couple instructions due to spilling */
3495 while (try_count-- && current->next) {
3496 if (current->next->opcode == OP_COND_EXC_C || current->next->opcode == OP_COND_EXC_IC) {
3497 /* ARM sets the C flag to 1 if there was _no_ overflow */
3498 current->next->opcode = OP_COND_EXC_NC;
3499 break;
3501 current = current->next;
3503 break;
/* div/rem with immediate: always go through a register operand. */
3505 case OP_IDIV_IMM:
3506 case OP_IDIV_UN_IMM:
3507 case OP_IREM_IMM:
3508 case OP_IREM_UN_IMM: {
3509 int opcode2 = mono_op_imm_to_op (ins->opcode);
3510 ADD_NEW_INS (cfg, temp, OP_ICONST);
3511 temp->inst_c0 = ins->inst_imm;
3512 temp->dreg = mono_alloc_ireg (cfg);
3513 ins->sreg2 = temp->dreg;
3514 if (opcode2 == -1)
3515 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
3516 ins->opcode = opcode2;
3517 break;
3519 case OP_LOCALLOC_IMM:
3520 ADD_NEW_INS (cfg, temp, OP_ICONST);
3521 temp->inst_c0 = ins->inst_imm;
3522 temp->dreg = mono_alloc_ireg (cfg);
3523 ins->sreg1 = temp->dreg;
3524 ins->opcode = OP_LOCALLOC;
3525 break;
/* Word/byte loads: ldr/ldrb take a 12 bit offset. */
3526 case OP_LOAD_MEMBASE:
3527 case OP_LOADI4_MEMBASE:
3528 case OP_LOADU4_MEMBASE:
3529 case OP_LOADU1_MEMBASE:
3530 /* we can do two things: load the immed in a register
3531 * and use an indexed load, or see if the immed can be
3532 * represented as an ad_imm + a load with a smaller offset
3533 * that fits. We just do the first for now, optimize later.
3535 if (arm_is_imm12 (ins->inst_offset))
3536 break;
3537 ADD_NEW_INS (cfg, temp, OP_ICONST);
3538 temp->inst_c0 = ins->inst_offset;
3539 temp->dreg = mono_alloc_ireg (cfg);
3540 ins->sreg2 = temp->dreg;
3541 ins->opcode = map_to_reg_reg_op (ins->opcode);
3542 break;
/* Halfword/signed-byte loads only have an 8 bit offset. */
3543 case OP_LOADI2_MEMBASE:
3544 case OP_LOADU2_MEMBASE:
3545 case OP_LOADI1_MEMBASE:
3546 if (arm_is_imm8 (ins->inst_offset))
3547 break;
3548 ADD_NEW_INS (cfg, temp, OP_ICONST);
3549 temp->inst_c0 = ins->inst_offset;
3550 temp->dreg = mono_alloc_ireg (cfg);
3551 ins->sreg2 = temp->dreg;
3552 ins->opcode = map_to_reg_reg_op (ins->opcode);
3553 break;
/* VFP loads have a small offset range: try a base+rounded-imm add
 * first, else compute the full address into a fresh register. */
3554 case OP_LOADR4_MEMBASE:
3555 case OP_LOADR8_MEMBASE:
3556 if (arm_is_fpimm8 (ins->inst_offset))
3557 break;
3558 low_imm = ins->inst_offset & 0x1ff;
3559 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3560 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3561 temp->inst_imm = ins->inst_offset & ~0x1ff;
3562 temp->sreg1 = ins->inst_basereg;
3563 temp->dreg = mono_alloc_ireg (cfg);
3564 ins->inst_basereg = temp->dreg;
3565 ins->inst_offset = low_imm;
3566 } else {
3567 MonoInst *add_ins;
3569 ADD_NEW_INS (cfg, temp, OP_ICONST);
3570 temp->inst_c0 = ins->inst_offset;
3571 temp->dreg = mono_alloc_ireg (cfg);
3573 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3574 add_ins->sreg1 = ins->inst_basereg;
3575 add_ins->sreg2 = temp->dreg;
3576 add_ins->dreg = mono_alloc_ireg (cfg);
3578 ins->inst_basereg = add_ins->dreg;
3579 ins->inst_offset = 0;
3581 break;
/* Stores mirror the loads above: 12 bit offset for word/byte ... */
3582 case OP_STORE_MEMBASE_REG:
3583 case OP_STOREI4_MEMBASE_REG:
3584 case OP_STOREI1_MEMBASE_REG:
3585 if (arm_is_imm12 (ins->inst_offset))
3586 break;
3587 ADD_NEW_INS (cfg, temp, OP_ICONST);
3588 temp->inst_c0 = ins->inst_offset;
3589 temp->dreg = mono_alloc_ireg (cfg);
3590 ins->sreg2 = temp->dreg;
3591 ins->opcode = map_to_reg_reg_op (ins->opcode);
3592 break;
/* ... 8 bit offset for halfword ... */
3593 case OP_STOREI2_MEMBASE_REG:
3594 if (arm_is_imm8 (ins->inst_offset))
3595 break;
3596 ADD_NEW_INS (cfg, temp, OP_ICONST);
3597 temp->inst_c0 = ins->inst_offset;
3598 temp->dreg = mono_alloc_ireg (cfg);
3599 ins->sreg2 = temp->dreg;
3600 ins->opcode = map_to_reg_reg_op (ins->opcode);
3601 break;
/* ... and the small VFP range for float stores. */
3602 case OP_STORER4_MEMBASE_REG:
3603 case OP_STORER8_MEMBASE_REG:
3604 if (arm_is_fpimm8 (ins->inst_offset))
3605 break;
3606 low_imm = ins->inst_offset & 0x1ff;
3607 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3608 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3609 temp->inst_imm = ins->inst_offset & ~0x1ff;
3610 temp->sreg1 = ins->inst_destbasereg;
3611 temp->dreg = mono_alloc_ireg (cfg);
3612 ins->inst_destbasereg = temp->dreg;
3613 ins->inst_offset = low_imm;
3614 } else {
3615 MonoInst *add_ins;
3617 ADD_NEW_INS (cfg, temp, OP_ICONST);
3618 temp->inst_c0 = ins->inst_offset;
3619 temp->dreg = mono_alloc_ireg (cfg);
3621 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3622 add_ins->sreg1 = ins->inst_destbasereg;
3623 add_ins->sreg2 = temp->dreg;
3624 add_ins->dreg = mono_alloc_ireg (cfg);
3626 ins->inst_destbasereg = add_ins->dreg;
3627 ins->inst_offset = 0;
3629 break;
/* Stores of an immediate value: load the value into a register, switch
 * to the store-of-reg form, then re-run lowering on this instruction
 * in case the offset also needs to be lowered. */
3630 case OP_STORE_MEMBASE_IMM:
3631 case OP_STOREI1_MEMBASE_IMM:
3632 case OP_STOREI2_MEMBASE_IMM:
3633 case OP_STOREI4_MEMBASE_IMM:
3634 ADD_NEW_INS (cfg, temp, OP_ICONST);
3635 temp->inst_c0 = ins->inst_imm;
3636 temp->dreg = mono_alloc_ireg (cfg);
3637 ins->sreg1 = temp->dreg;
3638 ins->opcode = map_to_reg_reg_op (ins->opcode);
3639 last_ins = temp;
3640 goto loop_start; /* make it handle the possibly big ins->inst_offset */
/* fp compares: some conditions are only encodable with the operands
 * swapped, so rewrite the following branch and swap sreg1/sreg2. */
3641 case OP_FCOMPARE:
3642 case OP_RCOMPARE: {
3643 gboolean swap = FALSE;
3644 int reg;
3646 if (!ins->next) {
3647 /* Optimized away */
3648 NULLIFY_INS (ins);
3649 break;
3652 /* Some fp compares require swapped operands */
3653 switch (ins->next->opcode) {
3654 case OP_FBGT:
3655 ins->next->opcode = OP_FBLT;
3656 swap = TRUE;
3657 break;
3658 case OP_FBGT_UN:
3659 ins->next->opcode = OP_FBLT_UN;
3660 swap = TRUE;
3661 break;
3662 case OP_FBLE:
3663 ins->next->opcode = OP_FBGE;
3664 swap = TRUE;
3665 break;
3666 case OP_FBLE_UN:
3667 ins->next->opcode = OP_FBGE_UN;
3668 swap = TRUE;
3669 break;
3670 default:
3671 break;
3673 if (swap) {
3674 reg = ins->sreg1;
3675 ins->sreg1 = ins->sreg2;
3676 ins->sreg2 = reg;
3678 break;
3682 last_ins = ins;
3684 bb->last_ins = last_ins;
3685 bb->max_vreg = cfg->next_vreg;
3688 void
3689 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3691 MonoInst *ins;
3693 if (long_ins->opcode == OP_LNEG) {
3694 ins = long_ins;
3695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1), 0);
3696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0);
3697 NULLIFY_INS (ins);
3701 static guchar*
3702 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3704 /* sreg is a float, dreg is an integer reg */
3705 if (IS_VFP) {
3706 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3707 if (is_signed)
3708 ARM_TOSIZD (code, vfp_scratch1, sreg);
3709 else
3710 ARM_TOUIZD (code, vfp_scratch1, sreg);
3711 ARM_FMRS (code, dreg, vfp_scratch1);
3712 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
3714 if (!is_signed) {
3715 if (size == 1)
3716 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3717 else if (size == 2) {
3718 ARM_SHL_IMM (code, dreg, dreg, 16);
3719 ARM_SHR_IMM (code, dreg, dreg, 16);
3721 } else {
3722 if (size == 1) {
3723 ARM_SHL_IMM (code, dreg, dreg, 24);
3724 ARM_SAR_IMM (code, dreg, dreg, 24);
3725 } else if (size == 2) {
3726 ARM_SHL_IMM (code, dreg, dreg, 16);
3727 ARM_SAR_IMM (code, dreg, dreg, 16);
3730 return code;
3733 static guchar*
3734 emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3736 /* sreg is a float, dreg is an integer reg */
3737 g_assert (IS_VFP);
3738 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3739 if (is_signed)
3740 ARM_TOSIZS (code, vfp_scratch1, sreg);
3741 else
3742 ARM_TOUIZS (code, vfp_scratch1, sreg);
3743 ARM_FMRS (code, dreg, vfp_scratch1);
3744 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
3746 if (!is_signed) {
3747 if (size == 1)
3748 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3749 else if (size == 2) {
3750 ARM_SHL_IMM (code, dreg, dreg, 16);
3751 ARM_SHR_IMM (code, dreg, dreg, 16);
3753 } else {
3754 if (size == 1) {
3755 ARM_SHL_IMM (code, dreg, dreg, 24);
3756 ARM_SAR_IMM (code, dreg, dreg, 24);
3757 } else if (size == 2) {
3758 ARM_SHL_IMM (code, dreg, dreg, 16);
3759 ARM_SAR_IMM (code, dreg, dreg, 16);
3762 return code;
3765 #endif /* #ifndef DISABLE_JIT */
/* TRUE if DIFF (a byte displacement) fits in the +-32MB range of an ARM B/BL branch. */
3767 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
3769 static void
3770 emit_thunk (guint8 *code, gconstpointer target)
3772 guint8 *p = code;
3774 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3775 if (thumb_supported)
3776 ARM_BX (code, ARMREG_IP);
3777 else
3778 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3779 *(guint32*)code = (guint32)(gsize)target;
3780 code += 4;
3781 mono_arch_flush_icache (p, code - p);
/*
 * handle_thunk:
 *
 *   Redirect the call at CODE through a thunk jumping to TARGET, used
 * when TARGET is out of direct branch range.  During JITting
 * (CFG != NULL) thunk slots are carved out of cfg->thunks; otherwise
 * the thunk area is located through the method's MonoJitInfo and a
 * free (or already matching) slot is searched under the arch lock.
 * Aborts if no thunk space is available.
 */
3784 static void
3785 handle_thunk (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
3787 MonoJitInfo *ji = NULL;
3788 MonoThunkJitInfo *info;
3789 guint8 *thunks, *p;
3790 int thunks_size;
3791 guint8 *orig_target;
3792 guint8 *target_thunk;
3794 if (!domain)
3795 domain = mono_domain_get ();
3797 if (cfg) {
3799 * This can be called multiple times during JITting,
3800 * save the current position in cfg->arch to avoid
3801 * doing a O(n^2) search.
3803 if (!cfg->arch.thunks) {
3804 cfg->arch.thunks = cfg->thunks;
3805 cfg->arch.thunks_size = cfg->thunk_area;
3807 thunks = cfg->arch.thunks;
3808 thunks_size = cfg->arch.thunks_size;
3809 if (!thunks_size) {
3810 g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
3811 g_assert_not_reached ();
/* the next slot must still be empty; fill it and consume it */
3814 g_assert (*(guint32*)thunks == 0);
3815 emit_thunk (thunks, target);
3816 arm_patch (code, thunks);
3818 cfg->arch.thunks += THUNK_SIZE;
3819 cfg->arch.thunks_size -= THUNK_SIZE;
3820 } else {
3821 ji = mini_jit_info_table_find (domain, (char*)code, NULL);
3822 g_assert (ji);
3823 info = mono_jit_info_get_thunk_info (ji);
3824 g_assert (info);
3826 thunks = (guint8*)ji->code_start + info->thunks_offset;
3827 thunks_size = info->thunks_size;
3829 orig_target = mono_arch_get_call_target (code + 4);
/* serialize the search/reuse of thunk slots below */
3831 mono_mini_arch_lock ();
3833 target_thunk = NULL;
3834 if (orig_target >= thunks && orig_target < thunks + thunks_size) {
3835 /* The call already points to a thunk, because of trampolines etc. */
3836 target_thunk = orig_target;
3837 } else {
3838 for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
3839 if (((guint32*)p) [0] == 0) {
3840 /* Free entry */
3841 target_thunk = p;
3842 break;
3843 } else if (((guint32*)p) [2] == (guint32)(gsize)target) {
3844 /* Thunk already points to target */
3845 target_thunk = p;
3846 break;
3851 //g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
3853 if (!target_thunk) {
3854 mono_mini_arch_unlock ();
3855 g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
3856 g_assert_not_reached ();
3859 emit_thunk (target_thunk, target);
3860 arm_patch (code, target_thunk);
3861 mono_arch_flush_icache (code, 4);
3863 mono_mini_arch_unlock ();
/*
 * arm_patch_general:
 *
 *   Patch the branch/call instruction at CODE so it transfers control
 * to TARGET.  Direct B/BL instructions are re-encoded in place when
 * the displacement fits (+-32MB), switching BL to BLX when TARGET is a
 * thumb address; out-of-range targets are redirected through a thunk
 * (see handle_thunk).  The remaining cases patch the address constant
 * embedded in the indirect-call code sequences described in the
 * comments below.
 */
3867 static void
3868 arm_patch_general (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
3870 guint32 *code32 = (guint32*)code;
3871 guint32 ins = *code32;
3872 guint32 prim = (ins >> 25) & 7;
3873 guint32 tval = GPOINTER_TO_UINT (target);
3875 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3876 if (prim == 5) { /* 101b */
3877 /* the diff starts 8 bytes from the branch opcode */
3878 gint diff = target - code - 8;
3879 gint tbits;
3880 gint tmask = 0xffffffff;
3881 if (tval & 1) { /* entering thumb mode */
3882 diff = target - 1 - code - 8;
3883 g_assert (thumb_supported);
3884 tbits = 0xf << 28; /* bl->blx bit pattern */
3885 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3886 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3887 if (diff & 2) {
3888 tbits |= 1 << 24;
3890 tmask = ~(1 << 24); /* clear the link bit */
3891 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3892 } else {
3893 tbits = 0;
/* re-encode the 24 bit word displacement in place if it fits */
3895 if (diff >= 0) {
3896 if (diff <= 33554431) {
3897 diff >>= 2;
3898 ins = (ins & 0xff000000) | diff;
3899 ins &= tmask;
3900 *code32 = ins | tbits;
3901 return;
3903 } else {
3904 /* diff between 0 and -33554432 */
3905 if (diff >= -33554432) {
3906 diff >>= 2;
3907 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3908 ins &= tmask;
3909 *code32 = ins | tbits;
3910 return;
/* displacement out of range: go through a thunk */
3914 handle_thunk (cfg, domain, code, target);
3915 return;
3919 * The alternative call sequences looks like this:
3921 * ldr ip, [pc] // loads the address constant
3922 * b 1f // jumps around the constant
3923 * address constant embedded in the code
3924 * 1f:
3925 * mov lr, pc
3926 * mov pc, ip
3928 * There are two cases for patching:
3929 * a) at the end of method emission: in this case code points to the start
3930 * of the call sequence
3931 * b) during runtime patching of the call site: in this case code points
3932 * to the mov pc, ip instruction
3934 * We have to handle also the thunk jump code sequence:
3936 * ldr ip, [pc]
3937 * mov pc, ip
3938 * address constant // execution never reaches here
3940 if ((ins & 0x0ffffff0) == 0x12fff10) {
3941 /* Branch and exchange: the address is constructed in a reg
3942 * We can patch BX when the code sequence is the following:
3943 * ldr ip, [pc, #0] ; 0x8
3944 * b 0xc
3945 * .word code_ptr
3946 * mov lr, pc
3947 * bx ips
3948 * */
3949 guint32 ccode [4];
3950 guint8 *emit = (guint8*)ccode;
3951 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3952 ARM_B (emit, 0);
3953 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3954 ARM_BX (emit, ARMREG_IP);
3956 /*patching from magic trampoline*/
3957 if (ins == ccode [3]) {
3958 g_assert (code32 [-4] == ccode [0]);
3959 g_assert (code32 [-3] == ccode [1]);
3960 g_assert (code32 [-1] == ccode [2]);
3961 code32 [-2] = (guint32)(gsize)target;
3962 return;
3964 /*patching from JIT*/
3965 if (ins == ccode [0]) {
3966 g_assert (code32 [1] == ccode [1]);
3967 g_assert (code32 [3] == ccode [2]);
3968 g_assert (code32 [4] == ccode [3]);
3969 code32 [2] = (guint32)(gsize)target;
3970 return;
3972 g_assert_not_reached ();
3973 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3975 * ldr ip, [pc, #0]
3976 * b 0xc
3977 * .word code_ptr
3978 * blx ip
3980 guint32 ccode [4];
3981 guint8 *emit = (guint8*)ccode;
3982 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3983 ARM_B (emit, 0);
3984 ARM_BLX_REG (emit, ARMREG_IP);
3986 g_assert (code32 [-3] == ccode [0]);
3987 g_assert (code32 [-2] == ccode [1]);
3988 g_assert (code32 [0] == ccode [2]);
3990 code32 [-1] = (guint32)(gsize)target;
3991 } else {
/* mov lr, pc / mov pc, ip style call or the thunk jump sequence */
3992 guint32 ccode [4];
3993 guint32 *tmp = ccode;
3994 guint8 *emit = (guint8*)tmp;
3995 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3996 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3997 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3998 ARM_BX (emit, ARMREG_IP);
3999 if (ins == ccode [2]) {
4000 g_assert_not_reached (); // should be -2 ...
4001 code32 [-1] = (guint32)(gsize)target;
4002 return;
4004 if (ins == ccode [0]) {
4005 /* handles both thunk jump code and the far call sequence */
4006 code32 [2] = (guint32)(gsize)target;
4007 return;
4009 g_assert_not_reached ();
4011 // g_print ("patched with 0x%08x\n", ins);
/* Patch the branch/call at CODE to jump to TARGET; convenience wrapper
 * around arm_patch_general () with no compile/domain context. */
4014 void
4015 arm_patch (guchar *code, const guchar *target)
4017 arm_patch_general (NULL, NULL, code, target);
/*
 * mono_arm_is_rotated_imm8:
 *
 *   Return the >= 0 uimm8 value if VAL can be represented as a byte
 * rotated right by an even amount, i.e. the ARM data-processing
 * immediate encoding.  *ROT_AMOUNT receives the rotation, already
 * adjusted to be used with the emit macros.  Return -1 otherwise.
 */
int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
	guint32 i;

	for (i = 0; i < 31; i += 2) {
		/* VAL rotated right by i bits */
		guint32 rotated = i ? ((val >> i) | (val << (32 - i))) : val;

		if ((rotated & ~(guint32)0xff) == 0) {
			*rot_amount = i ? (gint)(32 - i) : 0;
			return (int)rotated;
		}
	}
	return -1;
}
4044 * Emits in code a sequence of instructions that load the value 'val'
4045 * into the dreg register. Uses at most 4 instructions.
4047 guint8*
4048 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
4050 int imm8, rot_amount;
4051 #if 0
4052 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4053 /* skip the constant pool */
4054 ARM_B (code, 0);
4055 *(int*)code = val;
4056 code += 4;
4057 return code;
4058 #endif
/* debug option forcing a fixed-size movw/movt pair for every constant */
4059 if (mini_get_debug_options()->single_imm_size && v7_supported) {
4060 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
4061 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
4062 return code;
/* single instruction when the value (or its complement) encodes as a rotated imm8 */
4065 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
4066 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
4067 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
4068 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
4069 } else {
4070 if (v7_supported) {
4071 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
4072 if (val >> 16)
4073 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
4074 return code;
/* pre-v7 fallback: build the value one byte at a time (mov + up to three adds) */
4076 if (val & 0xFF) {
4077 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
4078 if (val & 0xFF00) {
4079 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4081 if (val & 0xFF0000) {
4082 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4084 if (val & 0xFF000000) {
4085 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
4087 } else if (val & 0xFF00) {
4088 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
4089 if (val & 0xFF0000) {
4090 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4092 if (val & 0xFF000000) {
4093 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
4095 } else if (val & 0xFF0000) {
4096 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
4097 if (val & 0xFF000000) {
4098 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
4101 //g_assert_not_reached ();
4103 return code;
/* TRUE if the thumb instruction set can be targeted (used e.g. when
 * patching bl -> blx in arm_patch_general ()). */
4106 gboolean
4107 mono_arm_thumb_supported (void)
4109 return thumb_supported;
/* Return the cached eabi_supported flag. */
4112 gboolean
4113 mono_arm_eabi_supported (void)
4115 return eabi_supported;
/* Return the cached alignment of 64 bit (i8) values on this platform. */
4119 mono_arm_i8_align (void)
4121 return i8_align;
4124 #ifndef DISABLE_JIT
/*
 * emit_move_return_value:
 *
 *   Emit code moving the return value of the call INS from the
 * ABI-mandated return registers to where the JIT expects it:
 * structures/HFAs returned in registers are stored through the saved
 * vret address, and float results are moved out of r0/r1 or s0/d0
 * depending on whether the hard-float ABI is in use.
 */
4126 static guint8*
4127 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
4129 CallInfo *cinfo;
4130 MonoCallInst *call;
4132 call = (MonoCallInst*)ins;
4133 cinfo = call->call_info;
4135 switch (cinfo->ret.storage) {
4136 case RegTypeStructByVal:
4137 case RegTypeHFA: {
4138 MonoInst *loc = cfg->arch.vret_addr_loc;
4139 int i;
4141 if (cinfo->ret.storage == RegTypeStructByVal && cinfo->ret.nregs == 1) {
4142 /* The JIT treats this as a normal call */
4143 break;
4146 /* Load the destination address */
4147 g_assert (loc && loc->opcode == OP_REGOFFSET);
4149 if (arm_is_imm12 (loc->inst_offset)) {
4150 ARM_LDR_IMM (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
4151 } else {
4152 code = mono_arm_emit_load_imm (code, ARMREG_LR, loc->inst_offset);
4153 ARM_LDR_REG_REG (code, ARMREG_LR, loc->inst_basereg, ARMREG_LR);
/* store each return register, using narrower stores for the tail of the struct */
4156 if (cinfo->ret.storage == RegTypeStructByVal) {
4157 int rsize = cinfo->ret.struct_size;
4159 for (i = 0; i < cinfo->ret.nregs; ++i) {
4160 g_assert (rsize >= 0);
4161 switch (rsize) {
4162 case 0:
4163 break;
4164 case 1:
4165 ARM_STRB_IMM (code, i, ARMREG_LR, i * 4);
4166 break;
4167 case 2:
4168 ARM_STRH_IMM (code, i, ARMREG_LR, i * 4);
4169 break;
4170 default:
4171 ARM_STR_IMM (code, i, ARMREG_LR, i * 4);
4172 break;
4174 rsize -= 4;
4176 } else {
/* HFA: store each fp element (esize 4 = float, else double) */
4177 for (i = 0; i < cinfo->ret.nregs; ++i) {
4178 if (cinfo->ret.esize == 4)
4179 ARM_FSTS (code, cinfo->ret.reg + i, ARMREG_LR, i * 4);
4180 else
4181 ARM_FSTD (code, cinfo->ret.reg + (i * 2), ARMREG_LR, i * 8);
4184 return code;
4186 default:
4187 break;
4190 switch (ins->opcode) {
4191 case OP_FCALL:
4192 case OP_FCALL_REG:
4193 case OP_FCALL_MEMBASE:
4194 if (IS_VFP) {
4195 MonoType *sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
4196 if (sig_ret->type == MONO_TYPE_R4) {
4197 if (IS_HARD_FLOAT) {
4198 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4199 } else {
4200 ARM_FMSR (code, ins->dreg, ARMREG_R0);
4201 ARM_CVTS (code, ins->dreg, ins->dreg);
4203 } else {
4204 if (IS_HARD_FLOAT) {
4205 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
4206 } else {
4207 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
4211 break;
4212 case OP_RCALL:
4213 case OP_RCALL_REG:
4214 case OP_RCALL_MEMBASE: {
4215 MonoType *sig_ret;
4217 g_assert (IS_VFP);
4219 sig_ret = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
4220 g_assert (sig_ret->type == MONO_TYPE_R4);
4221 if (IS_HARD_FLOAT) {
4222 ARM_CPYS (code, ins->dreg, ARM_VFP_F0);
4223 } else {
4224 ARM_FMSR (code, ins->dreg, ARMREG_R0);
4225 ARM_CPYS (code, ins->dreg, ins->dreg);
4227 break;
4229 default:
4230 break;
4233 return code;
4236 void
4237 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4239 MonoInst *ins;
4240 MonoCallInst *call;
4241 guint8 *code = cfg->native_code + cfg->code_len;
4242 MonoInst *last_ins = NULL;
4243 int max_len, cpos;
4244 int imm8, rot_amount;
4246 /* we don't align basic blocks of loops on arm */
4248 if (cfg->verbose_level > 2)
4249 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4251 cpos = bb->max_offset;
4253 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4254 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4255 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
4256 code = emit_call_seq (cfg, code);
4259 MONO_BB_FOR_EACH_INS (bb, ins) {
4260 guint offset = code - cfg->native_code;
4261 set_code_cursor (cfg, code);
4262 max_len = ins_get_size (ins->opcode);
4263 code = realloc_code (cfg, max_len);
4264 // if (ins->cil_code)
4265 // g_print ("cil code\n");
4266 mono_debug_record_line_number (cfg, ins, offset);
4268 switch (ins->opcode) {
4269 case OP_MEMORY_BARRIER:
4270 if (v7_supported) {
4271 ARM_DMB (code, ARM_DMB_ISH);
4272 } else if (v6_supported) {
4273 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4274 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4276 break;
4277 case OP_TLS_GET:
4278 code = emit_tls_get (code, ins->dreg, ins->inst_offset);
4279 break;
4280 case OP_TLS_SET:
4281 code = emit_tls_set (code, ins->sreg1, ins->inst_offset);
4282 break;
4283 case OP_ATOMIC_EXCHANGE_I4:
4284 case OP_ATOMIC_CAS_I4:
4285 case OP_ATOMIC_ADD_I4: {
4286 int tmpreg;
4287 guint8 *buf [16];
4289 g_assert (v7_supported);
4291 /* Free up a reg */
4292 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4293 tmpreg = ARMREG_IP;
4294 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4295 tmpreg = ARMREG_R0;
4296 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4297 tmpreg = ARMREG_R1;
4298 else
4299 tmpreg = ARMREG_R2;
4300 g_assert (cfg->arch.atomic_tmp_offset != -1);
4301 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4303 switch (ins->opcode) {
4304 case OP_ATOMIC_EXCHANGE_I4:
4305 buf [0] = code;
4306 ARM_DMB (code, ARM_DMB_ISH);
4307 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4308 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4309 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4310 buf [1] = code;
4311 ARM_B_COND (code, ARMCOND_NE, 0);
4312 arm_patch (buf [1], buf [0]);
4313 break;
4314 case OP_ATOMIC_CAS_I4:
4315 ARM_DMB (code, ARM_DMB_ISH);
4316 buf [0] = code;
4317 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4318 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4319 buf [1] = code;
4320 ARM_B_COND (code, ARMCOND_NE, 0);
4321 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4322 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4323 buf [2] = code;
4324 ARM_B_COND (code, ARMCOND_NE, 0);
4325 arm_patch (buf [2], buf [0]);
4326 arm_patch (buf [1], code);
4327 break;
4328 case OP_ATOMIC_ADD_I4:
4329 buf [0] = code;
4330 ARM_DMB (code, ARM_DMB_ISH);
4331 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4332 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4333 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4334 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4335 buf [1] = code;
4336 ARM_B_COND (code, ARMCOND_NE, 0);
4337 arm_patch (buf [1], buf [0]);
4338 break;
4339 default:
4340 g_assert_not_reached ();
4343 ARM_DMB (code, ARM_DMB_ISH);
4344 if (tmpreg != ins->dreg)
4345 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4346 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4347 break;
4349 case OP_ATOMIC_LOAD_I1:
4350 case OP_ATOMIC_LOAD_U1:
4351 case OP_ATOMIC_LOAD_I2:
4352 case OP_ATOMIC_LOAD_U2:
4353 case OP_ATOMIC_LOAD_I4:
4354 case OP_ATOMIC_LOAD_U4:
4355 case OP_ATOMIC_LOAD_R4:
4356 case OP_ATOMIC_LOAD_R8: {
4357 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4358 ARM_DMB (code, ARM_DMB_ISH);
4360 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4362 switch (ins->opcode) {
4363 case OP_ATOMIC_LOAD_I1:
4364 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4365 break;
4366 case OP_ATOMIC_LOAD_U1:
4367 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4368 break;
4369 case OP_ATOMIC_LOAD_I2:
4370 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4371 break;
4372 case OP_ATOMIC_LOAD_U2:
4373 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4374 break;
4375 case OP_ATOMIC_LOAD_I4:
4376 case OP_ATOMIC_LOAD_U4:
4377 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4378 break;
4379 case OP_ATOMIC_LOAD_R4:
4380 if (cfg->r4fp) {
4381 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4382 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4383 } else {
4384 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4385 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4386 ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0);
4387 ARM_CVTS (code, ins->dreg, vfp_scratch1);
4388 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4390 break;
4391 case OP_ATOMIC_LOAD_R8:
4392 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4393 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4394 break;
4397 if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
4398 ARM_DMB (code, ARM_DMB_ISH);
4399 break;
4401 case OP_ATOMIC_STORE_I1:
4402 case OP_ATOMIC_STORE_U1:
4403 case OP_ATOMIC_STORE_I2:
4404 case OP_ATOMIC_STORE_U2:
4405 case OP_ATOMIC_STORE_I4:
4406 case OP_ATOMIC_STORE_U4:
4407 case OP_ATOMIC_STORE_R4:
4408 case OP_ATOMIC_STORE_R8: {
4409 if (ins->backend.memory_barrier_kind != MONO_MEMORY_BARRIER_NONE)
4410 ARM_DMB (code, ARM_DMB_ISH);
4412 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4414 switch (ins->opcode) {
4415 case OP_ATOMIC_STORE_I1:
4416 case OP_ATOMIC_STORE_U1:
4417 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4418 break;
4419 case OP_ATOMIC_STORE_I2:
4420 case OP_ATOMIC_STORE_U2:
4421 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4422 break;
4423 case OP_ATOMIC_STORE_I4:
4424 case OP_ATOMIC_STORE_U4:
4425 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4426 break;
4427 case OP_ATOMIC_STORE_R4:
4428 if (cfg->r4fp) {
4429 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4430 ARM_FSTS (code, ins->sreg1, ARMREG_LR, 0);
4431 } else {
4432 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4433 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4434 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4435 ARM_FSTS (code, vfp_scratch1, ARMREG_LR, 0);
4436 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4438 break;
4439 case OP_ATOMIC_STORE_R8:
4440 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4441 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4442 break;
4445 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4446 ARM_DMB (code, ARM_DMB_ISH);
4447 break;
4449 case OP_BIGMUL:
4450 ARM_SMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
4451 break;
4452 case OP_BIGMUL_UN:
4453 ARM_UMULL_REG_REG (code, ins->backend.reg3, ins->dreg, ins->sreg1, ins->sreg2);
4454 break;
4455 case OP_STOREI1_MEMBASE_IMM:
4456 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4457 g_assert (arm_is_imm12 (ins->inst_offset));
4458 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4459 break;
4460 case OP_STOREI2_MEMBASE_IMM:
4461 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4462 g_assert (arm_is_imm8 (ins->inst_offset));
4463 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4464 break;
4465 case OP_STORE_MEMBASE_IMM:
4466 case OP_STOREI4_MEMBASE_IMM:
4467 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4468 g_assert (arm_is_imm12 (ins->inst_offset));
4469 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4470 break;
4471 case OP_STOREI1_MEMBASE_REG:
4472 g_assert (arm_is_imm12 (ins->inst_offset));
4473 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4474 break;
4475 case OP_STOREI2_MEMBASE_REG:
4476 g_assert (arm_is_imm8 (ins->inst_offset));
4477 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4478 break;
4479 case OP_STORE_MEMBASE_REG:
4480 case OP_STOREI4_MEMBASE_REG:
4481 /* this case is special, since it happens for spill code after lowering has been called */
4482 if (arm_is_imm12 (ins->inst_offset)) {
4483 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4484 } else {
4485 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4486 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4488 break;
4489 case OP_STOREI1_MEMINDEX:
4490 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4491 break;
4492 case OP_STOREI2_MEMINDEX:
4493 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4494 break;
4495 case OP_STORE_MEMINDEX:
4496 case OP_STOREI4_MEMINDEX:
4497 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4498 break;
4499 case OP_LOADU4_MEM:
4500 g_assert_not_reached ();
4501 break;
4502 case OP_LOAD_MEMINDEX:
4503 case OP_LOADI4_MEMINDEX:
4504 case OP_LOADU4_MEMINDEX:
4505 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4506 break;
4507 case OP_LOADI1_MEMINDEX:
4508 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4509 break;
4510 case OP_LOADU1_MEMINDEX:
4511 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4512 break;
4513 case OP_LOADI2_MEMINDEX:
4514 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4515 break;
4516 case OP_LOADU2_MEMINDEX:
4517 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4518 break;
4519 case OP_LOAD_MEMBASE:
4520 case OP_LOADI4_MEMBASE:
4521 case OP_LOADU4_MEMBASE:
4522 /* this case is special, since it happens for spill code after lowering has been called */
4523 if (arm_is_imm12 (ins->inst_offset)) {
4524 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4525 } else {
4526 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4527 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4529 break;
4530 case OP_LOADI1_MEMBASE:
4531 g_assert (arm_is_imm8 (ins->inst_offset));
4532 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4533 break;
4534 case OP_LOADU1_MEMBASE:
4535 g_assert (arm_is_imm12 (ins->inst_offset));
4536 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4537 break;
4538 case OP_LOADU2_MEMBASE:
4539 g_assert (arm_is_imm8 (ins->inst_offset));
4540 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4541 break;
4542 case OP_LOADI2_MEMBASE:
4543 g_assert (arm_is_imm8 (ins->inst_offset));
4544 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4545 break;
4546 case OP_ICONV_TO_I1:
4547 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4548 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4549 break;
4550 case OP_ICONV_TO_I2:
4551 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4552 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4553 break;
4554 case OP_ICONV_TO_U1:
4555 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4556 break;
4557 case OP_ICONV_TO_U2:
4558 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4559 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4560 break;
4561 case OP_COMPARE:
4562 case OP_ICOMPARE:
4563 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4564 break;
4565 case OP_COMPARE_IMM:
4566 case OP_ICOMPARE_IMM:
4567 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4568 g_assert (imm8 >= 0);
4569 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4570 break;
4571 case OP_BREAK:
4573 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4574 * So instead of emitting a trap, we emit a call a C function and place a
4575 * breakpoint there.
4577 //*(int*)code = 0xef9f0001;
4578 //code += 4;
4579 //ARM_DBRK (code);
4580 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4581 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
4582 code = emit_call_seq (cfg, code);
4583 break;
4584 case OP_RELAXED_NOP:
4585 ARM_NOP (code);
4586 break;
4587 case OP_NOP:
4588 case OP_DUMMY_USE:
4589 case OP_DUMMY_ICONST:
4590 case OP_DUMMY_R8CONST:
4591 case OP_DUMMY_R4CONST:
4592 case OP_NOT_REACHED:
4593 case OP_NOT_NULL:
4594 break;
4595 case OP_IL_SEQ_POINT:
4596 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4597 break;
4598 case OP_SEQ_POINT: {
4599 int i;
4600 MonoInst *info_var = cfg->arch.seq_point_info_var;
4601 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4602 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4603 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4604 MonoInst *var;
4605 int dreg = ARMREG_LR;
4607 #if 0
4608 if (cfg->soft_breakpoints) {
4609 g_assert (!cfg->compile_aot);
4611 #endif
4614 * For AOT, we use one got slot per method, which will point to a
4615 * SeqPointInfo structure, containing all the information required
4616 * by the code below.
4618 if (cfg->compile_aot) {
4619 g_assert (info_var);
4620 g_assert (info_var->opcode == OP_REGOFFSET);
4623 if (!cfg->soft_breakpoints && !cfg->compile_aot) {
4625 * Read from the single stepping trigger page. This will cause a
4626 * SIGSEGV when single stepping is enabled.
4627 * We do this _before_ the breakpoint, so single stepping after
4628 * a breakpoint is hit will step to the next IL offset.
4630 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4633 /* Single step check */
4634 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4635 if (cfg->soft_breakpoints) {
4636 /* Load the address of the sequence point method variable. */
4637 var = ss_method_var;
4638 g_assert (var);
4639 g_assert (var->opcode == OP_REGOFFSET);
4640 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4641 /* Read the value and check whether it is non-zero. */
4642 ARM_LDR_IMM (code, dreg, dreg, 0);
4643 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4644 /* Call it conditionally. */
4645 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4646 } else {
4647 if (cfg->compile_aot) {
4648 /* Load the trigger page addr from the variable initialized in the prolog */
4649 var = ss_trigger_page_var;
4650 g_assert (var);
4651 g_assert (var->opcode == OP_REGOFFSET);
4652 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4653 } else {
4654 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4655 ARM_B (code, 0);
4656 *(int*)code = (int)(gsize)ss_trigger_page;
4657 code += 4;
4659 ARM_LDR_IMM (code, dreg, dreg, 0);
4663 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4665 /* Breakpoint check */
4666 if (cfg->compile_aot) {
4667 const guint32 offset = code - cfg->native_code;
4668 guint32 val;
4670 var = info_var;
4671 code = emit_ldr_imm (code, dreg, var->inst_basereg, var->inst_offset);
4672 /* Add the offset */
4673 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4674 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4675 if (arm_is_imm12 ((int)val)) {
4676 ARM_LDR_IMM (code, dreg, dreg, val);
4677 } else {
4678 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4679 if (val & 0xFF00)
4680 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4681 if (val & 0xFF0000)
4682 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4683 g_assert (!(val & 0xFF000000));
4685 ARM_LDR_IMM (code, dreg, dreg, 0);
4687 /* What is faster, a branch or a load ? */
4688 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4689 /* The breakpoint instruction */
4690 if (cfg->soft_breakpoints)
4691 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4692 else
4693 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4694 } else if (cfg->soft_breakpoints) {
4695 /* Load the address of the breakpoint method into ip. */
4696 var = bp_method_var;
4697 g_assert (var);
4698 g_assert (var->opcode == OP_REGOFFSET);
4699 g_assert (arm_is_imm12 (var->inst_offset));
4700 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4703 * A placeholder for a possible breakpoint inserted by
4704 * mono_arch_set_breakpoint ().
4706 ARM_NOP (code);
4707 } else {
4709 * A placeholder for a possible breakpoint inserted by
4710 * mono_arch_set_breakpoint ().
4712 for (i = 0; i < 4; ++i)
4713 ARM_NOP (code);
4717 * Add an additional nop so skipping the bp doesn't cause the ip to point
4718 * to another IL offset.
4721 ARM_NOP (code);
4722 break;
4724 case OP_ADDCC:
4725 case OP_IADDCC:
4726 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4727 break;
4728 case OP_IADD:
4729 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4730 break;
4731 case OP_ADC:
4732 case OP_IADC:
4733 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4734 break;
4735 case OP_ADDCC_IMM:
4736 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4737 g_assert (imm8 >= 0);
4738 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4739 break;
4740 case OP_ADD_IMM:
4741 case OP_IADD_IMM:
4742 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4743 g_assert (imm8 >= 0);
4744 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4745 break;
4746 case OP_ADC_IMM:
4747 case OP_IADC_IMM:
4748 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4749 g_assert (imm8 >= 0);
4750 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4751 break;
4752 case OP_IADD_OVF:
4753 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4754 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4755 break;
4756 case OP_IADD_OVF_UN:
4757 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4758 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4759 break;
4760 case OP_ISUB_OVF:
4761 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4762 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4763 break;
4764 case OP_ISUB_OVF_UN:
4765 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4766 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4767 break;
4768 case OP_ADD_OVF_CARRY:
4769 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4770 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4771 break;
4772 case OP_ADD_OVF_UN_CARRY:
4773 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4774 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4775 break;
4776 case OP_SUB_OVF_CARRY:
4777 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4778 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4779 break;
4780 case OP_SUB_OVF_UN_CARRY:
4781 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4782 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4783 break;
4784 case OP_SUBCC:
4785 case OP_ISUBCC:
4786 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4787 break;
4788 case OP_SUBCC_IMM:
4789 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4790 g_assert (imm8 >= 0);
4791 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4792 break;
4793 case OP_ISUB:
4794 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4795 break;
4796 case OP_SBB:
4797 case OP_ISBB:
4798 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4799 break;
4800 case OP_SUB_IMM:
4801 case OP_ISUB_IMM:
4802 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4803 g_assert (imm8 >= 0);
4804 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4805 break;
4806 case OP_SBB_IMM:
4807 case OP_ISBB_IMM:
4808 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4809 g_assert (imm8 >= 0);
4810 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4811 break;
4812 case OP_ARM_RSBS_IMM:
4813 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4814 g_assert (imm8 >= 0);
4815 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4816 break;
4817 case OP_ARM_RSC_IMM:
4818 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4819 g_assert (imm8 >= 0);
4820 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4821 break;
4822 case OP_IAND:
4823 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4824 break;
4825 case OP_AND_IMM:
4826 case OP_IAND_IMM:
4827 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4828 g_assert (imm8 >= 0);
4829 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4830 break;
4831 case OP_IDIV:
4832 g_assert (v7s_supported || v7k_supported);
4833 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4834 break;
4835 case OP_IDIV_UN:
4836 g_assert (v7s_supported || v7k_supported);
4837 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4838 break;
4839 case OP_IREM:
4840 g_assert (v7s_supported || v7k_supported);
4841 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4842 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4843 break;
4844 case OP_IREM_UN:
4845 g_assert (v7s_supported || v7k_supported);
4846 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4847 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4848 break;
4849 case OP_DIV_IMM:
4850 case OP_REM_IMM:
4851 g_assert_not_reached ();
4852 case OP_IOR:
4853 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4854 break;
4855 case OP_OR_IMM:
4856 case OP_IOR_IMM:
4857 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4858 g_assert (imm8 >= 0);
4859 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4860 break;
4861 case OP_IXOR:
4862 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4863 break;
4864 case OP_XOR_IMM:
4865 case OP_IXOR_IMM:
4866 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4867 g_assert (imm8 >= 0);
4868 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4869 break;
4870 case OP_ISHL:
4871 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4872 break;
4873 case OP_SHL_IMM:
4874 case OP_ISHL_IMM:
4875 if (ins->inst_imm)
4876 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4877 else if (ins->dreg != ins->sreg1)
4878 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4879 break;
4880 case OP_ISHR:
4881 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4882 break;
4883 case OP_SHR_IMM:
4884 case OP_ISHR_IMM:
4885 if (ins->inst_imm)
4886 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4887 else if (ins->dreg != ins->sreg1)
4888 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4889 break;
4890 case OP_SHR_UN_IMM:
4891 case OP_ISHR_UN_IMM:
4892 if (ins->inst_imm)
4893 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4894 else if (ins->dreg != ins->sreg1)
4895 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4896 break;
4897 case OP_ISHR_UN:
4898 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4899 break;
4900 case OP_INOT:
4901 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4902 break;
4903 case OP_INEG:
4904 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4905 break;
4906 case OP_IMUL:
4907 if (ins->dreg == ins->sreg2)
4908 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4909 else
4910 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4911 break;
4912 case OP_MUL_IMM:
4913 g_assert_not_reached ();
4914 break;
4915 case OP_IMUL_OVF:
4916 /* FIXME: handle ovf/ sreg2 != dreg */
4917 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4918 /* FIXME: MUL doesn't set the C/O flags on ARM */
4919 break;
4920 case OP_IMUL_OVF_UN:
4921 /* FIXME: handle ovf/ sreg2 != dreg */
4922 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4923 /* FIXME: MUL doesn't set the C/O flags on ARM */
4924 break;
4925 case OP_ICONST:
4926 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4927 break;
4928 case OP_AOTCONST:
4929 /* Load the GOT offset */
4930 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)(gsize)ins->inst_i1, ins->inst_p0);
4931 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4932 ARM_B (code, 0);
4933 *(gpointer*)code = NULL;
4934 code += 4;
4935 /* Load the value from the GOT */
4936 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4937 break;
4938 case OP_OBJC_GET_SELECTOR:
4939 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4940 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4941 ARM_B (code, 0);
4942 *(gpointer*)code = NULL;
4943 code += 4;
4944 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4945 break;
4946 case OP_ICONV_TO_I4:
4947 case OP_ICONV_TO_U4:
4948 case OP_MOVE:
4949 if (ins->dreg != ins->sreg1)
4950 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4951 break;
4952 case OP_SETLRET: {
4953 int saved = ins->sreg2;
4954 if (ins->sreg2 == ARM_LSW_REG) {
4955 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4956 saved = ARMREG_LR;
4958 if (ins->sreg1 != ARM_LSW_REG)
4959 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4960 if (saved != ARM_MSW_REG)
4961 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4962 break;
4964 case OP_FMOVE:
4965 if (IS_VFP && ins->dreg != ins->sreg1)
4966 ARM_CPYD (code, ins->dreg, ins->sreg1);
4967 break;
4968 case OP_RMOVE:
4969 if (IS_VFP && ins->dreg != ins->sreg1)
4970 ARM_CPYS (code, ins->dreg, ins->sreg1);
4971 break;
4972 case OP_MOVE_F_TO_I4:
4973 if (cfg->r4fp) {
4974 ARM_FMRS (code, ins->dreg, ins->sreg1);
4975 } else {
4976 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4977 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4978 ARM_FMRS (code, ins->dreg, vfp_scratch1);
4979 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4981 break;
4982 case OP_MOVE_I4_TO_F:
4983 if (cfg->r4fp) {
4984 ARM_FMSR (code, ins->dreg, ins->sreg1);
4985 } else {
4986 ARM_FMSR (code, ins->dreg, ins->sreg1);
4987 ARM_CVTS (code, ins->dreg, ins->dreg);
4989 break;
4990 case OP_FCONV_TO_R4:
4991 if (IS_VFP) {
4992 if (cfg->r4fp) {
4993 ARM_CVTD (code, ins->dreg, ins->sreg1);
4994 } else {
4995 ARM_CVTD (code, ins->dreg, ins->sreg1);
4996 ARM_CVTS (code, ins->dreg, ins->dreg);
4999 break;
5001 case OP_TAILCALL_PARAMETER:
5002 // This opcode helps compute sizes, i.e.
5003 // of the subsequent OP_TAILCALL, but contributes no code.
5004 g_assert (ins->next);
5005 break;
5007 case OP_TAILCALL:
5008 case OP_TAILCALL_MEMBASE:
5009 case OP_TAILCALL_REG: {
5010 gboolean const tailcall_membase = ins->opcode == OP_TAILCALL_MEMBASE;
5011 gboolean const tailcall_reg = ins->opcode == OP_TAILCALL_REG;
5012 MonoCallInst *call = (MonoCallInst*)ins;
5014 max_len += call->stack_usage / sizeof (target_mgreg_t) * ins_get_size (OP_TAILCALL_PARAMETER);
5016 if (IS_HARD_FLOAT)
5017 code = emit_float_args (cfg, call, code, &max_len, &offset);
5019 code = realloc_code (cfg, max_len);
5021 // For reg and membase, get destination in IP.
5023 if (tailcall_reg) {
5024 g_assert (ins->sreg1 > -1);
5025 if (ins->sreg1 != ARMREG_IP)
5026 ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg1);
5027 } else if (tailcall_membase) {
5028 g_assert (ins->sreg1 > -1);
5029 if (!arm_is_imm12 (ins->inst_offset)) {
5030 g_assert (ins->sreg1 != ARMREG_IP); // temp in emit_big_add
5031 code = emit_big_add (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
5032 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
5033 } else {
5034 ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, ins->inst_offset);
5039 * The stack looks like the following:
5040 * <caller argument area>
5041 * <saved regs etc>
5042 * <rest of frame>
5043 * <callee argument area>
5044 * <optionally saved IP> (about to be)
5045 * Need to copy the arguments from the callee argument area to
5046 * the caller argument area, and pop the frame.
5048 if (call->stack_usage) {
5049 int i, prev_sp_offset = 0;
5051 // When we get here, the parameters to the tailcall are already formed,
5052 // in registers and at the bottom of the grow-down stack.
5054 // Our goal is generally preserve parameters, and trim the stack,
5055 // and, before trimming stack, move parameters from the bottom of the
5056 // frame to the bottom of the trimmed frame.
5058 // For the case of large frames, and presently therefore always,
5059 // IP is used as an adjusted frame_reg.
5060 // Be conservative and save IP around the movement
5061 // of parameters from the bottom of frame to top of the frame.
5062 const gboolean save_ip = tailcall_membase || tailcall_reg;
5063 if (save_ip)
5064 ARM_PUSH (code, 1 << ARMREG_IP);
5066 // When moving stacked parameters from the bottom
5067 // of the frame (sp) to the top of the frame (ip),
5068 // account, 0 or 4, for the conditional save of IP.
5069 const int offset_sp = save_ip ? 4 : 0;
5070 const int offset_ip = (save_ip && (cfg->frame_reg == ARMREG_SP)) ? 4 : 0;
5072 /* Compute size of saved registers restored below */
5073 if (iphone_abi)
5074 prev_sp_offset = 2 * 4;
5075 else
5076 prev_sp_offset = 1 * 4;
5077 for (i = 0; i < 16; ++i) {
5078 if (cfg->used_int_regs & (1 << i))
5079 prev_sp_offset += 4;
5082 // Point IP at the start of where the parameters will go after trimming stack.
5083 // After locals and saved registers.
5084 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
5086 /* Copy arguments on the stack to our argument area */
5087 // FIXME a fixed size memcpy is desirable here,
5088 // at least for larger values of stack_usage.
5090 // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
5091 // See https://github.com/mono/mono/pull/12079
5092 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
5093 for (i = 0; i < call->stack_usage; i += sizeof (target_mgreg_t)) {
5094 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i + offset_sp);
5095 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i + offset_ip);
5098 if (save_ip)
5099 ARM_POP (code, 1 << ARMREG_IP);
5103 * Keep in sync with mono_arch_emit_epilog
5105 g_assert (!cfg->method->save_lmf);
5106 code = emit_big_add_temp (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage, ARMREG_LR);
5107 if (iphone_abi) {
5108 if (cfg->used_int_regs)
5109 ARM_POP (code, cfg->used_int_regs);
5110 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5111 } else {
5112 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
5115 if (tailcall_reg || tailcall_membase) {
5116 code = emit_jmp_reg (code, ARMREG_IP);
5117 } else {
5118 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
5120 if (cfg->compile_aot) {
5121 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5122 ARM_B (code, 0);
5123 *(gpointer*)code = NULL;
5124 code += 4;
5125 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
5126 } else {
5127 code = mono_arm_patchable_b (code, ARMCOND_AL);
5128 cfg->thunk_area += THUNK_SIZE;
5131 break;
5133 case OP_CHECK_THIS:
5134 /* ensure ins->sreg1 is not NULL */
5135 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
5136 break;
5137 case OP_ARGLIST: {
5138 g_assert (cfg->sig_cookie < 128);
5139 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5140 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
5141 break;
5143 case OP_FCALL:
5144 case OP_RCALL:
5145 case OP_LCALL:
5146 case OP_VCALL:
5147 case OP_VCALL2:
5148 case OP_VOIDCALL:
5149 case OP_CALL:
5150 call = (MonoCallInst*)ins;
5152 if (IS_HARD_FLOAT)
5153 code = emit_float_args (cfg, call, code, &max_len, &offset);
5155 mono_call_add_patch_info (cfg, call, code - cfg->native_code);
5157 code = emit_call_seq (cfg, code);
5158 ins->flags |= MONO_INST_GC_CALLSITE;
5159 ins->backend.pc_offset = code - cfg->native_code;
5160 code = emit_move_return_value (cfg, ins, code);
5161 break;
5162 case OP_FCALL_REG:
5163 case OP_RCALL_REG:
5164 case OP_LCALL_REG:
5165 case OP_VCALL_REG:
5166 case OP_VCALL2_REG:
5167 case OP_VOIDCALL_REG:
5168 case OP_CALL_REG:
5169 if (IS_HARD_FLOAT)
5170 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
5172 code = emit_call_reg (code, ins->sreg1);
5173 ins->flags |= MONO_INST_GC_CALLSITE;
5174 ins->backend.pc_offset = code - cfg->native_code;
5175 code = emit_move_return_value (cfg, ins, code);
5176 break;
5177 case OP_FCALL_MEMBASE:
5178 case OP_RCALL_MEMBASE:
5179 case OP_LCALL_MEMBASE:
5180 case OP_VCALL_MEMBASE:
5181 case OP_VCALL2_MEMBASE:
5182 case OP_VOIDCALL_MEMBASE:
5183 case OP_CALL_MEMBASE: {
5184 g_assert (ins->sreg1 != ARMREG_LR);
5185 call = (MonoCallInst*)ins;
5187 if (IS_HARD_FLOAT)
5188 code = emit_float_args (cfg, call, code, &max_len, &offset);
5189 if (!arm_is_imm12 (ins->inst_offset)) {
5190 /* sreg1 might be IP */
5191 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
5192 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
5193 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_LR);
5194 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5195 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, 0);
5196 } else {
5197 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5198 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
5200 ins->flags |= MONO_INST_GC_CALLSITE;
5201 ins->backend.pc_offset = code - cfg->native_code;
5202 code = emit_move_return_value (cfg, ins, code);
5203 break;
5205 case OP_GENERIC_CLASS_INIT: {
5206 int byte_offset;
5207 guint8 *jump;
5209 byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);
5211 g_assert (arm_is_imm8 (byte_offset));
5212 ARM_LDRSB_IMM (code, ARMREG_IP, ins->sreg1, byte_offset);
5213 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
5214 jump = code;
5215 ARM_B_COND (code, ARMCOND_NE, 0);
5217 /* Uninitialized case */
5218 g_assert (ins->sreg1 == ARMREG_R0);
5220 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5221 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init));
5222 code = emit_call_seq (cfg, code);
5224 /* Initialized case */
5225 arm_patch (jump, code);
5226 break;
5228 case OP_LOCALLOC: {
5229 /* round the size to 8 bytes */
5230 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
5231 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, (MONO_ARCH_FRAME_ALIGNMENT - 1));
5232 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
5233 /* memzero the area: dreg holds the size, sp is the pointer */
5234 if (ins->flags & MONO_INST_INIT) {
5235 guint8 *start_loop, *branch_to_cond;
5236 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
5237 branch_to_cond = code;
5238 ARM_B (code, 0);
5239 start_loop = code;
5240 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
5241 arm_patch (branch_to_cond, code);
5242 /* decrement by 4 and set flags */
5243 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (target_mgreg_t));
5244 ARM_B_COND (code, ARMCOND_GE, 0);
5245 arm_patch (code - 4, start_loop);
5247 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
5248 if (cfg->param_area)
5249 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
5250 break;
5252 case OP_DYN_CALL: {
5253 int i;
5254 MonoInst *var = cfg->dyn_call_var;
5255 guint8 *labels [16];
5257 g_assert (var->opcode == OP_REGOFFSET);
5258 g_assert (arm_is_imm12 (var->inst_offset));
5260 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5261 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg1);
5262 /* ip = ftn */
5263 ARM_MOV_REG_REG (code, ARMREG_IP, ins->sreg2);
5265 /* Save args buffer */
5266 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5268 /* Set fp argument registers */
5269 if (IS_HARD_FLOAT) {
5270 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, has_fpregs));
5271 ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0);
5272 labels [0] = code;
5273 ARM_B_COND (code, ARMCOND_EQ, 0);
5274 for (i = 0; i < FP_PARAM_REGS; ++i) {
5275 const int offset = MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * sizeof (double));
5276 g_assert (arm_is_fpimm8 (offset));
5277 ARM_FLDD (code, i * 2, ARMREG_LR, offset);
5279 arm_patch (labels [0], code);
5282 /* Allocate callee area */
5283 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
5284 ARM_SHL_IMM (code, ARMREG_R1, ARMREG_R1, 2);
5285 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R1);
5287 /* Set stack args */
5288 /* R1 = limit */
5289 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs));
5290 /* R2 = pointer into regs */
5291 code = emit_big_add (code, ARMREG_R2, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (PARAM_REGS * sizeof (target_mgreg_t)));
5292 /* R3 = pointer to stack */
5293 ARM_MOV_REG_REG (code, ARMREG_R3, ARMREG_SP);
5294 /* Loop */
5295 labels [0] = code;
5296 ARM_B_COND (code, ARMCOND_AL, 0);
5297 labels [1] = code;
5298 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R2, 0);
5299 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R3, 0);
5300 ARM_ADD_REG_IMM (code, ARMREG_R2, ARMREG_R2, sizeof (target_mgreg_t), 0);
5301 ARM_ADD_REG_IMM (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t), 0);
5302 ARM_SUB_REG_IMM (code, ARMREG_R1, ARMREG_R1, 1, 0);
5303 arm_patch (labels [0], code);
5304 ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
5305 labels [2] = code;
5306 ARM_B_COND (code, ARMCOND_GT, 0);
5307 arm_patch (labels [2], labels [1]);
5309 /* Set argument registers */
5310 for (i = 0; i < PARAM_REGS; ++i)
5311 ARM_LDR_IMM (code, i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + (i * sizeof (target_mgreg_t)));
5313 /* Make the call */
5314 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5315 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5317 /* Save result */
5318 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5319 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5320 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5321 if (IS_HARD_FLOAT)
5322 ARM_FSTD (code, ARM_VFP_D0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, fpregs));
5323 break;
5325 case OP_THROW: {
5326 if (ins->sreg1 != ARMREG_R0)
5327 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5328 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5329 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
5330 code = emit_call_seq (cfg, code);
5331 break;
5333 case OP_RETHROW: {
5334 if (ins->sreg1 != ARMREG_R0)
5335 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5336 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
5337 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
5338 code = emit_call_seq (cfg, code);
5339 break;
5341 case OP_START_HANDLER: {
5342 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5343 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5344 int i, rot_amount;
5346 /* Reserve a param area, see filter-stack.exe */
5347 if (param_area) {
5348 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5349 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5350 } else {
5351 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5352 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5356 if (arm_is_imm12 (spvar->inst_offset)) {
5357 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5358 } else {
5359 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5360 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5362 break;
5364 case OP_ENDFILTER: {
5365 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5366 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5367 int i, rot_amount;
5369 /* Free the param area */
5370 if (param_area) {
5371 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5372 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5373 } else {
5374 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5375 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5379 if (ins->sreg1 != ARMREG_R0)
5380 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5381 if (arm_is_imm12 (spvar->inst_offset)) {
5382 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5383 } else {
5384 g_assert (ARMREG_IP != spvar->inst_basereg);
5385 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5386 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5388 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5389 break;
5391 case OP_ENDFINALLY: {
5392 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5393 int param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
5394 int i, rot_amount;
5396 /* Free the param area */
5397 if (param_area) {
5398 if ((i = mono_arm_is_rotated_imm8 (param_area, &rot_amount)) >= 0) {
5399 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5400 } else {
5401 code = mono_arm_emit_load_imm (code, ARMREG_IP, param_area);
5402 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5406 if (arm_is_imm12 (spvar->inst_offset)) {
5407 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5408 } else {
5409 g_assert (ARMREG_IP != spvar->inst_basereg);
5410 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5411 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5413 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5414 break;
5416 case OP_CALL_HANDLER:
5417 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5418 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5419 cfg->thunk_area += THUNK_SIZE;
5420 for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
5421 mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
5422 break;
5423 case OP_GET_EX_OBJ:
5424 if (ins->dreg != ARMREG_R0)
5425 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_R0);
5426 break;
5428 case OP_LABEL:
5429 ins->inst_c0 = code - cfg->native_code;
5430 break;
5431 case OP_BR:
5432 /*if (ins->inst_target_bb->native_offset) {
5433 ARM_B (code, 0);
5434 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5435 } else*/ {
5436 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5437 code = mono_arm_patchable_b (code, ARMCOND_AL);
5439 break;
5440 case OP_BR_REG:
5441 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5442 break;
5443 case OP_SWITCH:
5445 * In the normal case we have:
5446 * ldr pc, [pc, ins->sreg1 << 2]
5447 * nop
5448 * If aot, we have:
5449 * ldr lr, [pc, ins->sreg1 << 2]
5450 * add pc, pc, lr
5451 * After follows the data.
5452 * FIXME: add aot support.
5454 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5455 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5456 code = realloc_code (cfg, max_len);
5457 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5458 ARM_NOP (code);
5459 code += 4 * GPOINTER_TO_INT (ins->klass);
5460 break;
5461 case OP_CEQ:
5462 case OP_ICEQ:
5463 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5464 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5465 break;
5466 case OP_CLT:
5467 case OP_ICLT:
5468 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5469 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5470 break;
5471 case OP_CLT_UN:
5472 case OP_ICLT_UN:
5473 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5474 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5475 break;
5476 case OP_CGT:
5477 case OP_ICGT:
5478 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5479 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5480 break;
5481 case OP_CGT_UN:
5482 case OP_ICGT_UN:
5483 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5484 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5485 break;
5486 case OP_ICNEQ:
5487 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5488 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5489 break;
5490 case OP_ICGE:
5491 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5492 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5493 break;
5494 case OP_ICLE:
5495 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5496 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5497 break;
5498 case OP_ICGE_UN:
5499 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5500 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5501 break;
5502 case OP_ICLE_UN:
5503 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5504 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
5505 break;
5506 case OP_COND_EXC_EQ:
5507 case OP_COND_EXC_NE_UN:
5508 case OP_COND_EXC_LT:
5509 case OP_COND_EXC_LT_UN:
5510 case OP_COND_EXC_GT:
5511 case OP_COND_EXC_GT_UN:
5512 case OP_COND_EXC_GE:
5513 case OP_COND_EXC_GE_UN:
5514 case OP_COND_EXC_LE:
5515 case OP_COND_EXC_LE_UN:
5516 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5517 break;
5518 case OP_COND_EXC_IEQ:
5519 case OP_COND_EXC_INE_UN:
5520 case OP_COND_EXC_ILT:
5521 case OP_COND_EXC_ILT_UN:
5522 case OP_COND_EXC_IGT:
5523 case OP_COND_EXC_IGT_UN:
5524 case OP_COND_EXC_IGE:
5525 case OP_COND_EXC_IGE_UN:
5526 case OP_COND_EXC_ILE:
5527 case OP_COND_EXC_ILE_UN:
5528 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5529 break;
5530 case OP_COND_EXC_C:
5531 case OP_COND_EXC_IC:
5532 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5533 break;
5534 case OP_COND_EXC_OV:
5535 case OP_COND_EXC_IOV:
5536 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5537 break;
5538 case OP_COND_EXC_NC:
5539 case OP_COND_EXC_INC:
5540 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5541 break;
5542 case OP_COND_EXC_NO:
5543 case OP_COND_EXC_INO:
5544 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5545 break;
5546 case OP_IBEQ:
5547 case OP_IBNE_UN:
5548 case OP_IBLT:
5549 case OP_IBLT_UN:
5550 case OP_IBGT:
5551 case OP_IBGT_UN:
5552 case OP_IBGE:
5553 case OP_IBGE_UN:
5554 case OP_IBLE:
5555 case OP_IBLE_UN:
5556 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5557 break;
5559 /* floating point opcodes */
5560 case OP_R8CONST:
5561 if (cfg->compile_aot) {
5562 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5563 ARM_B (code, 1);
5564 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5565 code += 4;
5566 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5567 code += 4;
5568 } else {
5569 /* FIXME: we can optimize the imm load by dealing with part of
5570 * the displacement in LDFD (aligning to 512).
5572 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
5573 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5575 break;
5576 case OP_R4CONST:
5577 if (cfg->compile_aot) {
5578 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5579 ARM_B (code, 0);
5580 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5581 code += 4;
5582 if (!cfg->r4fp)
5583 ARM_CVTS (code, ins->dreg, ins->dreg);
5584 } else {
5585 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)(gsize)ins->inst_p0);
5586 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5587 if (!cfg->r4fp)
5588 ARM_CVTS (code, ins->dreg, ins->dreg);
5590 break;
5591 case OP_STORER8_MEMBASE_REG:
5592 /* This is generated by the local regalloc pass which runs after the lowering pass */
5593 if (!arm_is_fpimm8 (ins->inst_offset)) {
5594 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5595 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5596 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5597 } else {
5598 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5600 break;
5601 case OP_LOADR8_MEMBASE:
5602 /* This is generated by the local regalloc pass which runs after the lowering pass */
5603 if (!arm_is_fpimm8 (ins->inst_offset)) {
5604 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5605 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5606 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5607 } else {
5608 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5610 break;
5611 case OP_STORER4_MEMBASE_REG:
5612 g_assert (arm_is_fpimm8 (ins->inst_offset));
5613 if (cfg->r4fp) {
5614 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5615 } else {
5616 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5617 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5618 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5619 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5621 break;
5622 case OP_LOADR4_MEMBASE:
5623 if (cfg->r4fp) {
5624 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5625 } else {
5626 g_assert (arm_is_fpimm8 (ins->inst_offset));
5627 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5628 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5629 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5630 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5632 break;
5633 case OP_ICONV_TO_R_UN: {
5634 g_assert_not_reached ();
5635 break;
5637 case OP_ICONV_TO_R4:
5638 if (cfg->r4fp) {
5639 ARM_FMSR (code, ins->dreg, ins->sreg1);
5640 ARM_FSITOS (code, ins->dreg, ins->dreg);
5641 } else {
5642 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5643 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5644 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5645 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5646 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5648 break;
5649 case OP_ICONV_TO_R8:
5650 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5651 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5652 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5653 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5654 break;
5656 case OP_SETFRET: {
5657 MonoType *sig_ret = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
5658 if (sig_ret->type == MONO_TYPE_R4) {
5659 if (cfg->r4fp) {
5660 if (IS_HARD_FLOAT) {
5661 if (ins->sreg1 != ARM_VFP_D0)
5662 ARM_CPYS (code, ARM_VFP_D0, ins->sreg1);
5663 } else {
5664 ARM_FMRS (code, ARMREG_R0, ins->sreg1);
5666 } else {
5667 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5669 if (!IS_HARD_FLOAT)
5670 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5672 } else {
5673 if (IS_HARD_FLOAT)
5674 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5675 else
5676 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5678 break;
5680 case OP_FCONV_TO_I1:
5681 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5682 break;
5683 case OP_FCONV_TO_U1:
5684 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5685 break;
5686 case OP_FCONV_TO_I2:
5687 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5688 break;
5689 case OP_FCONV_TO_U2:
5690 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5691 break;
5692 case OP_FCONV_TO_I4:
5693 case OP_FCONV_TO_I:
5694 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5695 break;
5696 case OP_FCONV_TO_U4:
5697 case OP_FCONV_TO_U:
5698 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5699 break;
5700 case OP_FCONV_TO_I8:
5701 case OP_FCONV_TO_U8:
5702 g_assert_not_reached ();
5703 /* Implemented as helper calls */
5704 break;
5705 case OP_LCONV_TO_R_UN:
5706 g_assert_not_reached ();
5707 /* Implemented as helper calls */
5708 break;
5709 case OP_LCONV_TO_OVF_I4_2: {
5710 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5712 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5715 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5716 high_bit_not_set = code;
5717 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5719 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5720 valid_negative = code;
5721 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5722 invalid_negative = code;
5723 ARM_B_COND (code, ARMCOND_AL, 0);
5725 arm_patch (high_bit_not_set, code);
5727 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5728 valid_positive = code;
5729 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5731 arm_patch (invalid_negative, code);
5732 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5734 arm_patch (valid_negative, code);
5735 arm_patch (valid_positive, code);
5737 if (ins->dreg != ins->sreg1)
5738 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5739 break;
5741 case OP_FADD:
5742 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5743 break;
5744 case OP_FSUB:
5745 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5746 break;
5747 case OP_FMUL:
5748 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5749 break;
5750 case OP_FDIV:
5751 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5752 break;
5753 case OP_FNEG:
5754 ARM_NEGD (code, ins->dreg, ins->sreg1);
5755 break;
5756 case OP_FREM:
5757 /* emulated */
5758 g_assert_not_reached ();
5759 break;
5760 case OP_FCOMPARE:
5761 if (IS_VFP) {
5762 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5763 ARM_FMSTAT (code);
5765 break;
5766 case OP_RCOMPARE:
5767 g_assert (IS_VFP);
5768 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5769 ARM_FMSTAT (code);
5770 break;
5771 case OP_FCEQ:
5772 if (IS_VFP) {
5773 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5774 ARM_FMSTAT (code);
5776 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5777 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5778 break;
5779 case OP_FCLT:
5780 if (IS_VFP) {
5781 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5782 ARM_FMSTAT (code);
5784 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5785 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5786 break;
5787 case OP_FCLT_UN:
5788 if (IS_VFP) {
5789 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5790 ARM_FMSTAT (code);
5792 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5793 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5794 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5795 break;
5796 case OP_FCGT:
5797 if (IS_VFP) {
5798 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5799 ARM_FMSTAT (code);
5801 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5802 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5803 break;
5804 case OP_FCGT_UN:
5805 if (IS_VFP) {
5806 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5807 ARM_FMSTAT (code);
5809 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5810 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5811 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5812 break;
5813 case OP_FCNEQ:
5814 if (IS_VFP) {
5815 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5816 ARM_FMSTAT (code);
5818 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5819 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5820 break;
5821 case OP_FCGE:
5822 if (IS_VFP) {
5823 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5824 ARM_FMSTAT (code);
5826 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5827 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5828 break;
5829 case OP_FCLE:
5830 if (IS_VFP) {
5831 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5832 ARM_FMSTAT (code);
5834 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5835 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5836 break;
5838 /* ARM FPA flags table:
5839 * N Less than ARMCOND_MI
5840 * Z Equal ARMCOND_EQ
5841 * C Greater Than or Equal ARMCOND_CS
5842 * V Unordered ARMCOND_VS
5844 case OP_FBEQ:
5845 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5846 break;
5847 case OP_FBNE_UN:
5848 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5849 break;
5850 case OP_FBLT:
5851 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5852 break;
5853 case OP_FBLT_UN:
5854 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5855 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5856 break;
5857 case OP_FBGT:
5858 case OP_FBGT_UN:
5859 case OP_FBLE:
5860 case OP_FBLE_UN:
5861 g_assert_not_reached ();
5862 break;
5863 case OP_FBGE:
5864 if (IS_VFP) {
5865 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5866 } else {
5867 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5868 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5869 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5871 break;
5872 case OP_FBGE_UN:
5873 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5874 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5875 break;
5877 case OP_CKFINITE: {
5878 if (IS_VFP) {
5879 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5880 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5882 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5883 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5884 ARM_B (code, 1);
5885 *(guint32*)code = 0xffffffff;
5886 code += 4;
5887 *(guint32*)code = 0x7fefffff;
5888 code += 4;
5889 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5890 ARM_FMSTAT (code);
5891 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "OverflowException");
5892 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5893 ARM_FMSTAT (code);
5894 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "OverflowException");
5895 ARM_CPYD (code, ins->dreg, ins->sreg1);
5897 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5898 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5900 break;
5903 case OP_RCONV_TO_I1:
5904 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5905 break;
5906 case OP_RCONV_TO_U1:
5907 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5908 break;
5909 case OP_RCONV_TO_I2:
5910 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5911 break;
5912 case OP_RCONV_TO_U2:
5913 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5914 break;
5915 case OP_RCONV_TO_I4:
5916 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5917 break;
5918 case OP_RCONV_TO_U4:
5919 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5920 break;
5921 case OP_RCONV_TO_R4:
5922 g_assert (IS_VFP);
5923 if (ins->dreg != ins->sreg1)
5924 ARM_CPYS (code, ins->dreg, ins->sreg1);
5925 break;
5926 case OP_RCONV_TO_R8:
5927 g_assert (IS_VFP);
5928 ARM_CVTS (code, ins->dreg, ins->sreg1);
5929 break;
5930 case OP_RADD:
5931 ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
5932 break;
5933 case OP_RSUB:
5934 ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
5935 break;
5936 case OP_RMUL:
5937 ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
5938 break;
5939 case OP_RDIV:
5940 ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
5941 break;
5942 case OP_RNEG:
5943 ARM_NEGS (code, ins->dreg, ins->sreg1);
5944 break;
5945 case OP_RCEQ:
5946 if (IS_VFP) {
5947 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5948 ARM_FMSTAT (code);
5950 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5951 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5952 break;
5953 case OP_RCLT:
5954 if (IS_VFP) {
5955 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5956 ARM_FMSTAT (code);
5958 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5959 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5960 break;
5961 case OP_RCLT_UN:
5962 if (IS_VFP) {
5963 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5964 ARM_FMSTAT (code);
5966 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5967 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5968 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5969 break;
5970 case OP_RCGT:
5971 if (IS_VFP) {
5972 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5973 ARM_FMSTAT (code);
5975 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5976 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5977 break;
5978 case OP_RCGT_UN:
5979 if (IS_VFP) {
5980 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5981 ARM_FMSTAT (code);
5983 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5984 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5985 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5986 break;
5987 case OP_RCNEQ:
5988 if (IS_VFP) {
5989 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5990 ARM_FMSTAT (code);
5992 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5993 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5994 break;
5995 case OP_RCGE:
5996 if (IS_VFP) {
5997 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5998 ARM_FMSTAT (code);
6000 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
6001 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
6002 break;
6003 case OP_RCLE:
6004 if (IS_VFP) {
6005 ARM_CMPS (code, ins->sreg2, ins->sreg1);
6006 ARM_FMSTAT (code);
6008 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
6009 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
6010 break;
6012 case OP_GC_LIVENESS_DEF:
6013 case OP_GC_LIVENESS_USE:
6014 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
6015 ins->backend.pc_offset = code - cfg->native_code;
6016 break;
6017 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
6018 ins->backend.pc_offset = code - cfg->native_code;
6019 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
6020 break;
6021 case OP_LIVERANGE_START: {
6022 if (cfg->verbose_level > 1)
6023 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
6024 MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
6025 break;
6027 case OP_LIVERANGE_END: {
6028 if (cfg->verbose_level > 1)
6029 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
6030 MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
6031 break;
6033 case OP_GC_SAFE_POINT: {
6034 guint8 *buf [1];
6036 ARM_LDR_IMM (code, ARMREG_IP, ins->sreg1, 0);
6037 ARM_CMP_REG_IMM (code, ARMREG_IP, 0, 0);
6038 buf [0] = code;
6039 ARM_B_COND (code, ARMCOND_EQ, 0);
6040 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll));
6041 code = emit_call_seq (cfg, code);
6042 arm_patch (buf [0], code);
6043 break;
6045 case OP_FILL_PROF_CALL_CTX:
6046 for (int i = 0; i < ARMREG_MAX; i++)
6047 if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
6048 ARM_STR_IMM (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (target_mgreg_t));
6049 break;
6050 default:
6051 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
6052 g_assert_not_reached ();
6055 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
6056 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
6057 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
6058 g_assert_not_reached ();
6061 cpos += max_len;
6063 last_ins = ins;
6066 set_code_cursor (cfg, code);
6069 #endif /* DISABLE_JIT */
6071 void
6072 mono_arch_register_lowlevel_calls (void)
6074 /* The signature doesn't matter */
6075 mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE);
6076 mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE);
6077 mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE);
/*
 * patch_lis_ori:
 * Back-patch a 32-bit immediate that was materialized as a lis/ori-style
 * instruction pair: the high 16 bits of VAL go into the first instruction's
 * immediate halfword, the low 16 bits into the second one's.
 * NOTE(review): this looks like a PowerPC-era leftover; in this file it is
 * only referenced after g_assert_not_reached () in mono_arch_patch_code_new,
 * so it should be unreachable on ARM — confirm before relying on it.
 */
#define patch_lis_ori(ip,val) do { \
		guint16 *__ins16 = (guint16*)(ip); \
		__ins16 [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \
		__ins16 [3] = ((guint32)(gsize)(val)) & 0xffff; \
	} while (0)
6086 void
6087 mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
6089 unsigned char *ip = ji->ip.i + code;
6091 if (ji->type == MONO_PATCH_INFO_SWITCH) {
6094 switch (ji->type) {
6095 case MONO_PATCH_INFO_SWITCH: {
6096 gpointer *jt = (gpointer*)(ip + 8);
6097 int i;
6098 /* jt is the inlined jump table, 2 instructions after ip
6099 * In the normal case we store the absolute addresses,
6100 * otherwise the displacements.
6102 for (i = 0; i < ji->data.table->table_size; i++)
6103 jt [i] = code + (int)(gsize)ji->data.table->table [i];
6104 break;
6106 case MONO_PATCH_INFO_IP:
6107 g_assert_not_reached ();
6108 patch_lis_ori (ip, ip);
6109 break;
6110 case MONO_PATCH_INFO_METHODCONST:
6111 case MONO_PATCH_INFO_CLASS:
6112 case MONO_PATCH_INFO_IMAGE:
6113 case MONO_PATCH_INFO_FIELD:
6114 case MONO_PATCH_INFO_VTABLE:
6115 case MONO_PATCH_INFO_IID:
6116 case MONO_PATCH_INFO_SFLDA:
6117 case MONO_PATCH_INFO_LDSTR:
6118 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
6119 case MONO_PATCH_INFO_LDTOKEN:
6120 g_assert_not_reached ();
6121 /* from OP_AOTCONST : lis + ori */
6122 patch_lis_ori (ip, target);
6123 break;
6124 case MONO_PATCH_INFO_R4:
6125 case MONO_PATCH_INFO_R8:
6126 g_assert_not_reached ();
6127 *((gconstpointer *)(ip + 2)) = target;
6128 break;
6129 case MONO_PATCH_INFO_EXC_NAME:
6130 g_assert_not_reached ();
6131 *((gconstpointer *)(ip + 1)) = target;
6132 break;
6133 case MONO_PATCH_INFO_NONE:
6134 case MONO_PATCH_INFO_BB_OVF:
6135 case MONO_PATCH_INFO_EXC_OVF:
6136 /* everything is dealt with at epilog output time */
6137 break;
6138 default:
6139 arm_patch_general (cfg, domain, ip, (const guchar*)target);
6140 break;
6144 void
6145 mono_arm_unaligned_stack (MonoMethod *method)
6147 g_assert_not_reached ();
6150 #ifndef DISABLE_JIT
6153 * Stack frame layout:
6155 * ------------------- fp
6156 * MonoLMF structure or saved registers
6157 * -------------------
6158 * locals
6159 * -------------------
6160 * spilled regs
6161 * -------------------
6162 * param area size is cfg->param_area
6163 * ------------------- sp
6165 guint8 *
6166 mono_arch_emit_prolog (MonoCompile *cfg)
6168 MonoMethod *method = cfg->method;
6169 MonoBasicBlock *bb;
6170 MonoMethodSignature *sig;
6171 MonoInst *inst;
6172 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount, part;
6173 guint8 *code;
6174 CallInfo *cinfo;
6175 int lmf_offset = 0;
6176 int prev_sp_offset, reg_offset;
6178 sig = mono_method_signature_internal (method);
6179 cfg->code_size = 256 + sig->param_count * 64;
6180 code = cfg->native_code = g_malloc (cfg->code_size);
6182 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
6184 alloc_size = cfg->stack_offset;
6185 pos = 0;
6186 prev_sp_offset = 0;
6188 if (iphone_abi) {
6190 * The iphone uses R7 as the frame pointer, and it points at the saved
6191 * r7+lr:
6192 * <lr>
6193 * r7 -> <r7>
6194 * <rest of frame>
6195 * We can't use r7 as a frame pointer since it points into the middle of
6196 * the frame, so we keep using our own frame pointer.
6197 * FIXME: Optimize this.
6199 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
6200 prev_sp_offset += 8; /* r7 and lr */
6201 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6202 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
6203 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
6206 if (!method->save_lmf) {
6207 if (iphone_abi) {
6208 /* No need to push LR again */
6209 if (cfg->used_int_regs)
6210 ARM_PUSH (code, cfg->used_int_regs);
6211 } else {
6212 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
6213 prev_sp_offset += 4;
6215 for (i = 0; i < 16; ++i) {
6216 if (cfg->used_int_regs & (1 << i))
6217 prev_sp_offset += 4;
6219 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6220 reg_offset = 0;
6221 for (i = 0; i < 16; ++i) {
6222 if ((cfg->used_int_regs & (1 << i))) {
6223 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6224 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
6225 reg_offset += 4;
6228 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
6229 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
6230 } else {
6231 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
6232 ARM_PUSH (code, 0x5ff0);
6233 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
6234 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6235 reg_offset = 0;
6236 for (i = 0; i < 16; ++i) {
6237 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
6238 /* The original r7 is saved at the start */
6239 if (!(iphone_abi && i == ARMREG_R7))
6240 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6241 reg_offset += 4;
6244 g_assert (reg_offset == 4 * 10);
6245 pos += MONO_ABI_SIZEOF (MonoLMF) - (4 * 10);
6246 lmf_offset = pos;
6248 alloc_size += pos;
6249 orig_alloc_size = alloc_size;
6250 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6251 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
6252 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
6253 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
6256 /* the stack used in the pushed regs */
6257 alloc_size += ALIGN_TO (prev_sp_offset, MONO_ARCH_FRAME_ALIGNMENT) - prev_sp_offset;
6258 cfg->stack_usage = alloc_size;
6259 if (alloc_size) {
6260 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
6261 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
6262 } else {
6263 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
6264 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
6266 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
6268 if (cfg->frame_reg != ARMREG_SP) {
6269 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
6270 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
6272 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6273 prev_sp_offset += alloc_size;
6275 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
6276 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
6278 /* compute max_offset in order to use short forward jumps
6279 * we could skip do it on arm because the immediate displacement
6280 * for jumps is large enough, it may be useful later for constant pools
6282 max_offset = 0;
6283 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
6284 MonoInst *ins = bb->code;
6285 bb->max_offset = max_offset;
6287 MONO_BB_FOR_EACH_INS (bb, ins)
6288 max_offset += ins_get_size (ins->opcode);
6291 /* stack alignment check */
6294 guint8 *buf [16];
6295 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6296 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6297 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6298 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6299 buf [0] = code;
6300 ARM_B_COND (code, ARMCOND_EQ, 0);
6301 if (cfg->compile_aot)
6302 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6303 else
6304 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6305 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
6306 code = emit_call_seq (cfg, code);
6307 arm_patch (buf [0], code);
6311 /* store runtime generic context */
6312 if (cfg->rgctx_var) {
6313 MonoInst *ins = cfg->rgctx_var;
6315 g_assert (ins->opcode == OP_REGOFFSET);
6317 if (arm_is_imm12 (ins->inst_offset)) {
6318 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
6319 } else {
6320 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6321 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
6325 /* load arguments allocated to register from the stack */
6326 pos = 0;
6328 cinfo = get_call_info (NULL, sig);
6330 if (cinfo->ret.storage == RegTypeStructByAddr) {
6331 ArgInfo *ainfo = &cinfo->ret;
6332 inst = cfg->vret_addr;
6333 g_assert (arm_is_imm12 (inst->inst_offset));
6334 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6337 if (sig->call_convention == MONO_CALL_VARARG) {
6338 ArgInfo *cookie = &cinfo->sig_cookie;
6340 /* Save the sig cookie address */
6341 g_assert (cookie->storage == RegTypeBase);
6343 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
6344 g_assert (arm_is_imm12 (cfg->sig_cookie));
6345 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
6346 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
6349 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6350 ArgInfo *ainfo = cinfo->args + i;
6351 inst = cfg->args [pos];
6353 if (cfg->verbose_level > 2)
6354 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
6356 if (inst->opcode == OP_REGVAR) {
6357 if (ainfo->storage == RegTypeGeneral)
6358 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
6359 else if (ainfo->storage == RegTypeFP) {
6360 g_assert_not_reached ();
6361 } else if (ainfo->storage == RegTypeBase) {
6362 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6363 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6364 } else {
6365 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6366 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
6368 } else
6369 g_assert_not_reached ();
6371 if (cfg->verbose_level > 2)
6372 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
6373 } else {
6374 switch (ainfo->storage) {
6375 case RegTypeHFA:
6376 for (part = 0; part < ainfo->nregs; part ++) {
6377 if (ainfo->esize == 4)
6378 ARM_FSTS (code, ainfo->reg + part, inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
6379 else
6380 ARM_FSTD (code, ainfo->reg + (part * 2), inst->inst_basereg, inst->inst_offset + (part * ainfo->esize));
6382 break;
6383 case RegTypeGeneral:
6384 case RegTypeIRegPair:
6385 case RegTypeGSharedVtInReg:
6386 case RegTypeStructByAddr:
6387 switch (ainfo->size) {
6388 case 1:
6389 if (arm_is_imm12 (inst->inst_offset))
6390 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6391 else {
6392 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6393 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6395 break;
6396 case 2:
6397 if (arm_is_imm8 (inst->inst_offset)) {
6398 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6399 } else {
6400 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6401 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6403 break;
6404 case 8:
6405 if (arm_is_imm12 (inst->inst_offset)) {
6406 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6407 } else {
6408 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6409 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6411 if (arm_is_imm12 (inst->inst_offset + 4)) {
6412 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
6413 } else {
6414 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6415 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
6417 break;
6418 default:
6419 if (arm_is_imm12 (inst->inst_offset)) {
6420 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6421 } else {
6422 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6423 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6425 break;
6427 break;
6428 case RegTypeBaseGen:
6429 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6430 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6431 } else {
6432 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6433 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6435 if (arm_is_imm12 (inst->inst_offset + 4)) {
6436 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6437 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6438 } else {
6439 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6440 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6441 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6442 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
6444 break;
6445 case RegTypeBase:
6446 case RegTypeGSharedVtOnStack:
6447 case RegTypeStructByAddrOnStack:
6448 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6449 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6450 } else {
6451 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6452 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6455 switch (ainfo->size) {
6456 case 1:
6457 if (arm_is_imm8 (inst->inst_offset)) {
6458 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6459 } else {
6460 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6461 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6463 break;
6464 case 2:
6465 if (arm_is_imm8 (inst->inst_offset)) {
6466 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6467 } else {
6468 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6469 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6471 break;
6472 case 8:
6473 if (arm_is_imm12 (inst->inst_offset)) {
6474 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6475 } else {
6476 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6477 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6479 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6480 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6481 } else {
6482 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6483 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6485 if (arm_is_imm12 (inst->inst_offset + 4)) {
6486 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6487 } else {
6488 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6489 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6491 break;
6492 default:
6493 if (arm_is_imm12 (inst->inst_offset)) {
6494 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6495 } else {
6496 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6497 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6499 break;
6501 break;
6502 case RegTypeFP: {
6503 int imm8, rot_amount;
6505 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6506 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6507 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6508 } else
6509 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6511 if (ainfo->size == 8)
6512 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6513 else
6514 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6515 break;
6517 case RegTypeStructByVal: {
6518 int doffset = inst->inst_offset;
6519 int soffset = 0;
6520 int cur_reg;
6521 int size = 0;
6522 size = mini_type_stack_size_full (inst->inst_vtype, NULL, sig->pinvoke);
6523 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6524 if (arm_is_imm12 (doffset)) {
6525 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6526 } else {
6527 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6528 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6530 soffset += sizeof (target_mgreg_t);
6531 doffset += sizeof (target_mgreg_t);
6533 if (ainfo->vtsize) {
6534 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6535 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6536 code = emit_memcpy (code, ainfo->vtsize * sizeof (target_mgreg_t), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6538 break;
6540 default:
6541 g_assert_not_reached ();
6542 break;
6545 pos++;
6548 if (method->save_lmf)
6549 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6551 if (cfg->arch.seq_point_info_var) {
6552 MonoInst *ins = cfg->arch.seq_point_info_var;
6554 /* Initialize the variable from a GOT slot */
6555 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6556 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6557 ARM_B (code, 0);
6558 *(gpointer*)code = NULL;
6559 code += 4;
6560 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6562 g_assert (ins->opcode == OP_REGOFFSET);
6564 if (arm_is_imm12 (ins->inst_offset)) {
6565 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6566 } else {
6567 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6568 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6572 /* Initialize ss_trigger_page_var */
6573 if (!cfg->soft_breakpoints) {
6574 MonoInst *info_var = cfg->arch.seq_point_info_var;
6575 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6576 int dreg = ARMREG_LR;
6578 if (info_var) {
6579 g_assert (info_var->opcode == OP_REGOFFSET);
6581 code = emit_ldr_imm (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6582 /* Load the trigger page addr */
6583 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6584 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6588 if (cfg->arch.seq_point_ss_method_var) {
6589 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6590 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6592 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6593 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6595 if (cfg->compile_aot) {
6596 MonoInst *info_var = cfg->arch.seq_point_info_var;
6597 int dreg = ARMREG_LR;
6599 g_assert (info_var->opcode == OP_REGOFFSET);
6600 g_assert (arm_is_imm12 (info_var->inst_offset));
6602 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6603 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
6604 ARM_STR_IMM (code, dreg, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6605 } else {
6606 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6607 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6609 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6610 ARM_B (code, 1);
6611 *(gpointer*)code = &single_step_tramp;
6612 code += 4;
6613 *(gpointer*)code = breakpoint_tramp;
6614 code += 4;
6616 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6617 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6618 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6619 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6623 set_code_cursor (cfg, code);
6624 g_free (cinfo);
6626 return code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: reload struct/HFA return values into the
 * registers the ABI expects them in, then restore the callee-saved
 * registers (either from the LMF or from the normal register save area)
 * and return by popping the saved LR into PC.
 */
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	int max_epilog_size = 16 + 20*4;
	guint8 *code;
	CallInfo *cinfo;

	/* The LMF restore sequence is considerably longer. */
	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	code = realloc_code (cfg, max_epilog_size);

	/* Save the uwind state which is needed by the out-of-line code */
	mono_emit_unwind_op_remember_state (cfg, code);

	pos = 0;

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	switch (cinfo->ret.storage) {
	case RegTypeStructByVal: {
		MonoInst *ins = cfg->ret;

		if (cinfo->ret.nregs == 1) {
			if (arm_is_imm12 (ins->inst_offset)) {
				ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
			} else {
				/* Offset too large for an ldr immediate: materialize it in LR. */
				code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
				ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
			}
		} else {
			/* Multi-register struct return: word i goes into register i (r0..). */
			for (i = 0; i < cinfo->ret.nregs; ++i) {
				int offset = ins->inst_offset + (i * 4);
				if (arm_is_imm12 (offset)) {
					ARM_LDR_IMM (code, i, ins->inst_basereg, offset);
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_LR, offset);
					ARM_LDR_REG_REG (code, i, ins->inst_basereg, ARMREG_LR);
				}
			}
		}
		break;
	}
	case RegTypeHFA: {
		MonoInst *ins = cfg->ret;

		/* Homogeneous FP aggregate: reload each member into its VFP register. */
		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				ARM_FLDS (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
			else
				ARM_FLDD (code, cinfo->ret.reg + (i * 2), ins->inst_basereg, ins->inst_offset + (i * cinfo->ret.esize));
		}
		break;
	}
	default:
		break;
	}

	if (method->save_lmf) {
		int lmf_offset, reg, sp_adj, regmask, nused_int_regs = 0;
		/* all but r0-r3, sp and pc */
		pos += MONO_ABI_SIZEOF (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
		lmf_offset = pos;

		code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);

		/* This points to r4 inside MonoLMF->iregs */
		sp_adj = (MONO_ABI_SIZEOF (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (target_mgreg_t));
		reg = ARMREG_R4;
		regmask = 0x9ff0; /* restore lr to pc */
		/* Skip caller saved registers not used by the method */
		while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
			regmask &= ~(1 << reg);
			sp_adj += 4;
			reg ++;
		}
		if (iphone_abi)
			/* Restored later */
			regmask &= ~(1 << ARMREG_PC);
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
		for (i = 0; i < 16; i++) {
			if (regmask & (1 << i))
				nused_int_regs ++;
		}
		mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, ((iphone_abi ? 3 : 0) + nused_int_regs) * 4);
		/* restore iregs */
		ARM_POP (code, regmask);
		if (iphone_abi) {
			for (i = 0; i < 16; i++) {
				if (regmask & (1 << i))
					mono_emit_unwind_op_same_value (cfg, code, i);
			}
			/* Restore saved r7, restore LR to PC */
			/* Skip lr from the lmf */
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 3 * 4);
			ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (target_mgreg_t), 0);
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
			ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		}
	} else {
		int i, nused_int_regs = 0;

		for (i = 0; i < 16; i++) {
			if (cfg->used_int_regs & (1 << i))
				nused_int_regs ++;
		}

		/* Point sp back at the register save area. */
		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
		}

		if (cfg->frame_reg != ARMREG_SP) {
			mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_SP);
		}

		if (iphone_abi) {
			/* Restore saved gregs */
			if (cfg->used_int_regs) {
				mono_emit_unwind_op_def_cfa_offset (cfg, code, (2 + nused_int_regs) * 4);
				ARM_POP (code, cfg->used_int_regs);
				for (i = 0; i < 16; i++) {
					if (cfg->used_int_regs & (1 << i))
						mono_emit_unwind_op_same_value (cfg, code, i);
				}
			}
			mono_emit_unwind_op_def_cfa_offset (cfg, code, 2 * 4);
			/* Restore saved r7, restore LR to PC */
			ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		} else {
			mono_emit_unwind_op_def_cfa_offset (cfg, code, (nused_int_regs + 1) * 4);
			/* Popping the saved LR into PC performs the return. */
			ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
		}
	}

	/* Restore the unwind state to be the same as before the epilog */
	mono_emit_unwind_op_restore_state (cfg, code);

	set_code_cursor (cfg, code);
}
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line code which raises the corlib exceptions recorded
 * as MONO_PATCH_INFO_EXC patches during code generation. Throw sites for
 * the same exception class share one out-of-line sequence.
 */
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;
	guint8 *code;
	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
	int max_epilog_size = 50;

	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
		exc_throw_pos [i] = NULL;
		exc_throw_found [i] = 0;
	}

	/* count the number of exception infos */

	/*
	 * make sure we have enough space for exceptions
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = mini_exception_id_by_name ((const char*)patch_info->data.target);
			if (!exc_throw_found [i]) {
				/* Only the first throw site of each class emits a sequence. */
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;
			}
		}
	}

	code = realloc_code (cfg, max_epilog_size);

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;

			i = mini_exception_id_by_name ((const char*)patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* Reuse the sequence already emitted for this exception class. */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
				break;
			} else {
				exc_throw_pos [i] = code;
			}
			arm_patch (ip, code);

			exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);

			/* r1 = return address of the throw site, r0 = type token (pc-relative load). */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			/* Retarget this patch at the call to the throw helper. */
			patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
			patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
			patch_info->ip.i = code - cfg->native_code;
			ARM_BL (code, 0);
			cfg->thunk_area += THUNK_SIZE;
			/* Inline constant read by the pc-relative load above. */
			*(guint32*)(gpointer)code = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF;
			code += 4;
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}

	set_code_cursor (cfg, code);
}
6847 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_finish_init:
 *
 *   Backend hook run at the end of runtime initialization; the ARM
 * backend has no work to do here.
 */
void
mono_arch_finish_init (void)
{
}
6854 void
6855 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
6859 MonoInst*
6860 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6862 /* FIXME: */
6863 return NULL;
6866 #ifndef DISABLE_JIT
6868 #endif
6870 guint32
6871 mono_arch_get_patch_offset (guint8 *code)
6873 /* OP_AOTCONST */
6874 return 8;
/*
 * mono_arch_flush_register_windows:
 *
 *   Register windows are a SPARC concept; ARM has none, so there is
 * nothing to flush.
 */
void
mono_arch_flush_register_windows (void)
{
}
6882 MonoMethod*
6883 mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
6885 return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
6888 MonoVTable*
6889 mono_arch_find_static_call_vtable (host_mgreg_t *regs, guint8 *code)
6891 return (MonoVTable*)(gsize)regs [MONO_ARCH_RGCTX_REG];
6894 GSList*
6895 mono_arch_get_cie_program (void)
6897 GSList *l = NULL;
6899 mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, ARMREG_SP, 0);
6901 return l;
/* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Byte sizes of the code fragments emitted by the IMT trampoline builder below. */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
#define WMC_SIZE (8 * 4)
/* Byte distance between two pointers as a signed 32 bit value. */
#define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
6913 static arminstr_t *
6914 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6916 guint32 delta = DISTANCE (target, code);
6917 delta -= 8;
6918 g_assert (delta >= 0 && delta <= 0xFFF);
6919 *target = *target | delta;
6920 *code = value;
6921 return code + 1;
#ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper reached from the wrong-method check in the IMT trampoline:
 * report the mismatching IMT values and abort. */
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
	g_assert (0);
}
#endif
/*
 * mono_arch_build_imt_trampoline:
 *
 *   Build the interface dispatch trampoline for VTABLE. IMT_ENTRIES is a
 * sorted array of COUNT checks: compare entries form a binary search over
 * the IMT keys, equals entries branch to the vtable slot (or to
 * target_code/FAIL_TRAMP). The code is emitted in two passes: first the
 * chunk sizes are measured, then the instructions are emitted with
 * pc-relative constant-pool slots patched in afterwards.
 */
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int size, i;
	arminstr_t *code, *start;
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;
	arminstr_t *vtable_target = NULL;
	int extra_space = 0;
#ifdef ENABLE_WRONG_METHOD_CHECK
	char * cond;
#endif
	GSList *unwind_ops;

	/* Pass 1: compute the size of each entry's chunk. */
	size = BASE_SIZE;
	constant_pool_starts = g_new0 (guint32*, count);

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;
			}

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;
			} else {
#ifdef ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += WMC_SIZE;
#endif
			}
			if (fail_case) {
				item->chunk_size += 16;
				large_offsets = TRUE;
			}
			item->chunk_size += CALL_SIZE;
		} else {
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}

	if (large_offsets)
		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */

	if (fail_tramp)
		code = mono_method_alloc_generic_virtual_trampoline (domain, size);
	else
		code = mono_domain_code_reserve (domain, size);
	start = code;

	unwind_ops = mono_arch_get_cie_program ();

#ifdef DEBUG_IMT
	g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif

	/* Pass 2: emit. With large offsets, IP is also pushed so it can hold the vtable. */
	if (large_offsets) {
		ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 4 * sizeof (host_mgreg_t));
	} else {
		ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (host_mgreg_t));
	}
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
		gint32 vtable_offset;

		item->code_target = (guint8*)code;

		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
					/* Compare against the IMT key, loaded pc-relative and patched later. */
					imt_method = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				}
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0);
			} else {
				/*Enable the commented code to assert on wrong method*/
#ifdef ENABLE_WRONG_METHOD_CHECK
				imt_method = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				cond = code;
				ARM_B_COND (code, ARMCOND_EQ, 0);

/* Define this if your system is so bad that gdb is failing. */
#ifdef BROKEN_DEV_ENV
				ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
				ARM_BL (code, 0);
				arm_patch (code - 1, mini_dump_bad_imt);
#else
				ARM_DBRK (code);
#endif
				arm_patch (cond, code);
#endif
			}

			if (item->has_target_code) {
				/* Load target address */
				target_code_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
			} else {
				vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
				if (!arm_is_imm12 (vtable_offset)) {
					/*
					 * We need to branch to a computed address but we don't have
					 * a free register to store it, since IP must contain the
					 * vtable address. So we push the two values to the stack, and
					 * load them both using LDM.
					 */
					/* Compute target address */
					vtable_offset_ins = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
					/* Save it to the fourth slot */
					ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (target_mgreg_t));
					/* Restore registers and branch */
					ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

					code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
				} else {
					ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
					if (large_offsets) {
						mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (host_mgreg_t));
						ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (host_mgreg_t));
					}
					mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 0);
					ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
				}
			}

			if (fail_case) {
				arm_patch (item->jmp_code, (guchar*)code);

				target_code_ins = code;
				/* Load target address */
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (host_mgreg_t));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
				item->jmp_code = NULL;
			}

			if (imt_method)
				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)(gsize)item->key);

			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)(gsize)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;
			}

			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
			if (extra_space) {
				code += extra_space;
				extra_space = 0;
			}
		} else {
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);

			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_HS, 0);
			++extra_space;
		}
	}

	/* Fixup pass: resolve the branch targets and fill the bsearch constant pools. */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		}
		if (i > 0 && item->is_equals) {
			int j;
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)(gsize)imt_entries [j]->key);
			}
		}
	}

#ifdef DEBUG_IMT
	{
		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count);
		mono_disassemble_code (NULL, (guint8*)start, size, buff);
		g_free (buff);
	}
#endif

	g_free (constant_pool_starts);

	mono_arch_flush_icache ((guint8*)start, size);
	MONO_PROFILER_RAISE (jit_code_buffer, ((guint8*)start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
	UnlockedAdd (&mono_stats.imt_trampolines_size, code - start);

	g_assert (DISTANCE (start, code) <= size);

	mono_tramp_info_register (mono_tramp_info_create (NULL, (guint8*)start, DISTANCE (start, code), NULL, unwind_ops), domain);

	return start;
}
7169 host_mgreg_t
7170 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
7172 return ctx->regs [reg];
7175 void
7176 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
7178 ctx->regs [reg] = val;
7182 * mono_arch_get_trampolines:
7184 * Return a list of MonoTrampInfo structures describing arch specific trampolines
7185 * for AOT.
7187 GSList *
7188 mono_arch_get_trampolines (gboolean aot)
7190 return mono_arm_get_exception_trampolines (aot);
#if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)

/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;
	MonoDebugOptions *opt = mini_get_debug_options ();

	if (ji->from_aot) {
		/* AOT code is immutable: arm the per-offset entry in the bp_addrs table instead. */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = (guint8*)(opt->soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
	} else if (opt->soft_breakpoints) {
		/* Patch the seq point site to call the breakpoint trampoline through LR. */
		code += 4;
		ARM_BLX_REG (code, ARMREG_LR);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)(gssize)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	MonoDebugOptions *opt = mini_get_debug_options ();
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		/* Mirror of mono_arch_set_breakpoint: disarm the bp_addrs table entry. */
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(opt->soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
		info->bp_addrs [native_offset / 4] = 0;
	} else if (opt->soft_breakpoints) {
		/* Replace the BLX emitted by set_breakpoint with a nop. */
		code += 4;
		ARM_NOP (code);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		/* Overwrite the 16 byte trigger-page read sequence with nops. */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
7278 * mono_arch_start_single_stepping:
7280 * Start single stepping.
7282 void
7283 mono_arch_start_single_stepping (void)
7285 if (ss_trigger_page)
7286 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7287 else
7288 single_step_tramp = mini_get_single_step_trampoline ();
7292 * mono_arch_stop_single_stepping:
7294 * Stop single stepping.
7296 void
7297 mono_arch_stop_single_stepping (void)
7299 if (ss_trigger_page)
7300 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7301 else
7302 single_step_tramp = NULL;
7305 #if __APPLE__
7306 #define DBG_SIGNAL SIGBUS
7307 #else
7308 #define DBG_SIGNAL SIGSEGV
7309 #endif
7312 * mono_arch_is_single_step_event:
7314 * Return whenever the machine state in SIGCTX corresponds to a single
7315 * step event.
7317 gboolean
7318 mono_arch_is_single_step_event (void *info, void *sigctx)
7320 siginfo_t *sinfo = (siginfo_t*)info;
7322 if (!ss_trigger_page)
7323 return FALSE;
7325 /* Sometimes the address is off by 4 */
7326 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7327 return TRUE;
7328 else
7329 return FALSE;
7333 * mono_arch_is_breakpoint_event:
7335 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7337 gboolean
7338 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7340 siginfo_t *sinfo = (siginfo_t*)info;
7342 if (!ss_trigger_page)
7343 return FALSE;
7345 if (sinfo->si_signo == DBG_SIGNAL) {
7346 /* Sometimes the address is off by 4 */
7347 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7348 return TRUE;
7349 else
7350 return FALSE;
7351 } else {
7352 return FALSE;
7357 * mono_arch_skip_breakpoint:
7359 * See mini-amd64.c for docs.
7361 void
7362 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7364 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7368 * mono_arch_skip_single_step:
7370 * See mini-amd64.c for docs.
7372 void
7373 mono_arch_skip_single_step (MonoContext *ctx)
7375 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7379 * mono_arch_get_seq_point_info:
7381 * See mini-amd64.c for docs.
7383 SeqPointInfo*
7384 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7386 SeqPointInfo *info;
7387 MonoJitInfo *ji;
7389 // FIXME: Add a free function
7391 mono_domain_lock (domain);
7392 info = (SeqPointInfo*)g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7393 code);
7394 mono_domain_unlock (domain);
7396 if (!info) {
7397 ji = mono_jit_info_table_find (domain, code);
7398 g_assert (ji);
7400 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7402 info->ss_trigger_page = ss_trigger_page;
7403 info->bp_trigger_page = bp_trigger_page;
7404 info->ss_tramp_addr = &single_step_tramp;
7406 mono_domain_lock (domain);
7407 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7408 code, info);
7409 mono_domain_unlock (domain);
7412 return info;
7415 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7418 * mono_arch_set_target:
7420 * Set the target architecture the JIT backend should generate code for, in the form
7421 * of a GNU target triplet. Only used in AOT mode.
7423 void
7424 mono_arch_set_target (char *mtriple)
7426 /* The GNU target triple format is not very well documented */
7427 if (strstr (mtriple, "armv7")) {
7428 v5_supported = TRUE;
7429 v6_supported = TRUE;
7430 v7_supported = TRUE;
7432 if (strstr (mtriple, "armv6")) {
7433 v5_supported = TRUE;
7434 v6_supported = TRUE;
7436 if (strstr (mtriple, "armv7s")) {
7437 v7s_supported = TRUE;
7439 if (strstr (mtriple, "armv7k")) {
7440 v7k_supported = TRUE;
7442 if (strstr (mtriple, "thumbv7s")) {
7443 v5_supported = TRUE;
7444 v6_supported = TRUE;
7445 v7_supported = TRUE;
7446 v7s_supported = TRUE;
7447 thumb_supported = TRUE;
7448 thumb2_supported = TRUE;
7450 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7451 v5_supported = TRUE;
7452 v6_supported = TRUE;
7453 thumb_supported = TRUE;
7454 iphone_abi = TRUE;
7456 if (strstr (mtriple, "gnueabi"))
7457 eabi_supported = TRUE;
7460 gboolean
7461 mono_arch_opcode_supported (int opcode)
7463 switch (opcode) {
7464 case OP_ATOMIC_ADD_I4:
7465 case OP_ATOMIC_EXCHANGE_I4:
7466 case OP_ATOMIC_CAS_I4:
7467 case OP_ATOMIC_LOAD_I1:
7468 case OP_ATOMIC_LOAD_I2:
7469 case OP_ATOMIC_LOAD_I4:
7470 case OP_ATOMIC_LOAD_U1:
7471 case OP_ATOMIC_LOAD_U2:
7472 case OP_ATOMIC_LOAD_U4:
7473 case OP_ATOMIC_STORE_I1:
7474 case OP_ATOMIC_STORE_I2:
7475 case OP_ATOMIC_STORE_I4:
7476 case OP_ATOMIC_STORE_U1:
7477 case OP_ATOMIC_STORE_U2:
7478 case OP_ATOMIC_STORE_U4:
7479 return v7_supported;
7480 case OP_ATOMIC_LOAD_R4:
7481 case OP_ATOMIC_LOAD_R8:
7482 case OP_ATOMIC_STORE_R4:
7483 case OP_ATOMIC_STORE_R8:
7484 return v7_supported && IS_VFP;
7485 default:
7486 return FALSE;
7490 CallInfo*
7491 mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
7493 return get_call_info (mp, sig);
7496 gpointer
7497 mono_arch_get_get_tls_tramp (void)
7499 return NULL;
7502 static G_GNUC_UNUSED guint8*
7503 emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
7505 /* OP_AOTCONST */
7506 mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
7507 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7508 ARM_B (code, 0);
7509 *(gpointer*)code = NULL;
7510 code += 4;
7511 /* Load the value from the GOT */
7512 ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
7513 return code;
7516 guint8*
7517 mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
7519 MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;
7521 *ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
7522 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7523 ARM_B (code, 0);
7524 *(gpointer*)code = NULL;
7525 code += 4;
7526 ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
7527 return code;