/*
 * ARM64 backend for the Mono code generator
 *
 * Copyright 2013 Xamarin, Inc (http://www.xamarin.com)
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2003 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include "cpu-arm64.h"

#include <mono/arch/arm64/arm64-codegen.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/metadata/abi-details.h>
/*
 * Documentation:
 * - ARM(R) Architecture Reference Manual, ARMv8, for ARMv8-A architecture profile (DDI0487A_a_armv8_arm.pdf)
 * - Procedure Call Standard for the ARM 64-bit Architecture (AArch64) (IHI0055B_aapcs64.pdf)
 * - ELF for the ARM 64-bit Architecture (IHI0056B_aaelf64.pdf)
 *
 * Register usage:
 * - ip0/ip1/lr are used as temporary registers
 * - r27 is used as the rgctx/imt register
 * - r28 is used to access arguments passed on the stack
 * - d15/d16 are used as fp temporary registers
 */
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define FP_TEMP_REG ARMREG_D16
#define FP_TEMP_REG2 ARMREG_D17

#define THUNK_SIZE (4 * 4)
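/*
 * A thunk occupies four 32-bit slots (16 bytes): an ldr/br instruction pair
 * followed by a 64 bit literal holding the branch target, see emit_thunk ()
 * below.
 */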
/* The single step trampoline */
static gpointer ss_trampoline;

/* The breakpoint trampoline */
static gpointer bp_trampoline;

static gboolean ios_abi;
static __attribute__ ((__warn_unused_result__)) guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset);
const char*
mono_arch_regname (int reg)
{
	static const char * rnames [] = {
		"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
		"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
		"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "fp",
		"lr", "sp"
	};

	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames [] = {
		"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9",
		"d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19",
		"d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
		"d30", "d31"
	};

	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown fp";
}
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	NOT_IMPLEMENTED;
	return 0;
}
#define MAX_ARCH_DELEGATE_PARAMS 7
static gpointer
get_delegate_invoke_impl (gboolean has_target, int param_count, guint32 *code_size)
{
	guint8 *code, *start;

	if (has_target) {
		start = code = mono_global_codeman_reserve (12);

		/* Replace the this argument with the target */
		arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
		arm_ldrx (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
		arm_brx (code, ARMREG_IP0);

		g_assert ((code - start) <= 12);

		mono_arch_flush_icache (start, 12);
	} else {
		int size, i;

		size = 8 + param_count * 4;
		start = code = mono_global_codeman_reserve (size);

		arm_ldrx (code, ARMREG_IP0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
		/* slide down the arguments */
		for (i = 0; i < param_count; ++i)
			arm_movx (code, i, i + 1);
		arm_brx (code, ARMREG_IP0);

		g_assert ((code - start) <= size);

		mono_arch_flush_icache (start, size);
	}

	if (code_size)
		*code_size = code - start;

	return start;
}
/*
 * mono_arch_get_delegate_invoke_impls:
 *
 *   Return a list of MonoAotTrampInfo structures for the delegate invoke impl
 * trampolines.
 */
GSList*
mono_arch_get_delegate_invoke_impls (void)
{
	GSList *res = NULL;
	guint8 *code;
	guint32 code_len;
	int i;
	char *tramp_name;

	code = get_delegate_invoke_impl (TRUE, 0, &code_len);
	res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));

	for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
		code = get_delegate_invoke_impl (FALSE, i, &code_len);
		tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
		res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
		g_free (tramp_name);
	}

	return res;
}
gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	guint8 *code, *start;

	/*
	 * vtypes are returned in registers, or using the dedicated r8 register, so
	 * they can be supported by delegate invokes.
	 */

	if (has_target) {
		static guint8* cached = NULL;

		if (cached)
			return cached;

		if (mono_aot_only)
			start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
		else
			start = get_delegate_invoke_impl (TRUE, 0, NULL);
		mono_memory_barrier ();
		cached = start;
		return cached;
	} else {
		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
		int i;

		if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
			return NULL;
		for (i = 0; i < sig->param_count; ++i)
			if (!mono_is_regsize_var (sig->params [i]))
				return NULL;

		code = cache [sig->param_count];
		if (code)
			return code;

		if (mono_aot_only) {
			char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
			start = mono_aot_get_trampoline (name);
			g_free (name);
		} else {
			start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
		}
		mono_memory_barrier ();
		cache [sig->param_count] = start;
		return start;
	}

	return NULL;
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
	return NULL;
}
gpointer
mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
	return (gpointer)regs [ARMREG_R0];
}
void
mono_arch_cpu_init (void)
{
}
void
mono_arch_init (void)
{
	mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
	mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);

	if (!mono_aot_only)
		bp_trampoline = mini_get_breakpoint_trampoline ();

	mono_arm_gsharedvt_init ();

#if defined(TARGET_IOS)
	ios_abi = TRUE;
#endif
}
void
mono_arch_cleanup (void)
{
}

guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
	*exclude_mask = 0;
	return 0;
}

guint32
mono_arch_cpu_enumerate_simd_versions (void)
{
	return 0;
}

void
mono_arch_register_lowlevel_calls (void)
{
}

void
mono_arch_finish_init (void)
{
}
/* The maximum length is 2 instructions */
static guint8*
emit_imm (guint8 *code, int dreg, int imm)
{
	// FIXME: Optimize this
	if (imm < 0) {
		gint64 limm = imm;
		arm_movnx (code, dreg, (~limm) & 0xffff, 0);
		arm_movkx (code, dreg, (limm >> 16) & 0xffff, 16);
	} else {
		arm_movzx (code, dreg, imm & 0xffff, 0);
		if ((imm >> 16) & 0xffff)
			arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
	}

	return code;
}
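/*
 * Example: emit_imm (code, dreg, 0x12345678) expands to
 *   movz dreg, #0x5678              ; bits 0-15, remaining bits zeroed
 *   movk dreg, #0x1234, lsl #16     ; bits 16-31, remaining bits kept
 * while a negative immediate starts from movn so the upper bits end up
 * all ones.
 */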
/* The maximum length is 4 instructions */
static guint8*
emit_imm64 (guint8 *code, int dreg, guint64 imm)
{
	// FIXME: Optimize this
	arm_movzx (code, dreg, imm & 0xffff, 0);
	if ((imm >> 16) & 0xffff)
		arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
	if ((imm >> 32) & 0xffff)
		arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32);
	if ((imm >> 48) & 0xffff)
		arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48);

	return code;
}
guint8*
mono_arm_emit_imm64 (guint8 *code, int dreg, gint64 imm)
{
	return emit_imm64 (code, dreg, imm);
}
/*
 * emit_imm64_template:
 *
 *   Emit a patchable code sequence for constructing a 64 bit immediate.
 */
static guint8*
emit_imm64_template (guint8 *code, int dreg)
{
	arm_movzx (code, dreg, 0, 0);
	arm_movkx (code, dreg, 0, 16);
	arm_movkx (code, dreg, 0, 32);
	arm_movkx (code, dreg, 0, 48);

	return code;
}
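/*
 * The four instructions above encode a zero immediate: the real value is
 * filled in later by the MONO_R_ARM64_IMM case of arm_patch_full (), which
 * rewrites the movz/movk operands in place.
 */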
static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_addw_imm (guint8 *code, int dreg, int sreg, int imm)
{
	if (!arm_is_arith_imm (imm)) {
		code = emit_imm (code, ARMREG_LR, imm);
		arm_addw (code, dreg, sreg, ARMREG_LR);
	} else {
		arm_addw_imm (code, dreg, sreg, imm);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_addx_imm (guint8 *code, int dreg, int sreg, int imm)
{
	if (!arm_is_arith_imm (imm)) {
		code = emit_imm (code, ARMREG_LR, imm);
		arm_addx (code, dreg, sreg, ARMREG_LR);
	} else {
		arm_addx_imm (code, dreg, sreg, imm);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_subw_imm (guint8 *code, int dreg, int sreg, int imm)
{
	if (!arm_is_arith_imm (imm)) {
		code = emit_imm (code, ARMREG_LR, imm);
		arm_subw (code, dreg, sreg, ARMREG_LR);
	} else {
		arm_subw_imm (code, dreg, sreg, imm);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_subx_imm (guint8 *code, int dreg, int sreg, int imm)
{
	if (!arm_is_arith_imm (imm)) {
		code = emit_imm (code, ARMREG_LR, imm);
		arm_subx (code, dreg, sreg, ARMREG_LR);
	} else {
		arm_subx_imm (code, dreg, sreg, imm);
	}

	return code;
}
/* Emit sp+=imm. Clobbers ip0/ip1 */
static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_addx_sp_imm (guint8 *code, int imm)
{
	code = emit_imm (code, ARMREG_IP0, imm);
	arm_movspx (code, ARMREG_IP1, ARMREG_SP);
	arm_addx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
	arm_movspx (code, ARMREG_SP, ARMREG_IP1);

	return code;
}

/* Emit sp-=imm. Clobbers ip0/ip1 */
static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_subx_sp_imm (guint8 *code, int imm)
{
	code = emit_imm (code, ARMREG_IP0, imm);
	arm_movspx (code, ARMREG_IP1, ARMREG_SP);
	arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
	arm_movspx (code, ARMREG_SP, ARMREG_IP1);

	return code;
}
static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_andw_imm (guint8 *code, int dreg, int sreg, int imm)
{
	code = emit_imm (code, ARMREG_LR, imm);
	arm_andw (code, dreg, sreg, ARMREG_LR);

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_andx_imm (guint8 *code, int dreg, int sreg, int imm)
{
	code = emit_imm (code, ARMREG_LR, imm);
	arm_andx (code, dreg, sreg, ARMREG_LR);

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_orrw_imm (guint8 *code, int dreg, int sreg, int imm)
{
	code = emit_imm (code, ARMREG_LR, imm);
	arm_orrw (code, dreg, sreg, ARMREG_LR);

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_orrx_imm (guint8 *code, int dreg, int sreg, int imm)
{
	code = emit_imm (code, ARMREG_LR, imm);
	arm_orrx (code, dreg, sreg, ARMREG_LR);

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_eorw_imm (guint8 *code, int dreg, int sreg, int imm)
{
	code = emit_imm (code, ARMREG_LR, imm);
	arm_eorw (code, dreg, sreg, ARMREG_LR);

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_eorx_imm (guint8 *code, int dreg, int sreg, int imm)
{
	code = emit_imm (code, ARMREG_LR, imm);
	arm_eorx (code, dreg, sreg, ARMREG_LR);

	return code;
}
static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_cmpw_imm (guint8 *code, int sreg, int imm)
{
	if (imm == 0) {
		arm_cmpw (code, sreg, ARMREG_RZR);
	} else {
		code = emit_imm (code, ARMREG_LR, imm);
		arm_cmpw (code, sreg, ARMREG_LR);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_cmpx_imm (guint8 *code, int sreg, int imm)
{
	if (imm == 0) {
		arm_cmpx (code, sreg, ARMREG_RZR);
	} else {
		code = emit_imm (code, ARMREG_LR, imm);
		arm_cmpx (code, sreg, ARMREG_LR);
	}

	return code;
}
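/*
 * The emit_strX ()/emit_ldrX () helpers below all follow one pattern: use
 * the scaled 12-bit unsigned immediate form of the store/load when the
 * offset fits, otherwise materialize the offset into ip0 with emit_imm ()
 * and fall back to the register-offset addressing form.
 */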
static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_strb (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_strb_imm (imm)) {
		arm_strb (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_strb_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_strh (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_strh_imm (imm)) {
		arm_strh (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_strh_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_strw (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_strw_imm (imm)) {
		arm_strw (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_strw_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_strfpw (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_strw_imm (imm)) {
		arm_strfpw (code, rt, rn, imm);
	} else {
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
		arm_strfpw (code, rt, ARMREG_IP0, 0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_strfpx (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_strx_imm (imm)) {
		arm_strfpx (code, rt, rn, imm);
	} else {
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
		arm_strfpx (code, rt, ARMREG_IP0, 0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_strx (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_strx_imm (imm)) {
		arm_strx (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_strx_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}
static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrb (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 1)) {
		arm_ldrb (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_ldrb_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrsbx (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 1)) {
		arm_ldrsbx (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_ldrsbx_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrh (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 2)) {
		arm_ldrh (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_ldrh_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrshx (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 2)) {
		arm_ldrshx (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_ldrshx_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrswx (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 4)) {
		arm_ldrswx (code, rt, rn, imm);
	} else {
		g_assert (rt != ARMREG_IP0);
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_ldrswx_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrw (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 4)) {
		arm_ldrw (code, rt, rn, imm);
	} else {
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_ldrw_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrx (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 8)) {
		arm_ldrx (code, rt, rn, imm);
	} else {
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_ldrx_reg (code, rt, rn, ARMREG_IP0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrfpw (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 4)) {
		arm_ldrfpw (code, rt, rn, imm);
	} else {
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
		arm_ldrfpw (code, rt, ARMREG_IP0, 0);
	}

	return code;
}

static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_ldrfpx (guint8 *code, int rt, int rn, int imm)
{
	if (arm_is_pimm12_scaled (imm, 8)) {
		arm_ldrfpx (code, rt, rn, imm);
	} else {
		g_assert (rn != ARMREG_IP0);
		code = emit_imm (code, ARMREG_IP0, imm);
		arm_addx (code, ARMREG_IP0, rn, ARMREG_IP0);
		arm_ldrfpx (code, rt, ARMREG_IP0, 0);
	}

	return code;
}
guint8*
mono_arm_emit_ldrx (guint8 *code, int rt, int rn, int imm)
{
	return emit_ldrx (code, rt, rn, imm);
}
static guint8*
emit_call (MonoCompile *cfg, guint8* code, guint32 patch_type, gconstpointer data)
{
	/*
	mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_IMM);
	code = emit_imm64_template (code, ARMREG_LR);
	arm_blrx (code, ARMREG_LR);
	*/
	mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_BL);
	arm_bl (code, code);
	cfg->thunk_area += THUNK_SIZE;

	return code;
}
static guint8*
emit_aotconst_full (MonoCompile *cfg, MonoJumpInfo **ji, guint8 *code, guint8 *start, int dreg, guint32 patch_type, gconstpointer data)
{
	if (cfg)
		mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
	else
		*ji = mono_patch_info_list_prepend (*ji, code - start, patch_type, data);
	/* See arch_emit_got_access () in aot-compiler.c */
	arm_ldrx_lit (code, dreg, 0);
	arm_nop (code);
	arm_nop (code);

	return code;
}
static guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, guint32 patch_type, gconstpointer data)
{
	return emit_aotconst_full (cfg, NULL, code, NULL, dreg, patch_type, data);
}
/*
 * mono_arm_emit_aotconst:
 *
 *   Emit code to load an AOT constant into DREG. Usable from trampolines.
 */
guint8*
mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *code_start, int dreg, guint32 patch_type, gconstpointer data)
{
	return emit_aotconst_full (NULL, (MonoJumpInfo**)ji, code, code_start, dreg, patch_type, data);
}
gboolean
mono_arch_have_fast_tls (void)
{
#ifdef TARGET_IOS
	return FALSE;
#else
	return TRUE;
#endif
}
static guint8*
emit_tls_get (guint8 *code, int dreg, int tls_offset)
{
	arm_mrs (code, dreg, ARM_MRS_REG_TPIDR_EL0);
	if (tls_offset < 256) {
		arm_ldrx (code, dreg, dreg, tls_offset);
	} else {
		code = emit_addx_imm (code, dreg, dreg, tls_offset);
		arm_ldrx (code, dreg, dreg, 0);
	}

	return code;
}

static guint8*
emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
	int tmpreg = ARMREG_IP0;

	g_assert (sreg != tmpreg);
	arm_mrs (code, tmpreg, ARM_MRS_REG_TPIDR_EL0);
	if (tls_offset < 256) {
		arm_strx (code, sreg, tmpreg, tls_offset);
	} else {
		code = emit_addx_imm (code, tmpreg, tmpreg, tls_offset);
		arm_strx (code, sreg, tmpreg, 0);
	}

	return code;
}
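/*
 * TPIDR_EL0 is the EL0 thread pointer register; on the platforms targeted
 * here it holds the base of the thread-local storage block, so a TLS slot
 * is reached as tpidr_el0 + tls_offset. Offsets below 256 fit the ldrx/strx
 * immediate form used above; larger ones are added explicitly first.
 */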
/*
 * Emits
 * - mov sp, fp
 * - ldp [fp, lr], [sp], !stack_offset
 * Clobbers TEMP_REGS.
 */
__attribute__ ((__warn_unused_result__)) guint8*
mono_arm_emit_destroy_frame (guint8 *code, int stack_offset, guint64 temp_regs)
{
	arm_movspx (code, ARMREG_SP, ARMREG_FP);

	if (arm_is_ldpx_imm (stack_offset)) {
		arm_ldpx_post (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, stack_offset);
	} else {
		arm_ldpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0);
		/* sp += stack_offset */
		g_assert (temp_regs & (1 << ARMREG_IP0));
		if (temp_regs & (1 << ARMREG_IP1)) {
			code = emit_addx_sp_imm (code, stack_offset);
		} else {
			int imm = stack_offset;

			/* Can't use addx_sp_imm () since we can't clobber ip0/ip1 */
			arm_addx_imm (code, ARMREG_IP0, ARMREG_SP, 0);
			while (imm > 256) {
				arm_addx_imm (code, ARMREG_IP0, ARMREG_IP0, 256);
				imm -= 256;
			}
			arm_addx_imm (code, ARMREG_SP, ARMREG_IP0, imm);
		}
	}

	return code;
}
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
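/*
 * 33554432 == 1 << 25; this is a conservative bound on the displacement a
 * b/bl can reach directly. Calls outside this range go through a thunk.
 */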
static guint8*
emit_thunk (guint8 *code, gconstpointer target)
{
	guint8 *p = code;

	arm_ldrx_lit (code, ARMREG_IP0, code + 8);
	arm_brx (code, ARMREG_IP0);
	*(guint64*)code = (guint64)target;
	code += sizeof (guint64);

	mono_arch_flush_icache (p, code - p);
	return code;
}
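/*
 * The thunk layout produced above, THUNK_SIZE (16) bytes in total:
 *   ldr ip0, #8    ; load the 64 bit literal below
 *   br  ip0
 *   .quad target
 * Out-of-range branches are patched to b/bl to such a thunk instead.
 */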
static guint8*
create_thunk (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
{
	MonoJitInfo *ji;
	MonoThunkJitInfo *info;
	guint8 *thunks, *p;
	int thunks_size;
	guint8 *orig_target;
	guint8 *target_thunk;

	if (!domain)
		domain = mono_domain_get ();

	if (cfg) {
		/*
		 * This can be called multiple times during JITting,
		 * save the current position in cfg->arch to avoid
		 * doing a O(n^2) search.
		 */
		if (!cfg->arch.thunks) {
			cfg->arch.thunks = cfg->thunks;
			cfg->arch.thunks_size = cfg->thunk_area;
		}
		thunks = cfg->arch.thunks;
		thunks_size = cfg->arch.thunks_size;
		if (!thunks_size) {
			g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
			g_assert_not_reached ();
		}

		g_assert (*(guint32*)thunks == 0);
		emit_thunk (thunks, target);

		cfg->arch.thunks += THUNK_SIZE;
		cfg->arch.thunks_size -= THUNK_SIZE;

		return thunks;
	} else {
		ji = mini_jit_info_table_find (domain, (char*)code, NULL);
		g_assert (ji);
		info = mono_jit_info_get_thunk_info (ji);
		g_assert (info);

		thunks = (guint8*)ji->code_start + info->thunks_offset;
		thunks_size = info->thunks_size;

		orig_target = mono_arch_get_call_target (code + 4);

		mono_domain_lock (domain);

		target_thunk = NULL;
		if (orig_target >= thunks && orig_target < thunks + thunks_size) {
			/* The call already points to a thunk, because of trampolines etc. */
			target_thunk = orig_target;
		} else {
			for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
				if (((guint32*)p) [0] == 0) {
					/* Free entry */
					target_thunk = p;
					break;
				} else if (((guint64*)p) [1] == (guint64)target) {
					/* Thunk already points to target */
					target_thunk = p;
					break;
				}
			}
		}

		//printf ("THUNK: %p %p %p\n", code, target, target_thunk);

		if (!target_thunk) {
			mono_domain_unlock (domain);
			g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
			g_assert_not_reached ();
		}

		emit_thunk (target_thunk, target);

		mono_domain_unlock (domain);

		return target_thunk;
	}
}
static void
arm_patch_full (MonoCompile *cfg, MonoDomain *domain, guint8 *code, guint8 *target, int relocation)
{
	switch (relocation) {
	case MONO_R_ARM64_B:
		if (arm_is_bl_disp (code, target)) {
			arm_b (code, target);
		} else {
			guint8 *thunk;

			thunk = create_thunk (cfg, domain, code, target);
			g_assert (arm_is_bl_disp (code, thunk));
			arm_b (code, thunk);
		}
		break;
	case MONO_R_ARM64_BCC: {
		int cond;

		cond = arm_get_bcc_cond (code);
		arm_bcc (code, cond, target);
		break;
	}
	case MONO_R_ARM64_CBZ:
		arm_set_cbz_target (code, target);
		break;
	case MONO_R_ARM64_IMM: {
		guint64 imm = (guint64)target;
		int dreg;

		/* emit_imm64_template () */
		dreg = arm_get_movzx_rd (code);
		arm_movzx (code, dreg, imm & 0xffff, 0);
		arm_movkx (code, dreg, (imm >> 16) & 0xffff, 16);
		arm_movkx (code, dreg, (imm >> 32) & 0xffff, 32);
		arm_movkx (code, dreg, (imm >> 48) & 0xffff, 48);
		break;
	}
	case MONO_R_ARM64_BL:
		if (arm_is_bl_disp (code, target)) {
			arm_bl (code, target);
		} else {
			guint8 *thunk;

			thunk = create_thunk (cfg, domain, code, target);
			g_assert (arm_is_bl_disp (code, thunk));
			arm_bl (code, thunk);
		}
		break;
	default:
		g_assert_not_reached ();
	}
}
static void
arm_patch_rel (guint8 *code, guint8 *target, int relocation)
{
	arm_patch_full (NULL, NULL, code, target, relocation);
}

void
mono_arm_patch (guint8 *code, guint8 *target, int relocation)
{
	arm_patch_rel (code, target, relocation);
}
void
mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
	guint8 *ip;

	ip = ji->ip.i + code;

	switch (ji->type) {
	case MONO_PATCH_INFO_METHOD_JUMP:
		/* ji->relocation is not set by the caller */
		arm_patch_full (cfg, domain, ip, (guint8*)target, MONO_R_ARM64_B);
		break;
	default:
		arm_patch_full (cfg, domain, ip, (guint8*)target, ji->relocation);
		break;
	}
}
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}

void
mono_arch_flush_register_windows (void)
{
}

gpointer
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
	return (gpointer)regs [MONO_ARCH_RGCTX_REG];
}

gpointer
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
	return (gpointer)regs [MONO_ARCH_RGCTX_REG];
}

mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	return ctx->regs [reg];
}

void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
{
	ctx->regs [reg] = val;
}
/*
 * mono_arch_set_target:
 *
 *   Set the target architecture the JIT backend should generate code for, in the form
 * of a GNU target triplet. Only used in AOT mode.
 */
void
mono_arch_set_target (char *mtriple)
{
	if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
		ios_abi = TRUE;
	}
}
static void
add_general (CallInfo *cinfo, ArgInfo *ainfo, int size, gboolean sign)
{
	if (cinfo->gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		if (ios_abi) {
			/* Assume size == align */
			cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size);
			ainfo->offset = cinfo->stack_usage;
			ainfo->slot_size = size;
			ainfo->sign = sign;
			cinfo->stack_usage += size;
		} else {
			ainfo->offset = cinfo->stack_usage;
			ainfo->slot_size = 8;
			ainfo->sign = FALSE;
			/* Put arguments into 8 byte aligned stack slots */
			cinfo->stack_usage += 8;
		}
	} else {
		ainfo->storage = ArgInIReg;
		ainfo->reg = cinfo->gr;
		cinfo->gr ++;
	}
}
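/*
 * Example: once the integer argument registers are exhausted, a 2 byte
 * argument occupies a 2 byte stack slot aligned to 2 under the iOS ABI
 * branch above, but a full 8 byte aligned slot under the standard AAPCS64
 * convention handled in the else branch.
 */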
static void
add_fp (CallInfo *cinfo, ArgInfo *ainfo, gboolean single)
{
	int size = single ? 4 : 8;

	if (cinfo->fr >= FP_PARAM_REGS) {
		ainfo->storage = single ? ArgOnStackR4 : ArgOnStackR8;
		if (ios_abi) {
			cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, size);
			ainfo->offset = cinfo->stack_usage;
			ainfo->slot_size = size;
			cinfo->stack_usage += size;
		} else {
			ainfo->offset = cinfo->stack_usage;
			ainfo->slot_size = 8;
			/* Put arguments into 8 byte aligned stack slots */
			cinfo->stack_usage += 8;
		}
	} else {
		if (single)
			ainfo->storage = ArgInFRegR4;
		else
			ainfo->storage = ArgInFReg;
		ainfo->reg = cinfo->fr;
		cinfo->fr ++;
	}
}
static gboolean
is_hfa (MonoType *t, int *out_nfields, int *out_esize, int *field_offsets)
{
	MonoClass *klass;
	gpointer iter;
	MonoClassField *field;
	MonoType *ftype, *prev_ftype = NULL;
	int i, nfields = 0;

	klass = mono_class_from_mono_type (t);
	iter = NULL;
	while ((field = mono_class_get_fields (klass, &iter))) {
		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		ftype = mono_field_get_type (field);
		ftype = mini_get_underlying_type (ftype);

		if (MONO_TYPE_ISSTRUCT (ftype)) {
			int nested_nfields, nested_esize;
			int nested_field_offsets [16];

			if (!is_hfa (ftype, &nested_nfields, &nested_esize, nested_field_offsets))
				return FALSE;
			if (nested_esize == 4)
				ftype = &mono_defaults.single_class->byval_arg;
			else
				ftype = &mono_defaults.double_class->byval_arg;
			if (prev_ftype && prev_ftype->type != ftype->type)
				return FALSE;
			prev_ftype = ftype;
			for (i = 0; i < nested_nfields; ++i) {
				if (nfields + i < 4)
					field_offsets [nfields + i] = field->offset - sizeof (MonoObject) + nested_field_offsets [i];
			}
			nfields += nested_nfields;
		} else {
			if (!(!ftype->byref && (ftype->type == MONO_TYPE_R4 || ftype->type == MONO_TYPE_R8)))
				return FALSE;
			if (prev_ftype && prev_ftype->type != ftype->type)
				return FALSE;
			prev_ftype = ftype;
			if (nfields < 4)
				field_offsets [nfields] = field->offset - sizeof (MonoObject);
			nfields ++;
		}
	}
	if (nfields == 0 || nfields > 4)
		return FALSE;
	*out_nfields = nfields;
	*out_esize = prev_ftype->type == MONO_TYPE_R4 ? 4 : 8;
	return TRUE;
}
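/*
 * Example of an HFA (homogeneous float aggregate): struct { float x, y; }
 * has nfields == 2 and esize == 4 and is passed/returned in two consecutive
 * fp registers. A struct mixing float and double fields, or one with more
 * than 4 fields, is not an HFA.
 */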
static void
add_valuetype (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t)
{
	int i, size, align_size, nregs, nfields, esize;
	int field_offsets [16];
	guint32 align;

	size = mini_type_stack_size_full (t, &align, cinfo->pinvoke);
	align_size = ALIGN_TO (size, 8);

	nregs = align_size / 8;
	if (is_hfa (t, &nfields, &esize, field_offsets)) {
		/*
		 * The struct might include nested float structs aligned at 8,
		 * so need to keep track of the offsets of the individual fields.
		 */
		if (cinfo->fr + nfields <= FP_PARAM_REGS) {
			ainfo->storage = ArgHFA;
			ainfo->reg = cinfo->fr;
			ainfo->nregs = nfields;
			ainfo->size = size;
			ainfo->esize = esize;
			for (i = 0; i < nfields; ++i)
				ainfo->foffsets [i] = field_offsets [i];
			cinfo->fr += ainfo->nregs;
		} else {
			ainfo->nfregs_to_skip = FP_PARAM_REGS > cinfo->fr ? FP_PARAM_REGS - cinfo->fr : 0;
			cinfo->fr = FP_PARAM_REGS;
			size = ALIGN_TO (size, 8);
			ainfo->storage = ArgVtypeOnStack;
			ainfo->offset = cinfo->stack_usage;
			ainfo->size = size;
			ainfo->hfa = TRUE;
			ainfo->nregs = nfields;
			ainfo->esize = esize;
			cinfo->stack_usage += size;
		}
		return;
	}

	if (align_size > 16) {
		ainfo->storage = ArgVtypeByRef;
		ainfo->size = size;
		return;
	}

	if (cinfo->gr + nregs > PARAM_REGS) {
		size = ALIGN_TO (size, 8);
		ainfo->storage = ArgVtypeOnStack;
		ainfo->offset = cinfo->stack_usage;
		ainfo->size = size;
		cinfo->stack_usage += size;
		cinfo->gr = PARAM_REGS;
	} else {
		ainfo->storage = ArgVtypeInIRegs;
		ainfo->reg = cinfo->gr;
		ainfo->nregs = nregs;
		ainfo->size = size;
		cinfo->gr += nregs;
	}
}
static void
add_param (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t)
{
	MonoType *ptype;

	ptype = mini_get_underlying_type (t);
	switch (ptype->type) {
	case MONO_TYPE_I1:
		add_general (cinfo, ainfo, 1, TRUE);
		break;
	case MONO_TYPE_U1:
		add_general (cinfo, ainfo, 1, FALSE);
		break;
	case MONO_TYPE_I2:
		add_general (cinfo, ainfo, 2, TRUE);
		break;
	case MONO_TYPE_U2:
		add_general (cinfo, ainfo, 2, FALSE);
		break;
	case MONO_TYPE_I4:
		add_general (cinfo, ainfo, 4, TRUE);
		break;
	case MONO_TYPE_U4:
		add_general (cinfo, ainfo, 4, FALSE);
		break;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_U8:
	case MONO_TYPE_I8:
		add_general (cinfo, ainfo, 8, FALSE);
		break;
	case MONO_TYPE_R8:
		add_fp (cinfo, ainfo, FALSE);
		break;
	case MONO_TYPE_R4:
		add_fp (cinfo, ainfo, TRUE);
		break;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		add_valuetype (cinfo, ainfo, ptype);
		break;
	case MONO_TYPE_VOID:
		ainfo->storage = ArgNone;
		break;
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (ptype)) {
			add_general (cinfo, ainfo, 8, FALSE);
		} else if (mini_is_gsharedvt_variable_type (ptype)) {
			/*
			 * Treat gsharedvt arguments as large vtypes
			 */
			ainfo->storage = ArgVtypeByRef;
			ainfo->gsharedvt = TRUE;
		} else {
			add_valuetype (cinfo, ainfo, ptype);
		}
		break;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (mini_is_gsharedvt_type (ptype));
		ainfo->storage = ArgVtypeByRef;
		ainfo->gsharedvt = TRUE;
		break;
	default:
		g_assert_not_reached ();
		break;
	}
}
/*
 * get_call_info:
 *
 *   Obtain information about a call according to the calling convention.
 */
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	CallInfo *cinfo;
	ArgInfo *ainfo;
	int n, pstart, pindex;

	n = sig->hasthis + sig->param_count;

	if (mp)
		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	else
		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	cinfo->nargs = n;
	cinfo->pinvoke = sig->pinvoke;

	/* Return value */
	add_param (cinfo, &cinfo->ret, sig->ret);
	if (cinfo->ret.storage == ArgVtypeByRef)
		cinfo->ret.reg = ARMREG_R8;
	/* Reset state */
	cinfo->gr = 0;
	cinfo->fr = 0;
	cinfo->stack_usage = 0;

	/* Parameters */
	pstart = 0;
	if (sig->hasthis)
		add_general (cinfo, cinfo->args + 0, 8, FALSE);

	for (pindex = pstart; pindex < sig->param_count; ++pindex) {
		ainfo = cinfo->args + sig->hasthis + pindex;

		if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) {
			/* Prevent implicit arguments and sig_cookie from
			   being passed in registers */
			cinfo->gr = PARAM_REGS;
			cinfo->fr = FP_PARAM_REGS;
			/* Emit the signature cookie just before the implicit arguments */
			add_param (cinfo, &cinfo->sig_cookie, &mono_defaults.int_class->byval_arg);
		}

		add_param (cinfo, ainfo, sig->params [pindex]);
		if (ainfo->storage == ArgVtypeByRef) {
			/* Pass the argument address in the next register */
			if (cinfo->gr >= PARAM_REGS) {
				ainfo->storage = ArgVtypeByRefOnStack;
				cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8);
				ainfo->offset = cinfo->stack_usage;
				cinfo->stack_usage += 8;
			} else {
				ainfo->reg = cinfo->gr;
				cinfo->gr ++;
			}
		}
	}

	/* Handle the case where there are no implicit arguments */
	if ((sig->call_convention == MONO_CALL_VARARG) && (pindex == sig->sentinelpos)) {
		/* Prevent implicit arguments and sig_cookie from
		   being passed in registers */
		cinfo->gr = PARAM_REGS;
		cinfo->fr = FP_PARAM_REGS;
		/* Emit the signature cookie just before the implicit arguments */
		add_param (cinfo, &cinfo->sig_cookie, &mono_defaults.int_class->byval_arg);
	}

	cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, MONO_ARCH_FRAME_ALIGNMENT);

	return cinfo;
}
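/*
 * Example: for a static signature (int, double, int) the computed CallInfo
 * assigns the first and third arguments to ArgInIReg r0/r1, the double to
 * ArgInFReg d0, and leaves stack_usage at 0 (rounded to the frame
 * alignment).
 */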
typedef struct {
	MonoMethodSignature *sig;
	CallInfo *cinfo;
	MonoType *rtype;
	MonoType **param_types;
	int n_fpargs, n_fpret;
} ArchDynCallInfo;
static gboolean
dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
{
	int i;

	if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
		return FALSE;

	// FIXME: Add more cases
	switch (cinfo->ret.storage) {
	case ArgNone:
	case ArgInIReg:
	case ArgInFReg:
	case ArgInFRegR4:
	case ArgVtypeByRef:
		break;
	case ArgVtypeInIRegs:
		if (cinfo->ret.nregs > 2)
			return FALSE;
		break;
	case ArgHFA:
		break;
	default:
		return FALSE;
	}

	for (i = 0; i < cinfo->nargs; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		switch (ainfo->storage) {
		case ArgInIReg:
		case ArgVtypeInIRegs:
		case ArgInFReg:
		case ArgInFRegR4:
		case ArgHFA:
		case ArgVtypeByRef:
			break;
		case ArgOnStack:
			if (ainfo->offset >= DYN_CALL_STACK_ARGS * sizeof (mgreg_t))
				return FALSE;
			break;
		default:
			return FALSE;
		}
	}

	return TRUE;
}
MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
	ArchDynCallInfo *info;
	CallInfo *cinfo;
	int i;

	cinfo = get_call_info (NULL, sig);

	if (!dyn_call_supported (cinfo, sig)) {
		g_free (cinfo);
		return NULL;
	}

	info = g_new0 (ArchDynCallInfo, 1);
	// FIXME: Preprocess the info to speed up start_dyn_call ()
	info->sig = sig;
	info->cinfo = cinfo;
	info->rtype = mini_get_underlying_type (sig->ret);
	info->param_types = g_new0 (MonoType*, sig->param_count);
	for (i = 0; i < sig->param_count; ++i)
		info->param_types [i] = mini_get_underlying_type (sig->params [i]);

	switch (cinfo->ret.storage) {
	case ArgInFReg:
	case ArgInFRegR4:
		info->n_fpret = 1;
		break;
	case ArgHFA:
		info->n_fpret = cinfo->ret.nregs;
		break;
	default:
		break;
	}

	return (MonoDynCallInfo*)info;
}
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;

	g_free (ainfo->cinfo);
	g_free (ainfo->param_types);
	g_free (ainfo);
}
static double
bitcast_r4_to_r8 (float f)
{
	float *p = &f;

	return *(double*)p;
}

static float
bitcast_r8_to_r4 (double f)
{
	double *p = &f;

	return *(float*)p;
}
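/*
 * These helpers reinterpret the bit pattern of a float as the low bits of a
 * double-sized slot (and back) so that R4 values can travel through the
 * 64 bit fpregs array of DynCallArgs without a numeric conversion.
 */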
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
{
	ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
	DynCallArgs *p = (DynCallArgs*)buf;
	int aindex, arg_index, greg, i, pindex;
	MonoMethodSignature *sig = dinfo->sig;
	CallInfo *cinfo = dinfo->cinfo;
	int buffer_offset = 0;

	g_assert (buf_len >= sizeof (DynCallArgs));

	p->res = 0;
	p->ret = ret;
	p->n_fpargs = dinfo->n_fpargs;
	p->n_fpret = dinfo->n_fpret;

	arg_index = 0;
	greg = 0;
	pindex = 0;

	if (sig->hasthis)
		p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);

	if (cinfo->ret.storage == ArgVtypeByRef)
		p->regs [ARMREG_R8] = (mgreg_t)ret;

	for (aindex = pindex; aindex < sig->param_count; aindex++) {
		MonoType *t = dinfo->param_types [aindex];
		gpointer *arg = args [arg_index ++];
		ArgInfo *ainfo = &cinfo->args [aindex + sig->hasthis];
		int slot = -1;

		if (ainfo->storage == ArgOnStack) {
			slot = PARAM_REGS + 1 + (ainfo->offset / sizeof (mgreg_t));
		} else {
			slot = ainfo->reg;
		}

		if (t->byref) {
			p->regs [slot] = (mgreg_t)*arg;
			continue;
		}

		if (ios_abi && ainfo->storage == ArgOnStack) {
			guint8 *stack_arg = (guint8*)&(p->regs [PARAM_REGS + 1]) + ainfo->offset;
			gboolean handled = TRUE;

			/* Special case arguments smaller than 1 machine word */
			switch (t->type) {
			case MONO_TYPE_U1:
				*(guint8*)stack_arg = *(guint8*)arg;
				break;
			case MONO_TYPE_I1:
				*(gint8*)stack_arg = *(gint8*)arg;
				break;
			case MONO_TYPE_U2:
				*(guint16*)stack_arg = *(guint16*)arg;
				break;
			case MONO_TYPE_I2:
				*(gint16*)stack_arg = *(gint16*)arg;
				break;
			case MONO_TYPE_I4:
				*(gint32*)stack_arg = *(gint32*)arg;
				break;
			case MONO_TYPE_U4:
				*(guint32*)stack_arg = *(guint32*)arg;
				break;
			default:
				handled = FALSE;
				break;
			}
			if (handled)
				continue;
		}

		switch (t->type) {
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_PTR:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			p->regs [slot] = (mgreg_t)*arg;
			break;
		case MONO_TYPE_U1:
			p->regs [slot] = *(guint8*)arg;
			break;
		case MONO_TYPE_I1:
			p->regs [slot] = *(gint8*)arg;
			break;
		case MONO_TYPE_I2:
			p->regs [slot] = *(gint16*)arg;
			break;
		case MONO_TYPE_U2:
			p->regs [slot] = *(guint16*)arg;
			break;
		case MONO_TYPE_I4:
			p->regs [slot] = *(gint32*)arg;
			break;
		case MONO_TYPE_U4:
			p->regs [slot] = *(guint32*)arg;
			break;
		case MONO_TYPE_R4:
			p->fpregs [ainfo->reg] = bitcast_r4_to_r8 (*(float*)arg);
			p->n_fpargs ++;
			break;
		case MONO_TYPE_R8:
			p->fpregs [ainfo->reg] = *(double*)arg;
			p->n_fpargs ++;
			break;
		case MONO_TYPE_GENERICINST:
			if (MONO_TYPE_IS_REFERENCE (t)) {
				p->regs [slot] = (mgreg_t)*arg;
				break;
			} else {
				if (t->type == MONO_TYPE_GENERICINST && mono_class_is_nullable (mono_class_from_mono_type (t))) {
					MonoClass *klass = mono_class_from_mono_type (t);
					guint8 *nullable_buf;
					int size;

					/*
					 * Use p->buffer as a temporary buffer since the data needs to be available after this call
					 * if the nullable param is passed by ref.
					 */
					size = mono_class_value_size (klass, NULL);
					nullable_buf = p->buffer + buffer_offset;
					buffer_offset += size;
					g_assert (buffer_offset <= 256);

					/* The argument pointed to by arg is either a boxed vtype or null */
					mono_nullable_init (nullable_buf, (MonoObject*)arg, klass);

					arg = (gpointer*)nullable_buf;
					/* Fall through */
				}
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
			switch (ainfo->storage) {
			case ArgVtypeInIRegs:
				for (i = 0; i < ainfo->nregs; ++i)
					p->regs [slot ++] = ((mgreg_t*)arg) [i];
				break;
			case ArgHFA:
				if (ainfo->esize == 4) {
					for (i = 0; i < ainfo->nregs; ++i)
						p->fpregs [ainfo->reg + i] = bitcast_r4_to_r8 (((float*)arg) [ainfo->foffsets [i] / 4]);
				} else {
					for (i = 0; i < ainfo->nregs; ++i)
						p->fpregs [ainfo->reg + i] = ((double*)arg) [ainfo->foffsets [i] / 8];
				}
				p->n_fpargs += ainfo->nregs;
				break;
			case ArgVtypeByRef:
				p->regs [slot] = (mgreg_t)arg;
				break;
			default:
				g_assert_not_reached ();
				break;
			}
			break;
		default:
			g_assert_not_reached ();
		}
	}
}
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
	CallInfo *cinfo = ainfo->cinfo;
	DynCallArgs *args = (DynCallArgs*)buf;
	MonoType *ptype = ainfo->rtype;
	guint8 *ret = args->ret;
	mgreg_t res = args->res;
	mgreg_t res2 = args->res2;
	int i;

	if (cinfo->ret.storage == ArgVtypeByRef)
		return;

	switch (ptype->type) {
	case MONO_TYPE_VOID:
		*(gpointer*)ret = NULL;
		break;
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
		*(gpointer*)ret = (gpointer)res;
		break;
	case MONO_TYPE_I1:
		*(gint8*)ret = res;
		break;
	case MONO_TYPE_U1:
		*(guint8*)ret = res;
		break;
	case MONO_TYPE_I2:
		*(gint16*)ret = res;
		break;
	case MONO_TYPE_U2:
		*(guint16*)ret = res;
		break;
	case MONO_TYPE_I4:
		*(gint32*)ret = res;
		break;
	case MONO_TYPE_U4:
		*(guint32*)ret = res;
		break;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		*(guint64*)ret = res;
		break;
	case MONO_TYPE_R4:
		*(float*)ret = bitcast_r8_to_r4 (args->fpregs [0]);
		break;
	case MONO_TYPE_R8:
		*(double*)ret = args->fpregs [0];
		break;
	case MONO_TYPE_GENERICINST:
		if (MONO_TYPE_IS_REFERENCE (ptype)) {
			*(gpointer*)ret = (gpointer)res;
			break;
		}
		/* Fall through */
	case MONO_TYPE_VALUETYPE:
		switch (ainfo->cinfo->ret.storage) {
		case ArgVtypeInIRegs:
			*(mgreg_t*)ret = res;
			if (ainfo->cinfo->ret.nregs > 1)
				((mgreg_t*)ret) [1] = res2;
			break;
		case ArgHFA:
			/* Use the same area for returning fp values */
			if (cinfo->ret.esize == 4) {
				for (i = 0; i < cinfo->ret.nregs; ++i)
					((float*)ret) [cinfo->ret.foffsets [i] / 4] = bitcast_r8_to_r4 (args->fpregs [i]);
			} else {
				for (i = 0; i < cinfo->ret.nregs; ++i)
					((double*)ret) [cinfo->ret.foffsets [i] / 8] = args->fpregs [i];
			}
			break;
		default:
			g_assert_not_reached ();
			break;
		}
		break;
	default:
		g_assert_not_reached ();
	}
}
#ifdef __APPLE__
void sys_icache_invalidate (void *start, size_t len);
#endif
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#ifndef MONO_CROSS_COMPILE
#ifdef __APPLE__
	sys_icache_invalidate (code, size);
#else
	/* Don't rely on GCC's __clear_cache implementation, as it caches
	 * icache/dcache cache line sizes, that can vary between cores on
	 * big.LITTLE architectures. */
	guint64 end = (guint64) (code + size);
	guint64 addr;
	/* always go with cacheline size of 4 bytes as this code isn't perf critical
	 * anyway. Reading the cache line size from a machine register can be racy
	 * on a big.LITTLE architecture if the cores don't have the same cache line
	 * sizes. */
	const size_t icache_line_size = 4;
	const size_t dcache_line_size = 4;

	addr = (guint64) code & ~(guint64) (dcache_line_size - 1);
	for (; addr < end; addr += dcache_line_size)
		asm volatile("dc civac, %0" : : "r" (addr) : "memory");
	asm volatile("dsb ish" : : : "memory");

	addr = (guint64) code & ~(guint64) (icache_line_size - 1);
	for (; addr < end; addr += icache_line_size)
		asm volatile("ic ivau, %0" : : "r" (addr) : "memory");

	asm volatile ("dsb ish" : : : "memory");
	asm volatile ("isb" : : : "memory");
#endif
#endif
}
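/*
 * "dc civac" cleans and invalidates the data cache line holding the freshly
 * written instructions, "ic ivau" invalidates the corresponding instruction
 * cache line, and the dsb/isb barriers make the new code visible to the
 * fetch unit: the standard ARMv8 sequence for self-modifying code.
 */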
gboolean
mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
{
	return FALSE;
}
GList*
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
		    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = g_list_prepend (vars, vmv);
		}
	}

	vars = mono_varlist_sort (cfg, vars, 0);

	return vars;
}
GList*
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;
	int i;

	/* r28 is reserved for cfg->arch.args_reg */
	/* r27 is reserved for the imt argument */
	for (i = ARMREG_R19; i <= ARMREG_R26; ++i)
		regs = g_list_prepend (regs, GUINT_TO_POINTER (i));

	return regs;
}
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	MonoInst *ins = cfg->varinfo [vmv->idx];

	if (ins->opcode == OP_ARG)
		return 1;
	else
		return 2;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);
	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	if (cinfo->ret.storage == ArgVtypeByRef) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		cfg->vret_addr->flags |= MONO_INST_VOLATILE;
	}

	if (cfg->gen_sdb_seq_points) {
		MonoInst *ins;

		if (cfg->compile_aot) {
			ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_info_var = ins;
		}

		ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.ss_tramp_var = ins;

		ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.bp_tramp_var = ins;
	}

	if (cfg->method->save_lmf) {
		cfg->create_lmf_var = TRUE;
	}
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoInst *ins;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	int i, offset, size, align;
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;

	/*
	 * Allocate arguments and locals to either register (OP_REGVAR) or to a stack slot (OP_REGOFFSET).
	 * Compute cfg->stack_offset and update cfg->used_int_regs.
	 */

	sig = mono_method_signature (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	/*
	 * The ARM64 ABI always uses a frame pointer.
	 * The instruction set prefers positive offsets, so fp points to the bottom of the
	 * frame, and stack slots are at positive offsets.
	 * If some arguments are received on the stack, their offsets relative to fp can
	 * not be computed right now because the stack frame might grow due to spilling
	 * done by the local register allocator. To solve this, we reserve a register
	 * which points to them.
	 * The stack frame looks like this:
	 * args_reg -> <bottom of parent frame>
	 *             <locals etc>
	 *       fp -> <saved fp+lr>
	 *       sp -> <localloc/params area>
	 */
	cfg->frame_reg = ARMREG_FP;
	cfg->flags |= MONO_CFG_HAS_SPILLUP;

	offset = 0;

	/* Saved fp+lr */
	offset += 16;

	if (cinfo->stack_usage) {
		g_assert (!(cfg->used_int_regs & (1 << ARMREG_R28)));
		cfg->arch.args_reg = ARMREG_R28;
		cfg->used_int_regs |= 1 << ARMREG_R28;
	}

	if (cfg->method->save_lmf) {
		/* The LMF var is allocated normally */
	} else {
		/* Callee saved regs */
		cfg->arch.saved_gregs_offset = offset;
		for (i = 0; i < 32; ++i)
			if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) && (cfg->used_int_regs & (1 << i)))
				offset += 8;
	}

	/* Return value */
	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
	case ArgInFReg:
	case ArgInFRegR4:
		cfg->ret->opcode = OP_REGVAR;
		cfg->ret->dreg = cinfo->ret.reg;
		break;
	case ArgVtypeInIRegs:
	case ArgHFA:
		/* Allocate a local to hold the result, the epilog will copy it to the correct place */
		cfg->ret->opcode = OP_REGOFFSET;
		cfg->ret->inst_basereg = cfg->frame_reg;
		cfg->ret->inst_offset = offset;
		if (cinfo->ret.storage == ArgHFA)
			offset += 64;
		else
			offset += 16;
		break;
	case ArgVtypeByRef:
		/* This variable will be initalized in the prolog from R8 */
		cfg->vret_addr->opcode = OP_REGOFFSET;
		cfg->vret_addr->inst_basereg = cfg->frame_reg;
		cfg->vret_addr->inst_offset = offset;
		offset += 8;
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr =");
			mono_print_ins (cfg->vret_addr);
		}
		break;
	default:
		g_assert_not_reached ();
		break;
	}

	/* Arguments */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ainfo = cinfo->args + i;

		ins = cfg->args [i];
		if (ins->opcode == OP_REGVAR)
			continue;

		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;

		switch (ainfo->storage) {
		case ArgInIReg:
		case ArgInFReg:
		case ArgInFRegR4:
			// FIXME: Use nregs/size
			/* These will be copied to the stack in the prolog */
			ins->inst_offset = offset;
			offset += 8;
			break;
		case ArgOnStack:
		case ArgOnStackR4:
		case ArgOnStackR8:
		case ArgVtypeOnStack:
			/* These are in the parent frame */
			g_assert (cfg->arch.args_reg);
			ins->inst_basereg = cfg->arch.args_reg;
			ins->inst_offset = ainfo->offset;
			break;
		case ArgVtypeInIRegs:
		case ArgHFA:
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			/* These arguments are saved to the stack in the prolog */
			ins->inst_offset = offset;
			if (cfg->verbose_level >= 2)
				printf ("arg %d allocated to %s+0x%0x.\n", i, mono_arch_regname (ins->inst_basereg), (int)ins->inst_offset);
			if (ainfo->storage == ArgHFA)
				offset += 64;
			else
				offset += 16;
			break;
		case ArgVtypeByRefOnStack: {
			MonoInst *vtaddr;

			if (ainfo->gsharedvt) {
				ins->opcode = OP_REGOFFSET;
				ins->inst_basereg = cfg->arch.args_reg;
				ins->inst_offset = ainfo->offset;
				break;
			}

			/* The vtype address is in the parent frame */
			g_assert (cfg->arch.args_reg);
			MONO_INST_NEW (cfg, vtaddr, 0);
			vtaddr->opcode = OP_REGOFFSET;
			vtaddr->inst_basereg = cfg->arch.args_reg;
			vtaddr->inst_offset = ainfo->offset;

			/* Need an indirection */
			ins->opcode = OP_VTARG_ADDR;
			ins->inst_left = vtaddr;
			break;
		}
		case ArgVtypeByRef: {
			MonoInst *vtaddr;

			if (ainfo->gsharedvt) {
				ins->opcode = OP_REGOFFSET;
				ins->inst_basereg = cfg->frame_reg;
				ins->inst_offset = offset;
				offset += 8;
				break;
			}

			/* The vtype address is in a register, will be copied to the stack in the prolog */
			MONO_INST_NEW (cfg, vtaddr, 0);
			vtaddr->opcode = OP_REGOFFSET;
			vtaddr->inst_basereg = cfg->frame_reg;
			vtaddr->inst_offset = offset;
			offset += 8;

			/* Need an indirection */
			ins->opcode = OP_VTARG_ADDR;
			ins->inst_left = vtaddr;
			break;
		}
		default:
			g_assert_not_reached ();
			break;
		}
	}

	/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
	// FIXME: Allocate these to registers
	ins = cfg->arch.seq_point_info_var;
	if (ins) {
		size = 8;
		align = 8;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	ins = cfg->arch.ss_tramp_var;
	if (ins) {
		size = 8;
		align = 8;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}
	ins = cfg->arch.bp_tramp_var;
	if (ins) {
		size = 8;
		align = 8;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		offset += size;
	}

	/* Locals */
	offsets = mono_allocate_stack_slots (cfg, FALSE, &locals_stack_size, &locals_stack_align);
	if (locals_stack_align)
		offset = ALIGN_TO (offset, locals_stack_align);

	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		if (offsets [i] != -1) {
			ins = cfg->varinfo [i];
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			ins->inst_offset = offset + offsets [i];
			//printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
		}
	}
	offset += locals_stack_size;

	offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	cfg->stack_offset = offset;
}
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
	int i, n;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	LLVMCallInfo *linfo;

	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->mempool, sig);

	linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));

	switch (cinfo->ret.storage) {
	case ArgNone:
	case ArgInIReg:
	case ArgInFReg:
	case ArgInFRegR4:
		break;
	case ArgVtypeByRef:
		linfo->ret.storage = LLVMArgVtypeByRef;
		break;
	case ArgHFA:
		// FIXME: This doesn't work yet since the llvm backend represents these types as an i8
		// array which is returned in int regs
		linfo->ret.storage = LLVMArgFpStruct;
		linfo->ret.nslots = cinfo->ret.nregs;
		linfo->ret.esize = cinfo->ret.esize;
		break;
	case ArgVtypeInIRegs:
		/* LLVM models this by returning an int */
		linfo->ret.storage = LLVMArgVtypeAsScalar;
		linfo->ret.nslots = cinfo->ret.nregs;
		linfo->ret.esize = cinfo->ret.esize;
		break;
	default:
		g_assert_not_reached ();
		break;
	}

	for (i = 0; i < n; ++i) {
		LLVMArgInfo *lainfo = &linfo->args [i];

		ainfo = cinfo->args + i;

		lainfo->storage = LLVMArgNone;

		switch (ainfo->storage) {
		case ArgInIReg:
		case ArgInFReg:
		case ArgInFRegR4:
		case ArgOnStack:
		case ArgOnStackR4:
		case ArgOnStackR8:
			lainfo->storage = LLVMArgNormal;
			break;
		case ArgVtypeByRef:
		case ArgVtypeByRefOnStack:
			lainfo->storage = LLVMArgVtypeByRef;
			break;
		case ArgHFA: {
			int j;

			lainfo->storage = LLVMArgAsFpArgs;
			lainfo->nslots = ainfo->nregs;
			lainfo->esize = ainfo->esize;
			for (j = 0; j < ainfo->nregs; ++j)
				lainfo->pair_storage [j] = LLVMArgInFPReg;
			break;
		}
		case ArgVtypeInIRegs:
			lainfo->storage = LLVMArgAsIArgs;
			lainfo->nslots = ainfo->nregs;
			break;
		case ArgVtypeOnStack:
			if (ainfo->hfa) {
				int j;

				lainfo->storage = LLVMArgAsFpArgs;
				lainfo->nslots = ainfo->nregs;
				lainfo->esize = ainfo->esize;
				lainfo->ndummy_fpargs = ainfo->nfregs_to_skip;
				for (j = 0; j < ainfo->nregs; ++j)
					lainfo->pair_storage [j] = LLVMArgInFPReg;
			} else {
				lainfo->storage = LLVMArgAsIArgs;
				lainfo->nslots = ainfo->size / 8;
			}
			break;
		default:
			g_assert_not_reached ();
			break;
		}
	}

	return linfo;
}
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *arg)
{
	MonoInst *ins;

	switch (storage) {
	case ArgInIReg:
		MONO_INST_NEW (cfg, ins, OP_MOVE);
		ins->dreg = mono_alloc_ireg_copy (cfg, arg->dreg);
		ins->sreg1 = arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
		break;
	case ArgInFReg:
		MONO_INST_NEW (cfg, ins, OP_FMOVE);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
		break;
	case ArgInFRegR4:
		if (COMPILE_LLVM (cfg))
			MONO_INST_NEW (cfg, ins, OP_FMOVE);
		else if (cfg->r4fp)
			MONO_INST_NEW (cfg, ins, OP_RMOVE);
		else
			MONO_INST_NEW (cfg, ins, OP_ARM_SETFREG_R4);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
		break;
	}
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;
	int sig_reg;

	if (call->tail_call)
		NOT_IMPLEMENTED;

	g_assert (cinfo->sig_cookie.storage == ArgOnStack);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	sig_reg = mono_alloc_ireg (cfg);
	MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoMethodSignature *sig;
	MonoInst *arg, *vtarg;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	int i;

	sig = call->signature;

	cinfo = get_call_info (cfg->mempool, sig);

	switch (cinfo->ret.storage) {
	case ArgVtypeInIRegs:
	case ArgHFA:
		/*
		 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
		 * the location pointed to by it after call in emit_move_return_value ().
		 */
		if (!cfg->arch.vret_addr_loc) {
			cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			/* Prevent it from being register allocated or optimized away */
			((MonoInst*)cfg->arch.vret_addr_loc)->flags |= MONO_INST_VOLATILE;
		}

		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ((MonoInst*)cfg->arch.vret_addr_loc)->dreg, call->vret_var->dreg);
		break;
	case ArgVtypeByRef:
		/* Pass the vtype return address in R8 */
		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
		vtarg->sreg1 = call->vret_var->dreg;
		vtarg->dreg = mono_alloc_preg (cfg);
		MONO_ADD_INS (cfg->cbb, vtarg);

		mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
		break;
	default:
		break;
	}

	for (i = 0; i < cinfo->nargs; ++i) {
		ainfo = cinfo->args + i;
		arg = call->args [i];

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
		}

		switch (ainfo->storage) {
		case ArgInIReg:
		case ArgInFReg:
		case ArgInFRegR4:
			add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, arg);
			break;
		case ArgOnStack:
			switch (ainfo->slot_size) {
			case 8:
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
				break;
			case 4:
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
				break;
			case 2:
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
				break;
			case 1:
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
				break;
			default:
				g_assert_not_reached ();
				break;
			}
			break;
		case ArgOnStackR8:
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
			break;
		case ArgOnStackR4:
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, arg->dreg);
			break;
		case ArgVtypeInIRegs:
		case ArgHFA:
		case ArgVtypeByRefOnStack:
		case ArgVtypeOnStack:
		case ArgVtypeByRef: {
			MonoInst *ins;
			guint32 align;
			guint32 size;

			size = mono_class_value_size (arg->klass, &align);

			MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
			ins->sreg1 = arg->dreg;
			ins->klass = arg->klass;
			ins->backend.size = size;
			ins->inst_p0 = call;
			ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
			memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
			MONO_ADD_INS (cfg->cbb, ins);
			break;
		}
		default:
			g_assert_not_reached ();
			break;
		}
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (cinfo->nargs == sig->sentinelpos))
		emit_sig_cookie (cfg, call, cinfo);

	call->call_info = cinfo;
	call->stack_usage = cinfo->stack_usage;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = ins->inst_p1;
	MonoInst *load;
	int i;

	if (ins->backend.size == 0 && !ainfo->gsharedvt)
		return;

	switch (ainfo->storage) {
	case ArgVtypeInIRegs:
		for (i = 0; i < ainfo->nregs; ++i) {
			// FIXME: Smaller sizes
			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->dreg = mono_alloc_ireg (cfg);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * sizeof(mgreg_t);
			MONO_ADD_INS (cfg->cbb, load);
			add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg + i, load);
		}
		break;
	case ArgHFA:
		for (i = 0; i < ainfo->nregs; ++i) {
			if (ainfo->esize == 4)
				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
			else
				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
			load->dreg = mono_alloc_freg (cfg);
			load->inst_basereg = src->dreg;
			load->inst_offset = ainfo->foffsets [i];
			MONO_ADD_INS (cfg->cbb, load);
			add_outarg_reg (cfg, call, ainfo->esize == 4 ? ArgInFRegR4 : ArgInFReg, ainfo->reg + i, load);
		}
		break;
	case ArgVtypeByRef:
	case ArgVtypeByRefOnStack: {
		MonoInst *vtaddr, *load, *arg;

		/* Pass the vtype address in a reg/on the stack */
		if (ainfo->gsharedvt) {
			load = src;
		} else {
			/* Make a copy of the argument */
			vtaddr = mono_compile_create_var (cfg, &ins->klass->byval_arg, OP_LOCAL);

			MONO_INST_NEW (cfg, load, OP_LDADDR);
			load->inst_p0 = vtaddr;
			vtaddr->flags |= MONO_INST_INDIRECT;
			load->type = STACK_MP;
			load->klass = vtaddr->klass;
			load->dreg = mono_alloc_ireg (cfg);
			MONO_ADD_INS (cfg->cbb, load);
			mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, ainfo->size, 8);
		}

		if (ainfo->storage == ArgVtypeByRef) {
			MONO_INST_NEW (cfg, arg, OP_MOVE);
			arg->dreg = mono_alloc_preg (cfg);
			arg->sreg1 = load->dreg;
			MONO_ADD_INS (cfg->cbb, arg);
			add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg, arg);
		} else {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, load->dreg);
		}
		break;
	}
	case ArgVtypeOnStack:
		for (i = 0; i < ainfo->size / 8; ++i) {
			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->dreg = mono_alloc_ireg (cfg);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * 8;
			MONO_ADD_INS (cfg->cbb, load);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset + (i * 8), load->dreg);
		}
		break;
	default:
		g_assert_not_reached ();
		break;
	}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);
	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFRegR4:
		if (COMPILE_LLVM (cfg))
			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		else if (cfg->r4fp)
			MONO_EMIT_NEW_UNALU (cfg, OP_RMOVE, cfg->ret->dreg, val->dreg);
		else
			MONO_EMIT_NEW_UNALU (cfg, OP_ARM_SETFREG_R4, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
	}
}
gboolean
mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
{
	CallInfo *c1, *c2;
	gboolean res;

	if (cfg->compile_aot && !cfg->full_aot)
		/* OP_TAILCALL doesn't work with AOT */
		return FALSE;

	c1 = get_call_info (NULL, caller_sig);
	c2 = get_call_info (NULL, callee_sig);

	res = TRUE;
	// FIXME: Relax these restrictions
	if (c1->stack_usage != 0)
		res = FALSE;
	if (c1->stack_usage != c2->stack_usage)
		res = FALSE;
	if ((c1->ret.storage != ArgNone && c1->ret.storage != ArgInIReg) || c1->ret.storage != c2->ret.storage)
		res = FALSE;

	g_free (c1);
	g_free (c2);

	return res;
}

gboolean
mono_arch_is_inst_imm (gint64 imm)
{
	return (imm >= -((gint64)1 << 31) && imm <= (((gint64)1 << 31) - 1));
}
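/* For instance, imm = 0x7fffffff is accepted while imm = 0x80000000 is not:
 * only values representable as a sign-extended 32 bit immediate pass. */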
void*
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
	NOT_IMPLEMENTED;
	return NULL;
}

void*
mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
{
	NOT_IMPLEMENTED;
	return NULL;
}

void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

#define ADD_NEW_INS(cfg,dest,op) do {	\
		MONO_INST_NEW ((cfg), (dest), (op));	\
		mono_bblock_insert_before_ins (bb, ins, (dest));	\
	} while (0)
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *temp, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS (bb, ins) {
		switch (ins->opcode) {
		case OP_SBB:
		case OP_ISBB:
		case OP_SUBCC:
		case OP_ISUBCC:
			if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
				/* ARM sets the C flag to 1 if there was _no_ overflow */
				ins->next->opcode = OP_COND_EXC_NC;
			break;
		case OP_IDIV_UN_IMM:
		case OP_IREM_UN_IMM:
			mono_decompose_op_imm (cfg, bb, ins);
			break;
		case OP_LOCALLOC_IMM:
			if (ins->inst_imm > 32) {
				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = temp->dreg;
				ins->opcode = mono_op_imm_to_op (ins->opcode);
			}
			break;
		case OP_ICOMPARE_IMM:
			if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBEQ) {
				ins->next->opcode = OP_ARM64_CBZW;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins);
			} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBNE_UN) {
				ins->next->opcode = OP_ARM64_CBNZW;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins);
			}
			break;
		case OP_LCOMPARE_IMM:
		case OP_COMPARE_IMM:
			if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBEQ) {
				ins->next->opcode = OP_ARM64_CBZX;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins);
			} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBNE_UN) {
				ins->next->opcode = OP_ARM64_CBNZX;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins);
			}
			break;
		case OP_FCOMPARE: {
			gboolean swap = FALSE;
			int reg;

			if (!ins->next) {
				/* Optimized away */
				NULLIFY_INS (ins);
				break;
			}

			/*
			 * FP compares with unordered operands set the flags
			 * to NZCV=0011, which matches some non-unordered compares
			 * as well, like LE, so have to swap the operands.
			 */
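			/*
			 * Concretely: after "fcmp a, b" with a NaN operand LE would
			 * wrongly match (NZCV=0011), so "a < b" is rewritten below as
			 * "b > a" (OP_FBLT -> OP_FBGT) with the operands swapped.
			 */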
			switch (ins->next->opcode) {
			case OP_FBLT:
				ins->next->opcode = OP_FBGT;
				swap = TRUE;
				break;
			case OP_FBLE:
				ins->next->opcode = OP_FBGE;
				swap = TRUE;
				break;
			default:
				break;
			}
			if (swap) {
				reg = ins->sreg1;
				ins->sreg1 = ins->sreg2;
				ins->sreg2 = reg;
			}
			break;
		}
		default:
			break;
		}

		last_ins = ins;
	}
	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
}
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
}

static int
opcode_to_armcond (int opcode)
{
	switch (opcode) {
	case OP_COND_EXC_IEQ:
	case OP_COND_EXC_EQ:
		return ARMCOND_EQ;
	case OP_COND_EXC_IGT:
	case OP_COND_EXC_GT:
		return ARMCOND_GT;
	case OP_COND_EXC_ILT:
	case OP_COND_EXC_LT:
		return ARMCOND_LT;
	case OP_COND_EXC_INE_UN:
	case OP_COND_EXC_NE_UN:
		return ARMCOND_NE;
	case OP_COND_EXC_IGE_UN:
	case OP_COND_EXC_GE_UN:
		return ARMCOND_HS;
	case OP_COND_EXC_IGT_UN:
	case OP_COND_EXC_GT_UN:
		return ARMCOND_HI;
	case OP_COND_EXC_ILE_UN:
	case OP_COND_EXC_LE_UN:
		return ARMCOND_LS;
	case OP_COND_EXC_ILT_UN:
	case OP_COND_EXC_LT_UN:
		return ARMCOND_LO;
	/*
	 * FCMP sets the NZCV condition bits as follows:
	 * ARMCOND_LT is N!=V, so it matches unordered too, so
	 * fclt and fclt_un need to be special cased.
	 */
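	/*
	 * E.g. after "fcmp d0, d1": equal yields NZCV=0110, less-than 1000,
	 * greater-than 0010 and unordered 0011, which is why an unordered
	 * result can satisfy the same condition codes as some ordered ones.
	 */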
	case OP_COND_EXC_C:
	case OP_COND_EXC_IC:
		return ARMCOND_CS;
	case OP_COND_EXC_OV:
	case OP_COND_EXC_IOV:
		return ARMCOND_VS;
	case OP_COND_EXC_NC:
	case OP_COND_EXC_INC:
		return ARMCOND_CC;
	case OP_COND_EXC_NO:
	case OP_COND_EXC_INO:
		return ARMCOND_VC;
	default:
		printf ("%s\n", mono_inst_name (opcode));
		g_assert_not_reached ();
		return -1;
	}
}

/* This clobbers LR */
static inline __attribute__ ((__warn_unused_result__)) guint8*
emit_cond_exc (MonoCompile *cfg, guint8 *code, int opcode, const char *exc_name)
{
	int cond;

	cond = opcode_to_armcond (opcode);
	arm_adrx (code, ARMREG_IP1, code);
	mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, exc_name, MONO_R_ARM64_BCC);
	arm_bcc (code, cond, 0);
	return code;
}
static __attribute__ ((__warn_unused_result__)) guint8*
emit_move_return_value (MonoCompile *cfg, guint8 * code, MonoInst *ins)
{
	MonoCallInst *call;
	CallInfo *cinfo;

	call = (MonoCallInst*)ins;
	cinfo = call->call_info;
	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
		/* LLVM compiled code might only set the bottom bits */
		if (call->signature && mini_get_underlying_type (call->signature->ret)->type == MONO_TYPE_I4)
			arm_sxtwx (code, call->inst.dreg, cinfo->ret.reg);
		else if (call->inst.dreg != cinfo->ret.reg)
			arm_movx (code, call->inst.dreg, cinfo->ret.reg);
		break;
	case ArgInFReg:
		if (call->inst.dreg != cinfo->ret.reg)
			arm_fmovd (code, call->inst.dreg, cinfo->ret.reg);
		break;
	case ArgInFRegR4:
		if (cfg->r4fp)
			arm_fmovs (code, call->inst.dreg, cinfo->ret.reg);
		else
			arm_fcvt_sd (code, call->inst.dreg, cinfo->ret.reg);
		break;
	case ArgVtypeInIRegs: {
		MonoInst *loc = cfg->arch.vret_addr_loc;
		int i;

		/* Load the destination address */
		g_assert (loc && loc->opcode == OP_REGOFFSET);
		code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
		for (i = 0; i < cinfo->ret.nregs; ++i)
			arm_strx (code, cinfo->ret.reg + i, ARMREG_LR, i * 8);
		break;
	}
	case ArgHFA: {
		MonoInst *loc = cfg->arch.vret_addr_loc;
		int i;

		/* Load the destination address */
		g_assert (loc && loc->opcode == OP_REGOFFSET);
		code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				arm_strfpw (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
			else
				arm_strfpx (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
		}
		break;
	}
	default:
		g_assert_not_reached ();
		break;
	}
	return code;
}
/*
 * emit_branch_island:
 *
 *   Emit a branch island for the conditional branches from cfg->native_code + start_offset to code.
 */
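/*
 * (Background: bcc/cbz encode a 19 bit signed word displacement, i.e. a
 * reach of roughly +/-1 MiB, so conditional branches whose target may end
 * up further away are routed through an unconditional b in the island.)
 */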
static guint8*
emit_branch_island (MonoCompile *cfg, guint8 *code, int start_offset)
{
	MonoJumpInfo *ji;
	int offset, island_size;

	/* Iterate over the patch infos added so far by this bb */
	island_size = 0;
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->ip.i < start_offset)
			/* The patch infos are in reverse order, so this means the end */
			break;
		if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ)
			island_size += 4;
	}
	if (!island_size)
		return code;

	offset = code - cfg->native_code;
	if (offset > (cfg->code_size - island_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		code = cfg->native_code + offset;
	}

	/* Branch over the island */
	arm_b (code, code + 4 + island_size);

	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->ip.i < start_offset)
			break;
		if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ) {
			/* Rewrite the cond branch so it branches to an unconditional branch in the branch island */
			arm_patch_rel (cfg->native_code + ji->ip.i, code, ji->relocation);
			/* Rewrite the patch so it points to the unconditional branch */
			ji->ip.i = code - cfg->native_code;
			ji->relocation = MONO_R_ARM64_B;
			arm_b (code, code);
		}
	}
	return code;
}
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	MonoCallInst *call;
	guint offset;
	guint8 *code = cfg->native_code + cfg->code_len;
	int start_offset, max_len, dreg, sreg1, sreg2;
	mgreg_t imm;

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	start_offset = code - cfg->native_code;

	MONO_BB_FOR_EACH_INS (bb, ins) {
		offset = code - cfg->native_code;

		max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];

		if (offset > (cfg->code_size - max_len - 16)) {
			cfg->code_size *= 2;
			cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
			code = cfg->native_code + offset;
		}

		if (G_UNLIKELY (cfg->arch.cond_branch_islands && offset - start_offset > 4 * 0x1ffff)) {
			/* Emit a branch island for large basic blocks */
			code = emit_branch_island (cfg, code, start_offset);
			offset = code - cfg->native_code;
			start_offset = offset;
		}

		mono_debug_record_line_number (cfg, ins, offset);

		dreg = ins->dreg;
		sreg1 = ins->sreg1;
		sreg2 = ins->sreg2;
		imm = ins->inst_imm;

		switch (ins->opcode) {
		case OP_ICONST:
			code = emit_imm (code, dreg, ins->inst_c0);
			break;
		case OP_I8CONST:
			code = emit_imm64 (code, dreg, ins->inst_c0);
			break;
		case OP_MOVE:
			if (dreg != sreg1)
				arm_movx (code, dreg, sreg1);
			break;
		case OP_NOP:
		case OP_RELAXED_NOP:
			break;
		case OP_JUMP_TABLE:
			mono_add_patch_info_rel (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0, MONO_R_ARM64_IMM);
			code = emit_imm64_template (code, dreg);
			break;
		case OP_BREAK:
			/*
			 * gdb does not like encountering the hw breakpoint ins in the debugged code.
			 * So instead of emitting a trap, we emit a call to a C function and place a
			 * breakpoint there.
			 */
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_break");
			break;
		case OP_LOCALLOC: {
			guint8 *buf [16];

			arm_addx_imm (code, ARMREG_IP0, sreg1, (MONO_ARCH_FRAME_ALIGNMENT - 1));
			// FIXME: andx_imm doesn't work yet
			code = emit_imm (code, ARMREG_IP1, -MONO_ARCH_FRAME_ALIGNMENT);
			arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
			//arm_andx_imm (code, ARMREG_IP0, sreg1, - MONO_ARCH_FRAME_ALIGNMENT);
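			/* e.g. with MONO_ARCH_FRAME_ALIGNMENT == 16 a request of 40 bytes
			 * becomes (40 + 15) & ~15 == 48, keeping sp 16-byte aligned. */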
			arm_movspx (code, ARMREG_IP1, ARMREG_SP);
			arm_subx (code, ARMREG_IP1, ARMREG_IP1, ARMREG_IP0);
			arm_movspx (code, ARMREG_SP, ARMREG_IP1);

			/* Init */
			/* ip1 = pointer, ip0 = end */
			arm_addx (code, ARMREG_IP0, ARMREG_IP1, ARMREG_IP0);
			buf [0] = code;
			arm_cmpx (code, ARMREG_IP1, ARMREG_IP0);
			buf [1] = code;
			arm_bcc (code, ARMCOND_EQ, 0);
			arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_IP1, 0);
			arm_addx_imm (code, ARMREG_IP1, ARMREG_IP1, 16);
			arm_b (code, buf [0]);
			arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);

			arm_movspx (code, dreg, ARMREG_SP);
			if (cfg->param_area)
				code = emit_subx_sp_imm (code, cfg->param_area);
			break;
		}
		case OP_LOCALLOC_IMM: {
			int imm, offset;

			imm = ALIGN_TO (ins->inst_imm, MONO_ARCH_FRAME_ALIGNMENT);
			g_assert (arm_is_arith_imm (imm));
			arm_subx_imm (code, ARMREG_SP, ARMREG_SP, imm);

			/* Init */
			g_assert (MONO_ARCH_FRAME_ALIGNMENT == 16);
			offset = 0;
			while (offset < imm) {
				arm_stpx (code, ARMREG_RZR, ARMREG_RZR, ARMREG_SP, offset);
				offset += 16;
			}
			arm_movspx (code, dreg, ARMREG_SP);
			if (cfg->param_area)
				code = emit_subx_sp_imm (code, cfg->param_area);
			break;
		}
		case OP_AOTCONST:
			code = emit_aotconst (cfg, code, dreg, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
			break;
		case OP_OBJC_GET_SELECTOR:
			mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
			/* See arch_emit_objc_selector_ref () in aot-compiler.c */
			arm_ldrx_lit (code, ins->dreg, 0);
			break;
		case OP_SEQ_POINT: {
			MonoInst *info_var = cfg->arch.seq_point_info_var;

			/*
			 * For AOT, we use one got slot per method, which will point to a
			 * SeqPointInfo structure, containing all the information required
			 * by the code below.
			 */
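			/*
			 * Each 4-byte instruction slot has its own entry in
			 * SeqPointInfo->bp_addrs, so (native offset / 4) indexes it
			 * directly; that is the "val" computed for the AOT case below.
			 */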
			if (cfg->compile_aot) {
				g_assert (info_var);
				g_assert (info_var->opcode == OP_REGOFFSET);
			}

			if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
				MonoInst *var = cfg->arch.ss_tramp_var;

				g_assert (var);
				g_assert (var->opcode == OP_REGOFFSET);
				/* Load ss_tramp_var */
				/* This is equal to &ss_trampoline */
				arm_ldrx (code, ARMREG_IP1, var->inst_basereg, var->inst_offset);
				/* Load the trampoline address */
				arm_ldrx (code, ARMREG_IP1, ARMREG_IP1, 0);
				/* Call it if it is non-null */
				arm_cbzx (code, ARMREG_IP1, code + 8);
				arm_blrx (code, ARMREG_IP1);
			}

			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);

			if (cfg->compile_aot) {
				guint32 offset = code - cfg->native_code;
				guint32 val;

				arm_ldrx (code, ARMREG_IP1, info_var->inst_basereg, info_var->inst_offset);
				/* Add the offset */
				val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
				/* Load the info->bp_addrs [offset], which is either 0 or the address of the bp trampoline */
				code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP1, val);
				/* Skip the load if it's 0 */
				arm_cbzx (code, ARMREG_IP1, code + 8);
				/* Call the breakpoint trampoline */
				arm_blrx (code, ARMREG_IP1);
			} else {
				MonoInst *var = cfg->arch.bp_tramp_var;

				g_assert (var);
				g_assert (var->opcode == OP_REGOFFSET);
				/* Load the address of the bp trampoline into IP0 */
				arm_ldrx (code, ARMREG_IP0, var->inst_basereg, var->inst_offset);
				/*
				 * A placeholder for a possible breakpoint inserted by
				 * mono_arch_set_breakpoint ().
				 */
				arm_nop (code);
			}
			break;
		}

		/* BRANCH */
		case OP_BR:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_B);
			arm_b (code, code);
			break;
		case OP_BR_REG:
			arm_brx (code, sreg1);
			break;
		case OP_IBEQ:
		case OP_IBGE:
		case OP_IBGT:
		case OP_IBLE:
		case OP_IBLT:
		case OP_IBNE_UN:
		case OP_IBGE_UN:
		case OP_IBGT_UN:
		case OP_IBLE_UN:
		case OP_IBLT_UN:
		case OP_LBEQ:
		case OP_LBGE:
		case OP_LBGT:
		case OP_LBLE:
		case OP_LBLT:
		case OP_LBNE_UN:
		case OP_LBGE_UN:
		case OP_LBGT_UN:
		case OP_LBLE_UN:
		case OP_LBLT_UN:
		case OP_FBEQ:
		case OP_FBNE_UN:
		case OP_FBGE:
		case OP_FBGT: {
			int cond;

			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
			cond = opcode_to_armcond (ins->opcode);
			arm_bcc (code, cond, 0);
			break;
		}
		case OP_FBLT:
		case OP_FBLT_UN:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
			/* For fp compares, ARMCOND_LT is lt or unordered */
			arm_bcc (code, ARMCOND_LT, 0);
			break;
		case OP_FBLE_UN:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
			arm_bcc (code, ARMCOND_EQ, 0);
			offset = code - cfg->native_code;
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_BCC);
			/* For fp compares, ARMCOND_LT is lt or unordered */
			arm_bcc (code, ARMCOND_LT, 0);
			break;
		case OP_ARM64_CBZW:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
			arm_cbzw (code, sreg1, 0);
			break;
		case OP_ARM64_CBZX:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
			arm_cbzx (code, sreg1, 0);
			break;
		case OP_ARM64_CBNZW:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
			arm_cbnzw (code, sreg1, 0);
			break;
		case OP_ARM64_CBNZX:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_true_bb, MONO_R_ARM64_CBZ);
			arm_cbnzx (code, sreg1, 0);
			break;

		/* ALU */
		case OP_IADD:
			arm_addw (code, dreg, sreg1, sreg2);
			break;
		case OP_LADD:
			arm_addx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISUB:
			arm_subw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSUB:
			arm_subx (code, dreg, sreg1, sreg2);
			break;
		case OP_IAND:
			arm_andw (code, dreg, sreg1, sreg2);
			break;
		case OP_LAND:
			arm_andx (code, dreg, sreg1, sreg2);
			break;
		case OP_IOR:
			arm_orrw (code, dreg, sreg1, sreg2);
			break;
		case OP_LOR:
			arm_orrx (code, dreg, sreg1, sreg2);
			break;
		case OP_IXOR:
			arm_eorw (code, dreg, sreg1, sreg2);
			break;
		case OP_LXOR:
			arm_eorx (code, dreg, sreg1, sreg2);
			break;
		case OP_INEG:
			arm_negw (code, dreg, sreg1);
			break;
		case OP_LNEG:
			arm_negx (code, dreg, sreg1);
			break;
		case OP_INOT:
			arm_mvnw (code, dreg, sreg1);
			break;
		case OP_LNOT:
			arm_mvnx (code, dreg, sreg1);
			break;
		case OP_IADDCC:
			arm_addsw (code, dreg, sreg1, sreg2);
			break;
		case OP_ADDCC:
		case OP_LADDCC:
			arm_addsx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISUBCC:
			arm_subsw (code, dreg, sreg1, sreg2);
			break;
		case OP_SUBCC:
		case OP_LSUBCC:
			arm_subsx (code, dreg, sreg1, sreg2);
			break;
		case OP_ICOMPARE:
			arm_cmpw (code, sreg1, sreg2);
			break;
		case OP_COMPARE:
		case OP_LCOMPARE:
			arm_cmpx (code, sreg1, sreg2);
			break;
		case OP_IADD_IMM:
			code = emit_addw_imm (code, dreg, sreg1, imm);
			break;
		case OP_ADD_IMM:
		case OP_LADD_IMM:
			code = emit_addx_imm (code, dreg, sreg1, imm);
			break;
		case OP_ISUB_IMM:
			code = emit_subw_imm (code, dreg, sreg1, imm);
			break;
		case OP_LSUB_IMM:
			code = emit_subx_imm (code, dreg, sreg1, imm);
			break;
		case OP_IAND_IMM:
			code = emit_andw_imm (code, dreg, sreg1, imm);
			break;
		case OP_AND_IMM:
		case OP_LAND_IMM:
			code = emit_andx_imm (code, dreg, sreg1, imm);
			break;
		case OP_IOR_IMM:
			code = emit_orrw_imm (code, dreg, sreg1, imm);
			break;
		case OP_LOR_IMM:
			code = emit_orrx_imm (code, dreg, sreg1, imm);
			break;
		case OP_IXOR_IMM:
			code = emit_eorw_imm (code, dreg, sreg1, imm);
			break;
		case OP_LXOR_IMM:
			code = emit_eorx_imm (code, dreg, sreg1, imm);
			break;
		case OP_ICOMPARE_IMM:
			code = emit_cmpw_imm (code, sreg1, imm);
			break;
		case OP_LCOMPARE_IMM:
		case OP_COMPARE_IMM:
			if (imm == 0) {
				arm_cmpx (code, sreg1, ARMREG_RZR);
			} else {
				// FIXME: 32 vs 64 bit issues for 0xffffffff
				code = emit_imm64 (code, ARMREG_LR, imm);
				arm_cmpx (code, sreg1, ARMREG_LR);
			}
			break;
		case OP_ISHL:
			arm_lslvw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSHL:
			arm_lslvx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISHR:
			arm_asrvw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSHR:
			arm_asrvx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISHR_UN:
			arm_lsrvw (code, dreg, sreg1, sreg2);
			break;
		case OP_LSHR_UN:
			arm_lsrvx (code, dreg, sreg1, sreg2);
			break;
		case OP_ISHL_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_lslw (code, dreg, sreg1, imm);
			break;
		case OP_LSHL_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_lslx (code, dreg, sreg1, imm);
			break;
		case OP_ISHR_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_asrw (code, dreg, sreg1, imm);
			break;
		case OP_LSHR_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_asrx (code, dreg, sreg1, imm);
			break;
		case OP_ISHR_UN_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_lsrw (code, dreg, sreg1, imm);
			break;
		case OP_LSHR_UN_IMM:
			if (imm == 0)
				arm_movx (code, dreg, sreg1);
			else
				arm_lsrx (code, dreg, sreg1, imm);
			break;
		case OP_SEXT_I4:
			arm_sxtwx (code, dreg, sreg1);
			break;
		case OP_ZEXT_I4:
			/* Clean out the upper word */
			arm_movw (code, dreg, sreg1);
			break;
		case OP_SHL_IMM:
			arm_lslx (code, dreg, sreg1, imm);
			break;

		/* MULTIPLY/DIVISION */
		case OP_IDIV:
		case OP_IREM:
			// FIXME: Optimize this
			/* Check for zero */
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			/* Check for INT_MIN/-1 */
			code = emit_imm (code, ARMREG_IP0, 0x80000000);
			arm_cmpx (code, sreg1, ARMREG_IP0);
			arm_cset (code, ARMCOND_EQ, ARMREG_IP1);
			code = emit_imm (code, ARMREG_IP0, 0xffffffff);
			arm_cmpx (code, sreg2, ARMREG_IP0);
			arm_cset (code, ARMCOND_EQ, ARMREG_IP0);
			arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
			arm_cmpx_imm (code, ARMREG_IP0, 1);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "OverflowException");
			if (ins->opcode == OP_IREM) {
				arm_sdivw (code, ARMREG_LR, sreg1, sreg2);
				arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1);
			} else {
				arm_sdivw (code, dreg, sreg1, sreg2);
			}
			break;
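			/*
			 * Rationale for the checks above: INT32_MIN / -1 is the one
			 * signed 32 bit division that overflows (2^31 is not
			 * representable), so it must raise OverflowException instead
			 * of executing sdiv.
			 */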
		case OP_IDIV_UN:
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			arm_udivw (code, dreg, sreg1, sreg2);
			break;
		case OP_IREM_UN:
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			arm_udivw (code, ARMREG_LR, sreg1, sreg2);
			arm_msubw (code, dreg, ARMREG_LR, sreg2, sreg1);
			break;
		case OP_LDIV:
		case OP_LREM:
			// FIXME: Optimize this
			/* Check for zero */
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			/* Check for INT64_MIN/-1 */
			code = emit_imm64 (code, ARMREG_IP0, 0x8000000000000000);
			arm_cmpx (code, sreg1, ARMREG_IP0);
			arm_cset (code, ARMCOND_EQ, ARMREG_IP1);
			code = emit_imm64 (code, ARMREG_IP0, 0xffffffffffffffff);
			arm_cmpx (code, sreg2, ARMREG_IP0);
			arm_cset (code, ARMCOND_EQ, ARMREG_IP0);
			arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1);
			arm_cmpx_imm (code, ARMREG_IP0, 1);
			/* 64 bit uses ArithmeticException */
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "ArithmeticException");
			if (ins->opcode == OP_LREM) {
				arm_sdivx (code, ARMREG_LR, sreg1, sreg2);
				arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1);
			} else {
				arm_sdivx (code, dreg, sreg1, sreg2);
			}
			break;
		case OP_LDIV_UN:
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			arm_udivx (code, dreg, sreg1, sreg2);
			break;
		case OP_LREM_UN:
			arm_cmpx_imm (code, sreg2, 0);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_IEQ, "DivideByZeroException");
			arm_udivx (code, ARMREG_LR, sreg1, sreg2);
			arm_msubx (code, dreg, ARMREG_LR, sreg2, sreg1);
			break;
		case OP_IMUL:
			arm_mulw (code, dreg, sreg1, sreg2);
			break;
		case OP_LMUL:
			arm_mulx (code, dreg, sreg1, sreg2);
			break;
		case OP_IMUL_IMM:
			code = emit_imm (code, ARMREG_LR, imm);
			arm_mulw (code, dreg, sreg1, ARMREG_LR);
			break;
		case OP_MUL_IMM:
		case OP_LMUL_IMM:
			code = emit_imm (code, ARMREG_LR, imm);
			arm_mulx (code, dreg, sreg1, ARMREG_LR);
			break;

		/* CONVERSIONS */
		case OP_ICONV_TO_I1:
		case OP_LCONV_TO_I1:
			arm_sxtbx (code, dreg, sreg1);
			break;
		case OP_ICONV_TO_I2:
		case OP_LCONV_TO_I2:
			arm_sxthx (code, dreg, sreg1);
			break;
		case OP_ICONV_TO_U1:
		case OP_LCONV_TO_U1:
			arm_uxtbw (code, dreg, sreg1);
			break;
		case OP_ICONV_TO_U2:
		case OP_LCONV_TO_U2:
			arm_uxthw (code, dreg, sreg1);
			break;

		/* CSET */
		case OP_CEQ:
		case OP_ICEQ:
		case OP_LCEQ:
		case OP_CLT:
		case OP_ICLT:
		case OP_LCLT:
		case OP_CGT:
		case OP_ICGT:
		case OP_LCGT:
		case OP_CLT_UN:
		case OP_ICLT_UN:
		case OP_LCLT_UN:
		case OP_CGT_UN:
		case OP_ICGT_UN:
		case OP_LCGT_UN: {
			int cond;

			cond = opcode_to_armcond (ins->opcode);
			arm_cset (code, cond, dreg);
			break;
		}
		case OP_FCEQ:
		case OP_FCLT:
		case OP_FCLT_UN:
		case OP_FCGT:
		case OP_FCGT_UN:
		case OP_FCNEQ:
		case OP_FCLE:
		case OP_FCGE: {
			int cond;

			cond = opcode_to_armcond (ins->opcode);
			arm_fcmpd (code, sreg1, sreg2);
			arm_cset (code, cond, dreg);
			break;
		}

		/* MEMORY */
		case OP_LOADI1_MEMBASE:
			code = emit_ldrsbx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADU1_MEMBASE:
			code = emit_ldrb (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADI2_MEMBASE:
			code = emit_ldrshx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADU2_MEMBASE:
			code = emit_ldrh (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADI4_MEMBASE:
			code = emit_ldrswx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADU4_MEMBASE:
			code = emit_ldrw (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI8_MEMBASE:
			code = emit_ldrx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI8_MEMBASE_IMM: {
			int immreg;

			if (imm != 0) {
				code = emit_imm (code, ARMREG_LR, imm);
				immreg = ARMREG_LR;
			} else {
				immreg = ARMREG_RZR;
			}

			switch (ins->opcode) {
			case OP_STOREI1_MEMBASE_IMM:
				code = emit_strb (code, immreg, ins->inst_destbasereg, ins->inst_offset);
				break;
			case OP_STOREI2_MEMBASE_IMM:
				code = emit_strh (code, immreg, ins->inst_destbasereg, ins->inst_offset);
				break;
			case OP_STOREI4_MEMBASE_IMM:
				code = emit_strw (code, immreg, ins->inst_destbasereg, ins->inst_offset);
				break;
			case OP_STORE_MEMBASE_IMM:
			case OP_STOREI8_MEMBASE_IMM:
				code = emit_strx (code, immreg, ins->inst_destbasereg, ins->inst_offset);
				break;
			default:
				g_assert_not_reached ();
			}
			break;
		}
		case OP_STOREI1_MEMBASE_REG:
			code = emit_strb (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_STOREI2_MEMBASE_REG:
			code = emit_strh (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_STOREI4_MEMBASE_REG:
			code = emit_strw (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_STORE_MEMBASE_REG:
		case OP_STOREI8_MEMBASE_REG:
			code = emit_strx (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_TLS_GET:
			code = emit_tls_get (code, dreg, ins->inst_offset);
			break;
		case OP_TLS_SET:
			code = emit_tls_set (code, sreg1, ins->inst_offset);
			break;

		/* ATOMIC */
		case OP_MEMORY_BARRIER:
			arm_dmb (code, ARM_DMB_ISH);
			break;
		case OP_ATOMIC_ADD_I4: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrw (code, ARMREG_IP0, sreg1);
			arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2);
			arm_stlxrw (code, ARMREG_IP1, ARMREG_IP0, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_ADD_I8: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrx (code, ARMREG_IP0, sreg1);
			arm_addx (code, ARMREG_IP0, ARMREG_IP0, sreg2);
			arm_stlxrx (code, ARMREG_IP1, ARMREG_IP0, sreg1);
			arm_cbnzx (code, ARMREG_IP1, buf [0]);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_EXCHANGE_I4: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrw (code, ARMREG_IP0, sreg1);
			arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_EXCHANGE_I8: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrx (code, ARMREG_IP0, sreg1);
			arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_CAS_I4: {
			guint8 *buf [16];

			/* sreg2 is the value, sreg3 is the comparand */
			buf [0] = code;
			arm_ldxrw (code, ARMREG_IP0, sreg1);
			arm_cmpw (code, ARMREG_IP0, ins->sreg3);
			buf [1] = code;
			arm_bcc (code, ARMCOND_NE, 0);
			arm_stlxrw (code, ARMREG_IP1, sreg2, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);
			arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
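		/*
		 * The sequence above is a classic load-exclusive/store-exclusive
		 * retry loop: ldxr reads the current value, the compare skips the
		 * store on mismatch, and a failed stlxr (ip1 != 0) loops back to
		 * re-read; the trailing dmb upgrades it to a full barrier.
		 */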
		case OP_ATOMIC_CAS_I8: {
			guint8 *buf [16];

			buf [0] = code;
			arm_ldxrx (code, ARMREG_IP0, sreg1);
			arm_cmpx (code, ARMREG_IP0, ins->sreg3);
			buf [1] = code;
			arm_bcc (code, ARMCOND_NE, 0);
			arm_stlxrx (code, ARMREG_IP1, sreg2, sreg1);
			arm_cbnzw (code, ARMREG_IP1, buf [0]);
			arm_patch_rel (buf [1], code, MONO_R_ARM64_BCC);

			arm_dmb (code, ARM_DMB_ISH);
			arm_movx (code, dreg, ARMREG_IP0);
			break;
		}
		case OP_ATOMIC_LOAD_I1: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarb (code, ins->dreg, ARMREG_LR);
			arm_sxtbx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_U1: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarb (code, ins->dreg, ARMREG_LR);
			arm_uxtbx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_I2: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarh (code, ins->dreg, ARMREG_LR);
			arm_sxthx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_U2: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarh (code, ins->dreg, ARMREG_LR);
			arm_uxthx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_I4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarw (code, ins->dreg, ARMREG_LR);
			arm_sxtwx (code, ins->dreg, ins->dreg);
			break;
		}
		case OP_ATOMIC_LOAD_U4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarw (code, ins->dreg, ARMREG_LR);
			arm_movw (code, ins->dreg, ins->dreg); /* Clear upper half of the register. */
			break;
		}
		case OP_ATOMIC_LOAD_I8:
		case OP_ATOMIC_LOAD_U8: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarx (code, ins->dreg, ARMREG_LR);
			break;
		}
		case OP_ATOMIC_LOAD_R4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			if (cfg->r4fp) {
				arm_ldarw (code, ARMREG_LR, ARMREG_LR);
				arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
			} else {
				arm_ldarw (code, ARMREG_LR, ARMREG_LR);
				arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
				arm_fcvt_sd (code, ins->dreg, FP_TEMP_REG);
			}
			break;
		}
		case OP_ATOMIC_LOAD_R8: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_basereg, ins->inst_offset);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			arm_ldarx (code, ARMREG_LR, ARMREG_LR);
			arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
			break;
		}
		case OP_ATOMIC_STORE_I1:
		case OP_ATOMIC_STORE_U1: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_stlrb (code, ARMREG_LR, ins->sreg1);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_I2:
		case OP_ATOMIC_STORE_U2: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_stlrh (code, ARMREG_LR, ins->sreg1);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_I4:
		case OP_ATOMIC_STORE_U4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_stlrw (code, ARMREG_LR, ins->sreg1);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_I8:
		case OP_ATOMIC_STORE_U8: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_stlrx (code, ARMREG_LR, ins->sreg1);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_R4: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			if (cfg->r4fp) {
				arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1);
				arm_stlrw (code, ARMREG_LR, ARMREG_IP0);
			} else {
				arm_fcvt_ds (code, FP_TEMP_REG, ins->sreg1);
				arm_fmov_double_to_rx (code, ARMREG_IP0, FP_TEMP_REG);
				arm_stlrw (code, ARMREG_LR, ARMREG_IP0);
			}
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}
		case OP_ATOMIC_STORE_R8: {
			code = emit_addx_imm (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
			arm_fmov_double_to_rx (code, ARMREG_IP0, ins->sreg1);
			arm_stlrx (code, ARMREG_LR, ARMREG_IP0);
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				arm_dmb (code, ARM_DMB_ISH);
			break;
		}

		/* FP */
		case OP_R8CONST: {
			guint64 imm = *(guint64*)ins->inst_p0;

			if (imm == 0)
				arm_fmov_rx_to_double (code, dreg, ARMREG_RZR);
			else {
				code = emit_imm64 (code, ARMREG_LR, imm);
				arm_fmov_rx_to_double (code, ins->dreg, ARMREG_LR);
			}
			break;
		}
		case OP_R4CONST: {
			guint64 imm = *(guint32*)ins->inst_p0;

			code = emit_imm64 (code, ARMREG_LR, imm);
			if (cfg->r4fp) {
				arm_fmov_rx_to_double (code, dreg, ARMREG_LR);
			} else {
				arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		}
		case OP_LOADR8_MEMBASE:
			code = emit_ldrfpx (code, dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_LOADR4_MEMBASE:
			if (cfg->r4fp) {
				code = emit_ldrfpw (code, dreg, ins->inst_basereg, ins->inst_offset);
			} else {
				code = emit_ldrfpw (code, FP_TEMP_REG, ins->inst_basereg, ins->inst_offset);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		case OP_STORER8_MEMBASE_REG:
			code = emit_strfpx (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			break;
		case OP_STORER4_MEMBASE_REG:
			if (cfg->r4fp) {
				code = emit_strfpw (code, sreg1, ins->inst_destbasereg, ins->inst_offset);
			} else {
				arm_fcvt_ds (code, FP_TEMP_REG, sreg1);
				code = emit_strfpw (code, FP_TEMP_REG, ins->inst_destbasereg, ins->inst_offset);
			}
			break;
		case OP_FMOVE:
			if (dreg != sreg1)
				arm_fmovd (code, dreg, sreg1);
			break;
		case OP_RMOVE:
			if (dreg != sreg1)
				arm_fmovs (code, dreg, sreg1);
			break;
		case OP_MOVE_F_TO_I4:
			if (cfg->r4fp) {
				arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1);
			} else {
				arm_fcvt_ds (code, ins->dreg, ins->sreg1);
				arm_fmov_double_to_rx (code, ins->dreg, ins->dreg);
			}
			break;
		case OP_MOVE_I4_TO_F:
			if (cfg->r4fp) {
				arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
			} else {
				arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
				arm_fcvt_sd (code, ins->dreg, ins->dreg);
			}
			break;
		case OP_MOVE_F_TO_I8:
			arm_fmov_double_to_rx (code, ins->dreg, ins->sreg1);
			break;
		case OP_MOVE_I8_TO_F:
			arm_fmov_rx_to_double (code, ins->dreg, ins->sreg1);
			break;
		case OP_FCOMPARE:
			arm_fcmpd (code, sreg1, sreg2);
			break;
		case OP_RCOMPARE:
			arm_fcmps (code, sreg1, sreg2);
			break;
		case OP_FCONV_TO_I1:
			arm_fcvtzs_dx (code, dreg, sreg1);
			arm_sxtbx (code, dreg, dreg);
			break;
		case OP_FCONV_TO_U1:
			arm_fcvtzu_dx (code, dreg, sreg1);
			arm_uxtbw (code, dreg, dreg);
			break;
		case OP_FCONV_TO_I2:
			arm_fcvtzs_dx (code, dreg, sreg1);
			arm_sxthx (code, dreg, dreg);
			break;
		case OP_FCONV_TO_U2:
			arm_fcvtzu_dx (code, dreg, sreg1);
			arm_uxthw (code, dreg, dreg);
			break;
		case OP_FCONV_TO_I4:
			arm_fcvtzs_dx (code, dreg, sreg1);
			arm_sxtwx (code, dreg, dreg);
			break;
		case OP_FCONV_TO_U4:
			arm_fcvtzu_dx (code, dreg, sreg1);
			break;
		case OP_FCONV_TO_I8:
			arm_fcvtzs_dx (code, dreg, sreg1);
			break;
		case OP_FCONV_TO_U8:
			arm_fcvtzu_dx (code, dreg, sreg1);
			break;
		case OP_FCONV_TO_R4:
			if (cfg->r4fp) {
				arm_fcvt_ds (code, dreg, sreg1);
			} else {
				arm_fcvt_ds (code, FP_TEMP_REG, sreg1);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		case OP_ICONV_TO_R4:
			if (cfg->r4fp) {
				arm_scvtf_rw_to_s (code, dreg, sreg1);
			} else {
				arm_scvtf_rw_to_s (code, FP_TEMP_REG, sreg1);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		case OP_LCONV_TO_R4:
			if (cfg->r4fp) {
				arm_scvtf_rx_to_s (code, dreg, sreg1);
			} else {
				arm_scvtf_rx_to_s (code, FP_TEMP_REG, sreg1);
				arm_fcvt_sd (code, dreg, FP_TEMP_REG);
			}
			break;
		case OP_ICONV_TO_R8:
			arm_scvtf_rw_to_d (code, dreg, sreg1);
			break;
		case OP_LCONV_TO_R8:
			arm_scvtf_rx_to_d (code, dreg, sreg1);
			break;
		case OP_ICONV_TO_R_UN:
			arm_ucvtf_rw_to_d (code, dreg, sreg1);
			break;
		case OP_LCONV_TO_R_UN:
			arm_ucvtf_rx_to_d (code, dreg, sreg1);
			break;
		case OP_FADD:
			arm_fadd_d (code, dreg, sreg1, sreg2);
			break;
		case OP_FSUB:
			arm_fsub_d (code, dreg, sreg1, sreg2);
			break;
		case OP_FMUL:
			arm_fmul_d (code, dreg, sreg1, sreg2);
			break;
		case OP_FDIV:
			arm_fdiv_d (code, dreg, sreg1, sreg2);
			break;
		case OP_FREM:
			g_assert_not_reached ();
			break;
		case OP_FNEG:
			arm_fneg_d (code, dreg, sreg1);
			break;
		case OP_ARM_SETFREG_R4:
			arm_fcvt_ds (code, dreg, sreg1);
			break;
		case OP_CKFINITE:
			/* Check for infinity */
			code = emit_imm64 (code, ARMREG_LR, 0x7fefffffffffffffLL);
			arm_fmov_rx_to_double (code, FP_TEMP_REG, ARMREG_LR);
			arm_fabs_d (code, FP_TEMP_REG2, sreg1);
			arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_GT, "ArithmeticException");
			/* Check for nans */
			arm_fcmpd (code, FP_TEMP_REG2, FP_TEMP_REG2);
			code = emit_cond_exc (cfg, code, OP_COND_EXC_OV, "ArithmeticException");
			arm_fmovd (code, dreg, sreg1);
			break;

		/* R4 */
		case OP_RADD:
			arm_fadd_s (code, dreg, sreg1, sreg2);
			break;
		case OP_RSUB:
			arm_fsub_s (code, dreg, sreg1, sreg2);
			break;
		case OP_RMUL:
			arm_fmul_s (code, dreg, sreg1, sreg2);
			break;
		case OP_RDIV:
			arm_fdiv_s (code, dreg, sreg1, sreg2);
			break;
		case OP_RNEG:
			arm_fneg_s (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_I1:
			arm_fcvtzs_sx (code, dreg, sreg1);
			arm_sxtbx (code, dreg, dreg);
			break;
		case OP_RCONV_TO_U1:
			arm_fcvtzu_sx (code, dreg, sreg1);
			arm_uxtbw (code, dreg, dreg);
			break;
		case OP_RCONV_TO_I2:
			arm_fcvtzs_sx (code, dreg, sreg1);
			arm_sxthx (code, dreg, dreg);
			break;
		case OP_RCONV_TO_U2:
			arm_fcvtzu_sx (code, dreg, sreg1);
			arm_uxthw (code, dreg, dreg);
			break;
		case OP_RCONV_TO_I4:
			arm_fcvtzs_sx (code, dreg, sreg1);
			arm_sxtwx (code, dreg, dreg);
			break;
		case OP_RCONV_TO_U4:
			arm_fcvtzu_sx (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_I8:
			arm_fcvtzs_sx (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_U8:
			arm_fcvtzu_sx (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_R8:
			arm_fcvt_sd (code, dreg, sreg1);
			break;
		case OP_RCONV_TO_R4:
			if (dreg != sreg1)
				arm_fmovs (code, dreg, sreg1);
			break;
		case OP_RCEQ:
		case OP_RCLT:
		case OP_RCLT_UN:
		case OP_RCGT:
		case OP_RCGT_UN:
		case OP_RCNEQ:
		case OP_RCLE:
		case OP_RCGE: {
			int cond;

			cond = opcode_to_armcond (ins->opcode);
			arm_fcmps (code, sreg1, sreg2);
			arm_cset (code, cond, dreg);
			break;
		}

		/* CALLS */
		case OP_VOIDCALL:
		case OP_CALL:
		case OP_LCALL:
		case OP_FCALL:
		case OP_RCALL:
		case OP_VCALL2:
			call = (MonoCallInst*)ins;
			if (ins->flags & MONO_INST_HAS_METHOD)
				code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
			else
				code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
			code = emit_move_return_value (cfg, code, ins);
			break;
		case OP_VOIDCALL_REG:
		case OP_CALL_REG:
		case OP_LCALL_REG:
		case OP_FCALL_REG:
		case OP_RCALL_REG:
		case OP_VCALL2_REG:
			arm_blrx (code, sreg1);
			code = emit_move_return_value (cfg, code, ins);
			break;
		case OP_VOIDCALL_MEMBASE:
		case OP_CALL_MEMBASE:
		case OP_LCALL_MEMBASE:
		case OP_FCALL_MEMBASE:
		case OP_RCALL_MEMBASE:
		case OP_VCALL2_MEMBASE:
			code = emit_ldrx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);
			arm_blrx (code, ARMREG_IP0);
			code = emit_move_return_value (cfg, code, ins);
			break;
		case OP_TAILCALL: {
			MonoCallInst *call = (MonoCallInst*)ins;

			g_assert (!cfg->method->save_lmf);

			// FIXME: Copy stack arguments

			/* Restore registers */
			code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);

			/* Destroy frame */
			code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, ((1 << ARMREG_IP0) | (1 << ARMREG_IP1)));

			if (cfg->compile_aot) {
				/* This is not a PLT patch */
				code = emit_aotconst (cfg, code, ARMREG_IP0, MONO_PATCH_INFO_METHOD_JUMP, call->method);
				arm_brx (code, ARMREG_IP0);
			} else {
				mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method, MONO_R_ARM64_B);
				arm_b (code, code);
				cfg->thunk_area += THUNK_SIZE;
			}
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			break;
		}
		case OP_ARGLIST:
			g_assert (cfg->arch.cinfo);
			code = emit_addx_imm (code, ARMREG_IP0, cfg->arch.args_reg, ((CallInfo*)cfg->arch.cinfo)->sig_cookie.offset);
			arm_strx (code, ARMREG_IP0, sreg1, 0);
			break;
		case OP_DYN_CALL: {
			MonoInst *var = cfg->dyn_call_var;
			guint8 *labels [16];
			int i;

			/*
			 * sreg1 points to a DynCallArgs structure initialized by mono_arch_start_dyn_call ().
			 * sreg2 is the function to call.
			 */
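			/*
			 * (Assumed from the field offsets used below: DynCallArgs
			 * carries the general regs in "regs", the fp args in
			 * "fpregs"/"n_fpargs", and returns through "res"/"res2" and
			 * "fpregs"/"n_fpret".)
			 */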
			g_assert (var->opcode == OP_REGOFFSET);

			arm_movx (code, ARMREG_LR, sreg1);
			arm_movx (code, ARMREG_IP1, sreg2);

			/* Save args buffer */
			code = emit_strx (code, ARMREG_LR, var->inst_basereg, var->inst_offset);

			/* Set fp argument regs */
			code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpargs));
			arm_cmpw (code, ARMREG_R0, ARMREG_RZR);
			labels [0] = code;
			arm_bcc (code, ARMCOND_EQ, 0);
			for (i = 0; i < 8; ++i)
				code = emit_ldrfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8));
			arm_patch_rel (labels [0], code, MONO_R_ARM64_BCC);

			/* Set stack args */
			for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
				code = emit_ldrx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS + 1 + i) * sizeof (mgreg_t)));
				code = emit_strx (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
			}

			/* Set argument registers + r8 */
			code = mono_arm_emit_load_regarray (code, 0x1ff, ARMREG_LR, 0);

			/* Make the call */
			arm_blrx (code, ARMREG_IP1);

			/* Save result */
			code = emit_ldrx (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
			arm_strx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res));
			arm_strx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, res2));
			/* Save fp result */
			code = emit_ldrw (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_fpret));
			arm_cmpw (code, ARMREG_R0, ARMREG_RZR);
			labels [1] = code;
			arm_bcc (code, ARMCOND_EQ, 0);
			for (i = 0; i < 8; ++i)
				code = emit_strfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8));
			arm_patch_rel (labels [1], code, MONO_R_ARM64_BCC);
			break;
		}
		case OP_GENERIC_CLASS_INIT: {
			int byte_offset;
			guint8 *jump;

			byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized);

			/* Load vtable->initialized */
			arm_ldrsbx (code, ARMREG_IP0, sreg1, byte_offset);
			jump = code;
			arm_cbnzx (code, ARMREG_IP0, 0);

			/* Slowpath */
			g_assert (sreg1 == ARMREG_R0);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
							  (gpointer)"mono_generic_class_init");

			mono_arm_patch (jump, code, MONO_R_ARM64_CBZ);
			break;
		}
		case OP_CHECK_THIS:
			arm_ldrx (code, ARMREG_LR, sreg1, 0);
			break;
		case OP_NOT_REACHED:
		case OP_NOT_NULL:
			break;
		case OP_IL_SEQ_POINT:
			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
			break;

		/* EH */
		case OP_COND_EXC_C:
		case OP_COND_EXC_IC:
		case OP_COND_EXC_OV:
		case OP_COND_EXC_IOV:
		case OP_COND_EXC_NC:
		case OP_COND_EXC_INC:
		case OP_COND_EXC_NO:
		case OP_COND_EXC_INO:
		case OP_COND_EXC_EQ:
		case OP_COND_EXC_IEQ:
		case OP_COND_EXC_NE_UN:
		case OP_COND_EXC_INE_UN:
		case OP_COND_EXC_ILT:
		case OP_COND_EXC_LT:
		case OP_COND_EXC_ILT_UN:
		case OP_COND_EXC_LT_UN:
		case OP_COND_EXC_IGT:
		case OP_COND_EXC_GT:
		case OP_COND_EXC_IGT_UN:
		case OP_COND_EXC_GT_UN:
		case OP_COND_EXC_IGE:
		case OP_COND_EXC_GE:
		case OP_COND_EXC_IGE_UN:
		case OP_COND_EXC_GE_UN:
		case OP_COND_EXC_ILE:
		case OP_COND_EXC_LE:
		case OP_COND_EXC_ILE_UN:
		case OP_COND_EXC_LE_UN:
			code = emit_cond_exc (cfg, code, ins->opcode, ins->inst_p1);
			break;
		case OP_THROW:
			if (sreg1 != ARMREG_R0)
				arm_movx (code, ARMREG_R0, sreg1);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
							  (gpointer)"mono_arch_throw_exception");
			break;
		case OP_RETHROW:
			if (sreg1 != ARMREG_R0)
				arm_movx (code, ARMREG_R0, sreg1);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
							  (gpointer)"mono_arch_rethrow_exception");
			break;
		case OP_CALL_HANDLER:
			mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_BL);
			arm_bl (code, 0);
			cfg->thunk_area += THUNK_SIZE;
			mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
			break;
		case OP_START_HANDLER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);

			/* Save caller address */
			code = emit_strx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);

			/*
			 * Reserve a param area, see test_0_finally_param_area ().
			 * This is needed because the param area is not set up when
			 * we are called from EH code.
			 */
			if (cfg->param_area)
				code = emit_subx_sp_imm (code, cfg->param_area);
			break;
		}
		case OP_ENDFINALLY:
		case OP_ENDFILTER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);

			if (cfg->param_area)
				code = emit_addx_sp_imm (code, cfg->param_area);

			if (ins->opcode == OP_ENDFILTER && sreg1 != ARMREG_R0)
				arm_movx (code, ARMREG_R0, sreg1);

			/* Return to either after the branch in OP_CALL_HANDLER, or to the EH code */
			code = emit_ldrx (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
			arm_brx (code, ARMREG_LR);
			break;
		}
		case OP_GET_EX_OBJ:
			if (ins->dreg != ARMREG_R0)
				arm_movx (code, ins->dreg, ARMREG_R0);
			break;
		case OP_GC_SAFE_POINT: {
#if defined (USE_COOP_GC)
			guint8 *buf [1];

			arm_ldrx (code, ARMREG_IP1, ins->sreg1, 0);
			/* Call it if it is non-null */
			buf [0] = code;
			arm_cbzx (code, ARMREG_IP1, 0);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_threads_state_poll");
			mono_arm_patch (buf [0], code, MONO_R_ARM64_CBZ);
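			/*
			 * Fast path note: when no suspend is pending the poll word
			 * loads as 0 and the cbzx skips the call, so a safe point
			 * costs a single load in the common case.
			 */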
#endif
			break;
		}
		case OP_FILL_PROF_CALL_CTX:
			for (int i = 0; i < MONO_MAX_IREGS; i++)
				if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP)
					arm_strx (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (mgreg_t));
			break;
		default:
			g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
			g_assert_not_reached ();
		}

		if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
				   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
			g_assert_not_reached ();
		}
	}

	/*
	 * If the compiled code size is larger than the bcc displacement (19 bits signed),
	 * insert branch islands between/inside basic blocks.
	 */
	if (cfg->arch.cond_branch_islands)
		code = emit_branch_island (cfg, code, start_offset);

	cfg->code_len = code - cfg->native_code;
}
static __attribute__ ((__warn_unused_result__)) guint8*
emit_move_args (MonoCompile *cfg, guint8 *code)
{
	MonoInst *ins;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	int i, part;

	cinfo = cfg->arch.cinfo;
	g_assert (cinfo);
	for (i = 0; i < cinfo->nargs; ++i) {
		ainfo = cinfo->args + i;
		ins = cfg->args [i];

		if (ins->opcode == OP_REGVAR) {
			switch (ainfo->storage) {
			case ArgInIReg:
				arm_movx (code, ins->dreg, ainfo->reg);
				break;
			case ArgOnStack:
				switch (ainfo->slot_size) {
				case 1:
					if (ainfo->sign)
						code = emit_ldrsbx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					else
						code = emit_ldrb (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					break;
				case 2:
					if (ainfo->sign)
						code = emit_ldrshx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					else
						code = emit_ldrh (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					break;
				case 4:
					if (ainfo->sign)
						code = emit_ldrswx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					else
						code = emit_ldrw (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					break;
				default:
					code = emit_ldrx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					break;
				}
				break;
			default:
				g_assert_not_reached ();
			}
		} else {
			if (ainfo->storage != ArgVtypeByRef && ainfo->storage != ArgVtypeByRefOnStack)
				g_assert (ins->opcode == OP_REGOFFSET);

			switch (ainfo->storage) {
			case ArgInIReg:
				/* Stack slots for arguments have size 8 */
				code = emit_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				break;
			case ArgInFReg:
				code = emit_strfpx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				break;
			case ArgInFRegR4:
				code = emit_strfpw (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				break;
			case ArgOnStack:
			case ArgOnStackR4:
			case ArgOnStackR8:
			case ArgVtypeByRefOnStack:
			case ArgVtypeOnStack:
				break;
			case ArgVtypeByRef: {
				MonoInst *addr_arg = ins->inst_left;

				if (ainfo->gsharedvt) {
					g_assert (ins->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
					arm_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				} else {
					g_assert (ins->opcode == OP_VTARG_ADDR);
					g_assert (addr_arg->opcode == OP_REGOFFSET);
					arm_strx (code, ainfo->reg, addr_arg->inst_basereg, addr_arg->inst_offset);
				}
				break;
			}
			case ArgVtypeInIRegs:
				for (part = 0; part < ainfo->nregs; part ++) {
					code = emit_strx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + (part * 8));
				}
				break;
			case ArgHFA:
				for (part = 0; part < ainfo->nregs; part ++) {
					if (ainfo->esize == 4)
						code = emit_strfpw (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
					else
						code = emit_strfpx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
				}
				break;
			default:
				g_assert_not_reached ();
			}
		}
	}

	return code;
}
/*
 * emit_store_regarray:
 *
 *   Emit code to store the registers in REGS into the appropriate elements of
 * the register array at BASEREG+OFFSET.
 */
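/*
 * Note: when two consecutive registers are both in REGS, a single stp is
 * used instead of two str's; sp cannot be stored directly, so it goes
 * through ip1 first.
 */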
static __attribute__ ((__warn_unused_result__)) guint8*
emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i;

	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if (i + 1 < 32 && (regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				arm_stpx (code, i, i + 1, basereg, offset + (i * 8));
				i++;
			} else if (i == ARMREG_SP) {
				arm_movspx (code, ARMREG_IP1, ARMREG_SP);
				arm_strx (code, ARMREG_IP1, basereg, offset + (i * 8));
			} else {
				arm_strx (code, i, basereg, offset + (i * 8));
			}
		}
	}
	return code;
}
/*
 * emit_load_regarray:
 *
 *   Emit code to load the registers in REGS from the appropriate elements of
 * the register array at BASEREG+OFFSET.
 */
static __attribute__ ((__warn_unused_result__)) guint8*
emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i;

	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				if (offset + (i * 8) < 500)
					arm_ldpx (code, i, i + 1, basereg, offset + (i * 8));
				else {
					code = emit_ldrx (code, i, basereg, offset + (i * 8));
					code = emit_ldrx (code, i + 1, basereg, offset + ((i + 1) * 8));
				}
				i++;
			} else if (i == ARMREG_SP) {
				g_assert_not_reached ();
			} else {
				code = emit_ldrx (code, i, basereg, offset + (i * 8));
			}
		}
	}
	return code;
}
/*
 * emit_store_regset:
 *
 *   Emit code to store the registers in REGS into consecutive memory locations starting
 * at BASEREG+OFFSET.
 */
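/*
 * Unlike the regarray helpers above, which keep register i at slot i, the
 * regset helpers below pack only the requested registers into consecutive
 * slots (the "pos" counter), so the layout depends on which bits are set.
 */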
static __attribute__ ((__warn_unused_result__)) guint8*
emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i, pos;

	pos = 0;
	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
				i++;
				pos += 2;
			} else if (i == ARMREG_SP) {
				arm_movspx (code, ARMREG_IP1, ARMREG_SP);
				arm_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
				pos++;
			} else {
				arm_strx (code, i, basereg, offset + (pos * 8));
				pos++;
			}
		}
	}
	return code;
}

/*
 * emit_load_regset:
 *
 *   Emit code to load the registers in REGS from consecutive memory locations starting
 * at BASEREG+OFFSET.
 */
static __attribute__ ((__warn_unused_result__)) guint8*
emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i, pos;

	pos = 0;
	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				arm_ldpx (code, i, i + 1, basereg, offset + (pos * 8));
				i++;
				pos += 2;
			} else if (i == ARMREG_SP) {
				g_assert_not_reached ();
			} else {
				arm_ldrx (code, i, basereg, offset + (pos * 8));
				pos++;
			}
		}
	}
	return code;
}

__attribute__ ((__warn_unused_result__)) guint8*
mono_arm_emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
	return emit_load_regarray (code, regs, basereg, offset);
}

__attribute__ ((__warn_unused_result__)) guint8*
mono_arm_emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset)
{
	return emit_store_regarray (code, regs, basereg, offset);
}

__attribute__ ((__warn_unused_result__)) guint8*
mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset)
{
	return emit_store_regset (code, regs, basereg, offset);
}
/* Same as emit_store_regset, but emit unwind info too */
/* CFA_OFFSET is the offset between the CFA and basereg */
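/* E.g. with basereg == fp and the CFA at fp + cfa_offset, a register saved
 * at fp + offset is recorded at (offset - cfa_offset) relative to the CFA,
 * matching the mono_emit_unwind_op_offset calls below. */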
static __attribute__ ((__warn_unused_result__)) guint8*
emit_store_regset_cfa (MonoCompile *cfg, guint8 *code, guint64 regs, int basereg, int offset, int cfa_offset, guint64 no_cfa_regset)
{
	int i, j, pos, nregs;
	guint32 cfa_regset = regs & ~no_cfa_regset;

	pos = 0;
	for (i = 0; i < 32; ++i) {
		nregs = 1;
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				if (offset < 256) {
					arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
				} else {
					code = emit_strx (code, i, basereg, offset + (pos * 8));
					code = emit_strx (code, i + 1, basereg, offset + (pos * 8) + 8);
				}
				nregs = 2;
			} else if (i == ARMREG_SP) {
				arm_movspx (code, ARMREG_IP1, ARMREG_SP);
				code = emit_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
			} else {
				code = emit_strx (code, i, basereg, offset + (pos * 8));
			}

			for (j = 0; j < nregs; ++j) {
				if (cfa_regset & (1 << (i + j)))
					mono_emit_unwind_op_offset (cfg, code, i + j, (- cfa_offset) + offset + ((pos + j) * 8));
			}

			i += nregs - 1;
			pos += nregs;
		}
	}
	return code;
}
/*
 * emit_setup_lmf:
 *
 *   Emit code to initialize an LMF structure at LMF_OFFSET.
 */
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
	/*
	 * The LMF should contain all the state required to be able to reconstruct the machine state
	 * at the current point of execution. Since the LMF is only read during EH, only callee
	 * saved etc. registers need to be saved.
	 * FIXME: Save callee saved fp regs, JITted code doesn't use them, but native code does, and they
	 * need to be restored during EH.
	 */

	/* pc */
	arm_adrx (code, ARMREG_LR, code);
	code = emit_strx (code, ARMREG_LR, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, pc));
	/* gregs + fp + sp */
	/* Don't emit unwind info for sp/fp, they are already handled in the prolog */
	code = emit_store_regset_cfa (cfg, code, MONO_ARCH_LMF_REGS, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs), cfa_offset, (1 << ARMREG_FP) | (1 << ARMREG_SP));

	return code;
}
4666 mono_arch_emit_prolog (MonoCompile
*cfg
)
4668 MonoMethod
*method
= cfg
->method
;
4669 MonoMethodSignature
*sig
;
4672 int cfa_offset
, max_offset
;
4674 sig
= mono_method_signature (method
);
4675 cfg
->code_size
= 256 + sig
->param_count
* 64;
4676 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
4678 /* This can be unaligned */
4679 cfg
->stack_offset
= ALIGN_TO (cfg
->stack_offset
, MONO_ARCH_FRAME_ALIGNMENT
);
4685 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
4688 if (arm_is_ldpx_imm (-cfg
->stack_offset
)) {
4689 arm_stpx_pre (code
, ARMREG_FP
, ARMREG_LR
, ARMREG_SP
, -cfg
->stack_offset
);
4691 /* sp -= cfg->stack_offset */
4692 /* This clobbers ip0/ip1 */
4693 code
= emit_subx_sp_imm (code
, cfg
->stack_offset
);
4694 arm_stpx (code
, ARMREG_FP
, ARMREG_LR
, ARMREG_SP
, 0);
4696 cfa_offset
+= cfg
->stack_offset
;
4697 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, cfa_offset
);
4698 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_FP
, (- cfa_offset
) + 0);
4699 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_LR
, (- cfa_offset
) + 8);
4700 arm_movspx (code
, ARMREG_FP
, ARMREG_SP
);
4701 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, ARMREG_FP
);
4702 if (cfg
->param_area
) {
4703 /* The param area is below the frame pointer */
4704 code
= emit_subx_sp_imm (code
, cfg
->param_area
);
4707 if (cfg
->method
->save_lmf
) {
4708 code
= emit_setup_lmf (cfg
, code
, cfg
->lmf_var
->inst_offset
, cfa_offset
);
4711 code
= emit_store_regset_cfa (cfg
, code
, MONO_ARCH_CALLEE_SAVED_REGS
& cfg
->used_int_regs
, ARMREG_FP
, cfg
->arch
.saved_gregs_offset
, cfa_offset
, 0);
4714 /* Setup args reg */
4715 if (cfg
->arch
.args_reg
) {
4716 /* The register was already saved above */
4717 code
= emit_addx_imm (code
, cfg
->arch
.args_reg
, ARMREG_FP
, cfg
->stack_offset
);
4720 /* Save return area addr received in R8 */
4721 if (cfg
->vret_addr
) {
4722 MonoInst
*ins
= cfg
->vret_addr
;
4724 g_assert (ins
->opcode
== OP_REGOFFSET
);
4725 code
= emit_strx (code
, ARMREG_R8
, ins
->inst_basereg
, ins
->inst_offset
);
4728 /* Save mrgctx received in MONO_ARCH_RGCTX_REG */
4729 if (cfg
->rgctx_var
) {
4730 MonoInst
*ins
= cfg
->rgctx_var
;
4732 g_assert (ins
->opcode
== OP_REGOFFSET
);
4734 code
= emit_strx (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
4738 * Move arguments to their registers/stack locations.
4740 code
= emit_move_args (cfg
, code
);
4742 /* Initialize seq_point_info_var */
4743 if (cfg
->arch
.seq_point_info_var
) {
4744 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
4746 /* Initialize the variable from a GOT slot */
4747 code
= emit_aotconst (cfg
, code
, ARMREG_IP0
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
4748 g_assert (ins
->opcode
== OP_REGOFFSET
);
4749 code
= emit_strx (code
, ARMREG_IP0
, ins
->inst_basereg
, ins
->inst_offset
);
4751 /* Initialize ss_tramp_var */
4752 ins
= cfg
->arch
.ss_tramp_var
;
4753 g_assert (ins
->opcode
== OP_REGOFFSET
);
4755 code
= emit_ldrx (code
, ARMREG_IP1
, ARMREG_IP0
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_tramp_addr
));
4756 code
= emit_strx (code
, ARMREG_IP1
, ins
->inst_basereg
, ins
->inst_offset
);
4760 if (cfg
->arch
.ss_tramp_var
) {
4761 /* Initialize ss_tramp_var */
4762 ins
= cfg
->arch
.ss_tramp_var
;
4763 g_assert (ins
->opcode
== OP_REGOFFSET
);
4765 code
= emit_imm64 (code
, ARMREG_IP0
, (guint64
)&ss_trampoline
);
4766 code
= emit_strx (code
, ARMREG_IP0
, ins
->inst_basereg
, ins
->inst_offset
);
4769 if (cfg
->arch
.bp_tramp_var
) {
4770 /* Initialize bp_tramp_var */
4771 ins
= cfg
->arch
.bp_tramp_var
;
4772 g_assert (ins
->opcode
== OP_REGOFFSET
);
4774 code
= emit_imm64 (code
, ARMREG_IP0
, (guint64
)bp_trampoline
);
4775 code
= emit_strx (code
, ARMREG_IP0
, ins
->inst_basereg
, ins
->inst_offset
);
	max_offset = 0;
	if (cfg->opt & MONO_OPT_BRANCH) {
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			MonoInst *ins;

			bb->max_offset = max_offset;

			MONO_BB_FOR_EACH_INS (bb, ins) {
				max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
			}
		}
	}
	if (max_offset > 0x3ffff * 4)
		cfg->arch.cond_branch_islands = TRUE;

	return code;
}
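
/*
 * The 0x3ffff * 4 threshold above comes from the AArch64 encoding: B.cond
 * and cbz carry a signed 19-bit word offset, i.e. about +/-1 MiB of reach,
 * so larger methods need islands for conditional branches.
 */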
static guint8*
realloc_code (MonoCompile *cfg, int size)
{
	while (cfg->code_len + size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;
	}
	return cfg->native_code + cfg->code_len;
}
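
/*
 * Doubling code_size keeps reallocation amortized O(1) per emitted byte; the
 * 16-byte margin in the check presumably leaves room for a few instructions
 * emitted past the requested size.
 */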
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	CallInfo *cinfo;
	int max_epilog_size;
	guint8 *code;
	int i;

	max_epilog_size = 16 + 20*4;
	code = realloc_code (cfg, max_epilog_size);

	if (cfg->method->save_lmf) {
		code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs) - (MONO_ARCH_FIRST_LMF_REG * 8));
	} else {
		/* Restore gregs */
		code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);
	}
	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	switch (cinfo->ret.storage) {
	case ArgVtypeInIRegs: {
		MonoInst *ins = cfg->ret;

		for (i = 0; i < cinfo->ret.nregs; ++i)
			code = emit_ldrx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * 8));
		break;
	}
	case ArgHFA: {
		MonoInst *ins = cfg->ret;

		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
			else
				code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
		}
		break;
	}
	default:
		break;
	}
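
	/*
	 * ArgHFA is an AAPCS64 homogeneous floating-point aggregate: a struct of
	 * up to four identical float or double fields, returned in consecutive
	 * FP registers (s-regs when esize == 4, d-regs otherwise), hence the
	 * per-field loads above.
	 */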
	/* Destroy frame */
	code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, ((1 << ARMREG_IP0) | (1 << ARMREG_IP1)));

	arm_retx (code, ARMREG_LR);

	g_assert (code - (cfg->native_code + cfg->code_len) < max_epilog_size);

	cfg->code_len = code - cfg->native_code;
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *ji;
	MonoClass *exc_class;
	guint8 *code, *ip;
	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
	int i, id, size = 0;
	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
		exc_throw_pos [i] = NULL;
		exc_throw_found [i] = 0;
	}
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->type == MONO_PATCH_INFO_EXC) {
			i = mini_exception_id_by_name (ji->data.target);
			if (!exc_throw_found [i]) {
				size += 32;
				exc_throw_found [i] = TRUE;
			}
		}
	}

	code = realloc_code (cfg, size);
	/* Emit code to raise corlib exceptions */
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->type != MONO_PATCH_INFO_EXC)
			continue;

		ip = cfg->native_code + ji->ip.i;

		id = mini_exception_id_by_name (ji->data.target);

		if (exc_throw_pos [id]) {
			/* ip points to the bcc () in OP_COND_EXC_... */
			arm_patch_rel (ip, exc_throw_pos [id], ji->relocation);
			ji->type = MONO_PATCH_INFO_NONE;
			continue;
		}

		exc_throw_pos [id] = code;
		arm_patch_rel (ip, code, ji->relocation);

		/* We are being branched to from the code generated by emit_cond_exc (), the pc is in ip1 */

		/* r0 = type token */
		exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", ji->data.name);
		code = emit_imm (code, ARMREG_R0, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
		/* r1 = throw ip */
		arm_movx (code, ARMREG_R1, ARMREG_IP1);
		/* Branch to the corlib exception throwing trampoline */
		ji->ip.i = code - cfg->native_code;
		ji->type = MONO_PATCH_INFO_INTERNAL_METHOD;
		ji->data.name = "mono_arch_throw_corlib_exception";
		ji->relocation = MONO_R_ARM64_BL;
		arm_bl (code, 0);
		cfg->thunk_area += THUNK_SIZE;
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
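
/*
 * Each distinct corlib exception gets a single thrower stub per method: the
 * first OP_COND_EXC_* site for an exception id emits the stub and records it
 * in exc_throw_pos [], and later sites are simply patched to branch there.
 */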
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	return NULL;
}

void
mono_arch_print_tree (MonoInst *tree, int arity)
{
}

guint32
mono_arch_get_patch_offset (guint8 *code)
{
	return 0;
}
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
								gpointer fail_tramp)
{
	int i, buf_len, imt_reg;
	guint8 *buf, *code;

#if DEBUG_IMT
	printf ("building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif
	buf_len = 0;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
					buf_len += 4 * 4 + 4;
				}
				buf_len += 4;
				if (item->has_target_code) {
					buf_len += 5 * 4;
				} else {
					buf_len += 6 * 4;
				}
				if (fail_case) {
					buf_len += 5 * 4;
				}
			} else {
				buf_len += 6 * 4;
			}
		} else {
			buf_len += 6 * 4;
		}
	}

	if (fail_tramp)
		buf = mono_method_alloc_generic_virtual_trampoline (domain, buf_len);
	else
		buf = mono_domain_code_reserve (domain, buf_len);
	code = buf;
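
	/*
	 * The buf_len accounting above is a worst-case upper bound on the code
	 * emitted below: emit_imm64 () expands to at most four movz/movk
	 * instructions (4 * 4 bytes), and each cmp, conditional branch, load or
	 * indirect branch adds another 4 bytes.
	 */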
	/*
	 * We are called by JITted code, which passes in the IMT argument in
	 * MONO_ARCH_RGCTX_REG (r27). We need to preserve all caller saved regs
	 * except ip0/ip1.
	 */
	imt_reg = MONO_ARCH_RGCTX_REG;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];

		item->code_target = code;

		if (item->is_equals) {
			/*
			 * Check the imt argument against item->key, if equals, jump to either
			 * item->value.target_code or to vtable [item->value.vtable_slot].
			 * If fail_tramp is set, jump to it if not-equals.
			 */
			gboolean fail_case = !item->check_target_idx && fail_tramp;
			if (item->check_target_idx || fail_case) {
				/* Compare imt_reg with item->key */
				if (!item->compare_done || fail_case) {
					// FIXME: Optimize this
					code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key);
					arm_cmpx (code, imt_reg, ARMREG_IP0);
				}
				item->jmp_code = code;
				arm_bcc (code, ARMCOND_NE, 0);

				/* Jump to target if equals */
				if (item->has_target_code) {
					code = emit_imm64 (code, ARMREG_IP0, (guint64)item->value.target_code);
					arm_brx (code, ARMREG_IP0);
				} else {
					guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]);

					code = emit_imm64 (code, ARMREG_IP0, imm);
					arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0);
					arm_brx (code, ARMREG_IP0);
				}
				if (fail_case) {
					arm_patch_rel (item->jmp_code, code, MONO_R_ARM64_BCC);
					item->jmp_code = NULL;
					code = emit_imm64 (code, ARMREG_IP0, (guint64)fail_tramp);
					arm_brx (code, ARMREG_IP0);
				}
			} else {
				guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]);

				code = emit_imm64 (code, ARMREG_IP0, imm);
				arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0);
				arm_brx (code, ARMREG_IP0);
			}
		} else {
			code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key);
			arm_cmpx (code, imt_reg, ARMREG_IP0);
			item->jmp_code = code;
			arm_bcc (code, ARMCOND_HS, 0);
		}
	}
	/* Patch the branches */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code && item->check_target_idx)
			arm_patch_rel (item->jmp_code, imt_entries [item->check_target_idx]->code_target, MONO_R_ARM64_BCC);
	}

	g_assert ((code - buf) < buf_len);

	mono_arch_flush_icache (buf, code - buf);

	return buf;
}
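
/*
 * The result is a small decision tree over the (sorted) IMT entries:
 * non-equals nodes split the key range with an unsigned compare-and-branch
 * (ARMCOND_HS), while equals leaves either branch to the target code or fall
 * through to fail_tramp, giving a lookup that is logarithmic in the number
 * of colliding entries.
 */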
GSList*
mono_arch_get_trampolines (gboolean aot)
{
	return mono_arm_get_exception_trampolines (aot);
}
#else /* DISABLE_JIT */

gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
								gpointer fail_tramp)
{
	g_assert_not_reached ();
	return NULL;
}

#endif /* !DISABLE_JIT */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED

void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = mini_get_breakpoint_trampoline ();
	} else {
		/* ip points to an ldrx */
		code += 4;
		arm_blrx (code, ARMREG_IP0);
		mono_arch_flush_icache (ip, code - ip);
	}
}
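
/*
 * Soft breakpoints, in outline: AOT code indirects every sequence point
 * through the per-method bp_addrs [] table, so arming a breakpoint just
 * fills in a table slot; JITted code reserves a nop after the load of
 * bp_tramp_var, which is patched to "blrx ip0" here and back to a nop in
 * mono_arch_clear_breakpoint () below.
 */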
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;

	if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		info->bp_addrs [native_offset / 4] = NULL;
	} else {
		/* ip points to an ldrx */
		code += 4;
		arm_nop (code);
		mono_arch_flush_icache (ip, code - ip);
	}
}
void
mono_arch_start_single_stepping (void)
{
	ss_trampoline = mini_get_single_step_trampoline ();
}

void
mono_arch_stop_single_stepping (void)
{
	ss_trampoline = NULL;
}

gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
	/* We use soft breakpoints on arm64 */
	return FALSE;
}

gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
	/* We use soft breakpoints on arm64 */
	return FALSE;
}

void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	g_assert_not_reached ();
}

void
mono_arch_skip_single_step (MonoContext *ctx)
{
	g_assert_not_reached ();
}
gpointer
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;

	// FIXME: Add a free function

	mono_domain_lock (domain);
	info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
								code);
	mono_domain_unlock (domain);

	if (!info) {
		ji = mono_jit_info_table_find (domain, (char*)code);
		g_assert (ji);

		info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size / 4) * sizeof(guint8*));

		info->ss_tramp_addr = &ss_trampoline;

		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}
void
mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
{
	ext->lmf.previous_lmf = prev_lmf;
	/* Mark that this is a MonoLMFExt */
	ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
	ext->lmf.gregs [MONO_ARCH_LMF_REG_SP] = (gssize)ext;
}

#endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
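
/*
 * LMF structures are word-aligned, so the low bits of previous_lmf are free
 * for tagging: bit 1 set above lets the unwinder distinguish a MonoLMFExt
 * from a plain MonoLMF while walking the chain.
 */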
gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_ADD_I8:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_EXCHANGE_I8:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_CAS_I8:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_I8:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_LOAD_U8:
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_I8:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
	case OP_ATOMIC_STORE_U8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return TRUE;
	default:
		return FALSE;
	}
}
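
/*
 * A plausible lowering for these (not shown here): the add/exchange/CAS
 * opcodes map to ldxr/stxr or ldaxr/stlxr loops, and the atomic loads and
 * stores to the ARMv8 load-acquire (ldar) and store-release (stlr)
 * instructions.
 */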
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	return get_call_info (mp, sig);
}