3 * ARM64 backend for the Mono code generator
5 * Copyright 2013 Xamarin, Inc (http://www.xamarin.com)
10 * Paolo Molaro (lupus@ximian.com)
11 * Dietmar Maurer (dietmar@ximian.com)
13 * (C) 2003 Ximian, Inc.
14 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
15 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
16 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
20 #include "cpu-arm64.h"
22 #include "aot-runtime.h"
23 #include "mini-runtime.h"
25 #include <mono/arch/arm64/arm64-codegen.h>
26 #include <mono/utils/mono-mmap.h>
27 #include <mono/utils/mono-memory-model.h>
28 #include <mono/metadata/abi-details.h>
30 #include "interp/interp.h"
35 * - ARM(R) Architecture Reference Manual, ARMv8, for ARMv8-A architecture profile (DDI0487A_a_armv8_arm.pdf)
36 * - Procedure Call Standard for the ARM 64-bit Architecture (AArch64) (IHI0055B_aapcs64.pdf)
37 * - ELF for the ARM 64-bit Architecture (IHI0056B_aaelf64.pdf)
40 * - ip0/ip1/lr are used as temporary registers
41 * - r27 is used as the rgctx/imt register
42 * - r28 is used to access arguments passed on the stack
43 * - d15/d16 are used as fp temporary registers
46 #define FP_TEMP_REG ARMREG_D16
47 #define FP_TEMP_REG2 ARMREG_D17
49 #define THUNK_SIZE (4 * 4)
51 /* The single step trampoline */
52 static gpointer ss_trampoline
;
54 /* The breakpoint trampoline */
55 static gpointer bp_trampoline
;
57 static gboolean ios_abi
;
59 static __attribute__ ((__warn_unused_result__
)) guint8
* emit_load_regset (guint8
*code
, guint64 regs
, int basereg
, int offset
);
/*
 * mono_arch_regname:
 *
 *   Return the name of the integer register REG, or "unknown" if REG is
 * out of the 0..31 range. x29/x30/x31 use their ABI names fp/lr/sp.
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames [] = {
		"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
		"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
		"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "fp",
		"lr", "sp"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return the name of the FP/SIMD register REG (d0..d31), or "unknown fp"
 * if REG is out of the 0..31 range.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames [] = {
		"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9",
		"d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19",
		"d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
		"d30", "d31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown fp";
}
90 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
96 #define MAX_ARCH_DELEGATE_PARAMS 7
99 get_delegate_invoke_impl (gboolean has_target
, gboolean param_count
, guint32
*code_size
)
101 guint8
*code
, *start
;
104 start
= code
= mono_global_codeman_reserve (12);
106 /* Replace the this argument with the target */
107 arm_ldrx (code
, ARMREG_IP0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
108 arm_ldrx (code
, ARMREG_R0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
109 arm_brx (code
, ARMREG_IP0
);
111 g_assert ((code
- start
) <= 12);
113 mono_arch_flush_icache (start
, 12);
114 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
118 size
= 8 + param_count
* 4;
119 start
= code
= mono_global_codeman_reserve (size
);
121 arm_ldrx (code
, ARMREG_IP0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
122 /* slide down the arguments */
123 for (i
= 0; i
< param_count
; ++i
)
124 arm_movx (code
, i
, i
+ 1);
125 arm_brx (code
, ARMREG_IP0
);
127 g_assert ((code
- start
) <= size
);
129 mono_arch_flush_icache (start
, size
);
130 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
134 *code_size
= code
- start
;
140 * mono_arch_get_delegate_invoke_impls:
142 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
146 mono_arch_get_delegate_invoke_impls (void)
154 code
= (guint8
*)get_delegate_invoke_impl (TRUE
, 0, &code_len
);
155 res
= g_slist_prepend (res
, mono_tramp_info_create ("delegate_invoke_impl_has_target", code
, code_len
, NULL
, NULL
));
157 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
158 code
= (guint8
*)get_delegate_invoke_impl (FALSE
, i
, &code_len
);
159 tramp_name
= g_strdup_printf ("delegate_invoke_impl_target_%d", i
);
160 res
= g_slist_prepend (res
, mono_tramp_info_create (tramp_name
, code
, code_len
, NULL
, NULL
));
168 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
170 guint8
*code
, *start
;
173 * vtypes are returned in registers, or using the dedicated r8 register, so
174 * they can be supported by delegate invokes.
178 static guint8
* cached
= NULL
;
183 if (mono_ee_features
.use_aot_trampolines
)
184 start
= (guint8
*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
186 start
= (guint8
*)get_delegate_invoke_impl (TRUE
, 0, NULL
);
187 mono_memory_barrier ();
191 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
194 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
196 for (i
= 0; i
< sig
->param_count
; ++i
)
197 if (!mono_is_regsize_var (sig
->params
[i
]))
200 code
= cache
[sig
->param_count
];
204 if (mono_ee_features
.use_aot_trampolines
) {
205 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
206 start
= (guint8
*)mono_aot_get_trampoline (name
);
209 start
= (guint8
*)get_delegate_invoke_impl (FALSE
, sig
->param_count
, NULL
);
211 mono_memory_barrier ();
212 cache
[sig
->param_count
] = start
;
220 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
226 mono_arch_get_this_arg_from_call (host_mgreg_t
*regs
, guint8
*code
)
228 return (gpointer
)regs
[ARMREG_R0
];
232 mono_arch_cpu_init (void)
237 mono_arch_init (void)
240 bp_trampoline
= mini_get_breakpoint_trampoline ();
242 mono_arm_gsharedvt_init ();
244 #if defined(TARGET_IOS)
250 mono_arch_cleanup (void)
255 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
262 mono_arch_cpu_enumerate_simd_versions (void)
268 mono_arch_register_lowlevel_calls (void)
273 mono_arch_finish_init (void)
277 /* The maximum length is 2 instructions */
279 emit_imm (guint8
*code
, int dreg
, int imm
)
281 // FIXME: Optimize this
284 arm_movnx (code
, dreg
, (~limm
) & 0xffff, 0);
285 arm_movkx (code
, dreg
, (limm
>> 16) & 0xffff, 16);
287 arm_movzx (code
, dreg
, imm
& 0xffff, 0);
289 arm_movkx (code
, dreg
, (imm
>> 16) & 0xffff, 16);
295 /* The maximum length is 4 instructions */
297 emit_imm64 (guint8
*code
, int dreg
, guint64 imm
)
299 // FIXME: Optimize this
300 arm_movzx (code
, dreg
, imm
& 0xffff, 0);
301 if ((imm
>> 16) & 0xffff)
302 arm_movkx (code
, dreg
, (imm
>> 16) & 0xffff, 16);
303 if ((imm
>> 32) & 0xffff)
304 arm_movkx (code
, dreg
, (imm
>> 32) & 0xffff, 32);
305 if ((imm
>> 48) & 0xffff)
306 arm_movkx (code
, dreg
, (imm
>> 48) & 0xffff, 48);
312 mono_arm_emit_imm64 (guint8
*code
, int dreg
, gint64 imm
)
314 return emit_imm64 (code
, dreg
, imm
);
320 * Emit a patchable code sequence for constructing a 64 bit immediate.
323 emit_imm64_template (guint8
*code
, int dreg
)
325 arm_movzx (code
, dreg
, 0, 0);
326 arm_movkx (code
, dreg
, 0, 16);
327 arm_movkx (code
, dreg
, 0, 32);
328 arm_movkx (code
, dreg
, 0, 48);
333 static __attribute__ ((__warn_unused_result__
)) guint8
*
334 emit_addw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
336 if (!arm_is_arith_imm (imm
)) {
337 code
= emit_imm (code
, ARMREG_LR
, imm
);
338 arm_addw (code
, dreg
, sreg
, ARMREG_LR
);
340 arm_addw_imm (code
, dreg
, sreg
, imm
);
345 static __attribute__ ((__warn_unused_result__
)) guint8
*
346 emit_addx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
348 if (!arm_is_arith_imm (imm
)) {
349 code
= emit_imm (code
, ARMREG_LR
, imm
);
350 arm_addx (code
, dreg
, sreg
, ARMREG_LR
);
352 arm_addx_imm (code
, dreg
, sreg
, imm
);
357 static __attribute__ ((__warn_unused_result__
)) guint8
*
358 emit_subw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
360 if (!arm_is_arith_imm (imm
)) {
361 code
= emit_imm (code
, ARMREG_LR
, imm
);
362 arm_subw (code
, dreg
, sreg
, ARMREG_LR
);
364 arm_subw_imm (code
, dreg
, sreg
, imm
);
369 static __attribute__ ((__warn_unused_result__
)) guint8
*
370 emit_subx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
372 if (!arm_is_arith_imm (imm
)) {
373 code
= emit_imm (code
, ARMREG_LR
, imm
);
374 arm_subx (code
, dreg
, sreg
, ARMREG_LR
);
376 arm_subx_imm (code
, dreg
, sreg
, imm
);
381 /* Emit sp+=imm. Clobbers ip0/ip1 */
382 static __attribute__ ((__warn_unused_result__
)) guint8
*
383 emit_addx_sp_imm (guint8
*code
, int imm
)
385 code
= emit_imm (code
, ARMREG_IP0
, imm
);
386 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
387 arm_addx (code
, ARMREG_IP1
, ARMREG_IP1
, ARMREG_IP0
);
388 arm_movspx (code
, ARMREG_SP
, ARMREG_IP1
);
392 /* Emit sp-=imm. Clobbers ip0/ip1 */
393 static __attribute__ ((__warn_unused_result__
)) guint8
*
394 emit_subx_sp_imm (guint8
*code
, int imm
)
396 code
= emit_imm (code
, ARMREG_IP0
, imm
);
397 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
398 arm_subx (code
, ARMREG_IP1
, ARMREG_IP1
, ARMREG_IP0
);
399 arm_movspx (code
, ARMREG_SP
, ARMREG_IP1
);
403 static __attribute__ ((__warn_unused_result__
)) guint8
*
404 emit_andw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
407 code
= emit_imm (code
, ARMREG_LR
, imm
);
408 arm_andw (code
, dreg
, sreg
, ARMREG_LR
);
413 static __attribute__ ((__warn_unused_result__
)) guint8
*
414 emit_andx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
417 code
= emit_imm (code
, ARMREG_LR
, imm
);
418 arm_andx (code
, dreg
, sreg
, ARMREG_LR
);
423 static __attribute__ ((__warn_unused_result__
)) guint8
*
424 emit_orrw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
427 code
= emit_imm (code
, ARMREG_LR
, imm
);
428 arm_orrw (code
, dreg
, sreg
, ARMREG_LR
);
433 static __attribute__ ((__warn_unused_result__
)) guint8
*
434 emit_orrx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
437 code
= emit_imm (code
, ARMREG_LR
, imm
);
438 arm_orrx (code
, dreg
, sreg
, ARMREG_LR
);
443 static __attribute__ ((__warn_unused_result__
)) guint8
*
444 emit_eorw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
447 code
= emit_imm (code
, ARMREG_LR
, imm
);
448 arm_eorw (code
, dreg
, sreg
, ARMREG_LR
);
453 static __attribute__ ((__warn_unused_result__
)) guint8
*
454 emit_eorx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
457 code
= emit_imm (code
, ARMREG_LR
, imm
);
458 arm_eorx (code
, dreg
, sreg
, ARMREG_LR
);
463 static __attribute__ ((__warn_unused_result__
)) guint8
*
464 emit_cmpw_imm (guint8
*code
, int sreg
, int imm
)
467 arm_cmpw (code
, sreg
, ARMREG_RZR
);
470 code
= emit_imm (code
, ARMREG_LR
, imm
);
471 arm_cmpw (code
, sreg
, ARMREG_LR
);
477 static __attribute__ ((__warn_unused_result__
)) guint8
*
478 emit_cmpx_imm (guint8
*code
, int sreg
, int imm
)
481 arm_cmpx (code
, sreg
, ARMREG_RZR
);
484 code
= emit_imm (code
, ARMREG_LR
, imm
);
485 arm_cmpx (code
, sreg
, ARMREG_LR
);
491 static __attribute__ ((__warn_unused_result__
)) guint8
*
492 emit_strb (guint8
*code
, int rt
, int rn
, int imm
)
494 if (arm_is_strb_imm (imm
)) {
495 arm_strb (code
, rt
, rn
, imm
);
497 g_assert (rt
!= ARMREG_IP0
);
498 g_assert (rn
!= ARMREG_IP0
);
499 code
= emit_imm (code
, ARMREG_IP0
, imm
);
500 arm_strb_reg (code
, rt
, rn
, ARMREG_IP0
);
505 static __attribute__ ((__warn_unused_result__
)) guint8
*
506 emit_strh (guint8
*code
, int rt
, int rn
, int imm
)
508 if (arm_is_strh_imm (imm
)) {
509 arm_strh (code
, rt
, rn
, imm
);
511 g_assert (rt
!= ARMREG_IP0
);
512 g_assert (rn
!= ARMREG_IP0
);
513 code
= emit_imm (code
, ARMREG_IP0
, imm
);
514 arm_strh_reg (code
, rt
, rn
, ARMREG_IP0
);
519 static __attribute__ ((__warn_unused_result__
)) guint8
*
520 emit_strw (guint8
*code
, int rt
, int rn
, int imm
)
522 if (arm_is_strw_imm (imm
)) {
523 arm_strw (code
, rt
, rn
, imm
);
525 g_assert (rt
!= ARMREG_IP0
);
526 g_assert (rn
!= ARMREG_IP0
);
527 code
= emit_imm (code
, ARMREG_IP0
, imm
);
528 arm_strw_reg (code
, rt
, rn
, ARMREG_IP0
);
533 static __attribute__ ((__warn_unused_result__
)) guint8
*
534 emit_strfpw (guint8
*code
, int rt
, int rn
, int imm
)
536 if (arm_is_strw_imm (imm
)) {
537 arm_strfpw (code
, rt
, rn
, imm
);
539 g_assert (rn
!= ARMREG_IP0
);
540 code
= emit_imm (code
, ARMREG_IP0
, imm
);
541 arm_addx (code
, ARMREG_IP0
, rn
, ARMREG_IP0
);
542 arm_strfpw (code
, rt
, ARMREG_IP0
, 0);
547 static __attribute__ ((__warn_unused_result__
)) guint8
*
548 emit_strfpx (guint8
*code
, int rt
, int rn
, int imm
)
550 if (arm_is_strx_imm (imm
)) {
551 arm_strfpx (code
, rt
, rn
, imm
);
553 g_assert (rn
!= ARMREG_IP0
);
554 code
= emit_imm (code
, ARMREG_IP0
, imm
);
555 arm_addx (code
, ARMREG_IP0
, rn
, ARMREG_IP0
);
556 arm_strfpx (code
, rt
, ARMREG_IP0
, 0);
561 static __attribute__ ((__warn_unused_result__
)) guint8
*
562 emit_strx (guint8
*code
, int rt
, int rn
, int imm
)
564 if (arm_is_strx_imm (imm
)) {
565 arm_strx (code
, rt
, rn
, imm
);
567 g_assert (rt
!= ARMREG_IP0
);
568 g_assert (rn
!= ARMREG_IP0
);
569 code
= emit_imm (code
, ARMREG_IP0
, imm
);
570 arm_strx_reg (code
, rt
, rn
, ARMREG_IP0
);
575 static __attribute__ ((__warn_unused_result__
)) guint8
*
576 emit_ldrb (guint8
*code
, int rt
, int rn
, int imm
)
578 if (arm_is_pimm12_scaled (imm
, 1)) {
579 arm_ldrb (code
, rt
, rn
, imm
);
581 g_assert (rt
!= ARMREG_IP0
);
582 g_assert (rn
!= ARMREG_IP0
);
583 code
= emit_imm (code
, ARMREG_IP0
, imm
);
584 arm_ldrb_reg (code
, rt
, rn
, ARMREG_IP0
);
589 static __attribute__ ((__warn_unused_result__
)) guint8
*
590 emit_ldrsbx (guint8
*code
, int rt
, int rn
, int imm
)
592 if (arm_is_pimm12_scaled (imm
, 1)) {
593 arm_ldrsbx (code
, rt
, rn
, imm
);
595 g_assert (rt
!= ARMREG_IP0
);
596 g_assert (rn
!= ARMREG_IP0
);
597 code
= emit_imm (code
, ARMREG_IP0
, imm
);
598 arm_ldrsbx_reg (code
, rt
, rn
, ARMREG_IP0
);
603 static __attribute__ ((__warn_unused_result__
)) guint8
*
604 emit_ldrh (guint8
*code
, int rt
, int rn
, int imm
)
606 if (arm_is_pimm12_scaled (imm
, 2)) {
607 arm_ldrh (code
, rt
, rn
, imm
);
609 g_assert (rt
!= ARMREG_IP0
);
610 g_assert (rn
!= ARMREG_IP0
);
611 code
= emit_imm (code
, ARMREG_IP0
, imm
);
612 arm_ldrh_reg (code
, rt
, rn
, ARMREG_IP0
);
617 static __attribute__ ((__warn_unused_result__
)) guint8
*
618 emit_ldrshx (guint8
*code
, int rt
, int rn
, int imm
)
620 if (arm_is_pimm12_scaled (imm
, 2)) {
621 arm_ldrshx (code
, rt
, rn
, imm
);
623 g_assert (rt
!= ARMREG_IP0
);
624 g_assert (rn
!= ARMREG_IP0
);
625 code
= emit_imm (code
, ARMREG_IP0
, imm
);
626 arm_ldrshx_reg (code
, rt
, rn
, ARMREG_IP0
);
631 static __attribute__ ((__warn_unused_result__
)) guint8
*
632 emit_ldrswx (guint8
*code
, int rt
, int rn
, int imm
)
634 if (arm_is_pimm12_scaled (imm
, 4)) {
635 arm_ldrswx (code
, rt
, rn
, imm
);
637 g_assert (rt
!= ARMREG_IP0
);
638 g_assert (rn
!= ARMREG_IP0
);
639 code
= emit_imm (code
, ARMREG_IP0
, imm
);
640 arm_ldrswx_reg (code
, rt
, rn
, ARMREG_IP0
);
645 static __attribute__ ((__warn_unused_result__
)) guint8
*
646 emit_ldrw (guint8
*code
, int rt
, int rn
, int imm
)
648 if (arm_is_pimm12_scaled (imm
, 4)) {
649 arm_ldrw (code
, rt
, rn
, imm
);
651 g_assert (rn
!= ARMREG_IP0
);
652 code
= emit_imm (code
, ARMREG_IP0
, imm
);
653 arm_ldrw_reg (code
, rt
, rn
, ARMREG_IP0
);
658 static __attribute__ ((__warn_unused_result__
)) guint8
*
659 emit_ldrx (guint8
*code
, int rt
, int rn
, int imm
)
661 if (arm_is_pimm12_scaled (imm
, 8)) {
662 arm_ldrx (code
, rt
, rn
, imm
);
664 g_assert (rn
!= ARMREG_IP0
);
665 code
= emit_imm (code
, ARMREG_IP0
, imm
);
666 arm_ldrx_reg (code
, rt
, rn
, ARMREG_IP0
);
671 static __attribute__ ((__warn_unused_result__
)) guint8
*
672 emit_ldrfpw (guint8
*code
, int rt
, int rn
, int imm
)
674 if (arm_is_pimm12_scaled (imm
, 4)) {
675 arm_ldrfpw (code
, rt
, rn
, imm
);
677 g_assert (rn
!= ARMREG_IP0
);
678 code
= emit_imm (code
, ARMREG_IP0
, imm
);
679 arm_addx (code
, ARMREG_IP0
, rn
, ARMREG_IP0
);
680 arm_ldrfpw (code
, rt
, ARMREG_IP0
, 0);
685 static __attribute__ ((__warn_unused_result__
)) guint8
*
686 emit_ldrfpx (guint8
*code
, int rt
, int rn
, int imm
)
688 if (arm_is_pimm12_scaled (imm
, 8)) {
689 arm_ldrfpx (code
, rt
, rn
, imm
);
691 g_assert (rn
!= ARMREG_IP0
);
692 code
= emit_imm (code
, ARMREG_IP0
, imm
);
693 arm_addx (code
, ARMREG_IP0
, rn
, ARMREG_IP0
);
694 arm_ldrfpx (code
, rt
, ARMREG_IP0
, 0);
700 mono_arm_emit_ldrx (guint8
*code
, int rt
, int rn
, int imm
)
702 return emit_ldrx (code
, rt
, rn
, imm
);
706 emit_call (MonoCompile
*cfg
, guint8
* code
, MonoJumpInfoType patch_type
, gconstpointer data
)
709 mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_IMM);
710 code = emit_imm64_template (code, ARMREG_LR);
711 arm_blrx (code, ARMREG_LR);
713 mono_add_patch_info_rel (cfg
, code
- cfg
->native_code
, patch_type
, data
, MONO_R_ARM64_BL
);
715 cfg
->thunk_area
+= THUNK_SIZE
;
720 emit_aotconst_full (MonoCompile
*cfg
, MonoJumpInfo
**ji
, guint8
*code
, guint8
*start
, int dreg
, guint32 patch_type
, gconstpointer data
)
723 mono_add_patch_info (cfg
, code
- cfg
->native_code
, (MonoJumpInfoType
)patch_type
, data
);
725 *ji
= mono_patch_info_list_prepend (*ji
, code
- start
, (MonoJumpInfoType
)patch_type
, data
);
726 /* See arch_emit_got_access () in aot-compiler.c */
727 arm_ldrx_lit (code
, dreg
, 0);
734 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, guint32 patch_type
, gconstpointer data
)
736 return emit_aotconst_full (cfg
, NULL
, code
, NULL
, dreg
, patch_type
, data
);
740 * mono_arm_emit_aotconst:
742 * Emit code to load an AOT constant into DREG. Usable from trampolines.
745 mono_arm_emit_aotconst (gpointer ji
, guint8
*code
, guint8
*code_start
, int dreg
, guint32 patch_type
, gconstpointer data
)
747 return emit_aotconst_full (NULL
, (MonoJumpInfo
**)ji
, code
, code_start
, dreg
, patch_type
, data
);
751 mono_arch_have_fast_tls (void)
761 emit_tls_get (guint8
*code
, int dreg
, int tls_offset
)
763 arm_mrs (code
, dreg
, ARM_MRS_REG_TPIDR_EL0
);
764 if (tls_offset
< 256) {
765 arm_ldrx (code
, dreg
, dreg
, tls_offset
);
767 code
= emit_addx_imm (code
, dreg
, dreg
, tls_offset
);
768 arm_ldrx (code
, dreg
, dreg
, 0);
774 emit_tls_set (guint8
*code
, int sreg
, int tls_offset
)
776 int tmpreg
= ARMREG_IP0
;
778 g_assert (sreg
!= tmpreg
);
779 arm_mrs (code
, tmpreg
, ARM_MRS_REG_TPIDR_EL0
);
780 if (tls_offset
< 256) {
781 arm_strx (code
, sreg
, tmpreg
, tls_offset
);
783 code
= emit_addx_imm (code
, tmpreg
, tmpreg
, tls_offset
);
784 arm_strx (code
, sreg
, tmpreg
, 0);
792 * - ldrp [fp, lr], [sp], !stack_offfset
793 * Clobbers TEMP_REGS.
795 __attribute__ ((__warn_unused_result__
)) guint8
*
796 mono_arm_emit_destroy_frame (guint8
*code
, int stack_offset
, guint64 temp_regs
)
798 // At least one of these registers must be available, or both.
799 gboolean
const temp0
= (temp_regs
& (1 << ARMREG_IP0
)) != 0;
800 gboolean
const temp1
= (temp_regs
& (1 << ARMREG_IP1
)) != 0;
801 g_assert (temp0
|| temp1
);
802 int const temp
= temp0
? ARMREG_IP0
: ARMREG_IP1
;
804 arm_movspx (code
, ARMREG_SP
, ARMREG_FP
);
806 if (arm_is_ldpx_imm (stack_offset
)) {
807 arm_ldpx_post (code
, ARMREG_FP
, ARMREG_LR
, ARMREG_SP
, stack_offset
);
809 arm_ldpx (code
, ARMREG_FP
, ARMREG_LR
, ARMREG_SP
, 0);
810 /* sp += stack_offset */
811 if (temp0
&& temp1
) {
812 code
= emit_addx_sp_imm (code
, stack_offset
);
814 int imm
= stack_offset
;
816 /* Can't use addx_sp_imm () since we can't clobber both ip0/ip1 */
817 arm_addx_imm (code
, temp
, ARMREG_SP
, 0);
819 arm_addx_imm (code
, temp
, temp
, 256);
822 arm_addx_imm (code
, ARMREG_SP
, temp
, imm
);
828 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
831 emit_thunk (guint8
*code
, gconstpointer target
)
835 arm_ldrx_lit (code
, ARMREG_IP0
, code
+ 8);
836 arm_brx (code
, ARMREG_IP0
);
837 *(guint64
*)code
= (guint64
)target
;
838 code
+= sizeof (guint64
);
840 mono_arch_flush_icache (p
, code
- p
);
845 create_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
848 MonoThunkJitInfo
*info
;
852 guint8
*target_thunk
;
855 domain
= mono_domain_get ();
859 * This can be called multiple times during JITting,
860 * save the current position in cfg->arch to avoid
861 * doing a O(n^2) search.
863 if (!cfg
->arch
.thunks
) {
864 cfg
->arch
.thunks
= cfg
->thunks
;
865 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
867 thunks
= cfg
->arch
.thunks
;
868 thunks_size
= cfg
->arch
.thunks_size
;
870 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
871 g_assert_not_reached ();
874 g_assert (*(guint32
*)thunks
== 0);
875 emit_thunk (thunks
, target
);
877 cfg
->arch
.thunks
+= THUNK_SIZE
;
878 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
882 ji
= mini_jit_info_table_find (domain
, (char*)code
, NULL
);
884 info
= mono_jit_info_get_thunk_info (ji
);
887 thunks
= (guint8
*)ji
->code_start
+ info
->thunks_offset
;
888 thunks_size
= info
->thunks_size
;
890 orig_target
= mono_arch_get_call_target (code
+ 4);
892 mono_domain_lock (domain
);
895 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
896 /* The call already points to a thunk, because of trampolines etc. */
897 target_thunk
= orig_target
;
899 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
900 if (((guint32
*)p
) [0] == 0) {
904 } else if (((guint64
*)p
) [1] == (guint64
)target
) {
905 /* Thunk already points to target */
912 //printf ("THUNK: %p %p %p\n", code, target, target_thunk);
915 mono_domain_unlock (domain
);
916 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
917 g_assert_not_reached ();
920 emit_thunk (target_thunk
, target
);
922 mono_domain_unlock (domain
);
929 arm_patch_full (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, guint8
*target
, int relocation
)
931 switch (relocation
) {
933 if (arm_is_bl_disp (code
, target
)) {
934 arm_b (code
, target
);
938 thunk
= create_thunk (cfg
, domain
, code
, target
);
939 g_assert (arm_is_bl_disp (code
, thunk
));
943 case MONO_R_ARM64_BCC
: {
946 cond
= arm_get_bcc_cond (code
);
947 arm_bcc (code
, cond
, target
);
950 case MONO_R_ARM64_CBZ
:
951 arm_set_cbz_target (code
, target
);
953 case MONO_R_ARM64_IMM
: {
954 guint64 imm
= (guint64
)target
;
957 /* emit_imm64_template () */
958 dreg
= arm_get_movzx_rd (code
);
959 arm_movzx (code
, dreg
, imm
& 0xffff, 0);
960 arm_movkx (code
, dreg
, (imm
>> 16) & 0xffff, 16);
961 arm_movkx (code
, dreg
, (imm
>> 32) & 0xffff, 32);
962 arm_movkx (code
, dreg
, (imm
>> 48) & 0xffff, 48);
965 case MONO_R_ARM64_BL
:
966 if (arm_is_bl_disp (code
, target
)) {
967 arm_bl (code
, target
);
971 thunk
= create_thunk (cfg
, domain
, code
, target
);
972 g_assert (arm_is_bl_disp (code
, thunk
));
973 arm_bl (code
, thunk
);
977 g_assert_not_reached ();
982 arm_patch_rel (guint8
*code
, guint8
*target
, int relocation
)
984 arm_patch_full (NULL
, NULL
, code
, target
, relocation
);
988 mono_arm_patch (guint8
*code
, guint8
*target
, int relocation
)
990 arm_patch_rel (code
, target
, relocation
);
994 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
998 ip
= ji
->ip
.i
+ code
;
1001 case MONO_PATCH_INFO_METHOD_JUMP
:
1002 /* ji->relocation is not set by the caller */
1003 arm_patch_full (cfg
, domain
, ip
, (guint8
*)target
, MONO_R_ARM64_B
);
1006 arm_patch_full (cfg
, domain
, ip
, (guint8
*)target
, ji
->relocation
);
1008 case MONO_PATCH_INFO_NONE
:
1014 mono_arch_flush_register_windows (void)
1019 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
1021 return (MonoMethod
*)regs
[MONO_ARCH_RGCTX_REG
];
1025 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
1027 return (MonoVTable
*)regs
[MONO_ARCH_RGCTX_REG
];
1031 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
1033 return ctx
->regs
[reg
];
1037 mono_arch_context_set_int_reg (MonoContext
*ctx
, int reg
, host_mgreg_t val
)
1039 ctx
->regs
[reg
] = val
;
1043 * mono_arch_set_target:
1045 * Set the target architecture the JIT backend should generate code for, in the form
1046 * of a GNU target triplet. Only used in AOT mode.
1049 mono_arch_set_target (char *mtriple
)
1051 if (strstr (mtriple
, "darwin") || strstr (mtriple
, "ios")) {
1057 add_general (CallInfo
*cinfo
, ArgInfo
*ainfo
, int size
, gboolean sign
)
1059 if (cinfo
->gr
>= PARAM_REGS
) {
1060 ainfo
->storage
= ArgOnStack
;
1062 /* Assume size == align */
1064 /* Put arguments into 8 byte aligned stack slots */
1068 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, size
);
1069 ainfo
->offset
= cinfo
->stack_usage
;
1070 ainfo
->slot_size
= size
;
1072 cinfo
->stack_usage
+= size
;
1074 ainfo
->storage
= ArgInIReg
;
1075 ainfo
->reg
= cinfo
->gr
;
1081 add_fp (CallInfo
*cinfo
, ArgInfo
*ainfo
, gboolean single
)
1083 int size
= single
? 4 : 8;
1085 if (cinfo
->fr
>= FP_PARAM_REGS
) {
1086 ainfo
->storage
= single
? ArgOnStackR4
: ArgOnStackR8
;
1088 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, size
);
1089 ainfo
->offset
= cinfo
->stack_usage
;
1090 ainfo
->slot_size
= size
;
1091 cinfo
->stack_usage
+= size
;
1093 ainfo
->offset
= cinfo
->stack_usage
;
1094 ainfo
->slot_size
= 8;
1095 /* Put arguments into 8 byte aligned stack slots */
1096 cinfo
->stack_usage
+= 8;
1100 ainfo
->storage
= ArgInFRegR4
;
1102 ainfo
->storage
= ArgInFReg
;
1103 ainfo
->reg
= cinfo
->fr
;
1109 is_hfa (MonoType
*t
, int *out_nfields
, int *out_esize
, int *field_offsets
)
1113 MonoClassField
*field
;
1114 MonoType
*ftype
, *prev_ftype
= NULL
;
1117 klass
= mono_class_from_mono_type_internal (t
);
1119 while ((field
= mono_class_get_fields_internal (klass
, &iter
))) {
1120 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
1122 ftype
= mono_field_get_type_internal (field
);
1123 ftype
= mini_get_underlying_type (ftype
);
1125 if (MONO_TYPE_ISSTRUCT (ftype
)) {
1126 int nested_nfields
, nested_esize
;
1127 int nested_field_offsets
[16];
1129 if (!is_hfa (ftype
, &nested_nfields
, &nested_esize
, nested_field_offsets
))
1131 if (nested_esize
== 4)
1132 ftype
= m_class_get_byval_arg (mono_defaults
.single_class
);
1134 ftype
= m_class_get_byval_arg (mono_defaults
.double_class
);
1135 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1138 for (i
= 0; i
< nested_nfields
; ++i
) {
1139 if (nfields
+ i
< 4)
1140 field_offsets
[nfields
+ i
] = field
->offset
- MONO_ABI_SIZEOF (MonoObject
) + nested_field_offsets
[i
];
1142 nfields
+= nested_nfields
;
1144 if (!(!ftype
->byref
&& (ftype
->type
== MONO_TYPE_R4
|| ftype
->type
== MONO_TYPE_R8
)))
1146 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1150 field_offsets
[nfields
] = field
->offset
- MONO_ABI_SIZEOF (MonoObject
);
1154 if (nfields
== 0 || nfields
> 4)
1156 *out_nfields
= nfields
;
1157 *out_esize
= prev_ftype
->type
== MONO_TYPE_R4
? 4 : 8;
1162 add_valuetype (CallInfo
*cinfo
, ArgInfo
*ainfo
, MonoType
*t
)
1164 int i
, size
, align_size
, nregs
, nfields
, esize
;
1165 int field_offsets
[16];
1168 size
= mini_type_stack_size_full (t
, &align
, cinfo
->pinvoke
);
1169 align_size
= ALIGN_TO (size
, 8);
1171 nregs
= align_size
/ 8;
1172 if (is_hfa (t
, &nfields
, &esize
, field_offsets
)) {
1174 * The struct might include nested float structs aligned at 8,
1175 * so need to keep track of the offsets of the individual fields.
1177 if (cinfo
->fr
+ nfields
<= FP_PARAM_REGS
) {
1178 ainfo
->storage
= ArgHFA
;
1179 ainfo
->reg
= cinfo
->fr
;
1180 ainfo
->nregs
= nfields
;
1182 ainfo
->esize
= esize
;
1183 for (i
= 0; i
< nfields
; ++i
)
1184 ainfo
->foffsets
[i
] = field_offsets
[i
];
1185 cinfo
->fr
+= ainfo
->nregs
;
1187 ainfo
->nfregs_to_skip
= FP_PARAM_REGS
> cinfo
->fr
? FP_PARAM_REGS
- cinfo
->fr
: 0;
1188 cinfo
->fr
= FP_PARAM_REGS
;
1189 size
= ALIGN_TO (size
, 8);
1190 ainfo
->storage
= ArgVtypeOnStack
;
1191 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, align
);
1192 ainfo
->offset
= cinfo
->stack_usage
;
1195 ainfo
->nregs
= nfields
;
1196 ainfo
->esize
= esize
;
1197 cinfo
->stack_usage
+= size
;
1202 if (align_size
> 16) {
1203 ainfo
->storage
= ArgVtypeByRef
;
1208 if (cinfo
->gr
+ nregs
> PARAM_REGS
) {
1209 size
= ALIGN_TO (size
, 8);
1210 ainfo
->storage
= ArgVtypeOnStack
;
1211 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, align
);
1212 ainfo
->offset
= cinfo
->stack_usage
;
1214 cinfo
->stack_usage
+= size
;
1215 cinfo
->gr
= PARAM_REGS
;
1217 ainfo
->storage
= ArgVtypeInIRegs
;
1218 ainfo
->reg
= cinfo
->gr
;
1219 ainfo
->nregs
= nregs
;
1226 add_param (CallInfo
*cinfo
, ArgInfo
*ainfo
, MonoType
*t
)
1230 ptype
= mini_get_underlying_type (t
);
1231 switch (ptype
->type
) {
1233 add_general (cinfo
, ainfo
, 1, TRUE
);
1236 add_general (cinfo
, ainfo
, 1, FALSE
);
1239 add_general (cinfo
, ainfo
, 2, TRUE
);
1242 add_general (cinfo
, ainfo
, 2, FALSE
);
1245 add_general (cinfo
, ainfo
, 4, TRUE
);
1248 add_general (cinfo
, ainfo
, 4, FALSE
);
1253 case MONO_TYPE_FNPTR
:
1254 case MONO_TYPE_OBJECT
:
1257 add_general (cinfo
, ainfo
, 8, FALSE
);
1260 add_fp (cinfo
, ainfo
, FALSE
);
1263 add_fp (cinfo
, ainfo
, TRUE
);
1265 case MONO_TYPE_VALUETYPE
:
1266 case MONO_TYPE_TYPEDBYREF
:
1267 add_valuetype (cinfo
, ainfo
, ptype
);
1269 case MONO_TYPE_VOID
:
1270 ainfo
->storage
= ArgNone
;
1272 case MONO_TYPE_GENERICINST
:
1273 if (!mono_type_generic_inst_is_valuetype (ptype
)) {
1274 add_general (cinfo
, ainfo
, 8, FALSE
);
1275 } else if (mini_is_gsharedvt_variable_type (ptype
)) {
1277 * Treat gsharedvt arguments as large vtypes
1279 ainfo
->storage
= ArgVtypeByRef
;
1280 ainfo
->gsharedvt
= TRUE
;
1282 add_valuetype (cinfo
, ainfo
, ptype
);
1286 case MONO_TYPE_MVAR
:
1287 g_assert (mini_is_gsharedvt_type (ptype
));
1288 ainfo
->storage
= ArgVtypeByRef
;
1289 ainfo
->gsharedvt
= TRUE
;
1292 g_assert_not_reached ();
1300 * Obtain information about a call according to the calling convention.
1303 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
1307 int n
, pstart
, pindex
;
1309 n
= sig
->hasthis
+ sig
->param_count
;
1312 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1314 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1317 cinfo
->pinvoke
= sig
->pinvoke
;
1320 add_param (cinfo
, &cinfo
->ret
, sig
->ret
);
1321 if (cinfo
->ret
.storage
== ArgVtypeByRef
)
1322 cinfo
->ret
.reg
= ARMREG_R8
;
1326 cinfo
->stack_usage
= 0;
1330 add_general (cinfo
, cinfo
->args
+ 0, 8, FALSE
);
1332 for (pindex
= pstart
; pindex
< sig
->param_count
; ++pindex
) {
1333 ainfo
= cinfo
->args
+ sig
->hasthis
+ pindex
;
1335 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (pindex
== sig
->sentinelpos
)) {
1336 /* Prevent implicit arguments and sig_cookie from
1337 being passed in registers */
1338 cinfo
->gr
= PARAM_REGS
;
1339 cinfo
->fr
= FP_PARAM_REGS
;
1340 /* Emit the signature cookie just before the implicit arguments */
1341 add_param (cinfo
, &cinfo
->sig_cookie
, mono_get_int_type ());
1344 add_param (cinfo
, ainfo
, sig
->params
[pindex
]);
1345 if (ainfo
->storage
== ArgVtypeByRef
) {
1346 /* Pass the argument address in the next register */
1347 if (cinfo
->gr
>= PARAM_REGS
) {
1348 ainfo
->storage
= ArgVtypeByRefOnStack
;
1349 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, 8);
1350 ainfo
->offset
= cinfo
->stack_usage
;
1351 cinfo
->stack_usage
+= 8;
1353 ainfo
->reg
= cinfo
->gr
;
1359 /* Handle the case where there are no implicit arguments */
1360 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (pindex
== sig
->sentinelpos
)) {
1361 /* Prevent implicit arguments and sig_cookie from
1362 being passed in registers */
1363 cinfo
->gr
= PARAM_REGS
;
1364 cinfo
->fr
= FP_PARAM_REGS
;
1365 /* Emit the signature cookie just before the implicit arguments */
1366 add_param (cinfo
, &cinfo
->sig_cookie
, mono_get_int_type ());
1369 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, MONO_ARCH_FRAME_ALIGNMENT
);
1375 arg_need_temp (ArgInfo
*ainfo
)
1377 if (ainfo
->storage
== ArgHFA
&& ainfo
->esize
== 4)
1383 arg_get_storage (CallContext
*ccontext
, ArgInfo
*ainfo
)
1385 switch (ainfo
->storage
) {
1386 case ArgVtypeInIRegs
:
1388 return &ccontext
->gregs
[ainfo
->reg
];
1392 return &ccontext
->fregs
[ainfo
->reg
];
1396 case ArgVtypeOnStack
:
1397 return ccontext
->stack
+ ainfo
->offset
;
1399 return (gpointer
) ccontext
->gregs
[ainfo
->reg
];
1401 g_error ("Arg storage type not yet supported");
1406 arg_get_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer dest
)
1408 g_assert (arg_need_temp (ainfo
));
1410 float *dest_float
= (float*)dest
;
1411 for (int k
= 0; k
< ainfo
->nregs
; k
++) {
1412 *dest_float
= *(float*)&ccontext
->fregs
[ainfo
->reg
+ k
];
1418 arg_set_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer src
)
1420 g_assert (arg_need_temp (ainfo
));
1422 float *src_float
= (float*)src
;
1423 for (int k
= 0; k
< ainfo
->nregs
; k
++) {
1424 *(float*)&ccontext
->fregs
[ainfo
->reg
+ k
] = *src_float
;
1429 /* Set arguments in the ccontext (for i2n entry) */
1431 mono_arch_set_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1433 const MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1434 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1438 memset (ccontext
, 0, sizeof (CallContext
));
1440 ccontext
->stack_size
= ALIGN_TO (cinfo
->stack_usage
, MONO_ARCH_FRAME_ALIGNMENT
);
1441 if (ccontext
->stack_size
)
1442 ccontext
->stack
= (guint8
*)g_calloc (1, ccontext
->stack_size
);
1444 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1445 ainfo
= &cinfo
->ret
;
1446 if (ainfo
->storage
== ArgVtypeByRef
) {
1447 storage
= interp_cb
->frame_arg_to_storage ((MonoInterpFrameHandle
)frame
, sig
, -1);
1448 ccontext
->gregs
[cinfo
->ret
.reg
] = (gsize
)storage
;
1452 g_assert (!sig
->hasthis
);
1454 for (int i
= 0; i
< sig
->param_count
; i
++) {
1455 ainfo
= &cinfo
->args
[i
];
1457 if (ainfo
->storage
== ArgVtypeByRef
) {
1458 ccontext
->gregs
[ainfo
->reg
] = (host_mgreg_t
)interp_cb
->frame_arg_to_storage ((MonoInterpFrameHandle
)frame
, sig
, i
);
1462 int temp_size
= arg_need_temp (ainfo
);
1465 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1467 storage
= arg_get_storage (ccontext
, ainfo
);
1469 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1471 arg_set_val (ccontext
, ainfo
, storage
);
1477 /* Set return value in the ccontext (for n2i return) */
1479 mono_arch_set_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1481 const MonoEECallbacks
*interp_cb
;
1486 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1489 interp_cb
= mini_get_interp_callbacks ();
1490 cinfo
= get_call_info (NULL
, sig
);
1491 ainfo
= &cinfo
->ret
;
1493 if (ainfo
->storage
!= ArgVtypeByRef
) {
1494 int temp_size
= arg_need_temp (ainfo
);
1497 storage
= alloca (temp_size
);
1499 storage
= arg_get_storage (ccontext
, ainfo
);
1500 memset (ccontext
, 0, sizeof (CallContext
)); // FIXME
1501 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1503 arg_set_val (ccontext
, ainfo
, storage
);
1509 /* Gets the arguments from ccontext (for n2i entry) */
1511 mono_arch_get_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1513 const MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1514 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1518 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1519 ainfo
= &cinfo
->ret
;
1520 if (ainfo
->storage
== ArgVtypeByRef
) {
1521 storage
= (gpointer
) ccontext
->gregs
[cinfo
->ret
.reg
];
1522 interp_cb
->frame_arg_set_storage ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1526 for (int i
= 0; i
< sig
->param_count
+ sig
->hasthis
; i
++) {
1527 ainfo
= &cinfo
->args
[i
];
1528 int temp_size
= arg_need_temp (ainfo
);
1531 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1532 arg_get_val (ccontext
, ainfo
, storage
);
1534 storage
= arg_get_storage (ccontext
, ainfo
);
1536 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1542 /* Gets the return value from ccontext (for i2n exit) */
1544 mono_arch_get_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1546 const MonoEECallbacks
*interp_cb
;
1551 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1554 interp_cb
= mini_get_interp_callbacks ();
1555 cinfo
= get_call_info (NULL
, sig
);
1556 ainfo
= &cinfo
->ret
;
1558 if (ainfo
->storage
!= ArgVtypeByRef
) {
1559 int temp_size
= arg_need_temp (ainfo
);
1562 storage
= alloca (temp_size
);
1563 arg_get_val (ccontext
, ainfo
, storage
);
1565 storage
= arg_get_storage (ccontext
, ainfo
);
1567 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1574 MonoMethodSignature
*sig
;
1577 MonoType
**param_types
;
1578 int n_fpargs
, n_fpret
, nullable_area
;
1582 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
1586 // FIXME: Add more cases
1587 switch (cinfo
->ret
.storage
) {
1594 case ArgVtypeInIRegs
:
1595 if (cinfo
->ret
.nregs
> 2)
1604 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
1605 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1607 switch (ainfo
->storage
) {
1609 case ArgVtypeInIRegs
:
1614 case ArgVtypeByRefOnStack
:
1616 case ArgVtypeOnStack
:
1627 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
1629 ArchDynCallInfo
*info
;
1633 cinfo
= get_call_info (NULL
, sig
);
1635 if (!dyn_call_supported (cinfo
, sig
)) {
1640 info
= g_new0 (ArchDynCallInfo
, 1);
1641 // FIXME: Preprocess the info to speed up start_dyn_call ()
1643 info
->cinfo
= cinfo
;
1644 info
->rtype
= mini_get_underlying_type (sig
->ret
);
1645 info
->param_types
= g_new0 (MonoType
*, sig
->param_count
);
1646 for (i
= 0; i
< sig
->param_count
; ++i
)
1647 info
->param_types
[i
] = mini_get_underlying_type (sig
->params
[i
]);
1649 switch (cinfo
->ret
.storage
) {
1655 info
->n_fpret
= cinfo
->ret
.nregs
;
1661 for (aindex
= 0; aindex
< sig
->param_count
; aindex
++) {
1662 MonoType
*t
= info
->param_types
[aindex
];
1668 case MONO_TYPE_GENERICINST
:
1669 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type_internal (t
))) {
1670 MonoClass
*klass
= mono_class_from_mono_type_internal (t
);
1673 /* Nullables need a temporary buffer, its stored at the end of DynCallArgs.regs after the stack args */
1674 size
= mono_class_value_size (klass
, NULL
);
1675 info
->nullable_area
+= size
;
1683 return (MonoDynCallInfo
*)info
;
1687 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
1689 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1691 g_free (ainfo
->cinfo
);
1692 g_free (ainfo
->param_types
);
1697 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo
*info
)
1699 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1701 g_assert (ainfo
->cinfo
->stack_usage
% MONO_ARCH_FRAME_ALIGNMENT
== 0);
1702 return sizeof (DynCallArgs
) + ainfo
->cinfo
->stack_usage
+ ainfo
->nullable_area
;
1706 bitcast_r4_to_r8 (float f
)
1714 bitcast_r8_to_r4 (double f
)
1722 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
)
1724 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
1725 DynCallArgs
*p
= (DynCallArgs
*)buf
;
1726 int aindex
, arg_index
, greg
, i
, pindex
;
1727 MonoMethodSignature
*sig
= dinfo
->sig
;
1728 CallInfo
*cinfo
= dinfo
->cinfo
;
1729 int buffer_offset
= 0;
1730 guint8
*nullable_buffer
;
1734 p
->n_fpargs
= dinfo
->n_fpargs
;
1735 p
->n_fpret
= dinfo
->n_fpret
;
1736 p
->n_stackargs
= cinfo
->stack_usage
/ sizeof (host_mgreg_t
);
1742 /* Stored after the stack arguments */
1743 nullable_buffer
= (guint8
*)&(p
->regs
[PARAM_REGS
+ 1 + (cinfo
->stack_usage
/ sizeof (host_mgreg_t
))]);
1746 p
->regs
[greg
++] = (host_mgreg_t
)*(args
[arg_index
++]);
1748 if (cinfo
->ret
.storage
== ArgVtypeByRef
)
1749 p
->regs
[ARMREG_R8
] = (host_mgreg_t
)ret
;
1751 for (aindex
= pindex
; aindex
< sig
->param_count
; aindex
++) {
1752 MonoType
*t
= dinfo
->param_types
[aindex
];
1753 gpointer
*arg
= args
[arg_index
++];
1754 ArgInfo
*ainfo
= &cinfo
->args
[aindex
+ sig
->hasthis
];
1757 if (ainfo
->storage
== ArgOnStack
|| ainfo
->storage
== ArgVtypeOnStack
|| ainfo
->storage
== ArgVtypeByRefOnStack
) {
1758 slot
= PARAM_REGS
+ 1 + (ainfo
->offset
/ sizeof (host_mgreg_t
));
1764 p
->regs
[slot
] = (host_mgreg_t
)*arg
;
1768 if (ios_abi
&& ainfo
->storage
== ArgOnStack
) {
1769 guint8
*stack_arg
= (guint8
*)&(p
->regs
[PARAM_REGS
+ 1]) + ainfo
->offset
;
1770 gboolean handled
= TRUE
;
1772 /* Special case arguments smaller than 1 machine word */
1775 *(guint8
*)stack_arg
= *(guint8
*)arg
;
1778 *(gint8
*)stack_arg
= *(gint8
*)arg
;
1781 *(guint16
*)stack_arg
= *(guint16
*)arg
;
1784 *(gint16
*)stack_arg
= *(gint16
*)arg
;
1787 *(gint32
*)stack_arg
= *(gint32
*)arg
;
1790 *(guint32
*)stack_arg
= *(guint32
*)arg
;
1801 case MONO_TYPE_OBJECT
:
1807 p
->regs
[slot
] = (host_mgreg_t
)*arg
;
1810 p
->regs
[slot
] = *(guint8
*)arg
;
1813 p
->regs
[slot
] = *(gint8
*)arg
;
1816 p
->regs
[slot
] = *(gint16
*)arg
;
1819 p
->regs
[slot
] = *(guint16
*)arg
;
1822 p
->regs
[slot
] = *(gint32
*)arg
;
1825 p
->regs
[slot
] = *(guint32
*)arg
;
1828 p
->fpregs
[ainfo
->reg
] = bitcast_r4_to_r8 (*(float*)arg
);
1832 p
->fpregs
[ainfo
->reg
] = *(double*)arg
;
1835 case MONO_TYPE_GENERICINST
:
1836 if (MONO_TYPE_IS_REFERENCE (t
)) {
1837 p
->regs
[slot
] = (host_mgreg_t
)*arg
;
1840 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type_internal (t
))) {
1841 MonoClass
*klass
= mono_class_from_mono_type_internal (t
);
1842 guint8
*nullable_buf
;
1846 * Use p->buffer as a temporary buffer since the data needs to be available after this call
1847 * if the nullable param is passed by ref.
1849 size
= mono_class_value_size (klass
, NULL
);
1850 nullable_buf
= nullable_buffer
+ buffer_offset
;
1851 buffer_offset
+= size
;
1852 g_assert (buffer_offset
<= dinfo
->nullable_area
);
1854 /* The argument pointed to by arg is either a boxed vtype or null */
1855 mono_nullable_init (nullable_buf
, (MonoObject
*)arg
, klass
);
1857 arg
= (gpointer
*)nullable_buf
;
1863 case MONO_TYPE_VALUETYPE
:
1864 switch (ainfo
->storage
) {
1865 case ArgVtypeInIRegs
:
1866 for (i
= 0; i
< ainfo
->nregs
; ++i
)
1867 p
->regs
[slot
++] = ((host_mgreg_t
*)arg
) [i
];
1870 if (ainfo
->esize
== 4) {
1871 for (i
= 0; i
< ainfo
->nregs
; ++i
)
1872 p
->fpregs
[ainfo
->reg
+ i
] = bitcast_r4_to_r8 (((float*)arg
) [ainfo
->foffsets
[i
] / 4]);
1874 for (i
= 0; i
< ainfo
->nregs
; ++i
)
1875 p
->fpregs
[ainfo
->reg
+ i
] = ((double*)arg
) [ainfo
->foffsets
[i
] / 8];
1877 p
->n_fpargs
+= ainfo
->nregs
;
1880 case ArgVtypeByRefOnStack
:
1881 p
->regs
[slot
] = (host_mgreg_t
)arg
;
1883 case ArgVtypeOnStack
:
1884 for (i
= 0; i
< ainfo
->size
/ 8; ++i
)
1885 p
->regs
[slot
++] = ((host_mgreg_t
*)arg
) [i
];
1888 g_assert_not_reached ();
1893 g_assert_not_reached ();
1899 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
1901 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1902 CallInfo
*cinfo
= ainfo
->cinfo
;
1903 DynCallArgs
*args
= (DynCallArgs
*)buf
;
1904 MonoType
*ptype
= ainfo
->rtype
;
1905 guint8
*ret
= args
->ret
;
1906 host_mgreg_t res
= args
->res
;
1907 host_mgreg_t res2
= args
->res2
;
1910 if (cinfo
->ret
.storage
== ArgVtypeByRef
)
1913 switch (ptype
->type
) {
1914 case MONO_TYPE_VOID
:
1915 *(gpointer
*)ret
= NULL
;
1917 case MONO_TYPE_OBJECT
:
1921 *(gpointer
*)ret
= (gpointer
)res
;
1927 *(guint8
*)ret
= res
;
1930 *(gint16
*)ret
= res
;
1933 *(guint16
*)ret
= res
;
1936 *(gint32
*)ret
= res
;
1939 *(guint32
*)ret
= res
;
1943 *(guint64
*)ret
= res
;
1946 *(float*)ret
= bitcast_r8_to_r4 (args
->fpregs
[0]);
1949 *(double*)ret
= args
->fpregs
[0];
1951 case MONO_TYPE_GENERICINST
:
1952 if (MONO_TYPE_IS_REFERENCE (ptype
)) {
1953 *(gpointer
*)ret
= (gpointer
)res
;
1958 case MONO_TYPE_VALUETYPE
:
1959 switch (ainfo
->cinfo
->ret
.storage
) {
1960 case ArgVtypeInIRegs
:
1961 *(host_mgreg_t
*)ret
= res
;
1962 if (ainfo
->cinfo
->ret
.nregs
> 1)
1963 ((host_mgreg_t
*)ret
) [1] = res2
;
1966 /* Use the same area for returning fp values */
1967 if (cinfo
->ret
.esize
== 4) {
1968 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
)
1969 ((float*)ret
) [cinfo
->ret
.foffsets
[i
] / 4] = bitcast_r8_to_r4 (args
->fpregs
[i
]);
1971 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
)
1972 ((double*)ret
) [cinfo
->ret
.foffsets
[i
] / 8] = args
->fpregs
[i
];
1976 g_assert_not_reached ();
1981 g_assert_not_reached ();
1987 void sys_icache_invalidate (void *start
, size_t len
);
1992 mono_arch_flush_icache (guint8
*code
, gint size
)
1994 #ifndef MONO_CROSS_COMPILE
1996 sys_icache_invalidate (code
, size
);
1998 /* Don't rely on GCC's __clear_cache implementation, as it caches
1999 * icache/dcache cache line sizes, that can vary between cores on
2000 * big.LITTLE architectures. */
2001 guint64 end
= (guint64
) (code
+ size
);
2003 /* always go with cacheline size of 4 bytes as this code isn't perf critical
2004 * anyway. Reading the cache line size from a machine register can be racy
2005 * on a big.LITTLE architecture if the cores don't have the same cache line
2007 const size_t icache_line_size
= 4;
2008 const size_t dcache_line_size
= 4;
2010 addr
= (guint64
) code
& ~(guint64
) (dcache_line_size
- 1);
2011 for (; addr
< end
; addr
+= dcache_line_size
)
2012 asm volatile("dc civac, %0" : : "r" (addr
) : "memory");
2013 asm volatile("dsb ish" : : : "memory");
2015 addr
= (guint64
) code
& ~(guint64
) (icache_line_size
- 1);
2016 for (; addr
< end
; addr
+= icache_line_size
)
2017 asm volatile("ic ivau, %0" : : "r" (addr
) : "memory");
2019 asm volatile ("dsb ish" : : : "memory");
2020 asm volatile ("isb" : : : "memory");
2028 mono_arch_opcode_needs_emulation (MonoCompile
*cfg
, int opcode
)
2035 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
2040 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
2041 MonoInst
*ins
= cfg
->varinfo
[i
];
2042 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
2045 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
2048 if ((ins
->flags
& (MONO_INST_IS_DEAD
|MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) ||
2049 (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
2052 if (mono_is_regsize_var (ins
->inst_vtype
)) {
2053 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
2054 g_assert (i
== vmv
->idx
);
2055 vars
= g_list_prepend (vars
, vmv
);
2059 vars
= mono_varlist_sort (cfg
, vars
, 0);
2065 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
2070 /* r28 is reserved for cfg->arch.args_reg */
2071 /* r27 is reserved for the imt argument */
2072 for (i
= ARMREG_R19
; i
<= ARMREG_R26
; ++i
)
2073 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (i
));
2079 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
2081 MonoInst
*ins
= cfg
->varinfo
[vmv
->idx
];
2083 if (ins
->opcode
== OP_ARG
)
2090 mono_arch_create_vars (MonoCompile
*cfg
)
2092 MonoMethodSignature
*sig
;
2095 sig
= mono_method_signature_internal (cfg
->method
);
2096 if (!cfg
->arch
.cinfo
)
2097 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2098 cinfo
= cfg
->arch
.cinfo
;
2100 if (cinfo
->ret
.storage
== ArgVtypeByRef
) {
2101 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2102 cfg
->vret_addr
->flags
|= MONO_INST_VOLATILE
;
2105 if (cfg
->gen_sdb_seq_points
) {
2108 if (cfg
->compile_aot
) {
2109 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2110 ins
->flags
|= MONO_INST_VOLATILE
;
2111 cfg
->arch
.seq_point_info_var
= ins
;
2114 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2115 ins
->flags
|= MONO_INST_VOLATILE
;
2116 cfg
->arch
.ss_tramp_var
= ins
;
2118 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2119 ins
->flags
|= MONO_INST_VOLATILE
;
2120 cfg
->arch
.bp_tramp_var
= ins
;
2123 if (cfg
->method
->save_lmf
) {
2124 cfg
->create_lmf_var
= TRUE
;
2130 mono_arch_allocate_vars (MonoCompile
*cfg
)
2132 MonoMethodSignature
*sig
;
2136 int i
, offset
, size
, align
;
2137 guint32 locals_stack_size
, locals_stack_align
;
2141 * Allocate arguments and locals to either register (OP_REGVAR) or to a stack slot (OP_REGOFFSET).
2142 * Compute cfg->stack_offset and update cfg->used_int_regs.
2145 sig
= mono_method_signature_internal (cfg
->method
);
2147 if (!cfg
->arch
.cinfo
)
2148 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2149 cinfo
= cfg
->arch
.cinfo
;
2152 * The ARM64 ABI always uses a frame pointer.
2153 * The instruction set prefers positive offsets, so fp points to the bottom of the
2154 * frame, and stack slots are at positive offsets.
2155 * If some arguments are received on the stack, their offsets relative to fp can
2156 * not be computed right now because the stack frame might grow due to spilling
2157 * done by the local register allocator. To solve this, we reserve a register
2158 * which points to them.
2159 * The stack frame looks like this:
2160 * args_reg -> <bottom of parent frame>
2162 * fp -> <saved fp+lr>
2163 * sp -> <localloc/params area>
2165 cfg
->frame_reg
= ARMREG_FP
;
2166 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
2172 if (cinfo
->stack_usage
) {
2173 g_assert (!(cfg
->used_int_regs
& (1 << ARMREG_R28
)));
2174 cfg
->arch
.args_reg
= ARMREG_R28
;
2175 cfg
->used_int_regs
|= 1 << ARMREG_R28
;
2178 if (cfg
->method
->save_lmf
) {
2179 /* The LMF var is allocated normally */
2181 /* Callee saved regs */
2182 cfg
->arch
.saved_gregs_offset
= offset
;
2183 for (i
= 0; i
< 32; ++i
)
2184 if ((MONO_ARCH_CALLEE_SAVED_REGS
& (1 << i
)) && (cfg
->used_int_regs
& (1 << i
)))
2189 switch (cinfo
->ret
.storage
) {
2195 cfg
->ret
->opcode
= OP_REGVAR
;
2196 cfg
->ret
->dreg
= cinfo
->ret
.reg
;
2198 case ArgVtypeInIRegs
:
2200 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
2201 cfg
->ret
->opcode
= OP_REGOFFSET
;
2202 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
2203 cfg
->ret
->inst_offset
= offset
;
2204 if (cinfo
->ret
.storage
== ArgHFA
)
2211 /* This variable will be initalized in the prolog from R8 */
2212 cfg
->vret_addr
->opcode
= OP_REGOFFSET
;
2213 cfg
->vret_addr
->inst_basereg
= cfg
->frame_reg
;
2214 cfg
->vret_addr
->inst_offset
= offset
;
2216 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2217 printf ("vret_addr =");
2218 mono_print_ins (cfg
->vret_addr
);
2222 g_assert_not_reached ();
2227 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2228 ainfo
= cinfo
->args
+ i
;
2230 ins
= cfg
->args
[i
];
2231 if (ins
->opcode
== OP_REGVAR
)
2234 ins
->opcode
= OP_REGOFFSET
;
2235 ins
->inst_basereg
= cfg
->frame_reg
;
2237 switch (ainfo
->storage
) {
2241 // FIXME: Use nregs/size
2242 /* These will be copied to the stack in the prolog */
2243 ins
->inst_offset
= offset
;
2249 case ArgVtypeOnStack
:
2250 /* These are in the parent frame */
2251 g_assert (cfg
->arch
.args_reg
);
2252 ins
->inst_basereg
= cfg
->arch
.args_reg
;
2253 ins
->inst_offset
= ainfo
->offset
;
2255 case ArgVtypeInIRegs
:
2257 ins
->opcode
= OP_REGOFFSET
;
2258 ins
->inst_basereg
= cfg
->frame_reg
;
2259 /* These arguments are saved to the stack in the prolog */
2260 ins
->inst_offset
= offset
;
2261 if (cfg
->verbose_level
>= 2)
2262 printf ("arg %d allocated to %s+0x%0x.\n", i
, mono_arch_regname (ins
->inst_basereg
), (int)ins
->inst_offset
);
2263 if (ainfo
->storage
== ArgHFA
)
2269 case ArgVtypeByRefOnStack
: {
2272 if (ainfo
->gsharedvt
) {
2273 ins
->opcode
= OP_REGOFFSET
;
2274 ins
->inst_basereg
= cfg
->arch
.args_reg
;
2275 ins
->inst_offset
= ainfo
->offset
;
2279 /* The vtype address is in the parent frame */
2280 g_assert (cfg
->arch
.args_reg
);
2281 MONO_INST_NEW (cfg
, vtaddr
, 0);
2282 vtaddr
->opcode
= OP_REGOFFSET
;
2283 vtaddr
->inst_basereg
= cfg
->arch
.args_reg
;
2284 vtaddr
->inst_offset
= ainfo
->offset
;
2286 /* Need an indirection */
2287 ins
->opcode
= OP_VTARG_ADDR
;
2288 ins
->inst_left
= vtaddr
;
2291 case ArgVtypeByRef
: {
2294 if (ainfo
->gsharedvt
) {
2295 ins
->opcode
= OP_REGOFFSET
;
2296 ins
->inst_basereg
= cfg
->frame_reg
;
2297 ins
->inst_offset
= offset
;
2302 /* The vtype address is in a register, will be copied to the stack in the prolog */
2303 MONO_INST_NEW (cfg
, vtaddr
, 0);
2304 vtaddr
->opcode
= OP_REGOFFSET
;
2305 vtaddr
->inst_basereg
= cfg
->frame_reg
;
2306 vtaddr
->inst_offset
= offset
;
2309 /* Need an indirection */
2310 ins
->opcode
= OP_VTARG_ADDR
;
2311 ins
->inst_left
= vtaddr
;
2315 g_assert_not_reached ();
2320 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
2321 // FIXME: Allocate these to registers
2322 ins
= cfg
->arch
.seq_point_info_var
;
2326 offset
+= align
- 1;
2327 offset
&= ~(align
- 1);
2328 ins
->opcode
= OP_REGOFFSET
;
2329 ins
->inst_basereg
= cfg
->frame_reg
;
2330 ins
->inst_offset
= offset
;
2333 ins
= cfg
->arch
.ss_tramp_var
;
2337 offset
+= align
- 1;
2338 offset
&= ~(align
- 1);
2339 ins
->opcode
= OP_REGOFFSET
;
2340 ins
->inst_basereg
= cfg
->frame_reg
;
2341 ins
->inst_offset
= offset
;
2344 ins
= cfg
->arch
.bp_tramp_var
;
2348 offset
+= align
- 1;
2349 offset
&= ~(align
- 1);
2350 ins
->opcode
= OP_REGOFFSET
;
2351 ins
->inst_basereg
= cfg
->frame_reg
;
2352 ins
->inst_offset
= offset
;
2357 offsets
= mono_allocate_stack_slots (cfg
, FALSE
, &locals_stack_size
, &locals_stack_align
);
2358 if (locals_stack_align
)
2359 offset
= ALIGN_TO (offset
, locals_stack_align
);
2361 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
2362 if (offsets
[i
] != -1) {
2363 ins
= cfg
->varinfo
[i
];
2364 ins
->opcode
= OP_REGOFFSET
;
2365 ins
->inst_basereg
= cfg
->frame_reg
;
2366 ins
->inst_offset
= offset
+ offsets
[i
];
2367 //printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
2370 offset
+= locals_stack_size
;
2372 offset
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
2374 cfg
->stack_offset
= offset
;
2379 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
2384 LLVMCallInfo
*linfo
;
2386 n
= sig
->param_count
+ sig
->hasthis
;
2388 cinfo
= get_call_info (cfg
->mempool
, sig
);
2390 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
2392 switch (cinfo
->ret
.storage
) {
2399 linfo
->ret
.storage
= LLVMArgVtypeByRef
;
2402 // FIXME: This doesn't work yet since the llvm backend represents these types as an i8
2403 // array which is returned in int regs
2406 linfo
->ret
.storage
= LLVMArgFpStruct
;
2407 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2408 linfo
->ret
.esize
= cinfo
->ret
.esize
;
2410 case ArgVtypeInIRegs
:
2411 /* LLVM models this by returning an int */
2412 linfo
->ret
.storage
= LLVMArgVtypeAsScalar
;
2413 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2414 linfo
->ret
.esize
= cinfo
->ret
.esize
;
2417 g_assert_not_reached ();
2421 for (i
= 0; i
< n
; ++i
) {
2422 LLVMArgInfo
*lainfo
= &linfo
->args
[i
];
2424 ainfo
= cinfo
->args
+ i
;
2426 lainfo
->storage
= LLVMArgNone
;
2428 switch (ainfo
->storage
) {
2435 lainfo
->storage
= LLVMArgNormal
;
2438 case ArgVtypeByRefOnStack
:
2439 lainfo
->storage
= LLVMArgVtypeByRef
;
2444 lainfo
->storage
= LLVMArgAsFpArgs
;
2445 lainfo
->nslots
= ainfo
->nregs
;
2446 lainfo
->esize
= ainfo
->esize
;
2447 for (j
= 0; j
< ainfo
->nregs
; ++j
)
2448 lainfo
->pair_storage
[j
] = LLVMArgInFPReg
;
2451 case ArgVtypeInIRegs
:
2452 lainfo
->storage
= LLVMArgAsIArgs
;
2453 lainfo
->nslots
= ainfo
->nregs
;
2455 case ArgVtypeOnStack
:
2459 lainfo
->storage
= LLVMArgAsFpArgs
;
2460 lainfo
->nslots
= ainfo
->nregs
;
2461 lainfo
->esize
= ainfo
->esize
;
2462 lainfo
->ndummy_fpargs
= ainfo
->nfregs_to_skip
;
2463 for (j
= 0; j
< ainfo
->nregs
; ++j
)
2464 lainfo
->pair_storage
[j
] = LLVMArgInFPReg
;
2466 lainfo
->storage
= LLVMArgAsIArgs
;
2467 lainfo
->nslots
= ainfo
->size
/ 8;
2471 g_assert_not_reached ();
2481 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*arg
)
2487 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2488 ins
->dreg
= mono_alloc_ireg_copy (cfg
, arg
->dreg
);
2489 ins
->sreg1
= arg
->dreg
;
2490 MONO_ADD_INS (cfg
->cbb
, ins
);
2491 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, FALSE
);
2494 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2495 ins
->dreg
= mono_alloc_freg (cfg
);
2496 ins
->sreg1
= arg
->dreg
;
2497 MONO_ADD_INS (cfg
->cbb
, ins
);
2498 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2501 if (COMPILE_LLVM (cfg
))
2502 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2504 MONO_INST_NEW (cfg
, ins
, OP_RMOVE
);
2506 MONO_INST_NEW (cfg
, ins
, OP_ARM_SETFREG_R4
);
2507 ins
->dreg
= mono_alloc_freg (cfg
);
2508 ins
->sreg1
= arg
->dreg
;
2509 MONO_ADD_INS (cfg
->cbb
, ins
);
2510 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2513 g_assert_not_reached ();
2519 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
2521 MonoMethodSignature
*tmp_sig
;
2524 if (MONO_IS_TAILCALL_OPCODE (call
))
2527 g_assert (cinfo
->sig_cookie
.storage
== ArgOnStack
);
2530 * mono_ArgIterator_Setup assumes the signature cookie is
2531 * passed first and all the arguments which were before it are
2532 * passed on the stack after the signature. So compensate by
2533 * passing a different signature.
2535 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
2536 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
2537 tmp_sig
->sentinelpos
= 0;
2538 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
2540 sig_reg
= mono_alloc_ireg (cfg
);
2541 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
2543 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_reg
);
2547 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
2549 MonoMethodSignature
*sig
;
2550 MonoInst
*arg
, *vtarg
;
2555 sig
= call
->signature
;
2557 cinfo
= get_call_info (cfg
->mempool
, sig
);
2559 switch (cinfo
->ret
.storage
) {
2560 case ArgVtypeInIRegs
:
2562 if (MONO_IS_TAILCALL_OPCODE (call
))
2565 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2566 * the location pointed to by it after call in emit_move_return_value ().
2568 if (!cfg
->arch
.vret_addr_loc
) {
2569 cfg
->arch
.vret_addr_loc
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2570 /* Prevent it from being register allocated or optimized away */
2571 cfg
->arch
.vret_addr_loc
->flags
|= MONO_INST_VOLATILE
;
2574 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->arch
.vret_addr_loc
->dreg
, call
->vret_var
->dreg
);
2577 /* Pass the vtype return address in R8 */
2578 g_assert (!MONO_IS_TAILCALL_OPCODE (call
) || call
->vret_var
== cfg
->vret_addr
);
2579 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2580 vtarg
->sreg1
= call
->vret_var
->dreg
;
2581 vtarg
->dreg
= mono_alloc_preg (cfg
);
2582 MONO_ADD_INS (cfg
->cbb
, vtarg
);
2584 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
2590 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
2591 ainfo
= cinfo
->args
+ i
;
2592 arg
= call
->args
[i
];
2594 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
2595 /* Emit the signature cookie just before the implicit arguments */
2596 emit_sig_cookie (cfg
, call
, cinfo
);
2599 switch (ainfo
->storage
) {
2603 add_outarg_reg (cfg
, call
, ainfo
->storage
, ainfo
->reg
, arg
);
2606 switch (ainfo
->slot_size
) {
2608 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2611 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2614 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2617 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2620 g_assert_not_reached ();
2625 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2628 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2630 case ArgVtypeInIRegs
:
2632 case ArgVtypeByRefOnStack
:
2633 case ArgVtypeOnStack
:
2639 size
= mono_class_value_size (arg
->klass
, &align
);
2641 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
2642 ins
->sreg1
= arg
->dreg
;
2643 ins
->klass
= arg
->klass
;
2644 ins
->backend
.size
= size
;
2645 ins
->inst_p0
= call
;
2646 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
2647 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
2648 MONO_ADD_INS (cfg
->cbb
, ins
);
2652 g_assert_not_reached ();
2657 /* Handle the case where there are no implicit arguments */
2658 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (cinfo
->nargs
== sig
->sentinelpos
))
2659 emit_sig_cookie (cfg
, call
, cinfo
);
2661 call
->call_info
= cinfo
;
2662 call
->stack_usage
= cinfo
->stack_usage
;
2666 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
2668 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
2669 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
2673 if (ins
->backend
.size
== 0 && !ainfo
->gsharedvt
)
2676 switch (ainfo
->storage
) {
2677 case ArgVtypeInIRegs
:
2678 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
2679 // FIXME: Smaller sizes
2680 MONO_INST_NEW (cfg
, load
, OP_LOADI8_MEMBASE
);
2681 load
->dreg
= mono_alloc_ireg (cfg
);
2682 load
->inst_basereg
= src
->dreg
;
2683 load
->inst_offset
= i
* sizeof (target_mgreg_t
);
2684 MONO_ADD_INS (cfg
->cbb
, load
);
2685 add_outarg_reg (cfg
, call
, ArgInIReg
, ainfo
->reg
+ i
, load
);
2689 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
2690 if (ainfo
->esize
== 4)
2691 MONO_INST_NEW (cfg
, load
, OP_LOADR4_MEMBASE
);
2693 MONO_INST_NEW (cfg
, load
, OP_LOADR8_MEMBASE
);
2694 load
->dreg
= mono_alloc_freg (cfg
);
2695 load
->inst_basereg
= src
->dreg
;
2696 load
->inst_offset
= ainfo
->foffsets
[i
];
2697 MONO_ADD_INS (cfg
->cbb
, load
);
2698 add_outarg_reg (cfg
, call
, ainfo
->esize
== 4 ? ArgInFRegR4
: ArgInFReg
, ainfo
->reg
+ i
, load
);
2702 case ArgVtypeByRefOnStack
: {
2703 MonoInst
*vtaddr
, *load
, *arg
;
2705 /* Pass the vtype address in a reg/on the stack */
2706 if (ainfo
->gsharedvt
) {
2709 /* Make a copy of the argument */
2710 vtaddr
= mono_compile_create_var (cfg
, m_class_get_byval_arg (ins
->klass
), OP_LOCAL
);
2712 MONO_INST_NEW (cfg
, load
, OP_LDADDR
);
2713 load
->inst_p0
= vtaddr
;
2714 vtaddr
->flags
|= MONO_INST_INDIRECT
;
2715 load
->type
= STACK_MP
;
2716 load
->klass
= vtaddr
->klass
;
2717 load
->dreg
= mono_alloc_ireg (cfg
);
2718 MONO_ADD_INS (cfg
->cbb
, load
);
2719 mini_emit_memcpy (cfg
, load
->dreg
, 0, src
->dreg
, 0, ainfo
->size
, 8);
2722 if (ainfo
->storage
== ArgVtypeByRef
) {
2723 MONO_INST_NEW (cfg
, arg
, OP_MOVE
);
2724 arg
->dreg
= mono_alloc_preg (cfg
);
2725 arg
->sreg1
= load
->dreg
;
2726 MONO_ADD_INS (cfg
->cbb
, arg
);
2727 add_outarg_reg (cfg
, call
, ArgInIReg
, ainfo
->reg
, arg
);
2729 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, load
->dreg
);
2733 case ArgVtypeOnStack
:
2734 for (i
= 0; i
< ainfo
->size
/ 8; ++i
) {
2735 MONO_INST_NEW (cfg
, load
, OP_LOADI8_MEMBASE
);
2736 load
->dreg
= mono_alloc_ireg (cfg
);
2737 load
->inst_basereg
= src
->dreg
;
2738 load
->inst_offset
= i
* 8;
2739 MONO_ADD_INS (cfg
->cbb
, load
);
2740 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
+ (i
* 8), load
->dreg
);
2744 g_assert_not_reached ();
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR that moves VAL into the location used for the method's return
 *   value, dispatching on cinfo->ret.storage.
 * NOTE(review): this extract is line-mangled and several original lines
 * (switch case labels, braces, 'break's) are missing from this view.
 */
2750 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
2752 MonoMethodSignature
*sig
;
/* Compute the call info for the method signature, caching it in cfg->arch */
2755 sig
= mono_method_signature_internal (cfg
->method
);
2756 if (!cfg
->arch
.cinfo
)
2757 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2758 cinfo
= cfg
->arch
.cinfo
;
2760 switch (cinfo
->ret
.storage
) {
/* Integer return value: plain register move into cfg->ret */
2764 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
/* Double-precision FP return value */
2767 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
/* Single-precision FP return: LLVM-compiled code uses FMOVE,
 * otherwise RMOVE / OP_ARM_SETFREG_R4 (case labels not visible here) */
2770 if (COMPILE_LLVM (cfg
))
2771 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
2773 MONO_EMIT_NEW_UNALU (cfg
, OP_RMOVE
, cfg
->ret
->dreg
, val
->dreg
);
2775 MONO_EMIT_NEW_UNALU (cfg
, OP_ARM_SETFREG_R4
, cfg
->ret
->dreg
, val
->dreg
);
/* Unhandled return-storage kind */
2778 g_assert_not_reached ();
/*
 * mono_arch_tailcall_supported:
 *
 *   Return whether a tail call from CALLER_SIG to CALLEE_SIG can be emitted
 *   by this backend. Rejects calls whose callee needs more stack than the
 *   caller, mismatched return storage, very large stack usage, and
 *   by-reference valuetype arguments (which point into the caller's frame).
 * NOTE(review): line-mangled extract; the trailing 'return res;' and closing
 * brace are not visible in this view.
 */
2786 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
2788 g_assert (caller_sig
);
2789 g_assert (callee_sig
);
/* Compute argument/return layouts for both signatures (heap-allocated,
 * freed below) */
2791 CallInfo
*caller_info
= get_call_info (NULL
, caller_sig
);
2792 CallInfo
*callee_info
= get_call_info (NULL
, callee_sig
);
/* Callee must fit in the caller's out-arg area and agree on return storage */
2794 gboolean res
= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
<= caller_info
->stack_usage
)
2795 && IS_SUPPORTED_TAILCALL (caller_info
->ret
.storage
== callee_info
->ret
.storage
);
2797 // FIXME Limit stack_usage to 1G. emit_ldrx / strx has 32bit limits.
2798 res
&= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
< (1 << 30));
2799 res
&= IS_SUPPORTED_TAILCALL (caller_info
->stack_usage
< (1 << 30));
2801 // valuetype parameters are the address of a local
2802 const ArgInfo
*ainfo
;
/* Skip the implicit 'this' argument when scanning the callee's parameters */
2803 ainfo
= callee_info
->args
+ callee_sig
->hasthis
;
2804 for (int i
= 0; res
&& i
< callee_sig
->param_count
; ++i
) {
/* By-ref vtype args would dangle once the caller's frame is reused */
2805 res
= IS_SUPPORTED_TAILCALL (ainfo
[i
].storage
!= ArgVtypeByRef
)
2806 && IS_SUPPORTED_TAILCALL (ainfo
[i
].storage
!= ArgVtypeByRefOnStack
);
2809 g_free (caller_info
);
2810 g_free (callee_info
);
/*
 * mono_arch_is_inst_imm:
 *
 *   Return whether IMM can be used as an instruction immediate: any value
 *   representable in a signed 32-bit integer is accepted.
 */
2818 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
2820 return (imm
>= -((gint64
)1<<31) && imm
<= (((gint64
)1<<31)-1));
/* Arch-specific peephole pass 1; empty on arm64 (body not visible in this
 * extract). */
2824 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
/* Arch-specific peephole pass 2; empty on arm64 (body not visible in this
 * extract). */
2830 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2835 #define ADD_NEW_INS(cfg,dest,op) do { \
2836 MONO_INST_NEW ((cfg), (dest), (op)); \
2837 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * mono_arch_lowering_pass:
 *
 *   Lower IR in BB to forms the arm64 backend can emit directly:
 *   fix overflow-check condition codes, decompose unsupported immediate
 *   ops, materialize large localloc immediates, fuse compare-with-zero +
 *   branch into cbz/cbnz, and swap operands of FP compares to handle
 *   unordered operands.
 * NOTE(review): line-mangled extract; many case labels, 'break's and
 * braces are missing from this view.
 */
2841 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2843 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
2845 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2846 switch (ins
->opcode
) {
/* Rewrite carry-based overflow checks that follow this instruction */
2851 if (ins
->next
&& (ins
->next
->opcode
== OP_COND_EXC_C
|| ins
->next
->opcode
== OP_COND_EXC_IC
))
2852 /* ARM sets the C flag to 1 if there was _no_ overflow */
2853 ins
->next
->opcode
= OP_COND_EXC_NC
;
/* Unsigned div/rem have no immediate form: decompose into non-imm op */
2857 case OP_IDIV_UN_IMM
:
2858 case OP_IREM_UN_IMM
:
2860 mono_decompose_op_imm (cfg
, bb
, ins
);
2862 case OP_LOCALLOC_IMM
:
/* Large localloc sizes: load the size into a register first */
2863 if (ins
->inst_imm
> 32) {
2864 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
2865 temp
->inst_c0
= ins
->inst_imm
;
2866 temp
->dreg
= mono_alloc_ireg (cfg
);
2867 ins
->sreg1
= temp
->dreg
;
2868 ins
->opcode
= mono_op_imm_to_op (ins
->opcode
);
/* Fuse 32-bit compare-with-0 + beq/bne into cbz/cbnz */
2871 case OP_ICOMPARE_IMM
:
2872 if (ins
->inst_imm
== 0 && ins
->next
&& ins
->next
->opcode
== OP_IBEQ
) {
2873 ins
->next
->opcode
= OP_ARM64_CBZW
;
2874 ins
->next
->sreg1
= ins
->sreg1
;
2876 } else if (ins
->inst_imm
== 0 && ins
->next
&& ins
->next
->opcode
== OP_IBNE_UN
) {
2877 ins
->next
->opcode
= OP_ARM64_CBNZW
;
2878 ins
->next
->sreg1
= ins
->sreg1
;
/* Same fusion for 64-bit compares */
2882 case OP_LCOMPARE_IMM
:
2883 case OP_COMPARE_IMM
:
2884 if (ins
->inst_imm
== 0 && ins
->next
&& ins
->next
->opcode
== OP_LBEQ
) {
2885 ins
->next
->opcode
= OP_ARM64_CBZX
;
2886 ins
->next
->sreg1
= ins
->sreg1
;
2888 } else if (ins
->inst_imm
== 0 && ins
->next
&& ins
->next
->opcode
== OP_LBNE_UN
) {
2889 ins
->next
->opcode
= OP_ARM64_CBNZX
;
2890 ins
->next
->sreg1
= ins
->sreg1
;
2896 gboolean swap
= FALSE
;
2900 /* Optimized away */
2906 * FP compares with unordered operands set the flags
2907 * to NZCV=0011, which matches some non-unordered compares
2908 * as well, like LE, so have to swap the operands.
/* Replace the following branch with its operand-swapped dual */
2910 switch (ins
->next
->opcode
) {
2912 ins
->next
->opcode
= OP_FBGT
;
2916 ins
->next
->opcode
= OP_FBGE
;
2920 ins
->next
->opcode
= OP_RBGT
;
2924 ins
->next
->opcode
= OP_RBGE
;
/* Swap the compare's source registers to match the rewritten branch
 * (the companion 'ins->sreg2 = ...' assignment is not visible here) */
2932 ins
->sreg1
= ins
->sreg2
;
2943 bb
->last_ins
= last_ins
;
2944 bb
->max_vreg
= cfg
->next_vreg
;
/* Decompose 64-bit opcodes; no-op on a 64-bit target (body not visible in
 * this extract). */
2948 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
/*
 * opcode_to_armcond:
 *
 *   Map a Mono conditional opcode (branches, set-cc, cond-exc variants)
 *   to the corresponding ARM condition code.
 * NOTE(review): line-mangled extract; the 'return ARMCOND_*' statements
 * and most non-exception case labels are missing from this view.
 */
2953 opcode_to_armcond (int opcode
)
2964 case OP_COND_EXC_IEQ
:
2965 case OP_COND_EXC_EQ
:
2982 case OP_COND_EXC_IGT
:
2983 case OP_COND_EXC_GT
:
2998 case OP_COND_EXC_ILT
:
2999 case OP_COND_EXC_LT
:
3007 case OP_COND_EXC_INE_UN
:
3008 case OP_COND_EXC_NE_UN
:
3014 case OP_COND_EXC_IGE_UN
:
3015 case OP_COND_EXC_GE_UN
:
3025 case OP_COND_EXC_IGT_UN
:
3026 case OP_COND_EXC_GT_UN
:
3032 case OP_COND_EXC_ILE_UN
:
3033 case OP_COND_EXC_LE_UN
:
3041 case OP_COND_EXC_ILT_UN
:
3042 case OP_COND_EXC_LT_UN
:
3045 * FCMP sets the NZCV condition bits as follows:
3050 * ARMCOND_LT is N!=V, so it matches unordered too, so
3051 * fclt and fclt_un need to be special cased.
/* Carry / overflow based exception checks */
3061 case OP_COND_EXC_IC
:
3063 case OP_COND_EXC_OV
:
3064 case OP_COND_EXC_IOV
:
3066 case OP_COND_EXC_NC
:
3067 case OP_COND_EXC_INC
:
3069 case OP_COND_EXC_NO
:
3070 case OP_COND_EXC_INO
:
/* Unknown opcode: report and abort */
3073 printf ("%s\n", mono_inst_name (opcode
));
3074 g_assert_not_reached ();
/*
 * emit_cond_exc:
 *
 *   Emit a conditional branch to the throw sequence for EXC_NAME, taken
 *   when the condition derived from OPCODE holds. Records a
 *   MONO_PATCH_INFO_EXC patch so the branch target is filled in later.
 */
3079 /* This clobbers LR */
3080 static __attribute__ ((__warn_unused_result__
)) guint8
*
3081 emit_cond_exc (MonoCompile
*cfg
, guint8
*code
, int opcode
, const char *exc_name
)
3085 cond
= opcode_to_armcond (opcode
);
/* Materialize the current pc in ip1 (used by the exception thrower) */
3087 arm_adrx (code
, ARMREG_IP1
, code
);
/* Register a patch at the bcc site; resolved to the exception trampoline */
3088 mono_add_patch_info_rel (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_EXC
, exc_name
, MONO_R_ARM64_BCC
);
3089 arm_bcc (code
, cond
, 0);
/*
 * emit_move_return_value:
 *
 *   After a call instruction INS, emit code moving the callee's return
 *   value from its ABI location (cinfo->ret) into the call's destination
 *   vreg, or — for vtypes returned in registers — store the registers to
 *   the caller-provided return buffer.
 * NOTE(review): line-mangled extract; case labels, 'break's and closing
 * braces are missing from this view.
 */
3094 emit_move_return_value (MonoCompile
*cfg
, guint8
* code
, MonoInst
*ins
)
3099 call
= (MonoCallInst
*)ins
;
3100 cinfo
= call
->call_info
;
3102 switch (cinfo
->ret
.storage
) {
/* Integer register return */
3106 /* LLVM compiled code might only set the bottom bits */
3107 if (call
->signature
&& mini_get_underlying_type (call
->signature
->ret
)->type
== MONO_TYPE_I4
)
3108 arm_sxtwx (code
, call
->inst
.dreg
, cinfo
->ret
.reg
);
3109 else if (call
->inst
.dreg
!= cinfo
->ret
.reg
)
3110 arm_movx (code
, call
->inst
.dreg
, cinfo
->ret
.reg
);
/* Double return in an FP register */
3113 if (call
->inst
.dreg
!= cinfo
->ret
.reg
)
3114 arm_fmovd (code
, call
->inst
.dreg
, cinfo
->ret
.reg
);
/* Float return: plain move, or widen r4 -> r8 (case labels not visible) */
3118 arm_fmovs (code
, call
->inst
.dreg
, cinfo
->ret
.reg
);
3120 arm_fcvt_sd (code
, call
->inst
.dreg
, cinfo
->ret
.reg
);
/* Vtype returned in integer registers: spill them to the return buffer */
3122 case ArgVtypeInIRegs
: {
3123 MonoInst
*loc
= cfg
->arch
.vret_addr_loc
;
3126 /* Load the destination address */
3127 g_assert (loc
&& loc
->opcode
== OP_REGOFFSET
);
3128 code
= emit_ldrx (code
, ARMREG_LR
, loc
->inst_basereg
, loc
->inst_offset
);
3129 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
)
3130 arm_strx (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, i
* 8);
/* HFA return in FP registers: store each field at its offset */
3134 MonoInst
*loc
= cfg
->arch
.vret_addr_loc
;
3137 /* Load the destination address */
3138 g_assert (loc
&& loc
->opcode
== OP_REGOFFSET
);
3139 code
= emit_ldrx (code
, ARMREG_LR
, loc
->inst_basereg
, loc
->inst_offset
);
3140 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
3141 if (cinfo
->ret
.esize
== 4)
3142 arm_strfpw (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, cinfo
->ret
.foffsets
[i
]);
3144 arm_strfpx (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, cinfo
->ret
.foffsets
[i
]);
/* Unhandled return-storage kind */
3151 g_assert_not_reached ();
/* NOTE(review): line-mangled extract; variable declarations, the island
 * size accumulation, loop 'break'/'continue's and the 'return code;' are
 * missing from this view. */
3158 * emit_branch_island:
3160 * Emit a branch island for the conditional branches from cfg->native_code + start_offset to code.
3163 emit_branch_island (MonoCompile
*cfg
, guint8
*code
, int start_offset
)
/* First pass: measure how much island space the pending bcc/cbz patches need */
3167 /* Iterate over the patch infos added so far by this bb */
3168 int island_size
= 0;
3169 for (ji
= cfg
->patch_info
; ji
; ji
= ji
->next
) {
3170 if (ji
->ip
.i
< start_offset
)
3171 /* The patch infos are in reverse order, so this means the end */
3173 if (ji
->relocation
== MONO_R_ARM64_BCC
|| ji
->relocation
== MONO_R_ARM64_CBZ
)
3178 code
= realloc_code (cfg
, island_size
);
3180 /* Branch over the island */
3181 arm_b (code
, code
+ 4 + island_size
);
/* Second pass: redirect each short-range conditional branch through an
 * unconditional branch placed in the island */
3183 for (ji
= cfg
->patch_info
; ji
; ji
= ji
->next
) {
3184 if (ji
->ip
.i
< start_offset
)
3186 if (ji
->relocation
== MONO_R_ARM64_BCC
|| ji
->relocation
== MONO_R_ARM64_CBZ
) {
3187 /* Rewrite the cond branch so it branches to an unconditional branch in the branch island */
3188 arm_patch_rel (cfg
->native_code
+ ji
->ip
.i
, code
, ji
->relocation
);
3189 /* Rewrite the patch so it points to the unconditional branch */
3190 ji
->ip
.i
= code
- cfg
->native_code
;
3191 ji
->relocation
= MONO_R_ARM64_B
;
3195 set_code_cursor (cfg
, code
);
3201 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3205 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
3206 int start_offset
, max_len
, dreg
, sreg1
, sreg2
;
3209 if (cfg
->verbose_level
> 2)
3210 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
3212 start_offset
= code
- cfg
->native_code
;
3213 g_assert (start_offset
<= cfg
->code_size
);
3215 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3216 guint offset
= code
- cfg
->native_code
;
3217 set_code_cursor (cfg
, code
);
3218 max_len
= ins_get_size (ins
->opcode
);
3219 code
= realloc_code (cfg
, max_len
);
3221 if (G_UNLIKELY (cfg
->arch
.cond_branch_islands
&& offset
- start_offset
> 4 * 0x1ffff)) {
3222 /* Emit a branch island for large basic blocks */
3223 code
= emit_branch_island (cfg
, code
, start_offset
);
3224 offset
= code
- cfg
->native_code
;
3225 start_offset
= offset
;
3228 mono_debug_record_line_number (cfg
, ins
, offset
);
3233 imm
= ins
->inst_imm
;
3235 switch (ins
->opcode
) {
3237 code
= emit_imm (code
, dreg
, ins
->inst_c0
);
3240 code
= emit_imm64 (code
, dreg
, ins
->inst_c0
);
3244 arm_movx (code
, dreg
, sreg1
);
3247 case OP_RELAXED_NOP
:
3250 mono_add_patch_info_rel (cfg
, offset
, (MonoJumpInfoType
)(gsize
)ins
->inst_i1
, ins
->inst_p0
, MONO_R_ARM64_IMM
);
3251 code
= emit_imm64_template (code
, dreg
);
3255 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3256 * So instead of emitting a trap, we emit a call a C function and place a
3259 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
3264 arm_addx_imm (code
, ARMREG_IP0
, sreg1
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
3265 // FIXME: andx_imm doesn't work yet
3266 code
= emit_imm (code
, ARMREG_IP1
, -MONO_ARCH_FRAME_ALIGNMENT
);
3267 arm_andx (code
, ARMREG_IP0
, ARMREG_IP0
, ARMREG_IP1
);
3268 //arm_andx_imm (code, ARMREG_IP0, sreg1, - MONO_ARCH_FRAME_ALIGNMENT);
3269 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
3270 arm_subx (code
, ARMREG_IP1
, ARMREG_IP1
, ARMREG_IP0
);
3271 arm_movspx (code
, ARMREG_SP
, ARMREG_IP1
);
3274 /* ip1 = pointer, ip0 = end */
3275 arm_addx (code
, ARMREG_IP0
, ARMREG_IP1
, ARMREG_IP0
);
3277 arm_cmpx (code
, ARMREG_IP1
, ARMREG_IP0
);
3279 arm_bcc (code
, ARMCOND_EQ
, 0);
3280 arm_stpx (code
, ARMREG_RZR
, ARMREG_RZR
, ARMREG_IP1
, 0);
3281 arm_addx_imm (code
, ARMREG_IP1
, ARMREG_IP1
, 16);
3282 arm_b (code
, buf
[0]);
3283 arm_patch_rel (buf
[1], code
, MONO_R_ARM64_BCC
);
3285 arm_movspx (code
, dreg
, ARMREG_SP
);
3286 if (cfg
->param_area
)
3287 code
= emit_subx_sp_imm (code
, cfg
->param_area
);
3290 case OP_LOCALLOC_IMM
: {
3293 imm
= ALIGN_TO (ins
->inst_imm
, MONO_ARCH_FRAME_ALIGNMENT
);
3294 g_assert (arm_is_arith_imm (imm
));
3295 arm_subx_imm (code
, ARMREG_SP
, ARMREG_SP
, imm
);
3298 g_assert (MONO_ARCH_FRAME_ALIGNMENT
== 16);
3300 while (offset
< imm
) {
3301 arm_stpx (code
, ARMREG_RZR
, ARMREG_RZR
, ARMREG_SP
, offset
);
3304 arm_movspx (code
, dreg
, ARMREG_SP
);
3305 if (cfg
->param_area
)
3306 code
= emit_subx_sp_imm (code
, cfg
->param_area
);
3310 code
= emit_aotconst (cfg
, code
, dreg
, (MonoJumpInfoType
)(gsize
)ins
->inst_i1
, ins
->inst_p0
);
3312 case OP_OBJC_GET_SELECTOR
:
3313 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_OBJC_SELECTOR_REF
, ins
->inst_p0
);
3314 /* See arch_emit_objc_selector_ref () in aot-compiler.c */
3315 arm_ldrx_lit (code
, ins
->dreg
, 0);
3319 case OP_SEQ_POINT
: {
3320 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
3323 * For AOT, we use one got slot per method, which will point to a
3324 * SeqPointInfo structure, containing all the information required
3325 * by the code below.
3327 if (cfg
->compile_aot
) {
3328 g_assert (info_var
);
3329 g_assert (info_var
->opcode
== OP_REGOFFSET
);
3332 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
3333 MonoInst
*var
= cfg
->arch
.ss_tramp_var
;
3336 g_assert (var
->opcode
== OP_REGOFFSET
);
3337 /* Load ss_tramp_var */
3338 /* This is equal to &ss_trampoline */
3339 arm_ldrx (code
, ARMREG_IP1
, var
->inst_basereg
, var
->inst_offset
);
3340 /* Load the trampoline address */
3341 arm_ldrx (code
, ARMREG_IP1
, ARMREG_IP1
, 0);
3342 /* Call it if it is non-null */
3343 arm_cbzx (code
, ARMREG_IP1
, code
+ 8);
3344 arm_blrx (code
, ARMREG_IP1
);
3347 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3349 if (cfg
->compile_aot
) {
3350 const guint32 offset
= code
- cfg
->native_code
;
3353 arm_ldrx (code
, ARMREG_IP1
, info_var
->inst_basereg
, info_var
->inst_offset
);
3354 /* Add the offset */
3355 val
= ((offset
/ 4) * sizeof (target_mgreg_t
)) + MONO_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
3356 /* Load the info->bp_addrs [offset], which is either 0 or the address of the bp trampoline */
3357 code
= emit_ldrx (code
, ARMREG_IP1
, ARMREG_IP1
, val
);
3358 /* Skip the load if its 0 */
3359 arm_cbzx (code
, ARMREG_IP1
, code
+ 8);
3360 /* Call the breakpoint trampoline */
3361 arm_blrx (code
, ARMREG_IP1
);
3363 MonoInst
*var
= cfg
->arch
.bp_tramp_var
;
3366 g_assert (var
->opcode
== OP_REGOFFSET
);
3367 /* Load the address of the bp trampoline into IP0 */
3368 arm_ldrx (code
, ARMREG_IP0
, var
->inst_basereg
, var
->inst_offset
);
3370 * A placeholder for a possible breakpoint inserted by
3371 * mono_arch_set_breakpoint ().
3380 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
, MONO_R_ARM64_B
);
3384 arm_brx (code
, sreg1
);
3416 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_BCC
);
3417 cond
= opcode_to_armcond (ins
->opcode
);
3418 arm_bcc (code
, cond
, 0);
3422 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_BCC
);
3423 /* For fp compares, ARMCOND_LT is lt or unordered */
3424 arm_bcc (code
, ARMCOND_LT
, 0);
3427 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_BCC
);
3428 arm_bcc (code
, ARMCOND_EQ
, 0);
3429 mono_add_patch_info_rel (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_BCC
);
3430 /* For fp compares, ARMCOND_LT is lt or unordered */
3431 arm_bcc (code
, ARMCOND_LT
, 0);
3434 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_CBZ
);
3435 arm_cbzw (code
, sreg1
, 0);
3438 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_CBZ
);
3439 arm_cbzx (code
, sreg1
, 0);
3441 case OP_ARM64_CBNZW
:
3442 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_CBZ
);
3443 arm_cbnzw (code
, sreg1
, 0);
3445 case OP_ARM64_CBNZX
:
3446 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_CBZ
);
3447 arm_cbnzx (code
, sreg1
, 0);
3451 arm_addw (code
, dreg
, sreg1
, sreg2
);
3454 arm_addx (code
, dreg
, sreg1
, sreg2
);
3457 arm_subw (code
, dreg
, sreg1
, sreg2
);
3460 arm_subx (code
, dreg
, sreg1
, sreg2
);
3463 arm_andw (code
, dreg
, sreg1
, sreg2
);
3466 arm_andx (code
, dreg
, sreg1
, sreg2
);
3469 arm_orrw (code
, dreg
, sreg1
, sreg2
);
3472 arm_orrx (code
, dreg
, sreg1
, sreg2
);
3475 arm_eorw (code
, dreg
, sreg1
, sreg2
);
3478 arm_eorx (code
, dreg
, sreg1
, sreg2
);
3481 arm_negw (code
, dreg
, sreg1
);
3484 arm_negx (code
, dreg
, sreg1
);
3487 arm_mvnw (code
, dreg
, sreg1
);
3490 arm_mvnx (code
, dreg
, sreg1
);
3493 arm_addsw (code
, dreg
, sreg1
, sreg2
);
3497 arm_addsx (code
, dreg
, sreg1
, sreg2
);
3500 arm_subsw (code
, dreg
, sreg1
, sreg2
);
3504 arm_subsx (code
, dreg
, sreg1
, sreg2
);
3507 arm_cmpw (code
, sreg1
, sreg2
);
3511 arm_cmpx (code
, sreg1
, sreg2
);
3514 code
= emit_addw_imm (code
, dreg
, sreg1
, imm
);
3518 code
= emit_addx_imm (code
, dreg
, sreg1
, imm
);
3521 code
= emit_subw_imm (code
, dreg
, sreg1
, imm
);
3524 code
= emit_subx_imm (code
, dreg
, sreg1
, imm
);
3527 code
= emit_andw_imm (code
, dreg
, sreg1
, imm
);
3531 code
= emit_andx_imm (code
, dreg
, sreg1
, imm
);
3534 code
= emit_orrw_imm (code
, dreg
, sreg1
, imm
);
3537 code
= emit_orrx_imm (code
, dreg
, sreg1
, imm
);
3540 code
= emit_eorw_imm (code
, dreg
, sreg1
, imm
);
3543 code
= emit_eorx_imm (code
, dreg
, sreg1
, imm
);
3545 case OP_ICOMPARE_IMM
:
3546 code
= emit_cmpw_imm (code
, sreg1
, imm
);
3548 case OP_LCOMPARE_IMM
:
3549 case OP_COMPARE_IMM
:
3551 arm_cmpx (code
, sreg1
, ARMREG_RZR
);
3553 // FIXME: 32 vs 64 bit issues for 0xffffffff
3554 code
= emit_imm64 (code
, ARMREG_LR
, imm
);
3555 arm_cmpx (code
, sreg1
, ARMREG_LR
);
3559 arm_lslvw (code
, dreg
, sreg1
, sreg2
);
3562 arm_lslvx (code
, dreg
, sreg1
, sreg2
);
3565 arm_asrvw (code
, dreg
, sreg1
, sreg2
);
3568 arm_asrvx (code
, dreg
, sreg1
, sreg2
);
3571 arm_lsrvw (code
, dreg
, sreg1
, sreg2
);
3574 arm_lsrvx (code
, dreg
, sreg1
, sreg2
);
3578 arm_movx (code
, dreg
, sreg1
);
3580 arm_lslw (code
, dreg
, sreg1
, imm
);
3585 arm_movx (code
, dreg
, sreg1
);
3587 arm_lslx (code
, dreg
, sreg1
, imm
);
3591 arm_movx (code
, dreg
, sreg1
);
3593 arm_asrw (code
, dreg
, sreg1
, imm
);
3598 arm_movx (code
, dreg
, sreg1
);
3600 arm_asrx (code
, dreg
, sreg1
, imm
);
3602 case OP_ISHR_UN_IMM
:
3604 arm_movx (code
, dreg
, sreg1
);
3606 arm_lsrw (code
, dreg
, sreg1
, imm
);
3609 case OP_LSHR_UN_IMM
:
3611 arm_movx (code
, dreg
, sreg1
);
3613 arm_lsrx (code
, dreg
, sreg1
, imm
);
3618 arm_sxtwx (code
, dreg
, sreg1
);
3621 /* Clean out the upper word */
3622 arm_movw (code
, dreg
, sreg1
);
3625 /* MULTIPLY/DIVISION */
3628 // FIXME: Optimize this
3629 /* Check for zero */
3630 arm_cmpx_imm (code
, sreg2
, 0);
3631 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3632 /* Check for INT_MIN/-1 */
3633 code
= emit_imm (code
, ARMREG_IP0
, 0x80000000);
3634 arm_cmpx (code
, sreg1
, ARMREG_IP0
);
3635 arm_cset (code
, ARMCOND_EQ
, ARMREG_IP1
);
3636 code
= emit_imm (code
, ARMREG_IP0
, 0xffffffff);
3637 arm_cmpx (code
, sreg2
, ARMREG_IP0
);
3638 arm_cset (code
, ARMCOND_EQ
, ARMREG_IP0
);
3639 arm_andx (code
, ARMREG_IP0
, ARMREG_IP0
, ARMREG_IP1
);
3640 arm_cmpx_imm (code
, ARMREG_IP0
, 1);
3641 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "OverflowException");
3642 if (ins
->opcode
== OP_IREM
) {
3643 arm_sdivw (code
, ARMREG_LR
, sreg1
, sreg2
);
3644 arm_msubw (code
, dreg
, ARMREG_LR
, sreg2
, sreg1
);
3646 arm_sdivw (code
, dreg
, sreg1
, sreg2
);
3650 arm_cmpx_imm (code
, sreg2
, 0);
3651 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3652 arm_udivw (code
, dreg
, sreg1
, sreg2
);
3655 arm_cmpx_imm (code
, sreg2
, 0);
3656 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3657 arm_udivw (code
, ARMREG_LR
, sreg1
, sreg2
);
3658 arm_msubw (code
, dreg
, ARMREG_LR
, sreg2
, sreg1
);
3662 // FIXME: Optimize this
3663 /* Check for zero */
3664 arm_cmpx_imm (code
, sreg2
, 0);
3665 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3666 /* Check for INT64_MIN/-1 */
3667 code
= emit_imm64 (code
, ARMREG_IP0
, 0x8000000000000000);
3668 arm_cmpx (code
, sreg1
, ARMREG_IP0
);
3669 arm_cset (code
, ARMCOND_EQ
, ARMREG_IP1
);
3670 code
= emit_imm64 (code
, ARMREG_IP0
, 0xffffffffffffffff);
3671 arm_cmpx (code
, sreg2
, ARMREG_IP0
);
3672 arm_cset (code
, ARMCOND_EQ
, ARMREG_IP0
);
3673 arm_andx (code
, ARMREG_IP0
, ARMREG_IP0
, ARMREG_IP1
);
3674 arm_cmpx_imm (code
, ARMREG_IP0
, 1);
3675 /* 64 bit uses OverflowException */
3676 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "OverflowException");
3677 if (ins
->opcode
== OP_LREM
) {
3678 arm_sdivx (code
, ARMREG_LR
, sreg1
, sreg2
);
3679 arm_msubx (code
, dreg
, ARMREG_LR
, sreg2
, sreg1
);
3681 arm_sdivx (code
, dreg
, sreg1
, sreg2
);
3685 arm_cmpx_imm (code
, sreg2
, 0);
3686 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3687 arm_udivx (code
, dreg
, sreg1
, sreg2
);
3690 arm_cmpx_imm (code
, sreg2
, 0);
3691 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3692 arm_udivx (code
, ARMREG_LR
, sreg1
, sreg2
);
3693 arm_msubx (code
, dreg
, ARMREG_LR
, sreg2
, sreg1
);
3696 arm_mulw (code
, dreg
, sreg1
, sreg2
);
3699 arm_mulx (code
, dreg
, sreg1
, sreg2
);
3702 code
= emit_imm (code
, ARMREG_LR
, imm
);
3703 arm_mulw (code
, dreg
, sreg1
, ARMREG_LR
);
3707 code
= emit_imm (code
, ARMREG_LR
, imm
);
3708 arm_mulx (code
, dreg
, sreg1
, ARMREG_LR
);
3712 case OP_ICONV_TO_I1
:
3713 case OP_LCONV_TO_I1
:
3714 arm_sxtbx (code
, dreg
, sreg1
);
3716 case OP_ICONV_TO_I2
:
3717 case OP_LCONV_TO_I2
:
3718 arm_sxthx (code
, dreg
, sreg1
);
3720 case OP_ICONV_TO_U1
:
3721 case OP_LCONV_TO_U1
:
3722 arm_uxtbw (code
, dreg
, sreg1
);
3724 case OP_ICONV_TO_U2
:
3725 case OP_LCONV_TO_U2
:
3726 arm_uxthw (code
, dreg
, sreg1
);
3752 cond
= opcode_to_armcond (ins
->opcode
);
3753 arm_cset (code
, cond
, dreg
);
3766 cond
= opcode_to_armcond (ins
->opcode
);
3767 arm_fcmpd (code
, sreg1
, sreg2
);
3768 arm_cset (code
, cond
, dreg
);
3773 case OP_LOADI1_MEMBASE
:
3774 code
= emit_ldrsbx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3776 case OP_LOADU1_MEMBASE
:
3777 code
= emit_ldrb (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3779 case OP_LOADI2_MEMBASE
:
3780 code
= emit_ldrshx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3782 case OP_LOADU2_MEMBASE
:
3783 code
= emit_ldrh (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3785 case OP_LOADI4_MEMBASE
:
3786 code
= emit_ldrswx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3788 case OP_LOADU4_MEMBASE
:
3789 code
= emit_ldrw (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3791 case OP_LOAD_MEMBASE
:
3792 case OP_LOADI8_MEMBASE
:
3793 code
= emit_ldrx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3795 case OP_STOREI1_MEMBASE_IMM
:
3796 case OP_STOREI2_MEMBASE_IMM
:
3797 case OP_STOREI4_MEMBASE_IMM
:
3798 case OP_STORE_MEMBASE_IMM
:
3799 case OP_STOREI8_MEMBASE_IMM
: {
3803 code
= emit_imm (code
, ARMREG_LR
, imm
);
3806 immreg
= ARMREG_RZR
;
3809 switch (ins
->opcode
) {
3810 case OP_STOREI1_MEMBASE_IMM
:
3811 code
= emit_strb (code
, immreg
, ins
->inst_destbasereg
, ins
->inst_offset
);
3813 case OP_STOREI2_MEMBASE_IMM
:
3814 code
= emit_strh (code
, immreg
, ins
->inst_destbasereg
, ins
->inst_offset
);
3816 case OP_STOREI4_MEMBASE_IMM
:
3817 code
= emit_strw (code
, immreg
, ins
->inst_destbasereg
, ins
->inst_offset
);
3819 case OP_STORE_MEMBASE_IMM
:
3820 case OP_STOREI8_MEMBASE_IMM
:
3821 code
= emit_strx (code
, immreg
, ins
->inst_destbasereg
, ins
->inst_offset
);
3824 g_assert_not_reached ();
3829 case OP_STOREI1_MEMBASE_REG
:
3830 code
= emit_strb (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3832 case OP_STOREI2_MEMBASE_REG
:
3833 code
= emit_strh (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3835 case OP_STOREI4_MEMBASE_REG
:
3836 code
= emit_strw (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3838 case OP_STORE_MEMBASE_REG
:
3839 case OP_STOREI8_MEMBASE_REG
:
3840 code
= emit_strx (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3843 code
= emit_tls_get (code
, dreg
, ins
->inst_offset
);
3846 code
= emit_tls_set (code
, sreg1
, ins
->inst_offset
);
3849 case OP_MEMORY_BARRIER
:
3850 arm_dmb (code
, ARM_DMB_ISH
);
3852 case OP_ATOMIC_ADD_I4
: {
3856 arm_ldxrw (code
, ARMREG_IP0
, sreg1
);
3857 arm_addx (code
, ARMREG_IP0
, ARMREG_IP0
, sreg2
);
3858 arm_stlxrw (code
, ARMREG_IP1
, ARMREG_IP0
, sreg1
);
3859 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3861 arm_dmb (code
, ARM_DMB_ISH
);
3862 arm_movx (code
, dreg
, ARMREG_IP0
);
3865 case OP_ATOMIC_ADD_I8
: {
3869 arm_ldxrx (code
, ARMREG_IP0
, sreg1
);
3870 arm_addx (code
, ARMREG_IP0
, ARMREG_IP0
, sreg2
);
3871 arm_stlxrx (code
, ARMREG_IP1
, ARMREG_IP0
, sreg1
);
3872 arm_cbnzx (code
, ARMREG_IP1
, buf
[0]);
3874 arm_dmb (code
, ARM_DMB_ISH
);
3875 arm_movx (code
, dreg
, ARMREG_IP0
);
3878 case OP_ATOMIC_EXCHANGE_I4
: {
3882 arm_ldxrw (code
, ARMREG_IP0
, sreg1
);
3883 arm_stlxrw (code
, ARMREG_IP1
, sreg2
, sreg1
);
3884 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3886 arm_dmb (code
, ARM_DMB_ISH
);
3887 arm_movx (code
, dreg
, ARMREG_IP0
);
3890 case OP_ATOMIC_EXCHANGE_I8
: {
3894 arm_ldxrx (code
, ARMREG_IP0
, sreg1
);
3895 arm_stlxrx (code
, ARMREG_IP1
, sreg2
, sreg1
);
3896 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3898 arm_dmb (code
, ARM_DMB_ISH
);
3899 arm_movx (code
, dreg
, ARMREG_IP0
);
3902 case OP_ATOMIC_CAS_I4
: {
3905 /* sreg2 is the value, sreg3 is the comparand */
3907 arm_ldxrw (code
, ARMREG_IP0
, sreg1
);
3908 arm_cmpw (code
, ARMREG_IP0
, ins
->sreg3
);
3910 arm_bcc (code
, ARMCOND_NE
, 0);
3911 arm_stlxrw (code
, ARMREG_IP1
, sreg2
, sreg1
);
3912 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3913 arm_patch_rel (buf
[1], code
, MONO_R_ARM64_BCC
);
3915 arm_dmb (code
, ARM_DMB_ISH
);
3916 arm_movx (code
, dreg
, ARMREG_IP0
);
3919 case OP_ATOMIC_CAS_I8
: {
3923 arm_ldxrx (code
, ARMREG_IP0
, sreg1
);
3924 arm_cmpx (code
, ARMREG_IP0
, ins
->sreg3
);
3926 arm_bcc (code
, ARMCOND_NE
, 0);
3927 arm_stlxrx (code
, ARMREG_IP1
, sreg2
, sreg1
);
3928 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3929 arm_patch_rel (buf
[1], code
, MONO_R_ARM64_BCC
);
3931 arm_dmb (code
, ARM_DMB_ISH
);
3932 arm_movx (code
, dreg
, ARMREG_IP0
);
3935 case OP_ATOMIC_LOAD_I1
: {
3936 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3937 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3938 arm_dmb (code
, ARM_DMB_ISH
);
3939 arm_ldarb (code
, ins
->dreg
, ARMREG_LR
);
3940 arm_sxtbx (code
, ins
->dreg
, ins
->dreg
);
3943 case OP_ATOMIC_LOAD_U1
: {
3944 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3945 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3946 arm_dmb (code
, ARM_DMB_ISH
);
3947 arm_ldarb (code
, ins
->dreg
, ARMREG_LR
);
3948 arm_uxtbx (code
, ins
->dreg
, ins
->dreg
);
3951 case OP_ATOMIC_LOAD_I2
: {
3952 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3953 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3954 arm_dmb (code
, ARM_DMB_ISH
);
3955 arm_ldarh (code
, ins
->dreg
, ARMREG_LR
);
3956 arm_sxthx (code
, ins
->dreg
, ins
->dreg
);
3959 case OP_ATOMIC_LOAD_U2
: {
3960 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3961 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3962 arm_dmb (code
, ARM_DMB_ISH
);
3963 arm_ldarh (code
, ins
->dreg
, ARMREG_LR
);
3964 arm_uxthx (code
, ins
->dreg
, ins
->dreg
);
3967 case OP_ATOMIC_LOAD_I4
: {
3968 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3969 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3970 arm_dmb (code
, ARM_DMB_ISH
);
3971 arm_ldarw (code
, ins
->dreg
, ARMREG_LR
);
3972 arm_sxtwx (code
, ins
->dreg
, ins
->dreg
);
3975 case OP_ATOMIC_LOAD_U4
: {
3976 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3977 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3978 arm_dmb (code
, ARM_DMB_ISH
);
3979 arm_ldarw (code
, ins
->dreg
, ARMREG_LR
);
3980 arm_movw (code
, ins
->dreg
, ins
->dreg
); /* Clear upper half of the register. */
3983 case OP_ATOMIC_LOAD_I8
:
3984 case OP_ATOMIC_LOAD_U8
: {
3985 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3986 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3987 arm_dmb (code
, ARM_DMB_ISH
);
3988 arm_ldarx (code
, ins
->dreg
, ARMREG_LR
);
3991 case OP_ATOMIC_LOAD_R4
: {
3992 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3993 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3994 arm_dmb (code
, ARM_DMB_ISH
);
3996 arm_ldarw (code
, ARMREG_LR
, ARMREG_LR
);
3997 arm_fmov_rx_to_double (code
, ins
->dreg
, ARMREG_LR
);
3999 arm_ldarw (code
, ARMREG_LR
, ARMREG_LR
);
4000 arm_fmov_rx_to_double (code
, FP_TEMP_REG
, ARMREG_LR
);
4001 arm_fcvt_sd (code
, ins
->dreg
, FP_TEMP_REG
);
4005 case OP_ATOMIC_LOAD_R8
: {
4006 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
4007 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4008 arm_dmb (code
, ARM_DMB_ISH
);
4009 arm_ldarx (code
, ARMREG_LR
, ARMREG_LR
);
4010 arm_fmov_rx_to_double (code
, ins
->dreg
, ARMREG_LR
);
4013 case OP_ATOMIC_STORE_I1
:
4014 case OP_ATOMIC_STORE_U1
: {
4015 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4016 arm_stlrb (code
, ARMREG_LR
, ins
->sreg1
);
4017 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4018 arm_dmb (code
, ARM_DMB_ISH
);
4021 case OP_ATOMIC_STORE_I2
:
4022 case OP_ATOMIC_STORE_U2
: {
4023 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4024 arm_stlrh (code
, ARMREG_LR
, ins
->sreg1
);
4025 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4026 arm_dmb (code
, ARM_DMB_ISH
);
4029 case OP_ATOMIC_STORE_I4
:
4030 case OP_ATOMIC_STORE_U4
: {
4031 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4032 arm_stlrw (code
, ARMREG_LR
, ins
->sreg1
);
4033 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4034 arm_dmb (code
, ARM_DMB_ISH
);
4037 case OP_ATOMIC_STORE_I8
:
4038 case OP_ATOMIC_STORE_U8
: {
4039 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4040 arm_stlrx (code
, ARMREG_LR
, ins
->sreg1
);
4041 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4042 arm_dmb (code
, ARM_DMB_ISH
);
4045 case OP_ATOMIC_STORE_R4
: {
4046 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4048 arm_fmov_double_to_rx (code
, ARMREG_IP0
, ins
->sreg1
);
4049 arm_stlrw (code
, ARMREG_LR
, ARMREG_IP0
);
4051 arm_fcvt_ds (code
, FP_TEMP_REG
, ins
->sreg1
);
4052 arm_fmov_double_to_rx (code
, ARMREG_IP0
, FP_TEMP_REG
);
4053 arm_stlrw (code
, ARMREG_LR
, ARMREG_IP0
);
4055 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4056 arm_dmb (code
, ARM_DMB_ISH
);
4059 case OP_ATOMIC_STORE_R8
: {
4060 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4061 arm_fmov_double_to_rx (code
, ARMREG_IP0
, ins
->sreg1
);
4062 arm_stlrx (code
, ARMREG_LR
, ARMREG_IP0
);
4063 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4064 arm_dmb (code
, ARM_DMB_ISH
);
4070 guint64 imm
= *(guint64
*)ins
->inst_p0
;
4073 arm_fmov_rx_to_double (code
, dreg
, ARMREG_RZR
);
4075 code
= emit_imm64 (code
, ARMREG_LR
, imm
);
4076 arm_fmov_rx_to_double (code
, ins
->dreg
, ARMREG_LR
);
4081 guint64 imm
= *(guint32
*)ins
->inst_p0
;
4083 code
= emit_imm64 (code
, ARMREG_LR
, imm
);
4085 arm_fmov_rx_to_double (code
, dreg
, ARMREG_LR
);
4087 arm_fmov_rx_to_double (code
, FP_TEMP_REG
, ARMREG_LR
);
4088 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4092 case OP_LOADR8_MEMBASE
:
4093 code
= emit_ldrfpx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4095 case OP_LOADR4_MEMBASE
:
4097 code
= emit_ldrfpw (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4099 code
= emit_ldrfpw (code
, FP_TEMP_REG
, ins
->inst_basereg
, ins
->inst_offset
);
4100 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4103 case OP_STORER8_MEMBASE_REG
:
4104 code
= emit_strfpx (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4106 case OP_STORER4_MEMBASE_REG
:
4108 code
= emit_strfpw (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4110 arm_fcvt_ds (code
, FP_TEMP_REG
, sreg1
);
4111 code
= emit_strfpw (code
, FP_TEMP_REG
, ins
->inst_destbasereg
, ins
->inst_offset
);
4116 arm_fmovd (code
, dreg
, sreg1
);
4120 arm_fmovs (code
, dreg
, sreg1
);
4122 case OP_MOVE_F_TO_I4
:
4124 arm_fmov_double_to_rx (code
, ins
->dreg
, ins
->sreg1
);
4126 arm_fcvt_ds (code
, ins
->dreg
, ins
->sreg1
);
4127 arm_fmov_double_to_rx (code
, ins
->dreg
, ins
->dreg
);
4130 case OP_MOVE_I4_TO_F
:
4132 arm_fmov_rx_to_double (code
, ins
->dreg
, ins
->sreg1
);
4134 arm_fmov_rx_to_double (code
, ins
->dreg
, ins
->sreg1
);
4135 arm_fcvt_sd (code
, ins
->dreg
, ins
->dreg
);
4138 case OP_MOVE_F_TO_I8
:
4139 arm_fmov_double_to_rx (code
, ins
->dreg
, ins
->sreg1
);
4141 case OP_MOVE_I8_TO_F
:
4142 arm_fmov_rx_to_double (code
, ins
->dreg
, ins
->sreg1
);
4145 arm_fcmpd (code
, sreg1
, sreg2
);
4148 arm_fcmps (code
, sreg1
, sreg2
);
4150 case OP_FCONV_TO_I1
:
4151 arm_fcvtzs_dx (code
, dreg
, sreg1
);
4152 arm_sxtbx (code
, dreg
, dreg
);
4154 case OP_FCONV_TO_U1
:
4155 arm_fcvtzu_dx (code
, dreg
, sreg1
);
4156 arm_uxtbw (code
, dreg
, dreg
);
4158 case OP_FCONV_TO_I2
:
4159 arm_fcvtzs_dx (code
, dreg
, sreg1
);
4160 arm_sxthx (code
, dreg
, dreg
);
4162 case OP_FCONV_TO_U2
:
4163 arm_fcvtzu_dx (code
, dreg
, sreg1
);
4164 arm_uxthw (code
, dreg
, dreg
);
4166 case OP_FCONV_TO_I4
:
4167 arm_fcvtzs_dx (code
, dreg
, sreg1
);
4168 arm_sxtwx (code
, dreg
, dreg
);
4170 case OP_FCONV_TO_U4
:
4171 arm_fcvtzu_dx (code
, dreg
, sreg1
);
4173 case OP_FCONV_TO_I8
:
4174 arm_fcvtzs_dx (code
, dreg
, sreg1
);
4176 case OP_FCONV_TO_U8
:
4177 arm_fcvtzu_dx (code
, dreg
, sreg1
);
4179 case OP_FCONV_TO_R4
:
4181 arm_fcvt_ds (code
, dreg
, sreg1
);
4183 arm_fcvt_ds (code
, FP_TEMP_REG
, sreg1
);
4184 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4187 case OP_ICONV_TO_R4
:
4189 arm_scvtf_rw_to_s (code
, dreg
, sreg1
);
4191 arm_scvtf_rw_to_s (code
, FP_TEMP_REG
, sreg1
);
4192 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4195 case OP_LCONV_TO_R4
:
4197 arm_scvtf_rx_to_s (code
, dreg
, sreg1
);
4199 arm_scvtf_rx_to_s (code
, FP_TEMP_REG
, sreg1
);
4200 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4203 case OP_ICONV_TO_R8
:
4204 arm_scvtf_rw_to_d (code
, dreg
, sreg1
);
4206 case OP_LCONV_TO_R8
:
4207 arm_scvtf_rx_to_d (code
, dreg
, sreg1
);
4209 case OP_ICONV_TO_R_UN
:
4210 arm_ucvtf_rw_to_d (code
, dreg
, sreg1
);
4212 case OP_LCONV_TO_R_UN
:
4213 arm_ucvtf_rx_to_d (code
, dreg
, sreg1
);
4216 arm_fadd_d (code
, dreg
, sreg1
, sreg2
);
4219 arm_fsub_d (code
, dreg
, sreg1
, sreg2
);
4222 arm_fmul_d (code
, dreg
, sreg1
, sreg2
);
4225 arm_fdiv_d (code
, dreg
, sreg1
, sreg2
);
4229 g_assert_not_reached ();
4232 arm_fneg_d (code
, dreg
, sreg1
);
4234 case OP_ARM_SETFREG_R4
:
4235 arm_fcvt_ds (code
, dreg
, sreg1
);
4238 /* Check for infinity */
4239 code
= emit_imm64 (code
, ARMREG_LR
, 0x7fefffffffffffffLL
);
4240 arm_fmov_rx_to_double (code
, FP_TEMP_REG
, ARMREG_LR
);
4241 arm_fabs_d (code
, FP_TEMP_REG2
, sreg1
);
4242 arm_fcmpd (code
, FP_TEMP_REG2
, FP_TEMP_REG
);
4243 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_GT
, "ArithmeticException");
4244 /* Check for nans */
4245 arm_fcmpd (code
, FP_TEMP_REG2
, FP_TEMP_REG2
);
4246 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_OV
, "ArithmeticException");
4247 arm_fmovd (code
, dreg
, sreg1
);
4252 arm_fadd_s (code
, dreg
, sreg1
, sreg2
);
4255 arm_fsub_s (code
, dreg
, sreg1
, sreg2
);
4258 arm_fmul_s (code
, dreg
, sreg1
, sreg2
);
4261 arm_fdiv_s (code
, dreg
, sreg1
, sreg2
);
4264 arm_fneg_s (code
, dreg
, sreg1
);
4266 case OP_RCONV_TO_I1
:
4267 arm_fcvtzs_sx (code
, dreg
, sreg1
);
4268 arm_sxtbx (code
, dreg
, dreg
);
4270 case OP_RCONV_TO_U1
:
4271 arm_fcvtzu_sx (code
, dreg
, sreg1
);
4272 arm_uxtbw (code
, dreg
, dreg
);
4274 case OP_RCONV_TO_I2
:
4275 arm_fcvtzs_sx (code
, dreg
, sreg1
);
4276 arm_sxthx (code
, dreg
, dreg
);
4278 case OP_RCONV_TO_U2
:
4279 arm_fcvtzu_sx (code
, dreg
, sreg1
);
4280 arm_uxthw (code
, dreg
, dreg
);
4282 case OP_RCONV_TO_I4
:
4283 arm_fcvtzs_sx (code
, dreg
, sreg1
);
4284 arm_sxtwx (code
, dreg
, dreg
);
4286 case OP_RCONV_TO_U4
:
4287 arm_fcvtzu_sx (code
, dreg
, sreg1
);
4289 case OP_RCONV_TO_I8
:
4290 arm_fcvtzs_sx (code
, dreg
, sreg1
);
4292 case OP_RCONV_TO_U8
:
4293 arm_fcvtzu_sx (code
, dreg
, sreg1
);
4295 case OP_RCONV_TO_R8
:
4296 arm_fcvt_sd (code
, dreg
, sreg1
);
4298 case OP_RCONV_TO_R4
:
4300 arm_fmovs (code
, dreg
, sreg1
);
4312 cond
= opcode_to_armcond (ins
->opcode
);
4313 arm_fcmps (code
, sreg1
, sreg2
);
4314 arm_cset (code
, cond
, dreg
);
4326 call
= (MonoCallInst
*)ins
;
4327 const MonoJumpInfoTarget patch
= mono_call_to_patch (call
);
4328 code
= emit_call (cfg
, code
, patch
.type
, patch
.target
);
4329 code
= emit_move_return_value (cfg
, code
, ins
);
4332 case OP_VOIDCALL_REG
:
4338 arm_blrx (code
, sreg1
);
4339 code
= emit_move_return_value (cfg
, code
, ins
);
4341 case OP_VOIDCALL_MEMBASE
:
4342 case OP_CALL_MEMBASE
:
4343 case OP_LCALL_MEMBASE
:
4344 case OP_FCALL_MEMBASE
:
4345 case OP_RCALL_MEMBASE
:
4346 case OP_VCALL2_MEMBASE
:
4347 code
= emit_ldrx (code
, ARMREG_IP0
, ins
->inst_basereg
, ins
->inst_offset
);
4348 arm_blrx (code
, ARMREG_IP0
);
4349 code
= emit_move_return_value (cfg
, code
, ins
);
4352 case OP_TAILCALL_PARAMETER
:
4353 // This opcode helps compute sizes, i.e.
4354 // of the subsequent OP_TAILCALL, but contributes no code.
4355 g_assert (ins
->next
);
4359 case OP_TAILCALL_MEMBASE
:
4360 case OP_TAILCALL_REG
: {
4361 int branch_reg
= ARMREG_IP0
;
4362 guint64 free_reg
= 1 << ARMREG_IP1
;
4363 call
= (MonoCallInst
*)ins
;
4365 g_assert (!cfg
->method
->save_lmf
);
4367 max_len
+= call
->stack_usage
/ sizeof (target_mgreg_t
) * ins_get_size (OP_TAILCALL_PARAMETER
);
4368 while (G_UNLIKELY (offset
+ max_len
> cfg
->code_size
)) {
4369 cfg
->code_size
*= 2;
4370 cfg
->native_code
= (unsigned char *)mono_realloc_native_code (cfg
);
4371 code
= cfg
->native_code
+ offset
;
4372 cfg
->stat_code_reallocs
++;
4375 switch (ins
->opcode
) {
4377 free_reg
= (1 << ARMREG_IP0
) | (1 << ARMREG_IP1
);
4380 case OP_TAILCALL_REG
:
4381 g_assert (sreg1
!= -1);
4382 g_assert (sreg1
!= ARMREG_IP0
);
4383 g_assert (sreg1
!= ARMREG_IP1
);
4384 g_assert (sreg1
!= ARMREG_LR
);
4385 g_assert (sreg1
!= ARMREG_SP
);
4386 g_assert (sreg1
!= ARMREG_R28
);
4387 if ((sreg1
<< 1) & MONO_ARCH_CALLEE_SAVED_REGS
) {
4388 arm_movx (code
, branch_reg
, sreg1
);
4390 free_reg
= (1 << ARMREG_IP0
) | (1 << ARMREG_IP1
);
4395 case OP_TAILCALL_MEMBASE
:
4396 g_assert (ins
->inst_basereg
!= -1);
4397 g_assert (ins
->inst_basereg
!= ARMREG_IP0
);
4398 g_assert (ins
->inst_basereg
!= ARMREG_IP1
);
4399 g_assert (ins
->inst_basereg
!= ARMREG_LR
);
4400 g_assert (ins
->inst_basereg
!= ARMREG_SP
);
4401 g_assert (ins
->inst_basereg
!= ARMREG_R28
);
4402 code
= emit_ldrx (code
, branch_reg
, ins
->inst_basereg
, ins
->inst_offset
);
4406 g_assert_not_reached ();
4409 // Copy stack arguments.
4410 // FIXME a fixed size memcpy is desirable here,
4411 // at least for larger values of stack_usage.
4412 for (int i
= 0; i
< call
->stack_usage
; i
+= sizeof (target_mgreg_t
)) {
4413 code
= emit_ldrx (code
, ARMREG_LR
, ARMREG_SP
, i
);
4414 code
= emit_strx (code
, ARMREG_LR
, ARMREG_R28
, i
);
4417 /* Restore registers */
4418 code
= emit_load_regset (code
, MONO_ARCH_CALLEE_SAVED_REGS
& cfg
->used_int_regs
, ARMREG_FP
, cfg
->arch
.saved_gregs_offset
);
4421 code
= mono_arm_emit_destroy_frame (code
, cfg
->stack_offset
, free_reg
);
4423 switch (ins
->opcode
) {
4425 if (cfg
->compile_aot
) {
4426 /* This is not a PLT patch */
4427 code
= emit_aotconst (cfg
, code
, branch_reg
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
4429 mono_add_patch_info_rel (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
, MONO_R_ARM64_B
);
4431 cfg
->thunk_area
+= THUNK_SIZE
;
4435 case OP_TAILCALL_MEMBASE
:
4436 case OP_TAILCALL_REG
:
4437 arm_brx (code
, branch_reg
);
4441 g_assert_not_reached ();
4444 ins
->flags
|= MONO_INST_GC_CALLSITE
;
4445 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
4449 g_assert (cfg
->arch
.cinfo
);
4450 code
= emit_addx_imm (code
, ARMREG_IP0
, cfg
->arch
.args_reg
, cfg
->arch
.cinfo
->sig_cookie
.offset
);
4451 arm_strx (code
, ARMREG_IP0
, sreg1
, 0);
4454 MonoInst
*var
= cfg
->dyn_call_var
;
4455 guint8
*labels
[16];
4459 * sreg1 points to a DynCallArgs structure initialized by mono_arch_start_dyn_call ().
4460 * sreg2 is the function to call.
4463 g_assert (var
->opcode
== OP_REGOFFSET
);
4465 arm_movx (code
, ARMREG_LR
, sreg1
);
4466 arm_movx (code
, ARMREG_IP1
, sreg2
);
4468 /* Save args buffer */
4469 code
= emit_strx (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
4471 /* Set fp argument regs */
4472 code
= emit_ldrw (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_fpargs
));
4473 arm_cmpw (code
, ARMREG_R0
, ARMREG_RZR
);
4475 arm_bcc (code
, ARMCOND_EQ
, 0);
4476 for (i
= 0; i
< 8; ++i
)
4477 code
= emit_ldrfpx (code
, ARMREG_D0
+ i
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* 8));
4478 arm_patch_rel (labels
[0], code
, MONO_R_ARM64_BCC
);
4480 /* Allocate callee area */
4481 code
= emit_ldrx (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
4482 arm_lslw (code
, ARMREG_R0
, ARMREG_R0
, 3);
4483 arm_movspx (code
, ARMREG_R1
, ARMREG_SP
);
4484 arm_subx (code
, ARMREG_R1
, ARMREG_R1
, ARMREG_R0
);
4485 arm_movspx (code
, ARMREG_SP
, ARMREG_R1
);
4487 /* Set stack args */
4489 code
= emit_ldrx (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
4490 /* R2 = pointer into 'regs' */
4491 code
= emit_imm (code
, ARMREG_R2
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + ((PARAM_REGS
+ 1) * sizeof (target_mgreg_t
)));
4492 arm_addx (code
, ARMREG_R2
, ARMREG_LR
, ARMREG_R2
);
4493 /* R3 = pointer to stack */
4494 arm_movspx (code
, ARMREG_R3
, ARMREG_SP
);
4498 code
= emit_ldrx (code
, ARMREG_R5
, ARMREG_R2
, 0);
4499 code
= emit_strx (code
, ARMREG_R5
, ARMREG_R3
, 0);
4500 code
= emit_addx_imm (code
, ARMREG_R2
, ARMREG_R2
, sizeof (target_mgreg_t
));
4501 code
= emit_addx_imm (code
, ARMREG_R3
, ARMREG_R3
, sizeof (target_mgreg_t
));
4502 code
= emit_subx_imm (code
, ARMREG_R1
, ARMREG_R1
, 1);
4503 arm_patch_rel (labels
[0], code
, MONO_R_ARM64_B
);
4504 arm_cmpw (code
, ARMREG_R1
, ARMREG_RZR
);
4505 arm_bcc (code
, ARMCOND_GT
, labels
[1]);
4507 /* Set argument registers + r8 */
4508 code
= mono_arm_emit_load_regarray (code
, 0x1ff, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
));
4511 arm_blrx (code
, ARMREG_IP1
);
4514 code
= emit_ldrx (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
4515 arm_strx (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, res
));
4516 arm_strx (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, res2
));
4517 /* Save fp result */
4518 code
= emit_ldrw (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_fpret
));
4519 arm_cmpw (code
, ARMREG_R0
, ARMREG_RZR
);
4521 arm_bcc (code
, ARMCOND_EQ
, 0);
4522 for (i
= 0; i
< 8; ++i
)
4523 code
= emit_strfpx (code
, ARMREG_D0
+ i
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* 8));
4524 arm_patch_rel (labels
[1], code
, MONO_R_ARM64_BCC
);
4528 case OP_GENERIC_CLASS_INIT
: {
4532 byte_offset
= MONO_STRUCT_OFFSET (MonoVTable
, initialized
);
4534 /* Load vtable->initialized */
4535 arm_ldrsbx (code
, ARMREG_IP0
, sreg1
, byte_offset
);
4537 arm_cbnzx (code
, ARMREG_IP0
, 0);
4540 g_assert (sreg1
== ARMREG_R0
);
4541 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4542 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init
));
4544 mono_arm_patch (jump
, code
, MONO_R_ARM64_CBZ
);
4549 arm_ldrb (code
, ARMREG_LR
, sreg1
, 0);
4552 case OP_NOT_REACHED
:
4554 case OP_DUMMY_ICONST
:
4555 case OP_DUMMY_I8CONST
:
4556 case OP_DUMMY_R8CONST
:
4557 case OP_DUMMY_R4CONST
:
4559 case OP_IL_SEQ_POINT
:
4560 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4565 case OP_COND_EXC_IC
:
4566 case OP_COND_EXC_OV
:
4567 case OP_COND_EXC_IOV
:
4568 case OP_COND_EXC_NC
:
4569 case OP_COND_EXC_INC
:
4570 case OP_COND_EXC_NO
:
4571 case OP_COND_EXC_INO
:
4572 case OP_COND_EXC_EQ
:
4573 case OP_COND_EXC_IEQ
:
4574 case OP_COND_EXC_NE_UN
:
4575 case OP_COND_EXC_INE_UN
:
4576 case OP_COND_EXC_ILT
:
4577 case OP_COND_EXC_LT
:
4578 case OP_COND_EXC_ILT_UN
:
4579 case OP_COND_EXC_LT_UN
:
4580 case OP_COND_EXC_IGT
:
4581 case OP_COND_EXC_GT
:
4582 case OP_COND_EXC_IGT_UN
:
4583 case OP_COND_EXC_GT_UN
:
4584 case OP_COND_EXC_IGE
:
4585 case OP_COND_EXC_GE
:
4586 case OP_COND_EXC_IGE_UN
:
4587 case OP_COND_EXC_GE_UN
:
4588 case OP_COND_EXC_ILE
:
4589 case OP_COND_EXC_LE
:
4590 case OP_COND_EXC_ILE_UN
:
4591 case OP_COND_EXC_LE_UN
:
4592 code
= emit_cond_exc (cfg
, code
, ins
->opcode
, (const char*)ins
->inst_p1
);
4595 if (sreg1
!= ARMREG_R0
)
4596 arm_movx (code
, ARMREG_R0
, sreg1
);
4597 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4598 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
4601 if (sreg1
!= ARMREG_R0
)
4602 arm_movx (code
, ARMREG_R0
, sreg1
);
4603 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4604 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
4606 case OP_CALL_HANDLER
:
4607 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
, MONO_R_ARM64_BL
);
4609 cfg
->thunk_area
+= THUNK_SIZE
;
4610 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
4611 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
4613 case OP_START_HANDLER
: {
4614 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
4616 /* Save caller address */
4617 code
= emit_strx (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
4620 * Reserve a param area, see test_0_finally_param_area ().
4621 * This is needed because the param area is not set up when
4622 * we are called from EH code.
4624 if (cfg
->param_area
)
4625 code
= emit_subx_sp_imm (code
, cfg
->param_area
);
4629 case OP_ENDFILTER
: {
4630 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
4632 if (cfg
->param_area
)
4633 code
= emit_addx_sp_imm (code
, cfg
->param_area
);
4635 if (ins
->opcode
== OP_ENDFILTER
&& sreg1
!= ARMREG_R0
)
4636 arm_movx (code
, ARMREG_R0
, sreg1
);
4638 /* Return to either after the branch in OP_CALL_HANDLER, or to the EH code */
4639 code
= emit_ldrx (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
4640 arm_brx (code
, ARMREG_LR
);
4644 if (ins
->dreg
!= ARMREG_R0
)
4645 arm_movx (code
, ins
->dreg
, ARMREG_R0
);
4647 case OP_LIVERANGE_START
: {
4648 if (cfg
->verbose_level
> 1)
4649 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4650 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
4653 case OP_LIVERANGE_END
: {
4654 if (cfg
->verbose_level
> 1)
4655 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4656 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
4659 case OP_GC_SAFE_POINT
: {
4662 arm_ldrx (code
, ARMREG_IP1
, ins
->sreg1
, 0);
4663 /* Call it if it is non-null */
4665 arm_cbzx (code
, ARMREG_IP1
, 0);
4666 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll
));
4667 mono_arm_patch (buf
[0], code
, MONO_R_ARM64_CBZ
);
4670 case OP_FILL_PROF_CALL_CTX
:
4671 for (int i
= 0; i
< MONO_MAX_IREGS
; i
++)
4672 if ((MONO_ARCH_CALLEE_SAVED_REGS
& (1 << i
)) || i
== ARMREG_SP
|| i
== ARMREG_FP
)
4673 arm_strx (code
, i
, ins
->sreg1
, MONO_STRUCT_OFFSET (MonoContext
, regs
) + i
* sizeof (target_mgreg_t
));
4676 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
4677 g_assert_not_reached ();
4680 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
4681 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4682 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
4683 g_assert_not_reached ();
4686 set_code_cursor (cfg
, code
);
4689 * If the compiled code size is larger than the bcc displacement (19 bits signed),
4690 * insert branch islands between/inside basic blocks.
4692 if (cfg
->arch
.cond_branch_islands
)
4693 code
= emit_branch_island (cfg
, code
, start_offset
);
/*
 * emit_move_args:
 *
 *   Emit prolog code that moves the incoming call arguments from their ABI
 * locations (argument registers / caller stack slots, described by
 * cfg->arch.cinfo) into the locations the JITted method body expects:
 * hard registers for OP_REGVAR variables, frame slots for OP_REGOFFSET ones.
 *
 * NOTE(review): this chunk is a damaged extraction — the return-type line,
 * several case labels, break statements and closing braces were dropped by
 * the tool that produced it; only the surviving tokens are reflowed below.
 */
emit_move_args (MonoCompile *cfg, guint8 *code)
	MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);

	cinfo = cfg->arch.cinfo;
	/* One ArgInfo per formal argument (including the implicit 'this') */
	for (i = 0; i < cinfo->nargs; ++i) {
		ainfo = cinfo->args + i;
		ins = cfg->args [i];

		if (ins->opcode == OP_REGVAR) {
			/* The argument lives in a hard register inside the method */
			switch (ainfo->storage) {
			/* NOTE(review): case label dropped here — this arm handles the in-register case */
				arm_movx (code, ins->dreg, ainfo->reg);
				if (i == 0 && sig->hasthis) {
					/* Record both locations of 'this' for debug info */
					mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
					mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0);
				/* NOTE(review): on-stack arm — load through cfg->arch.args_reg (r28),
				 * choosing the load width/signedness from the slot size */
				switch (ainfo->slot_size) {
					code = emit_ldrsbx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					code = emit_ldrb (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					code = emit_ldrshx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					code = emit_ldrh (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					code = emit_ldrswx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					code = emit_ldrw (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					code = emit_ldrx (code, ins->dreg, cfg->arch.args_reg, ainfo->offset);
					g_assert_not_reached ();
			/* else: the argument lives in a frame slot */
			if (ainfo->storage != ArgVtypeByRef && ainfo->storage != ArgVtypeByRefOnStack)
				g_assert (ins->opcode == OP_REGOFFSET);

			switch (ainfo->storage) {
				/* Stack slots for arguments have size 8 */
				code = emit_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				if (i == 0 && sig->hasthis) {
					mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
					mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
				/* fp argument registers spilled to the frame (double / float) */
				code = emit_strfpx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
				code = emit_strfpw (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
			case ArgVtypeByRefOnStack:
			case ArgVtypeOnStack:
			case ArgVtypeByRef: {
				MonoInst *addr_arg = ins->inst_left;

				if (ainfo->gsharedvt) {
					g_assert (ins->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
					arm_strx (code, ainfo->reg, ins->inst_basereg, ins->inst_offset);
					/* NOTE(review): else-branch brace dropped — stores through the VTARG address var */
					g_assert (ins->opcode == OP_VTARG_ADDR);
					g_assert (addr_arg->opcode == OP_REGOFFSET);
					arm_strx (code, ainfo->reg, addr_arg->inst_basereg, addr_arg->inst_offset);
			case ArgVtypeInIRegs:
				/* Spill each general register of the vtype to consecutive 8-byte slots */
				for (part = 0; part < ainfo->nregs; part++) {
					code = emit_strx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + (part * 8));
				/* NOTE(review): HFA arm — spill each fp register to its field offset */
				for (part = 0; part < ainfo->nregs; part++) {
					if (ainfo->esize == 4)
						code = emit_strfpw (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
						code = emit_strfpx (code, ainfo->reg + part, ins->inst_basereg, ins->inst_offset + ainfo->foffsets [part]);
				g_assert_not_reached ();
4811 * emit_store_regarray:
4813 * Emit code to store the registers in REGS into the appropriate elements of
4814 * the register array at BASEREG+OFFSET.
4816 static __attribute__ ((__warn_unused_result__
)) guint8
*
4817 emit_store_regarray (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4821 for (i
= 0; i
< 32; ++i
) {
4822 if (regs
& (1 << i
)) {
4823 if (i
+ 1 < 32 && (regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
4824 arm_stpx (code
, i
, i
+ 1, basereg
, offset
+ (i
* 8));
4826 } else if (i
== ARMREG_SP
) {
4827 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
4828 arm_strx (code
, ARMREG_IP1
, basereg
, offset
+ (i
* 8));
4830 arm_strx (code
, i
, basereg
, offset
+ (i
* 8));
4838 * emit_load_regarray:
4840 * Emit code to load the registers in REGS from the appropriate elements of
4841 * the register array at BASEREG+OFFSET.
4843 static __attribute__ ((__warn_unused_result__
)) guint8
*
4844 emit_load_regarray (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4848 for (i
= 0; i
< 32; ++i
) {
4849 if (regs
& (1 << i
)) {
4850 if ((regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
4851 if (offset
+ (i
* 8) < 500)
4852 arm_ldpx (code
, i
, i
+ 1, basereg
, offset
+ (i
* 8));
4854 code
= emit_ldrx (code
, i
, basereg
, offset
+ (i
* 8));
4855 code
= emit_ldrx (code
, i
+ 1, basereg
, offset
+ ((i
+ 1) * 8));
4858 } else if (i
== ARMREG_SP
) {
4859 g_assert_not_reached ();
4861 code
= emit_ldrx (code
, i
, basereg
, offset
+ (i
* 8));
4869 * emit_store_regset:
4871 * Emit code to store the registers in REGS into consecutive memory locations starting
4872 * at BASEREG+OFFSET.
4874 static __attribute__ ((__warn_unused_result__
)) guint8
*
4875 emit_store_regset (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4880 for (i
= 0; i
< 32; ++i
) {
4881 if (regs
& (1 << i
)) {
4882 if ((regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
4883 arm_stpx (code
, i
, i
+ 1, basereg
, offset
+ (pos
* 8));
4886 } else if (i
== ARMREG_SP
) {
4887 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
4888 arm_strx (code
, ARMREG_IP1
, basereg
, offset
+ (pos
* 8));
4890 arm_strx (code
, i
, basereg
, offset
+ (pos
* 8));
4901 * Emit code to load the registers in REGS from consecutive memory locations starting
4902 * at BASEREG+OFFSET.
4904 static __attribute__ ((__warn_unused_result__
)) guint8
*
4905 emit_load_regset (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4910 for (i
= 0; i
< 32; ++i
) {
4911 if (regs
& (1 << i
)) {
4912 if ((regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
4913 arm_ldpx (code
, i
, i
+ 1, basereg
, offset
+ (pos
* 8));
4916 } else if (i
== ARMREG_SP
) {
4917 g_assert_not_reached ();
4919 arm_ldrx (code
, i
, basereg
, offset
+ (pos
* 8));
4927 __attribute__ ((__warn_unused_result__
)) guint8
*
4928 mono_arm_emit_load_regarray (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4930 return emit_load_regarray (code
, regs
, basereg
, offset
);
4933 __attribute__ ((__warn_unused_result__
)) guint8
*
4934 mono_arm_emit_store_regarray (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4936 return emit_store_regarray (code
, regs
, basereg
, offset
);
4939 __attribute__ ((__warn_unused_result__
)) guint8
*
4940 mono_arm_emit_store_regset (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4942 return emit_store_regset (code
, regs
, basereg
, offset
);
/* Same as emit_store_regset, but emit unwind info too */
/* CFA_OFFSET is the offset between the CFA and basereg */
/*
 * NOTE(review): damaged extraction — the 'pos' initialization, the nregs
 * assignments, the pair/else branch structure and the trailing return were
 * dropped; only surviving tokens are reflowed below.
 */
static __attribute__ ((__warn_unused_result__)) guint8 *
emit_store_regset_cfa (MonoCompile *cfg, guint8 *code, guint64 regs, int basereg, int offset, int cfa_offset, guint64 no_cfa_regset)
	int i, j, pos, nregs;
	/* Registers excluded from unwind info (e.g. fp/sp handled by the prolog) */
	guint32 cfa_regset = regs & ~no_cfa_regset;

	for (i = 0; i < 32; ++i) {
		if (regs & (1 << i)) {
			if ((regs & (1 << (i + 1))) && (i + 1 != ARMREG_SP)) {
				/* Pair store when the stp immediate range allows it… */
				arm_stpx (code, i, i + 1, basereg, offset + (pos * 8));
				/* …otherwise two separate stores (condition line dropped by extraction) */
				code = emit_strx (code, i, basereg, offset + (pos * 8));
				code = emit_strx (code, i + 1, basereg, offset + (pos * 8) + 8);
			} else if (i == ARMREG_SP) {
				/* sp cannot be stored directly; move it through ip1 first */
				arm_movspx (code, ARMREG_IP1, ARMREG_SP);
				code = emit_strx (code, ARMREG_IP1, basereg, offset + (pos * 8));
				code = emit_strx (code, i, basereg, offset + (pos * 8));

			/* Emit a DW_CFA_offset for every register just stored that is not excluded */
			for (j = 0; j < nregs; ++j) {
				if (cfa_regset & (1 << (i + j)))
					mono_emit_unwind_op_offset (cfg, code, i + j, (- cfa_offset) + offset + ((pos + j) * 8));
/*
 * emit_setup_lmf:
 *
 *   Emit code to initialize an LMF structure at LMF_OFFSET.
 * (NOTE(review): damaged extraction — the signature's return-type line and
 * surrounding braces were dropped; surviving tokens reflowed below.)
 */
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
	/*
	 * The LMF should contain all the state required to be able to reconstruct the machine state
	 * at the current point of execution. Since the LMF is only read during EH, only callee
	 * saved etc. registers need to be saved.
	 * FIXME: Save callee saved fp regs, JITted code doesn't use them, but native code does, and they
	 * need to be restored during EH.
	 */

	/* Record the current pc into lmf->pc (adr of the next instruction) */
	arm_adrx (code, ARMREG_LR, code);
	code = emit_strx (code, ARMREG_LR, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, pc));
	/* gregs + fp + sp */
	/* Don't emit unwind info for sp/fp, they are already handled in the prolog */
	code = emit_store_regset_cfa (cfg, code, MONO_ARCH_LMF_REGS, ARMREG_FP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs), cfa_offset, (1 << ARMREG_FP) | (1 << ARMREG_SP));
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prolog: allocate the frame, save fp/lr, set up the frame
 * pointer and unwind info, save callee-saved registers (or the full LMF),
 * store vret/rgctx housekeeping values, move the incoming arguments into
 * place and initialize the debugger/sequence-point variables.
 *
 * NOTE(review): damaged extraction — local declarations, the cfa_offset
 * initialization, and several braces were dropped; surviving tokens reflowed.
 */
mono_arch_emit_prolog (MonoCompile *cfg)
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	int cfa_offset, max_offset;

	sig = mono_method_signature_internal (method);
	/* Initial guess for the code buffer size, grown on demand elsewhere */
	cfg->code_size = 256 + sig->param_count * 64;
	code = cfg->native_code = g_malloc (cfg->code_size);

	/* This can be unaligned */
	cfg->stack_offset = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);

	/* At entry the CFA is sp+0 */
	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);

	if (arm_is_ldpx_imm (-cfg->stack_offset)) {
		/* Allocate the frame and save fp/lr in one pre-indexed stp */
		arm_stpx_pre (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, -cfg->stack_offset);
		/* sp -= cfg->stack_offset */
		/* This clobbers ip0/ip1 */
		code = emit_subx_sp_imm (code, cfg->stack_offset);
		arm_stpx (code, ARMREG_FP, ARMREG_LR, ARMREG_SP, 0);
	/* NOTE(review): 'cfa_offset = 0;' line appears to have been dropped above */
	cfa_offset += cfg->stack_offset;
	mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
	mono_emit_unwind_op_offset (cfg, code, ARMREG_FP, (- cfa_offset) + 0);
	mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, (- cfa_offset) + 8);
	/* Establish the frame pointer and redefine the CFA relative to it */
	arm_movspx (code, ARMREG_FP, ARMREG_SP);
	mono_emit_unwind_op_def_cfa_reg (cfg, code, ARMREG_FP);
	if (cfg->param_area) {
		/* The param area is below the frame pointer */
		code = emit_subx_sp_imm (code, cfg->param_area);

	if (cfg->method->save_lmf) {
		code = emit_setup_lmf (cfg, code, cfg->lmf_var->inst_offset, cfa_offset);
		/* NOTE(review): else-branch brace dropped — plain callee-saved spill */
		code = emit_store_regset_cfa (cfg, code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset, cfa_offset, 0);

	/* Setup args reg */
	if (cfg->arch.args_reg) {
		/* The register was already saved above */
		code = emit_addx_imm (code, cfg->arch.args_reg, ARMREG_FP, cfg->stack_offset);

	/* Save return area addr received in R8 */
	if (cfg->vret_addr) {
		MonoInst *ins = cfg->vret_addr;

		g_assert (ins->opcode == OP_REGOFFSET);
		code = emit_strx (code, ARMREG_R8, ins->inst_basereg, ins->inst_offset);

	/* Save mrgctx received in MONO_ARCH_RGCTX_REG */
	if (cfg->rgctx_var) {
		MonoInst *ins = cfg->rgctx_var;

		g_assert (ins->opcode == OP_REGOFFSET);
		code = emit_strx (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);

		/* Record both locations of the rgctx for debug info */
		mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
		mono_add_var_location (cfg, cfg->rgctx_var, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);

	/*
	 * Move arguments to their registers/stack locations.
	 */
	code = emit_move_args (cfg, code);

	/* Initialize seq_point_info_var */
	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins = cfg->arch.seq_point_info_var;

		/* Initialize the variable from a GOT slot */
		code = emit_aotconst (cfg, code, ARMREG_IP0, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
		g_assert (ins->opcode == OP_REGOFFSET);
		code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);

		/* Initialize ss_tramp_var */
		ins = cfg->arch.ss_tramp_var;
		g_assert (ins->opcode == OP_REGOFFSET);

		code = emit_ldrx (code, ARMREG_IP1, ARMREG_IP0, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr));
		code = emit_strx (code, ARMREG_IP1, ins->inst_basereg, ins->inst_offset);
		/* NOTE(review): else-branch brace dropped — non-AOT path below */
		if (cfg->arch.ss_tramp_var) {
			/* Initialize ss_tramp_var */
			ins = cfg->arch.ss_tramp_var;
			g_assert (ins->opcode == OP_REGOFFSET);

			code = emit_imm64 (code, ARMREG_IP0, (guint64)&ss_trampoline);
			code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);

		if (cfg->arch.bp_tramp_var) {
			/* Initialize bp_tramp_var */
			ins = cfg->arch.bp_tramp_var;
			g_assert (ins->opcode == OP_REGOFFSET);

			code = emit_imm64 (code, ARMREG_IP0, (guint64)bp_trampoline);
			code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, ins->inst_offset);

	/* Estimate the method size to decide whether bcc branch islands are needed */
	if (cfg->opt & MONO_OPT_BRANCH) {
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			bb->max_offset = max_offset;

			MONO_BB_FOR_EACH_INS (bb, ins) {
				max_offset += ins_get_size (ins->opcode);

	/* bcc displacement is 19 bits signed: 0x3ffff * 4 bytes of code */
	if (max_offset > 0x3ffff * 4)
		cfg->arch.cond_branch_islands = TRUE;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: restore callee-saved registers (from the LMF if
 * one was saved, otherwise from the plain spill area), reload vtype return
 * values into their ABI registers, tear down the frame and return.
 *
 * NOTE(review): damaged extraction — local declarations, case labels, breaks
 * and closing braces were dropped; surviving tokens reflowed below.
 */
mono_arch_emit_epilog (MonoCompile *cfg)
	int max_epilog_size;

	max_epilog_size = 16 + 20*4;
	code = realloc_code (cfg, max_epilog_size);

	if (cfg->method->save_lmf) {
		/* Restore the callee-saved registers from the LMF gregs array */
		code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs) - (MONO_ARCH_FIRST_LMF_REG * 8));
		/* NOTE(review): else-branch brace dropped — restore from the packed regset */
		code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	switch (cinfo->ret.storage) {
	case ArgVtypeInIRegs: {
		MonoInst *ins = cfg->ret;

		for (i = 0; i < cinfo->ret.nregs; ++i)
			code = emit_ldrx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * 8));
		/* NOTE(review): HFA case label dropped below */
		MonoInst *ins = cfg->ret;

		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
				code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);

	/* Destroy the frame (ip0/ip1 are free to clobber) and return */
	code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, (1 << ARMREG_IP0) | (1 << ARMREG_IP1));

	arm_retx (code, ARMREG_LR);

	g_assert (code - (cfg->native_code + cfg->code_len) < max_epilog_size);

	set_code_cursor (cfg, code);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit, after the method body, the out-of-line throw sequences for corlib
 * exceptions raised via OP_COND_EXC_*. Identical exception types share one
 * sequence: the first occurrence emits it, later ones are patched to branch
 * to the already-emitted code.
 *
 * NOTE(review): damaged extraction — size accounting lines, 'continue'
 * statements and closing braces were dropped; surviving tokens reflowed.
 */
mono_arch_emit_exceptions (MonoCompile *cfg)
	MonoClass *exc_class;
	guint8 *exc_throw_pos [MONO_EXC_INTRINS_NUM];
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
	int i, id, size = 0;

	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
		exc_throw_pos [i] = NULL;
		exc_throw_found [i] = 0;

	/* First pass: count how many distinct exception sequences are needed */
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->type == MONO_PATCH_INFO_EXC) {
			i = mini_exception_id_by_name ((const char*)ji->data.target);
			if (!exc_throw_found [i]) {
				exc_throw_found [i] = TRUE;

	code = realloc_code (cfg, size);

	/* Emit code to raise corlib exceptions */
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->type != MONO_PATCH_INFO_EXC)
		/* NOTE(review): 'continue;' dropped here */

		ip = cfg->native_code + ji->ip.i;

		id = mini_exception_id_by_name ((const char*)ji->data.target);

		if (exc_throw_pos [id]) {
			/* ip points to the bcc () in OP_COND_EXC_... */
			arm_patch_rel (ip, exc_throw_pos [id], ji->relocation);
			ji->type = MONO_PATCH_INFO_NONE;
			/* NOTE(review): 'continue;' dropped here */

		exc_throw_pos [id] = code;
		arm_patch_rel (ip, code, ji->relocation);

		/* We are being branched to from the code generated by emit_cond_exc (), the pc is in ip1 */

		/* r0 = type token */
		exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", ji->data.name);
		code = emit_imm (code, ARMREG_R0, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);
		/* r1 = throw ip */
		arm_movx (code, ARMREG_R1, ARMREG_IP1);
		/* Branch to the corlib exception throwing trampoline */
		ji->ip.i = code - cfg->native_code;
		ji->type = MONO_PATCH_INFO_JIT_ICALL_ID;
		ji->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
		ji->relocation = MONO_R_ARM64_BL;
		cfg->thunk_area += THUNK_SIZE;
		set_code_cursor (cfg, code);

	set_code_cursor (cfg, code);
5265 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
5271 mono_arch_get_patch_offset (guint8
*code
)
5277 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
5278 gpointer fail_tramp
)
5280 int i
, buf_len
, imt_reg
;
5284 printf ("building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p\n", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
, size
, start
, ((guint8
*)start
) + size
, vtable
);
5285 for (i
= 0; i
< count
; ++i
) {
5286 MonoIMTCheckItem
*item
= imt_entries
[i
];
5287 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, item
->key
->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
5292 for (i
= 0; i
< count
; ++i
) {
5293 MonoIMTCheckItem
*item
= imt_entries
[i
];
5294 if (item
->is_equals
) {
5295 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
5297 if (item
->check_target_idx
|| fail_case
) {
5298 if (!item
->compare_done
|| fail_case
) {
5299 buf_len
+= 4 * 4 + 4;
5302 if (item
->has_target_code
) {
5319 buf
= (guint8
*)mono_method_alloc_generic_virtual_trampoline (domain
, buf_len
);
5321 buf
= mono_domain_code_reserve (domain
, buf_len
);
5325 * We are called by JITted code, which passes in the IMT argument in
5326 * MONO_ARCH_RGCTX_REG (r27). We need to preserve all caller saved regs
5329 imt_reg
= MONO_ARCH_RGCTX_REG
;
5330 for (i
= 0; i
< count
; ++i
) {
5331 MonoIMTCheckItem
*item
= imt_entries
[i
];
5333 item
->code_target
= code
;
5335 if (item
->is_equals
) {
5337 * Check the imt argument against item->key, if equals, jump to either
5338 * item->value.target_code or to vtable [item->value.vtable_slot].
5339 * If fail_tramp is set, jump to it if not-equals.
5341 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
5343 if (item
->check_target_idx
|| fail_case
) {
5344 /* Compare imt_reg with item->key */
5345 if (!item
->compare_done
|| fail_case
) {
5346 // FIXME: Optimize this
5347 code
= emit_imm64 (code
, ARMREG_IP0
, (guint64
)item
->key
);
5348 arm_cmpx (code
, imt_reg
, ARMREG_IP0
);
5350 item
->jmp_code
= code
;
5351 arm_bcc (code
, ARMCOND_NE
, 0);
5352 /* Jump to target if equals */
5353 if (item
->has_target_code
) {
5354 code
= emit_imm64 (code
, ARMREG_IP0
, (guint64
)item
->value
.target_code
);
5355 arm_brx (code
, ARMREG_IP0
);
5357 guint64 imm
= (guint64
)&(vtable
->vtable
[item
->value
.vtable_slot
]);
5359 code
= emit_imm64 (code
, ARMREG_IP0
, imm
);
5360 arm_ldrx (code
, ARMREG_IP0
, ARMREG_IP0
, 0);
5361 arm_brx (code
, ARMREG_IP0
);
5365 arm_patch_rel (item
->jmp_code
, code
, MONO_R_ARM64_BCC
);
5366 item
->jmp_code
= NULL
;
5367 code
= emit_imm64 (code
, ARMREG_IP0
, (guint64
)fail_tramp
);
5368 arm_brx (code
, ARMREG_IP0
);
5371 guint64 imm
= (guint64
)&(vtable
->vtable
[item
->value
.vtable_slot
]);
5373 code
= emit_imm64 (code
, ARMREG_IP0
, imm
);
5374 arm_ldrx (code
, ARMREG_IP0
, ARMREG_IP0
, 0);
5375 arm_brx (code
, ARMREG_IP0
);
5378 code
= emit_imm64 (code
, ARMREG_IP0
, (guint64
)item
->key
);
5379 arm_cmpx (code
, imt_reg
, ARMREG_IP0
);
5380 item
->jmp_code
= code
;
5381 arm_bcc (code
, ARMCOND_HS
, 0);
5384 /* Patch the branches */
5385 for (i
= 0; i
< count
; ++i
) {
5386 MonoIMTCheckItem
*item
= imt_entries
[i
];
5387 if (item
->jmp_code
&& item
->check_target_idx
)
5388 arm_patch_rel (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
, MONO_R_ARM64_BCC
);
5391 g_assert ((code
- buf
) < buf_len
);
5393 mono_arch_flush_icache (buf
, code
- buf
);
5394 MONO_PROFILER_RAISE (jit_code_buffer
, (buf
, code
- buf
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
5400 mono_arch_get_trampolines (gboolean aot
)
5402 return mono_arm_get_exception_trampolines (aot
);
5405 #else /* DISABLE_JIT */
5408 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
5409 gpointer fail_tramp
)
5411 g_assert_not_reached ();
5415 #endif /* !DISABLE_JIT */
5417 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
5420 mono_arch_set_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5423 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
5426 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), (guint8
*)ji
->code_start
);
5428 g_assert (native_offset
% 4 == 0);
5429 g_assert (info
->bp_addrs
[native_offset
/ 4] == 0);
5430 info
->bp_addrs
[native_offset
/ 4] = (guint8
*)mini_get_breakpoint_trampoline ();
5432 /* ip points to an ldrx */
5434 arm_blrx (code
, ARMREG_IP0
);
5435 mono_arch_flush_icache (ip
, code
- ip
);
5440 mono_arch_clear_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5445 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
5446 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), (guint8
*)ji
->code_start
);
5448 g_assert (native_offset
% 4 == 0);
5449 info
->bp_addrs
[native_offset
/ 4] = NULL
;
5451 /* ip points to an ldrx */
5454 mono_arch_flush_icache (ip
, code
- ip
);
5459 mono_arch_start_single_stepping (void)
5461 ss_trampoline
= mini_get_single_step_trampoline ();
5465 mono_arch_stop_single_stepping (void)
5467 ss_trampoline
= NULL
;
5471 mono_arch_is_single_step_event (void *info
, void *sigctx
)
5473 /* We use soft breakpoints on arm64 */
5478 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
5480 /* We use soft breakpoints on arm64 */
5485 mono_arch_skip_breakpoint (MonoContext
*ctx
, MonoJitInfo
*ji
)
5487 g_assert_not_reached ();
5491 mono_arch_skip_single_step (MonoContext
*ctx
)
5493 g_assert_not_reached ();
5497 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
5502 // FIXME: Add a free function
5504 mono_domain_lock (domain
);
5505 info
= (SeqPointInfo
*)g_hash_table_lookup (domain_jit_info (domain
)->arch_seq_points
,
5507 mono_domain_unlock (domain
);
5510 ji
= mono_jit_info_table_find (domain
, code
);
5513 info
= g_malloc0 (sizeof (SeqPointInfo
) + (ji
->code_size
/ 4) * sizeof(guint8
*));
5515 info
->ss_tramp_addr
= &ss_trampoline
;
5517 mono_domain_lock (domain
);
5518 g_hash_table_insert (domain_jit_info (domain
)->arch_seq_points
,
5520 mono_domain_unlock (domain
);
5526 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
5529 mono_arch_opcode_supported (int opcode
)
5532 case OP_ATOMIC_ADD_I4
:
5533 case OP_ATOMIC_ADD_I8
:
5534 case OP_ATOMIC_EXCHANGE_I4
:
5535 case OP_ATOMIC_EXCHANGE_I8
:
5536 case OP_ATOMIC_CAS_I4
:
5537 case OP_ATOMIC_CAS_I8
:
5538 case OP_ATOMIC_LOAD_I1
:
5539 case OP_ATOMIC_LOAD_I2
:
5540 case OP_ATOMIC_LOAD_I4
:
5541 case OP_ATOMIC_LOAD_I8
:
5542 case OP_ATOMIC_LOAD_U1
:
5543 case OP_ATOMIC_LOAD_U2
:
5544 case OP_ATOMIC_LOAD_U4
:
5545 case OP_ATOMIC_LOAD_U8
:
5546 case OP_ATOMIC_LOAD_R4
:
5547 case OP_ATOMIC_LOAD_R8
:
5548 case OP_ATOMIC_STORE_I1
:
5549 case OP_ATOMIC_STORE_I2
:
5550 case OP_ATOMIC_STORE_I4
:
5551 case OP_ATOMIC_STORE_I8
:
5552 case OP_ATOMIC_STORE_U1
:
5553 case OP_ATOMIC_STORE_U2
:
5554 case OP_ATOMIC_STORE_U4
:
5555 case OP_ATOMIC_STORE_U8
:
5556 case OP_ATOMIC_STORE_R4
:
5557 case OP_ATOMIC_STORE_R8
:
5565 mono_arch_get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
5567 return get_call_info (mp
, sig
);
5571 mono_arch_load_function (MonoJitICallId jit_icall_id
)
5573 gpointer target
= NULL
;
5574 switch (jit_icall_id
) {
5575 #undef MONO_AOT_ICALL
5576 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
5577 MONO_AOT_ICALL (mono_arm_resume_unwind
)
5578 MONO_AOT_ICALL (mono_arm_start_gsharedvt_call
)
5579 MONO_AOT_ICALL (mono_arm_throw_exception
)