3 * ARM64 backend for the Mono code generator
5 * Copyright 2013 Xamarin, Inc (http://www.xamarin.com)
10 * Paolo Molaro (lupus@ximian.com)
11 * Dietmar Maurer (dietmar@ximian.com)
13 * (C) 2003 Ximian, Inc.
14 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
15 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
16 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
20 #include "cpu-arm64.h"
22 #include "aot-runtime.h"
23 #include "mini-runtime.h"
25 #include <mono/arch/arm64/arm64-codegen.h>
26 #include <mono/utils/mono-mmap.h>
27 #include <mono/utils/mono-memory-model.h>
28 #include <mono/metadata/abi-details.h>
30 #include "interp/interp.h"
35 * - ARM(R) Architecture Reference Manual, ARMv8, for ARMv8-A architecture profile (DDI0487A_a_armv8_arm.pdf)
36 * - Procedure Call Standard for the ARM 64-bit Architecture (AArch64) (IHI0055B_aapcs64.pdf)
37 * - ELF for the ARM 64-bit Architecture (IHI0056B_aaelf64.pdf)
40 * - ip0/ip1/lr are used as temporary registers
41 * - r27 is used as the rgctx/imt register
42 * - r28 is used to access arguments passed on the stack
43 * - d15/d16 are used as fp temporary registers
46 #define FP_TEMP_REG ARMREG_D16
47 #define FP_TEMP_REG2 ARMREG_D17
49 #define THUNK_SIZE (4 * 4)
51 /* The single step trampoline */
52 static gpointer ss_trampoline
;
54 /* The breakpoint trampoline */
55 static gpointer bp_trampoline
;
57 static gboolean ios_abi
;
58 static gboolean enable_ptrauth
;
60 static __attribute__ ((__warn_unused_result__
)) guint8
* emit_load_regset (guint8
*code
, guint64 regs
, int basereg
, int offset
);
61 static guint8
* emit_brx (guint8
*code
, int reg
);
62 static guint8
* emit_blrx (guint8
*code
, int reg
);
/* Return the human-readable name of integer register REG ("r0".."r28", "fp", "lr", "sp"). */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
		"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
		"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "fp",
		"lr", "sp"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
/* Return the human-readable name of floating point register REG ("d0".."d31"). */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9",
		"d10", "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19",
		"d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
		"d30", "d31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown fp";
}
/* Return the human-readable name of SIMD register REG ("v0".."v31"). */
const char *
mono_arch_xregname (int reg)
{
	static const char * rnames[] = {
		"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
		"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
		"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
		"v30", "v31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
107 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
113 #define MAX_ARCH_DELEGATE_PARAMS 7
116 get_delegate_invoke_impl (gboolean has_target
, gboolean param_count
, guint32
*code_size
)
118 guint8
*code
, *start
;
120 MINI_BEGIN_CODEGEN ();
123 start
= code
= mono_global_codeman_reserve (12);
125 /* Replace the this argument with the target */
126 arm_ldrx (code
, ARMREG_IP0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
127 arm_ldrx (code
, ARMREG_R0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
128 code
= mono_arm_emit_brx (code
, ARMREG_IP0
);
130 g_assert ((code
- start
) <= 12);
134 size
= 8 + param_count
* 4;
135 start
= code
= mono_global_codeman_reserve (size
);
137 arm_ldrx (code
, ARMREG_IP0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
138 /* slide down the arguments */
139 for (i
= 0; i
< param_count
; ++i
)
140 arm_movx (code
, i
, i
+ 1);
141 code
= mono_arm_emit_brx (code
, ARMREG_IP0
);
143 g_assert ((code
- start
) <= size
);
145 MINI_END_CODEGEN (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
);
148 *code_size
= code
- start
;
150 return MINI_ADDR_TO_FTNPTR (start
);
154 * mono_arch_get_delegate_invoke_impls:
156 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
160 mono_arch_get_delegate_invoke_impls (void)
168 code
= (guint8
*)get_delegate_invoke_impl (TRUE
, 0, &code_len
);
169 res
= g_slist_prepend (res
, mono_tramp_info_create ("delegate_invoke_impl_has_target", code
, code_len
, NULL
, NULL
));
171 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
172 code
= (guint8
*)get_delegate_invoke_impl (FALSE
, i
, &code_len
);
173 tramp_name
= g_strdup_printf ("delegate_invoke_impl_target_%d", i
);
174 res
= g_slist_prepend (res
, mono_tramp_info_create (tramp_name
, code
, code_len
, NULL
, NULL
));
182 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
184 guint8
*code
, *start
;
187 * vtypes are returned in registers, or using the dedicated r8 register, so
188 * they can be supported by delegate invokes.
192 static guint8
* cached
= NULL
;
197 if (mono_ee_features
.use_aot_trampolines
)
198 start
= (guint8
*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
200 start
= (guint8
*)get_delegate_invoke_impl (TRUE
, 0, NULL
);
201 mono_memory_barrier ();
205 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
208 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
210 for (i
= 0; i
< sig
->param_count
; ++i
)
211 if (!mono_is_regsize_var (sig
->params
[i
]))
214 code
= cache
[sig
->param_count
];
218 if (mono_ee_features
.use_aot_trampolines
) {
219 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
220 start
= (guint8
*)mono_aot_get_trampoline (name
);
223 start
= (guint8
*)get_delegate_invoke_impl (FALSE
, sig
->param_count
, NULL
);
225 mono_memory_barrier ();
226 cache
[sig
->param_count
] = start
;
234 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
240 mono_arch_get_this_arg_from_call (host_mgreg_t
*regs
, guint8
*code
)
242 return (gpointer
)regs
[ARMREG_R0
];
246 mono_arch_cpu_init (void)
251 mono_arch_init (void)
253 #if defined(TARGET_IOS) || defined(TARGET_WATCHOS) || defined(TARGET_OSX)
256 #ifdef MONO_ARCH_ENABLE_PTRAUTH
257 enable_ptrauth
= TRUE
;
261 bp_trampoline
= mini_get_breakpoint_trampoline ();
263 mono_arm_gsharedvt_init ();
267 mono_arch_cleanup (void)
272 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
279 mono_arch_register_lowlevel_calls (void)
284 mono_arch_finish_init (void)
288 /* The maximum length is 2 instructions */
290 emit_imm (guint8
*code
, int dreg
, int imm
)
292 // FIXME: Optimize this
295 arm_movnx (code
, dreg
, (~limm
) & 0xffff, 0);
296 arm_movkx (code
, dreg
, (limm
>> 16) & 0xffff, 16);
298 arm_movzx (code
, dreg
, imm
& 0xffff, 0);
300 arm_movkx (code
, dreg
, (imm
>> 16) & 0xffff, 16);
306 /* The maximum length is 4 instructions */
308 emit_imm64 (guint8
*code
, int dreg
, guint64 imm
)
310 // FIXME: Optimize this
311 arm_movzx (code
, dreg
, imm
& 0xffff, 0);
312 if ((imm
>> 16) & 0xffff)
313 arm_movkx (code
, dreg
, (imm
>> 16) & 0xffff, 16);
314 if ((imm
>> 32) & 0xffff)
315 arm_movkx (code
, dreg
, (imm
>> 32) & 0xffff, 32);
316 if ((imm
>> 48) & 0xffff)
317 arm_movkx (code
, dreg
, (imm
>> 48) & 0xffff, 48);
323 mono_arm_emit_imm64 (guint8
*code
, int dreg
, gint64 imm
)
325 return emit_imm64 (code
, dreg
, imm
);
331 * Emit a patchable code sequence for constructing a 64 bit immediate.
334 emit_imm64_template (guint8
*code
, int dreg
)
336 arm_movzx (code
, dreg
, 0, 0);
337 arm_movkx (code
, dreg
, 0, 16);
338 arm_movkx (code
, dreg
, 0, 32);
339 arm_movkx (code
, dreg
, 0, 48);
344 static __attribute__ ((__warn_unused_result__
)) guint8
*
345 emit_addw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
347 if (!arm_is_arith_imm (imm
)) {
348 code
= emit_imm (code
, ARMREG_LR
, imm
);
349 arm_addw (code
, dreg
, sreg
, ARMREG_LR
);
351 arm_addw_imm (code
, dreg
, sreg
, imm
);
356 static __attribute__ ((__warn_unused_result__
)) guint8
*
357 emit_addx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
359 if (!arm_is_arith_imm (imm
)) {
360 code
= emit_imm (code
, ARMREG_LR
, imm
);
361 arm_addx (code
, dreg
, sreg
, ARMREG_LR
);
363 arm_addx_imm (code
, dreg
, sreg
, imm
);
368 static __attribute__ ((__warn_unused_result__
)) guint8
*
369 emit_subw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
371 if (!arm_is_arith_imm (imm
)) {
372 code
= emit_imm (code
, ARMREG_LR
, imm
);
373 arm_subw (code
, dreg
, sreg
, ARMREG_LR
);
375 arm_subw_imm (code
, dreg
, sreg
, imm
);
380 static __attribute__ ((__warn_unused_result__
)) guint8
*
381 emit_subx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
383 if (!arm_is_arith_imm (imm
)) {
384 code
= emit_imm (code
, ARMREG_LR
, imm
);
385 arm_subx (code
, dreg
, sreg
, ARMREG_LR
);
387 arm_subx_imm (code
, dreg
, sreg
, imm
);
392 /* Emit sp+=imm. Clobbers ip0/ip1 */
393 static __attribute__ ((__warn_unused_result__
)) guint8
*
394 emit_addx_sp_imm (guint8
*code
, int imm
)
396 code
= emit_imm (code
, ARMREG_IP0
, imm
);
397 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
398 arm_addx (code
, ARMREG_IP1
, ARMREG_IP1
, ARMREG_IP0
);
399 arm_movspx (code
, ARMREG_SP
, ARMREG_IP1
);
403 /* Emit sp-=imm. Clobbers ip0/ip1 */
404 static __attribute__ ((__warn_unused_result__
)) guint8
*
405 emit_subx_sp_imm (guint8
*code
, int imm
)
407 code
= emit_imm (code
, ARMREG_IP0
, imm
);
408 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
409 arm_subx (code
, ARMREG_IP1
, ARMREG_IP1
, ARMREG_IP0
);
410 arm_movspx (code
, ARMREG_SP
, ARMREG_IP1
);
414 static __attribute__ ((__warn_unused_result__
)) guint8
*
415 emit_andw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
418 code
= emit_imm (code
, ARMREG_LR
, imm
);
419 arm_andw (code
, dreg
, sreg
, ARMREG_LR
);
424 static __attribute__ ((__warn_unused_result__
)) guint8
*
425 emit_andx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
428 code
= emit_imm (code
, ARMREG_LR
, imm
);
429 arm_andx (code
, dreg
, sreg
, ARMREG_LR
);
434 static __attribute__ ((__warn_unused_result__
)) guint8
*
435 emit_orrw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
438 code
= emit_imm (code
, ARMREG_LR
, imm
);
439 arm_orrw (code
, dreg
, sreg
, ARMREG_LR
);
444 static __attribute__ ((__warn_unused_result__
)) guint8
*
445 emit_orrx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
448 code
= emit_imm (code
, ARMREG_LR
, imm
);
449 arm_orrx (code
, dreg
, sreg
, ARMREG_LR
);
454 static __attribute__ ((__warn_unused_result__
)) guint8
*
455 emit_eorw_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
458 code
= emit_imm (code
, ARMREG_LR
, imm
);
459 arm_eorw (code
, dreg
, sreg
, ARMREG_LR
);
464 static __attribute__ ((__warn_unused_result__
)) guint8
*
465 emit_eorx_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
468 code
= emit_imm (code
, ARMREG_LR
, imm
);
469 arm_eorx (code
, dreg
, sreg
, ARMREG_LR
);
474 static __attribute__ ((__warn_unused_result__
)) guint8
*
475 emit_cmpw_imm (guint8
*code
, int sreg
, int imm
)
478 arm_cmpw (code
, sreg
, ARMREG_RZR
);
481 code
= emit_imm (code
, ARMREG_LR
, imm
);
482 arm_cmpw (code
, sreg
, ARMREG_LR
);
488 static __attribute__ ((__warn_unused_result__
)) guint8
*
489 emit_cmpx_imm (guint8
*code
, int sreg
, int imm
)
492 arm_cmpx (code
, sreg
, ARMREG_RZR
);
495 code
= emit_imm (code
, ARMREG_LR
, imm
);
496 arm_cmpx (code
, sreg
, ARMREG_LR
);
502 static __attribute__ ((__warn_unused_result__
)) guint8
*
503 emit_strb (guint8
*code
, int rt
, int rn
, int imm
)
505 if (arm_is_strb_imm (imm
)) {
506 arm_strb (code
, rt
, rn
, imm
);
508 g_assert (rt
!= ARMREG_IP0
);
509 g_assert (rn
!= ARMREG_IP0
);
510 code
= emit_imm (code
, ARMREG_IP0
, imm
);
511 arm_strb_reg (code
, rt
, rn
, ARMREG_IP0
);
516 static __attribute__ ((__warn_unused_result__
)) guint8
*
517 emit_strh (guint8
*code
, int rt
, int rn
, int imm
)
519 if (arm_is_strh_imm (imm
)) {
520 arm_strh (code
, rt
, rn
, imm
);
522 g_assert (rt
!= ARMREG_IP0
);
523 g_assert (rn
!= ARMREG_IP0
);
524 code
= emit_imm (code
, ARMREG_IP0
, imm
);
525 arm_strh_reg (code
, rt
, rn
, ARMREG_IP0
);
530 static __attribute__ ((__warn_unused_result__
)) guint8
*
531 emit_strw (guint8
*code
, int rt
, int rn
, int imm
)
533 if (arm_is_strw_imm (imm
)) {
534 arm_strw (code
, rt
, rn
, imm
);
536 g_assert (rt
!= ARMREG_IP0
);
537 g_assert (rn
!= ARMREG_IP0
);
538 code
= emit_imm (code
, ARMREG_IP0
, imm
);
539 arm_strw_reg (code
, rt
, rn
, ARMREG_IP0
);
544 static __attribute__ ((__warn_unused_result__
)) guint8
*
545 emit_strfpw (guint8
*code
, int rt
, int rn
, int imm
)
547 if (arm_is_strw_imm (imm
)) {
548 arm_strfpw (code
, rt
, rn
, imm
);
550 g_assert (rn
!= ARMREG_IP0
);
551 code
= emit_imm (code
, ARMREG_IP0
, imm
);
552 arm_addx (code
, ARMREG_IP0
, rn
, ARMREG_IP0
);
553 arm_strfpw (code
, rt
, ARMREG_IP0
, 0);
558 static __attribute__ ((__warn_unused_result__
)) guint8
*
559 emit_strfpx (guint8
*code
, int rt
, int rn
, int imm
)
561 if (arm_is_strx_imm (imm
)) {
562 arm_strfpx (code
, rt
, rn
, imm
);
564 g_assert (rn
!= ARMREG_IP0
);
565 code
= emit_imm (code
, ARMREG_IP0
, imm
);
566 arm_addx (code
, ARMREG_IP0
, rn
, ARMREG_IP0
);
567 arm_strfpx (code
, rt
, ARMREG_IP0
, 0);
572 static __attribute__ ((__warn_unused_result__
)) guint8
*
573 emit_strx (guint8
*code
, int rt
, int rn
, int imm
)
575 if (arm_is_strx_imm (imm
)) {
576 arm_strx (code
, rt
, rn
, imm
);
578 g_assert (rt
!= ARMREG_IP0
);
579 g_assert (rn
!= ARMREG_IP0
);
580 code
= emit_imm (code
, ARMREG_IP0
, imm
);
581 arm_strx_reg (code
, rt
, rn
, ARMREG_IP0
);
586 static __attribute__ ((__warn_unused_result__
)) guint8
*
587 emit_ldrb (guint8
*code
, int rt
, int rn
, int imm
)
589 if (arm_is_pimm12_scaled (imm
, 1)) {
590 arm_ldrb (code
, rt
, rn
, imm
);
592 g_assert (rt
!= ARMREG_IP0
);
593 g_assert (rn
!= ARMREG_IP0
);
594 code
= emit_imm (code
, ARMREG_IP0
, imm
);
595 arm_ldrb_reg (code
, rt
, rn
, ARMREG_IP0
);
600 static __attribute__ ((__warn_unused_result__
)) guint8
*
601 emit_ldrsbx (guint8
*code
, int rt
, int rn
, int imm
)
603 if (arm_is_pimm12_scaled (imm
, 1)) {
604 arm_ldrsbx (code
, rt
, rn
, imm
);
606 g_assert (rt
!= ARMREG_IP0
);
607 g_assert (rn
!= ARMREG_IP0
);
608 code
= emit_imm (code
, ARMREG_IP0
, imm
);
609 arm_ldrsbx_reg (code
, rt
, rn
, ARMREG_IP0
);
614 static __attribute__ ((__warn_unused_result__
)) guint8
*
615 emit_ldrh (guint8
*code
, int rt
, int rn
, int imm
)
617 if (arm_is_pimm12_scaled (imm
, 2)) {
618 arm_ldrh (code
, rt
, rn
, imm
);
620 g_assert (rt
!= ARMREG_IP0
);
621 g_assert (rn
!= ARMREG_IP0
);
622 code
= emit_imm (code
, ARMREG_IP0
, imm
);
623 arm_ldrh_reg (code
, rt
, rn
, ARMREG_IP0
);
628 static __attribute__ ((__warn_unused_result__
)) guint8
*
629 emit_ldrshx (guint8
*code
, int rt
, int rn
, int imm
)
631 if (arm_is_pimm12_scaled (imm
, 2)) {
632 arm_ldrshx (code
, rt
, rn
, imm
);
634 g_assert (rt
!= ARMREG_IP0
);
635 g_assert (rn
!= ARMREG_IP0
);
636 code
= emit_imm (code
, ARMREG_IP0
, imm
);
637 arm_ldrshx_reg (code
, rt
, rn
, ARMREG_IP0
);
642 static __attribute__ ((__warn_unused_result__
)) guint8
*
643 emit_ldrswx (guint8
*code
, int rt
, int rn
, int imm
)
645 if (arm_is_pimm12_scaled (imm
, 4)) {
646 arm_ldrswx (code
, rt
, rn
, imm
);
648 g_assert (rt
!= ARMREG_IP0
);
649 g_assert (rn
!= ARMREG_IP0
);
650 code
= emit_imm (code
, ARMREG_IP0
, imm
);
651 arm_ldrswx_reg (code
, rt
, rn
, ARMREG_IP0
);
656 static __attribute__ ((__warn_unused_result__
)) guint8
*
657 emit_ldrw (guint8
*code
, int rt
, int rn
, int imm
)
659 if (arm_is_pimm12_scaled (imm
, 4)) {
660 arm_ldrw (code
, rt
, rn
, imm
);
662 g_assert (rn
!= ARMREG_IP0
);
663 code
= emit_imm (code
, ARMREG_IP0
, imm
);
664 arm_ldrw_reg (code
, rt
, rn
, ARMREG_IP0
);
669 static __attribute__ ((__warn_unused_result__
)) guint8
*
670 emit_ldrx (guint8
*code
, int rt
, int rn
, int imm
)
672 if (arm_is_pimm12_scaled (imm
, 8)) {
673 arm_ldrx (code
, rt
, rn
, imm
);
675 g_assert (rn
!= ARMREG_IP0
);
676 code
= emit_imm (code
, ARMREG_IP0
, imm
);
677 arm_ldrx_reg (code
, rt
, rn
, ARMREG_IP0
);
682 static __attribute__ ((__warn_unused_result__
)) guint8
*
683 emit_ldrfpw (guint8
*code
, int rt
, int rn
, int imm
)
685 if (arm_is_pimm12_scaled (imm
, 4)) {
686 arm_ldrfpw (code
, rt
, rn
, imm
);
688 g_assert (rn
!= ARMREG_IP0
);
689 code
= emit_imm (code
, ARMREG_IP0
, imm
);
690 arm_addx (code
, ARMREG_IP0
, rn
, ARMREG_IP0
);
691 arm_ldrfpw (code
, rt
, ARMREG_IP0
, 0);
696 static __attribute__ ((__warn_unused_result__
)) guint8
*
697 emit_ldrfpx (guint8
*code
, int rt
, int rn
, int imm
)
699 if (arm_is_pimm12_scaled (imm
, 8)) {
700 arm_ldrfpx (code
, rt
, rn
, imm
);
702 g_assert (rn
!= ARMREG_IP0
);
703 code
= emit_imm (code
, ARMREG_IP0
, imm
);
704 arm_addx (code
, ARMREG_IP0
, rn
, ARMREG_IP0
);
705 arm_ldrfpx (code
, rt
, ARMREG_IP0
, 0);
711 mono_arm_emit_ldrx (guint8
*code
, int rt
, int rn
, int imm
)
713 return emit_ldrx (code
, rt
, rn
, imm
);
717 emit_call (MonoCompile
*cfg
, guint8
* code
, MonoJumpInfoType patch_type
, gconstpointer data
)
720 mono_add_patch_info_rel (cfg, code - cfg->native_code, patch_type, data, MONO_R_ARM64_IMM);
721 code = emit_imm64_template (code, ARMREG_LR);
722 arm_blrx (code, ARMREG_LR);
724 mono_add_patch_info_rel (cfg
, code
- cfg
->native_code
, patch_type
, data
, MONO_R_ARM64_BL
);
726 cfg
->thunk_area
+= THUNK_SIZE
;
731 emit_aotconst_full (MonoCompile
*cfg
, MonoJumpInfo
**ji
, guint8
*code
, guint8
*start
, int dreg
, guint32 patch_type
, gconstpointer data
)
734 mono_add_patch_info (cfg
, code
- cfg
->native_code
, (MonoJumpInfoType
)patch_type
, data
);
736 *ji
= mono_patch_info_list_prepend (*ji
, code
- start
, (MonoJumpInfoType
)patch_type
, data
);
737 /* See arch_emit_got_access () in aot-compiler.c */
738 arm_ldrx_lit (code
, dreg
, 0);
745 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, guint32 patch_type
, gconstpointer data
)
747 return emit_aotconst_full (cfg
, NULL
, code
, NULL
, dreg
, patch_type
, data
);
751 * mono_arm_emit_aotconst:
753 * Emit code to load an AOT constant into DREG. Usable from trampolines.
756 mono_arm_emit_aotconst (gpointer ji
, guint8
*code
, guint8
*code_start
, int dreg
, guint32 patch_type
, gconstpointer data
)
758 return emit_aotconst_full (NULL
, (MonoJumpInfo
**)ji
, code
, code_start
, dreg
, patch_type
, data
);
762 mono_arch_have_fast_tls (void)
772 emit_tls_get (guint8
*code
, int dreg
, int tls_offset
)
774 arm_mrs (code
, dreg
, ARM_MRS_REG_TPIDR_EL0
);
775 if (tls_offset
< 256) {
776 arm_ldrx (code
, dreg
, dreg
, tls_offset
);
778 code
= emit_addx_imm (code
, dreg
, dreg
, tls_offset
);
779 arm_ldrx (code
, dreg
, dreg
, 0);
785 emit_tls_set (guint8
*code
, int sreg
, int tls_offset
)
787 int tmpreg
= ARMREG_IP0
;
789 g_assert (sreg
!= tmpreg
);
790 arm_mrs (code
, tmpreg
, ARM_MRS_REG_TPIDR_EL0
);
791 if (tls_offset
< 256) {
792 arm_strx (code
, sreg
, tmpreg
, tls_offset
);
794 code
= emit_addx_imm (code
, tmpreg
, tmpreg
, tls_offset
);
795 arm_strx (code
, sreg
, tmpreg
, 0);
803 * - ldrp [fp, lr], [sp], !stack_offfset
804 * Clobbers TEMP_REGS.
806 __attribute__ ((__warn_unused_result__
)) guint8
*
807 mono_arm_emit_destroy_frame (guint8
*code
, int stack_offset
, guint64 temp_regs
)
809 // At least one of these registers must be available, or both.
810 gboolean
const temp0
= (temp_regs
& (1 << ARMREG_IP0
)) != 0;
811 gboolean
const temp1
= (temp_regs
& (1 << ARMREG_IP1
)) != 0;
812 g_assert (temp0
|| temp1
);
813 int const temp
= temp0
? ARMREG_IP0
: ARMREG_IP1
;
815 arm_movspx (code
, ARMREG_SP
, ARMREG_FP
);
817 if (arm_is_ldpx_imm (stack_offset
)) {
818 arm_ldpx_post (code
, ARMREG_FP
, ARMREG_LR
, ARMREG_SP
, stack_offset
);
820 arm_ldpx (code
, ARMREG_FP
, ARMREG_LR
, ARMREG_SP
, 0);
821 /* sp += stack_offset */
822 if (temp0
&& temp1
) {
823 code
= emit_addx_sp_imm (code
, stack_offset
);
825 int imm
= stack_offset
;
827 /* Can't use addx_sp_imm () since we can't clobber both ip0/ip1 */
828 arm_addx_imm (code
, temp
, ARMREG_SP
, 0);
830 arm_addx_imm (code
, temp
, temp
, 256);
833 arm_addx_imm (code
, ARMREG_SP
, temp
, imm
);
839 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
842 emit_thunk (guint8
*code
, gconstpointer target
)
846 arm_ldrx_lit (code
, ARMREG_IP0
, code
+ 8);
847 arm_brx (code
, ARMREG_IP0
);
848 *(guint64
*)code
= (guint64
)target
;
849 code
+= sizeof (guint64
);
851 mono_arch_flush_icache (p
, code
- p
);
856 create_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
859 MonoThunkJitInfo
*info
;
863 guint8
*target_thunk
;
866 domain
= mono_domain_get ();
870 * This can be called multiple times during JITting,
871 * save the current position in cfg->arch to avoid
872 * doing a O(n^2) search.
874 if (!cfg
->arch
.thunks
) {
875 cfg
->arch
.thunks
= cfg
->thunks
;
876 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
878 thunks
= cfg
->arch
.thunks
;
879 thunks_size
= cfg
->arch
.thunks_size
;
881 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
882 g_assert_not_reached ();
885 g_assert (*(guint32
*)thunks
== 0);
886 emit_thunk (thunks
, target
);
888 cfg
->arch
.thunks
+= THUNK_SIZE
;
889 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
893 ji
= mini_jit_info_table_find (domain
, (char*)code
, NULL
);
895 info
= mono_jit_info_get_thunk_info (ji
);
898 thunks
= (guint8
*)ji
->code_start
+ info
->thunks_offset
;
899 thunks_size
= info
->thunks_size
;
901 orig_target
= mono_arch_get_call_target (code
+ 4);
903 mono_domain_lock (domain
);
906 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
907 /* The call already points to a thunk, because of trampolines etc. */
908 target_thunk
= orig_target
;
910 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
911 if (((guint32
*)p
) [0] == 0) {
915 } else if (((guint64
*)p
) [1] == (guint64
)target
) {
916 /* Thunk already points to target */
923 //printf ("THUNK: %p %p %p\n", code, target, target_thunk);
926 mono_domain_unlock (domain
);
927 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
928 g_assert_not_reached ();
931 emit_thunk (target_thunk
, target
);
933 mono_domain_unlock (domain
);
940 arm_patch_full (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, guint8
*target
, int relocation
)
942 switch (relocation
) {
944 target
= MINI_FTNPTR_TO_ADDR (target
);
945 if (arm_is_bl_disp (code
, target
)) {
946 arm_b (code
, target
);
950 thunk
= create_thunk (cfg
, domain
, code
, target
);
951 g_assert (arm_is_bl_disp (code
, thunk
));
955 case MONO_R_ARM64_BCC
: {
958 cond
= arm_get_bcc_cond (code
);
959 arm_bcc (code
, cond
, target
);
962 case MONO_R_ARM64_CBZ
:
963 arm_set_cbz_target (code
, target
);
965 case MONO_R_ARM64_IMM
: {
966 guint64 imm
= (guint64
)target
;
969 /* emit_imm64_template () */
970 dreg
= arm_get_movzx_rd (code
);
971 arm_movzx (code
, dreg
, imm
& 0xffff, 0);
972 arm_movkx (code
, dreg
, (imm
>> 16) & 0xffff, 16);
973 arm_movkx (code
, dreg
, (imm
>> 32) & 0xffff, 32);
974 arm_movkx (code
, dreg
, (imm
>> 48) & 0xffff, 48);
977 case MONO_R_ARM64_BL
:
978 target
= MINI_FTNPTR_TO_ADDR (target
);
979 if (arm_is_bl_disp (code
, target
)) {
980 arm_bl (code
, target
);
984 thunk
= create_thunk (cfg
, domain
, code
, target
);
985 g_assert (arm_is_bl_disp (code
, thunk
));
986 arm_bl (code
, thunk
);
990 g_assert_not_reached ();
995 arm_patch_rel (guint8
*code
, guint8
*target
, int relocation
)
997 arm_patch_full (NULL
, NULL
, code
, target
, relocation
);
1001 mono_arm_patch (guint8
*code
, guint8
*target
, int relocation
)
1003 arm_patch_rel (code
, target
, relocation
);
1007 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
1011 ip
= ji
->ip
.i
+ code
;
1014 case MONO_PATCH_INFO_METHOD_JUMP
:
1015 /* ji->relocation is not set by the caller */
1016 arm_patch_full (cfg
, domain
, ip
, (guint8
*)target
, MONO_R_ARM64_B
);
1017 mono_arch_flush_icache (ip
, 8);
1020 arm_patch_full (cfg
, domain
, ip
, (guint8
*)target
, ji
->relocation
);
1022 case MONO_PATCH_INFO_NONE
:
1028 mono_arch_flush_register_windows (void)
1033 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
1035 return (MonoMethod
*)regs
[MONO_ARCH_RGCTX_REG
];
1039 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
1041 return (MonoVTable
*)regs
[MONO_ARCH_RGCTX_REG
];
1045 mono_arch_get_cie_program (void)
1049 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ARMREG_SP
, 0);
1055 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
1057 return ctx
->regs
[reg
];
1061 mono_arch_context_set_int_reg (MonoContext
*ctx
, int reg
, host_mgreg_t val
)
1063 ctx
->regs
[reg
] = val
;
1067 * mono_arch_set_target:
1069 * Set the target architecture the JIT backend should generate code for, in the form
1070 * of a GNU target triplet. Only used in AOT mode.
1073 mono_arch_set_target (char *mtriple
)
1075 if (strstr (mtriple
, "darwin") || strstr (mtriple
, "ios")) {
1081 add_general (CallInfo
*cinfo
, ArgInfo
*ainfo
, int size
, gboolean sign
)
1083 if (cinfo
->gr
>= PARAM_REGS
) {
1084 ainfo
->storage
= ArgOnStack
;
1086 * FIXME: The vararg argument handling code in ves_icall_System_ArgIterator_IntGetNextArg
1087 * assumes every argument is allocated to a separate full size stack slot.
1089 if (ios_abi
&& !cinfo
->vararg
) {
1090 /* Assume size == align */
1092 /* Put arguments into 8 byte aligned stack slots */
1096 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, size
);
1097 ainfo
->offset
= cinfo
->stack_usage
;
1098 ainfo
->slot_size
= size
;
1100 cinfo
->stack_usage
+= size
;
1102 ainfo
->storage
= ArgInIReg
;
1103 ainfo
->reg
= cinfo
->gr
;
1109 add_fp (CallInfo
*cinfo
, ArgInfo
*ainfo
, gboolean single
)
1111 int size
= single
? 4 : 8;
1113 if (cinfo
->fr
>= FP_PARAM_REGS
) {
1114 ainfo
->storage
= single
? ArgOnStackR4
: ArgOnStackR8
;
1116 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, size
);
1117 ainfo
->offset
= cinfo
->stack_usage
;
1118 ainfo
->slot_size
= size
;
1119 cinfo
->stack_usage
+= size
;
1121 ainfo
->offset
= cinfo
->stack_usage
;
1122 ainfo
->slot_size
= 8;
1123 /* Put arguments into 8 byte aligned stack slots */
1124 cinfo
->stack_usage
+= 8;
1128 ainfo
->storage
= ArgInFRegR4
;
1130 ainfo
->storage
= ArgInFReg
;
1131 ainfo
->reg
= cinfo
->fr
;
1137 is_hfa (MonoType
*t
, int *out_nfields
, int *out_esize
, int *field_offsets
)
1141 MonoClassField
*field
;
1142 MonoType
*ftype
, *prev_ftype
= NULL
;
1145 klass
= mono_class_from_mono_type_internal (t
);
1147 while ((field
= mono_class_get_fields_internal (klass
, &iter
))) {
1148 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
1150 ftype
= mono_field_get_type_internal (field
);
1151 ftype
= mini_get_underlying_type (ftype
);
1153 if (MONO_TYPE_ISSTRUCT (ftype
)) {
1154 int nested_nfields
, nested_esize
;
1155 int nested_field_offsets
[16];
1157 if (!is_hfa (ftype
, &nested_nfields
, &nested_esize
, nested_field_offsets
))
1159 if (nested_esize
== 4)
1160 ftype
= m_class_get_byval_arg (mono_defaults
.single_class
);
1162 ftype
= m_class_get_byval_arg (mono_defaults
.double_class
);
1163 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1166 for (i
= 0; i
< nested_nfields
; ++i
) {
1167 if (nfields
+ i
< 4)
1168 field_offsets
[nfields
+ i
] = field
->offset
- MONO_ABI_SIZEOF (MonoObject
) + nested_field_offsets
[i
];
1170 nfields
+= nested_nfields
;
1172 if (!(!ftype
->byref
&& (ftype
->type
== MONO_TYPE_R4
|| ftype
->type
== MONO_TYPE_R8
)))
1174 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1178 field_offsets
[nfields
] = field
->offset
- MONO_ABI_SIZEOF (MonoObject
);
1182 if (nfields
== 0 || nfields
> 4)
1184 *out_nfields
= nfields
;
1185 *out_esize
= prev_ftype
->type
== MONO_TYPE_R4
? 4 : 8;
1190 add_valuetype (CallInfo
*cinfo
, ArgInfo
*ainfo
, MonoType
*t
)
1192 int i
, size
, align_size
, nregs
, nfields
, esize
;
1193 int field_offsets
[16];
1196 size
= mini_type_stack_size_full (t
, &align
, cinfo
->pinvoke
);
1197 align_size
= ALIGN_TO (size
, 8);
1199 nregs
= align_size
/ 8;
1200 if (is_hfa (t
, &nfields
, &esize
, field_offsets
)) {
1202 * The struct might include nested float structs aligned at 8,
1203 * so need to keep track of the offsets of the individual fields.
1205 if (cinfo
->fr
+ nfields
<= FP_PARAM_REGS
) {
1206 ainfo
->storage
= ArgHFA
;
1207 ainfo
->reg
= cinfo
->fr
;
1208 ainfo
->nregs
= nfields
;
1210 ainfo
->esize
= esize
;
1211 for (i
= 0; i
< nfields
; ++i
)
1212 ainfo
->foffsets
[i
] = field_offsets
[i
];
1213 cinfo
->fr
+= ainfo
->nregs
;
1215 ainfo
->nfregs_to_skip
= FP_PARAM_REGS
> cinfo
->fr
? FP_PARAM_REGS
- cinfo
->fr
: 0;
1216 cinfo
->fr
= FP_PARAM_REGS
;
1217 size
= ALIGN_TO (size
, 8);
1218 ainfo
->storage
= ArgVtypeOnStack
;
1219 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, align
);
1220 ainfo
->offset
= cinfo
->stack_usage
;
1223 ainfo
->nregs
= nfields
;
1224 ainfo
->esize
= esize
;
1225 cinfo
->stack_usage
+= size
;
1230 if (align_size
> 16) {
1231 ainfo
->storage
= ArgVtypeByRef
;
1236 if (cinfo
->gr
+ nregs
> PARAM_REGS
) {
1237 size
= ALIGN_TO (size
, 8);
1238 ainfo
->storage
= ArgVtypeOnStack
;
1239 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, align
);
1240 ainfo
->offset
= cinfo
->stack_usage
;
1242 cinfo
->stack_usage
+= size
;
1243 cinfo
->gr
= PARAM_REGS
;
1245 ainfo
->storage
= ArgVtypeInIRegs
;
1246 ainfo
->reg
= cinfo
->gr
;
1247 ainfo
->nregs
= nregs
;
1254 add_param (CallInfo
*cinfo
, ArgInfo
*ainfo
, MonoType
*t
)
1258 ptype
= mini_get_underlying_type (t
);
1259 switch (ptype
->type
) {
1261 add_general (cinfo
, ainfo
, 1, TRUE
);
1264 add_general (cinfo
, ainfo
, 1, FALSE
);
1267 add_general (cinfo
, ainfo
, 2, TRUE
);
1270 add_general (cinfo
, ainfo
, 2, FALSE
);
1272 #ifdef MONO_ARCH_ILP32
1276 add_general (cinfo
, ainfo
, 4, TRUE
);
1278 #ifdef MONO_ARCH_ILP32
1281 case MONO_TYPE_FNPTR
:
1282 case MONO_TYPE_OBJECT
:
1285 add_general (cinfo
, ainfo
, 4, FALSE
);
1287 #ifndef MONO_ARCH_ILP32
1291 case MONO_TYPE_FNPTR
:
1292 case MONO_TYPE_OBJECT
:
1296 add_general (cinfo
, ainfo
, 8, FALSE
);
1299 add_fp (cinfo
, ainfo
, FALSE
);
1302 add_fp (cinfo
, ainfo
, TRUE
);
1304 case MONO_TYPE_VALUETYPE
:
1305 case MONO_TYPE_TYPEDBYREF
:
1306 add_valuetype (cinfo
, ainfo
, ptype
);
1308 case MONO_TYPE_VOID
:
1309 ainfo
->storage
= ArgNone
;
1311 case MONO_TYPE_GENERICINST
:
1312 if (!mono_type_generic_inst_is_valuetype (ptype
)) {
1313 add_general (cinfo
, ainfo
, 8, FALSE
);
1314 } else if (mini_is_gsharedvt_variable_type (ptype
)) {
1316 * Treat gsharedvt arguments as large vtypes
1318 ainfo
->storage
= ArgVtypeByRef
;
1319 ainfo
->gsharedvt
= TRUE
;
1321 add_valuetype (cinfo
, ainfo
, ptype
);
1325 case MONO_TYPE_MVAR
:
1326 g_assert (mini_is_gsharedvt_type (ptype
));
1327 ainfo
->storage
= ArgVtypeByRef
;
1328 ainfo
->gsharedvt
= TRUE
;
1331 g_assert_not_reached ();
1339 * Obtain information about a call according to the calling convention.
1342 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
1346 int n
, pstart
, pindex
;
1348 n
= sig
->hasthis
+ sig
->param_count
;
1351 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1353 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1356 cinfo
->pinvoke
= sig
->pinvoke
;
1357 // Constrain this to OSX only for now
1359 cinfo
->vararg
= sig
->call_convention
== MONO_CALL_VARARG
;
1363 add_param (cinfo
, &cinfo
->ret
, sig
->ret
);
1364 if (cinfo
->ret
.storage
== ArgVtypeByRef
)
1365 cinfo
->ret
.reg
= ARMREG_R8
;
1369 cinfo
->stack_usage
= 0;
1373 add_general (cinfo
, cinfo
->args
+ 0, 8, FALSE
);
1375 for (pindex
= pstart
; pindex
< sig
->param_count
; ++pindex
) {
1376 ainfo
= cinfo
->args
+ sig
->hasthis
+ pindex
;
1378 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (pindex
== sig
->sentinelpos
)) {
1379 /* Prevent implicit arguments and sig_cookie from
1380 being passed in registers */
1381 cinfo
->gr
= PARAM_REGS
;
1382 cinfo
->fr
= FP_PARAM_REGS
;
1383 /* Emit the signature cookie just before the implicit arguments */
1384 add_param (cinfo
, &cinfo
->sig_cookie
, mono_get_int_type ());
1387 add_param (cinfo
, ainfo
, sig
->params
[pindex
]);
1388 if (ainfo
->storage
== ArgVtypeByRef
) {
1389 /* Pass the argument address in the next register */
1390 if (cinfo
->gr
>= PARAM_REGS
) {
1391 ainfo
->storage
= ArgVtypeByRefOnStack
;
1392 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, 8);
1393 ainfo
->offset
= cinfo
->stack_usage
;
1394 cinfo
->stack_usage
+= 8;
1396 ainfo
->reg
= cinfo
->gr
;
1402 /* Handle the case where there are no implicit arguments */
1403 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (pindex
== sig
->sentinelpos
)) {
1404 /* Prevent implicit arguments and sig_cookie from
1405 being passed in registers */
1406 cinfo
->gr
= PARAM_REGS
;
1407 cinfo
->fr
= FP_PARAM_REGS
;
1408 /* Emit the signature cookie just before the implicit arguments */
1409 add_param (cinfo
, &cinfo
->sig_cookie
, mono_get_int_type ());
1412 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, MONO_ARCH_FRAME_ALIGNMENT
);
1418 arg_need_temp (ArgInfo
*ainfo
)
1420 if (ainfo
->storage
== ArgHFA
&& ainfo
->esize
== 4)
1426 arg_get_storage (CallContext
*ccontext
, ArgInfo
*ainfo
)
1428 switch (ainfo
->storage
) {
1429 case ArgVtypeInIRegs
:
1431 return &ccontext
->gregs
[ainfo
->reg
];
1435 return &ccontext
->fregs
[ainfo
->reg
];
1439 case ArgVtypeOnStack
:
1440 return ccontext
->stack
+ ainfo
->offset
;
1442 return (gpointer
) ccontext
->gregs
[ainfo
->reg
];
1444 g_error ("Arg storage type not yet supported");
1449 arg_get_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer dest
)
1451 g_assert (arg_need_temp (ainfo
));
1453 float *dest_float
= (float*)dest
;
1454 for (int k
= 0; k
< ainfo
->nregs
; k
++) {
1455 *dest_float
= *(float*)&ccontext
->fregs
[ainfo
->reg
+ k
];
1461 arg_set_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer src
)
1463 g_assert (arg_need_temp (ainfo
));
1465 float *src_float
= (float*)src
;
1466 for (int k
= 0; k
< ainfo
->nregs
; k
++) {
1467 *(float*)&ccontext
->fregs
[ainfo
->reg
+ k
] = *src_float
;
1472 /* Set arguments in the ccontext (for i2n entry) */
1474 mono_arch_set_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1476 const MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1477 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1481 memset (ccontext
, 0, sizeof (CallContext
));
1483 ccontext
->stack_size
= ALIGN_TO (cinfo
->stack_usage
, MONO_ARCH_FRAME_ALIGNMENT
);
1484 if (ccontext
->stack_size
)
1485 ccontext
->stack
= (guint8
*)g_calloc (1, ccontext
->stack_size
);
1487 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1488 ainfo
= &cinfo
->ret
;
1489 if (ainfo
->storage
== ArgVtypeByRef
) {
1490 storage
= interp_cb
->frame_arg_to_storage ((MonoInterpFrameHandle
)frame
, sig
, -1);
1491 ccontext
->gregs
[cinfo
->ret
.reg
] = (gsize
)storage
;
1495 g_assert (!sig
->hasthis
);
1497 for (int i
= 0; i
< sig
->param_count
; i
++) {
1498 ainfo
= &cinfo
->args
[i
];
1500 if (ainfo
->storage
== ArgVtypeByRef
) {
1501 ccontext
->gregs
[ainfo
->reg
] = (host_mgreg_t
)interp_cb
->frame_arg_to_storage ((MonoInterpFrameHandle
)frame
, sig
, i
);
1505 int temp_size
= arg_need_temp (ainfo
);
1508 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1510 storage
= arg_get_storage (ccontext
, ainfo
);
1512 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1514 arg_set_val (ccontext
, ainfo
, storage
);
1520 /* Set return value in the ccontext (for n2i return) */
1522 mono_arch_set_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
, gpointer retp
)
1524 const MonoEECallbacks
*interp_cb
;
1529 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1532 interp_cb
= mini_get_interp_callbacks ();
1533 cinfo
= get_call_info (NULL
, sig
);
1534 ainfo
= &cinfo
->ret
;
1537 g_assert (ainfo
->storage
== ArgVtypeByRef
);
1538 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, -1, retp
);
1540 g_assert (ainfo
->storage
!= ArgVtypeByRef
);
1541 int temp_size
= arg_need_temp (ainfo
);
1544 storage
= alloca (temp_size
);
1546 storage
= arg_get_storage (ccontext
, ainfo
);
1547 memset (ccontext
, 0, sizeof (CallContext
)); // FIXME
1548 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1550 arg_set_val (ccontext
, ainfo
, storage
);
1556 /* Gets the arguments from ccontext (for n2i entry) */
1558 mono_arch_get_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1560 const MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1561 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1565 for (int i
= 0; i
< sig
->param_count
+ sig
->hasthis
; i
++) {
1566 ainfo
= &cinfo
->args
[i
];
1567 int temp_size
= arg_need_temp (ainfo
);
1570 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1571 arg_get_val (ccontext
, ainfo
, storage
);
1573 storage
= arg_get_storage (ccontext
, ainfo
);
1575 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1579 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1580 ainfo
= &cinfo
->ret
;
1581 if (ainfo
->storage
== ArgVtypeByRef
)
1582 storage
= (gpointer
) ccontext
->gregs
[cinfo
->ret
.reg
];
1588 /* Gets the return value from ccontext (for i2n exit) */
1590 mono_arch_get_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1592 const MonoEECallbacks
*interp_cb
;
1597 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1600 interp_cb
= mini_get_interp_callbacks ();
1601 cinfo
= get_call_info (NULL
, sig
);
1602 ainfo
= &cinfo
->ret
;
1604 if (ainfo
->storage
!= ArgVtypeByRef
) {
1605 int temp_size
= arg_need_temp (ainfo
);
1608 storage
= alloca (temp_size
);
1609 arg_get_val (ccontext
, ainfo
, storage
);
1611 storage
= arg_get_storage (ccontext
, ainfo
);
1613 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1620 MonoMethodSignature
*sig
;
1623 MonoType
**param_types
;
1624 int n_fpargs
, n_fpret
, nullable_area
;
1628 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
1632 // FIXME: Add more cases
1633 switch (cinfo
->ret
.storage
) {
1640 case ArgVtypeInIRegs
:
1641 if (cinfo
->ret
.nregs
> 2)
1650 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
1651 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1653 switch (ainfo
->storage
) {
1655 case ArgVtypeInIRegs
:
1660 case ArgVtypeByRefOnStack
:
1662 case ArgVtypeOnStack
:
1673 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
1675 ArchDynCallInfo
*info
;
1679 cinfo
= get_call_info (NULL
, sig
);
1681 if (!dyn_call_supported (cinfo
, sig
)) {
1686 info
= g_new0 (ArchDynCallInfo
, 1);
1687 // FIXME: Preprocess the info to speed up start_dyn_call ()
1689 info
->cinfo
= cinfo
;
1690 info
->rtype
= mini_get_underlying_type (sig
->ret
);
1691 info
->param_types
= g_new0 (MonoType
*, sig
->param_count
);
1692 for (i
= 0; i
< sig
->param_count
; ++i
)
1693 info
->param_types
[i
] = mini_get_underlying_type (sig
->params
[i
]);
1695 switch (cinfo
->ret
.storage
) {
1701 info
->n_fpret
= cinfo
->ret
.nregs
;
1707 for (aindex
= 0; aindex
< sig
->param_count
; aindex
++) {
1708 MonoType
*t
= info
->param_types
[aindex
];
1714 case MONO_TYPE_GENERICINST
:
1715 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type_internal (t
))) {
1716 MonoClass
*klass
= mono_class_from_mono_type_internal (t
);
1719 /* Nullables need a temporary buffer, its stored at the end of DynCallArgs.regs after the stack args */
1720 size
= mono_class_value_size (klass
, NULL
);
1721 info
->nullable_area
+= size
;
1729 return (MonoDynCallInfo
*)info
;
1733 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
1735 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1737 g_free (ainfo
->cinfo
);
1738 g_free (ainfo
->param_types
);
1743 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo
*info
)
1745 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1747 g_assert (ainfo
->cinfo
->stack_usage
% MONO_ARCH_FRAME_ALIGNMENT
== 0);
1748 return sizeof (DynCallArgs
) + ainfo
->cinfo
->stack_usage
+ ainfo
->nullable_area
;
1752 bitcast_r4_to_r8 (float f
)
1760 bitcast_r8_to_r4 (double f
)
1768 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
)
1770 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
1771 DynCallArgs
*p
= (DynCallArgs
*)buf
;
1772 int aindex
, arg_index
, greg
, i
, pindex
;
1773 MonoMethodSignature
*sig
= dinfo
->sig
;
1774 CallInfo
*cinfo
= dinfo
->cinfo
;
1775 int buffer_offset
= 0;
1776 guint8
*nullable_buffer
;
1780 p
->n_fpargs
= dinfo
->n_fpargs
;
1781 p
->n_fpret
= dinfo
->n_fpret
;
1782 p
->n_stackargs
= cinfo
->stack_usage
/ sizeof (host_mgreg_t
);
1788 /* Stored after the stack arguments */
1789 nullable_buffer
= (guint8
*)&(p
->regs
[PARAM_REGS
+ 1 + (cinfo
->stack_usage
/ sizeof (host_mgreg_t
))]);
1792 p
->regs
[greg
++] = (host_mgreg_t
)*(args
[arg_index
++]);
1794 if (cinfo
->ret
.storage
== ArgVtypeByRef
)
1795 p
->regs
[ARMREG_R8
] = (host_mgreg_t
)ret
;
1797 for (aindex
= pindex
; aindex
< sig
->param_count
; aindex
++) {
1798 MonoType
*t
= dinfo
->param_types
[aindex
];
1799 gpointer
*arg
= args
[arg_index
++];
1800 ArgInfo
*ainfo
= &cinfo
->args
[aindex
+ sig
->hasthis
];
1803 if (ainfo
->storage
== ArgOnStack
|| ainfo
->storage
== ArgVtypeOnStack
|| ainfo
->storage
== ArgVtypeByRefOnStack
) {
1804 slot
= PARAM_REGS
+ 1 + (ainfo
->offset
/ sizeof (host_mgreg_t
));
1810 p
->regs
[slot
] = (host_mgreg_t
)*arg
;
1814 if (ios_abi
&& ainfo
->storage
== ArgOnStack
) {
1815 guint8
*stack_arg
= (guint8
*)&(p
->regs
[PARAM_REGS
+ 1]) + ainfo
->offset
;
1816 gboolean handled
= TRUE
;
1818 /* Special case arguments smaller than 1 machine word */
1821 *(guint8
*)stack_arg
= *(guint8
*)arg
;
1824 *(gint8
*)stack_arg
= *(gint8
*)arg
;
1827 *(guint16
*)stack_arg
= *(guint16
*)arg
;
1830 *(gint16
*)stack_arg
= *(gint16
*)arg
;
1833 *(gint32
*)stack_arg
= *(gint32
*)arg
;
1836 *(guint32
*)stack_arg
= *(guint32
*)arg
;
1847 case MONO_TYPE_OBJECT
:
1853 p
->regs
[slot
] = (host_mgreg_t
)*arg
;
1856 p
->regs
[slot
] = *(guint8
*)arg
;
1859 p
->regs
[slot
] = *(gint8
*)arg
;
1862 p
->regs
[slot
] = *(gint16
*)arg
;
1865 p
->regs
[slot
] = *(guint16
*)arg
;
1868 p
->regs
[slot
] = *(gint32
*)arg
;
1871 p
->regs
[slot
] = *(guint32
*)arg
;
1874 p
->fpregs
[ainfo
->reg
] = bitcast_r4_to_r8 (*(float*)arg
);
1878 p
->fpregs
[ainfo
->reg
] = *(double*)arg
;
1881 case MONO_TYPE_GENERICINST
:
1882 if (MONO_TYPE_IS_REFERENCE (t
)) {
1883 p
->regs
[slot
] = (host_mgreg_t
)*arg
;
1886 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type_internal (t
))) {
1887 MonoClass
*klass
= mono_class_from_mono_type_internal (t
);
1888 guint8
*nullable_buf
;
1892 * Use p->buffer as a temporary buffer since the data needs to be available after this call
1893 * if the nullable param is passed by ref.
1895 size
= mono_class_value_size (klass
, NULL
);
1896 nullable_buf
= nullable_buffer
+ buffer_offset
;
1897 buffer_offset
+= size
;
1898 g_assert (buffer_offset
<= dinfo
->nullable_area
);
1900 /* The argument pointed to by arg is either a boxed vtype or null */
1901 mono_nullable_init (nullable_buf
, (MonoObject
*)arg
, klass
);
1903 arg
= (gpointer
*)nullable_buf
;
1909 case MONO_TYPE_VALUETYPE
:
1910 switch (ainfo
->storage
) {
1911 case ArgVtypeInIRegs
:
1912 for (i
= 0; i
< ainfo
->nregs
; ++i
)
1913 p
->regs
[slot
++] = ((host_mgreg_t
*)arg
) [i
];
1916 if (ainfo
->esize
== 4) {
1917 for (i
= 0; i
< ainfo
->nregs
; ++i
)
1918 p
->fpregs
[ainfo
->reg
+ i
] = bitcast_r4_to_r8 (((float*)arg
) [ainfo
->foffsets
[i
] / 4]);
1920 for (i
= 0; i
< ainfo
->nregs
; ++i
)
1921 p
->fpregs
[ainfo
->reg
+ i
] = ((double*)arg
) [ainfo
->foffsets
[i
] / 8];
1923 p
->n_fpargs
+= ainfo
->nregs
;
1926 case ArgVtypeByRefOnStack
:
1927 p
->regs
[slot
] = (host_mgreg_t
)arg
;
1929 case ArgVtypeOnStack
:
1930 for (i
= 0; i
< ainfo
->size
/ 8; ++i
)
1931 p
->regs
[slot
++] = ((host_mgreg_t
*)arg
) [i
];
1934 g_assert_not_reached ();
1939 g_assert_not_reached ();
1945 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
1947 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1948 CallInfo
*cinfo
= ainfo
->cinfo
;
1949 DynCallArgs
*args
= (DynCallArgs
*)buf
;
1950 MonoType
*ptype
= ainfo
->rtype
;
1951 guint8
*ret
= args
->ret
;
1952 host_mgreg_t res
= args
->res
;
1953 host_mgreg_t res2
= args
->res2
;
1956 if (cinfo
->ret
.storage
== ArgVtypeByRef
)
1959 switch (ptype
->type
) {
1960 case MONO_TYPE_VOID
:
1961 *(gpointer
*)ret
= NULL
;
1963 case MONO_TYPE_OBJECT
:
1967 *(gpointer
*)ret
= (gpointer
)res
;
1973 *(guint8
*)ret
= res
;
1976 *(gint16
*)ret
= res
;
1979 *(guint16
*)ret
= res
;
1982 *(gint32
*)ret
= res
;
1985 *(guint32
*)ret
= res
;
1989 *(guint64
*)ret
= res
;
1992 *(float*)ret
= bitcast_r8_to_r4 (args
->fpregs
[0]);
1995 *(double*)ret
= args
->fpregs
[0];
1997 case MONO_TYPE_GENERICINST
:
1998 if (MONO_TYPE_IS_REFERENCE (ptype
)) {
1999 *(gpointer
*)ret
= (gpointer
)res
;
2004 case MONO_TYPE_VALUETYPE
:
2005 switch (ainfo
->cinfo
->ret
.storage
) {
2006 case ArgVtypeInIRegs
:
2007 *(host_mgreg_t
*)ret
= res
;
2008 if (ainfo
->cinfo
->ret
.nregs
> 1)
2009 ((host_mgreg_t
*)ret
) [1] = res2
;
2012 /* Use the same area for returning fp values */
2013 if (cinfo
->ret
.esize
== 4) {
2014 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
)
2015 ((float*)ret
) [cinfo
->ret
.foffsets
[i
] / 4] = bitcast_r8_to_r4 (args
->fpregs
[i
]);
2017 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
)
2018 ((double*)ret
) [cinfo
->ret
.foffsets
[i
] / 8] = args
->fpregs
[i
];
2022 g_assert_not_reached ();
2027 g_assert_not_reached ();
2033 void sys_icache_invalidate (void *start
, size_t len
);
2038 mono_arch_flush_icache (guint8
*code
, gint size
)
2040 #ifndef MONO_CROSS_COMPILE
2042 sys_icache_invalidate (code
, size
);
2044 /* Don't rely on GCC's __clear_cache implementation, as it caches
2045 * icache/dcache cache line sizes, that can vary between cores on
2046 * big.LITTLE architectures. */
2047 guint64 end
= (guint64
) (code
+ size
);
2049 /* always go with cacheline size of 4 bytes as this code isn't perf critical
2050 * anyway. Reading the cache line size from a machine register can be racy
2051 * on a big.LITTLE architecture if the cores don't have the same cache line
2053 const size_t icache_line_size
= 4;
2054 const size_t dcache_line_size
= 4;
2056 addr
= (guint64
) code
& ~(guint64
) (dcache_line_size
- 1);
2057 for (; addr
< end
; addr
+= dcache_line_size
)
2058 asm volatile("dc civac, %0" : : "r" (addr
) : "memory");
2059 asm volatile("dsb ish" : : : "memory");
2061 addr
= (guint64
) code
& ~(guint64
) (icache_line_size
- 1);
2062 for (; addr
< end
; addr
+= icache_line_size
)
2063 asm volatile("ic ivau, %0" : : "r" (addr
) : "memory");
2065 asm volatile ("dsb ish" : : : "memory");
2066 asm volatile ("isb" : : : "memory");
2074 mono_arch_opcode_needs_emulation (MonoCompile
*cfg
, int opcode
)
2081 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
2086 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
2087 MonoInst
*ins
= cfg
->varinfo
[i
];
2088 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
2091 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
2094 if ((ins
->flags
& (MONO_INST_IS_DEAD
|MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) ||
2095 (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
2098 if (mono_is_regsize_var (ins
->inst_vtype
)) {
2099 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
2100 g_assert (i
== vmv
->idx
);
2101 vars
= g_list_prepend (vars
, vmv
);
2105 vars
= mono_varlist_sort (cfg
, vars
, 0);
2111 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
2116 /* r28 is reserved for cfg->arch.args_reg */
2117 /* r27 is reserved for the imt argument */
2118 for (i
= ARMREG_R19
; i
<= ARMREG_R26
; ++i
)
2119 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (i
));
2125 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
2127 MonoInst
*ins
= cfg
->varinfo
[vmv
->idx
];
2129 if (ins
->opcode
== OP_ARG
)
2136 mono_arch_create_vars (MonoCompile
*cfg
)
2138 MonoMethodSignature
*sig
;
2141 sig
= mono_method_signature_internal (cfg
->method
);
2142 if (!cfg
->arch
.cinfo
)
2143 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2144 cinfo
= cfg
->arch
.cinfo
;
2146 if (cinfo
->ret
.storage
== ArgVtypeByRef
) {
2147 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2148 cfg
->vret_addr
->flags
|= MONO_INST_VOLATILE
;
2151 if (cfg
->gen_sdb_seq_points
) {
2154 if (cfg
->compile_aot
) {
2155 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2156 ins
->flags
|= MONO_INST_VOLATILE
;
2157 cfg
->arch
.seq_point_info_var
= ins
;
2160 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2161 ins
->flags
|= MONO_INST_VOLATILE
;
2162 cfg
->arch
.ss_tramp_var
= ins
;
2164 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2165 ins
->flags
|= MONO_INST_VOLATILE
;
2166 cfg
->arch
.bp_tramp_var
= ins
;
2169 if (cfg
->method
->save_lmf
) {
2170 cfg
->create_lmf_var
= TRUE
;
2176 mono_arch_allocate_vars (MonoCompile
*cfg
)
2178 MonoMethodSignature
*sig
;
2182 int i
, offset
, size
, align
;
2183 guint32 locals_stack_size
, locals_stack_align
;
2187 * Allocate arguments and locals to either register (OP_REGVAR) or to a stack slot (OP_REGOFFSET).
2188 * Compute cfg->stack_offset and update cfg->used_int_regs.
2191 sig
= mono_method_signature_internal (cfg
->method
);
2193 if (!cfg
->arch
.cinfo
)
2194 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2195 cinfo
= cfg
->arch
.cinfo
;
2198 * The ARM64 ABI always uses a frame pointer.
2199 * The instruction set prefers positive offsets, so fp points to the bottom of the
2200 * frame, and stack slots are at positive offsets.
2201 * If some arguments are received on the stack, their offsets relative to fp can
2202 * not be computed right now because the stack frame might grow due to spilling
2203 * done by the local register allocator. To solve this, we reserve a register
2204 * which points to them.
2205 * The stack frame looks like this:
2206 * args_reg -> <bottom of parent frame>
2208 * fp -> <saved fp+lr>
2209 * sp -> <localloc/params area>
2211 cfg
->frame_reg
= ARMREG_FP
;
2212 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
2218 if (cinfo
->stack_usage
) {
2219 g_assert (!(cfg
->used_int_regs
& (1 << ARMREG_R28
)));
2220 cfg
->arch
.args_reg
= ARMREG_R28
;
2221 cfg
->used_int_regs
|= 1 << ARMREG_R28
;
2224 if (cfg
->method
->save_lmf
) {
2225 /* The LMF var is allocated normally */
2227 /* Callee saved regs */
2228 cfg
->arch
.saved_gregs_offset
= offset
;
2229 for (i
= 0; i
< 32; ++i
)
2230 if ((MONO_ARCH_CALLEE_SAVED_REGS
& (1 << i
)) && (cfg
->used_int_regs
& (1 << i
)))
2235 switch (cinfo
->ret
.storage
) {
2241 cfg
->ret
->opcode
= OP_REGVAR
;
2242 cfg
->ret
->dreg
= cinfo
->ret
.reg
;
2244 case ArgVtypeInIRegs
:
2246 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
2247 cfg
->ret
->opcode
= OP_REGOFFSET
;
2248 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
2249 cfg
->ret
->inst_offset
= offset
;
2250 if (cinfo
->ret
.storage
== ArgHFA
)
2257 /* This variable will be initalized in the prolog from R8 */
2258 cfg
->vret_addr
->opcode
= OP_REGOFFSET
;
2259 cfg
->vret_addr
->inst_basereg
= cfg
->frame_reg
;
2260 cfg
->vret_addr
->inst_offset
= offset
;
2262 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2263 printf ("vret_addr =");
2264 mono_print_ins (cfg
->vret_addr
);
2268 g_assert_not_reached ();
2273 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2274 ainfo
= cinfo
->args
+ i
;
2276 ins
= cfg
->args
[i
];
2277 if (ins
->opcode
== OP_REGVAR
)
2280 ins
->opcode
= OP_REGOFFSET
;
2281 ins
->inst_basereg
= cfg
->frame_reg
;
2283 switch (ainfo
->storage
) {
2287 // FIXME: Use nregs/size
2288 /* These will be copied to the stack in the prolog */
2289 ins
->inst_offset
= offset
;
2295 case ArgVtypeOnStack
:
2296 /* These are in the parent frame */
2297 g_assert (cfg
->arch
.args_reg
);
2298 ins
->inst_basereg
= cfg
->arch
.args_reg
;
2299 ins
->inst_offset
= ainfo
->offset
;
2301 case ArgVtypeInIRegs
:
2303 ins
->opcode
= OP_REGOFFSET
;
2304 ins
->inst_basereg
= cfg
->frame_reg
;
2305 /* These arguments are saved to the stack in the prolog */
2306 ins
->inst_offset
= offset
;
2307 if (cfg
->verbose_level
>= 2)
2308 printf ("arg %d allocated to %s+0x%0x.\n", i
, mono_arch_regname (ins
->inst_basereg
), (int)ins
->inst_offset
);
2309 if (ainfo
->storage
== ArgHFA
)
2315 case ArgVtypeByRefOnStack
: {
2318 if (ainfo
->gsharedvt
) {
2319 ins
->opcode
= OP_REGOFFSET
;
2320 ins
->inst_basereg
= cfg
->arch
.args_reg
;
2321 ins
->inst_offset
= ainfo
->offset
;
2325 /* The vtype address is in the parent frame */
2326 g_assert (cfg
->arch
.args_reg
);
2327 MONO_INST_NEW (cfg
, vtaddr
, 0);
2328 vtaddr
->opcode
= OP_REGOFFSET
;
2329 vtaddr
->inst_basereg
= cfg
->arch
.args_reg
;
2330 vtaddr
->inst_offset
= ainfo
->offset
;
2332 /* Need an indirection */
2333 ins
->opcode
= OP_VTARG_ADDR
;
2334 ins
->inst_left
= vtaddr
;
2337 case ArgVtypeByRef
: {
2340 if (ainfo
->gsharedvt
) {
2341 ins
->opcode
= OP_REGOFFSET
;
2342 ins
->inst_basereg
= cfg
->frame_reg
;
2343 ins
->inst_offset
= offset
;
2348 /* The vtype address is in a register, will be copied to the stack in the prolog */
2349 MONO_INST_NEW (cfg
, vtaddr
, 0);
2350 vtaddr
->opcode
= OP_REGOFFSET
;
2351 vtaddr
->inst_basereg
= cfg
->frame_reg
;
2352 vtaddr
->inst_offset
= offset
;
2355 /* Need an indirection */
2356 ins
->opcode
= OP_VTARG_ADDR
;
2357 ins
->inst_left
= vtaddr
;
2361 g_assert_not_reached ();
2366 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
2367 // FIXME: Allocate these to registers
2368 ins
= cfg
->arch
.seq_point_info_var
;
2372 offset
+= align
- 1;
2373 offset
&= ~(align
- 1);
2374 ins
->opcode
= OP_REGOFFSET
;
2375 ins
->inst_basereg
= cfg
->frame_reg
;
2376 ins
->inst_offset
= offset
;
2379 ins
= cfg
->arch
.ss_tramp_var
;
2383 offset
+= align
- 1;
2384 offset
&= ~(align
- 1);
2385 ins
->opcode
= OP_REGOFFSET
;
2386 ins
->inst_basereg
= cfg
->frame_reg
;
2387 ins
->inst_offset
= offset
;
2390 ins
= cfg
->arch
.bp_tramp_var
;
2394 offset
+= align
- 1;
2395 offset
&= ~(align
- 1);
2396 ins
->opcode
= OP_REGOFFSET
;
2397 ins
->inst_basereg
= cfg
->frame_reg
;
2398 ins
->inst_offset
= offset
;
2403 offsets
= mono_allocate_stack_slots (cfg
, FALSE
, &locals_stack_size
, &locals_stack_align
);
2404 if (locals_stack_align
)
2405 offset
= ALIGN_TO (offset
, locals_stack_align
);
2407 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
2408 if (offsets
[i
] != -1) {
2409 ins
= cfg
->varinfo
[i
];
2410 ins
->opcode
= OP_REGOFFSET
;
2411 ins
->inst_basereg
= cfg
->frame_reg
;
2412 ins
->inst_offset
= offset
+ offsets
[i
];
2413 //printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
2416 offset
+= locals_stack_size
;
2418 offset
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
2420 cfg
->stack_offset
= offset
;
2425 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
2430 LLVMCallInfo
*linfo
;
2432 n
= sig
->param_count
+ sig
->hasthis
;
2434 cinfo
= get_call_info (cfg
->mempool
, sig
);
2436 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
2438 switch (cinfo
->ret
.storage
) {
2445 linfo
->ret
.storage
= LLVMArgVtypeByRef
;
2448 // FIXME: This doesn't work yet since the llvm backend represents these types as an i8
2449 // array which is returned in int regs
2452 linfo
->ret
.storage
= LLVMArgFpStruct
;
2453 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2454 linfo
->ret
.esize
= cinfo
->ret
.esize
;
2456 case ArgVtypeInIRegs
:
2457 /* LLVM models this by returning an int */
2458 linfo
->ret
.storage
= LLVMArgVtypeAsScalar
;
2459 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2460 linfo
->ret
.esize
= cinfo
->ret
.esize
;
2463 g_assert_not_reached ();
2467 for (i
= 0; i
< n
; ++i
) {
2468 LLVMArgInfo
*lainfo
= &linfo
->args
[i
];
2470 ainfo
= cinfo
->args
+ i
;
2472 lainfo
->storage
= LLVMArgNone
;
2474 switch (ainfo
->storage
) {
2481 lainfo
->storage
= LLVMArgNormal
;
2484 case ArgVtypeByRefOnStack
:
2485 lainfo
->storage
= LLVMArgVtypeByRef
;
2490 lainfo
->storage
= LLVMArgAsFpArgs
;
2491 lainfo
->nslots
= ainfo
->nregs
;
2492 lainfo
->esize
= ainfo
->esize
;
2493 for (j
= 0; j
< ainfo
->nregs
; ++j
)
2494 lainfo
->pair_storage
[j
] = LLVMArgInFPReg
;
2497 case ArgVtypeInIRegs
:
2498 lainfo
->storage
= LLVMArgAsIArgs
;
2499 lainfo
->nslots
= ainfo
->nregs
;
2501 case ArgVtypeOnStack
:
2505 lainfo
->storage
= LLVMArgAsFpArgs
;
2506 lainfo
->nslots
= ainfo
->nregs
;
2507 lainfo
->esize
= ainfo
->esize
;
2508 lainfo
->ndummy_fpargs
= ainfo
->nfregs_to_skip
;
2509 for (j
= 0; j
< ainfo
->nregs
; ++j
)
2510 lainfo
->pair_storage
[j
] = LLVMArgInFPReg
;
2512 lainfo
->storage
= LLVMArgAsIArgs
;
2513 lainfo
->nslots
= ainfo
->size
/ 8;
2517 g_assert_not_reached ();
2527 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*arg
)
2533 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2534 ins
->dreg
= mono_alloc_ireg_copy (cfg
, arg
->dreg
);
2535 ins
->sreg1
= arg
->dreg
;
2536 MONO_ADD_INS (cfg
->cbb
, ins
);
2537 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, FALSE
);
2540 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2541 ins
->dreg
= mono_alloc_freg (cfg
);
2542 ins
->sreg1
= arg
->dreg
;
2543 MONO_ADD_INS (cfg
->cbb
, ins
);
2544 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2547 if (COMPILE_LLVM (cfg
))
2548 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2550 MONO_INST_NEW (cfg
, ins
, OP_RMOVE
);
2552 MONO_INST_NEW (cfg
, ins
, OP_ARM_SETFREG_R4
);
2553 ins
->dreg
= mono_alloc_freg (cfg
);
2554 ins
->sreg1
= arg
->dreg
;
2555 MONO_ADD_INS (cfg
->cbb
, ins
);
2556 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2559 g_assert_not_reached ();
2565 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
2567 MonoMethodSignature
*tmp_sig
;
2570 if (MONO_IS_TAILCALL_OPCODE (call
))
2573 g_assert (cinfo
->sig_cookie
.storage
== ArgOnStack
);
2576 * mono_ArgIterator_Setup assumes the signature cookie is
2577 * passed first and all the arguments which were before it are
2578 * passed on the stack after the signature. So compensate by
2579 * passing a different signature.
2581 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
2582 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
2583 tmp_sig
->sentinelpos
= 0;
2584 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
2586 sig_reg
= mono_alloc_ireg (cfg
);
2587 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
2589 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_reg
);
2593 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
2595 MonoMethodSignature
*sig
;
2596 MonoInst
*arg
, *vtarg
;
2601 sig
= call
->signature
;
2603 cinfo
= get_call_info (cfg
->mempool
, sig
);
2605 switch (cinfo
->ret
.storage
) {
2606 case ArgVtypeInIRegs
:
2608 if (MONO_IS_TAILCALL_OPCODE (call
))
2611 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2612 * the location pointed to by it after call in emit_move_return_value ().
2614 if (!cfg
->arch
.vret_addr_loc
) {
2615 cfg
->arch
.vret_addr_loc
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2616 /* Prevent it from being register allocated or optimized away */
2617 cfg
->arch
.vret_addr_loc
->flags
|= MONO_INST_VOLATILE
;
2620 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->arch
.vret_addr_loc
->dreg
, call
->vret_var
->dreg
);
2623 /* Pass the vtype return address in R8 */
2624 g_assert (!MONO_IS_TAILCALL_OPCODE (call
) || call
->vret_var
== cfg
->vret_addr
);
2625 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2626 vtarg
->sreg1
= call
->vret_var
->dreg
;
2627 vtarg
->dreg
= mono_alloc_preg (cfg
);
2628 MONO_ADD_INS (cfg
->cbb
, vtarg
);
2630 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
2636 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
2637 ainfo
= cinfo
->args
+ i
;
2638 arg
= call
->args
[i
];
2640 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
2641 /* Emit the signature cookie just before the implicit arguments */
2642 emit_sig_cookie (cfg
, call
, cinfo
);
2645 switch (ainfo
->storage
) {
2649 add_outarg_reg (cfg
, call
, ainfo
->storage
, ainfo
->reg
, arg
);
2652 switch (ainfo
->slot_size
) {
2654 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2657 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2660 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2663 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2666 g_assert_not_reached ();
2671 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2674 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, arg
->dreg
);
2676 case ArgVtypeInIRegs
:
2678 case ArgVtypeByRefOnStack
:
2679 case ArgVtypeOnStack
:
2685 size
= mono_class_value_size (arg
->klass
, &align
);
2687 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
2688 ins
->sreg1
= arg
->dreg
;
2689 ins
->klass
= arg
->klass
;
2690 ins
->backend
.size
= size
;
2691 ins
->inst_p0
= call
;
2692 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
2693 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
2694 MONO_ADD_INS (cfg
->cbb
, ins
);
2698 g_assert_not_reached ();
2703 /* Handle the case where there are no implicit arguments */
2704 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (cinfo
->nargs
== sig
->sentinelpos
))
2705 emit_sig_cookie (cfg
, call
, cinfo
);
2707 call
->call_info
= cinfo
;
2708 call
->stack_usage
= cinfo
->stack_usage
;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Emit IR to pass the valuetype in SRC as an outgoing call argument, according
 * to the ArgInfo attached to INS (computed earlier by get_call_info ()).
 * INS->inst_p0 is the call instruction, INS->inst_p1 the per-argument ArgInfo.
 *
 * NOTE(review): this block was reconstructed from a line-mangled extraction;
 * the switch case labels for the HFA and by-ref cases and the break statements
 * were lost and have been restored — verify against the upstream file.
 */
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	MonoInst *load;
	int i;

	/* Empty, non-gsharedvt vtypes occupy no registers/stack — nothing to pass. */
	if (ins->backend.size == 0 && !ainfo->gsharedvt)
		return;

	switch (ainfo->storage) {
	case ArgVtypeInIRegs:
		/* Load the vtype contents field-by-field into consecutive integer argument regs. */
		for (i = 0; i < ainfo->nregs; ++i) {
			// FIXME: Smaller sizes
			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->dreg = mono_alloc_ireg (cfg);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * sizeof (target_mgreg_t);
			MONO_ADD_INS (cfg->cbb, load);
			add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg + i, load);
		}
		break;
	case ArgHFA: /* NOTE(review): label lost in extraction — presumed ArgHFA (per-field FP loads below); confirm. */
		/* Homogeneous FP aggregate: one FP register per field, at the recorded field offsets. */
		for (i = 0; i < ainfo->nregs; ++i) {
			if (ainfo->esize == 4)
				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
			else
				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
			load->dreg = mono_alloc_freg (cfg);
			load->inst_basereg = src->dreg;
			load->inst_offset = ainfo->foffsets [i];
			MONO_ADD_INS (cfg->cbb, load);
			add_outarg_reg (cfg, call, ainfo->esize == 4 ? ArgInFRegR4 : ArgInFReg, ainfo->reg + i, load);
		}
		break;
	case ArgVtypeByRef: /* NOTE(review): label lost in extraction — restored; the body tests this value below. */
	case ArgVtypeByRefOnStack: {
		MonoInst *vtaddr, *load, *arg;

		/* Pass the vtype address in a reg/on the stack */
		if (ainfo->gsharedvt) {
			/* NOTE(review): gsharedvt branch body lost in extraction — presumed `load = src;`
			 * (pass the existing address directly, no copy); verify. */
			load = src;
		} else {
			/* Make a copy of the argument */
			vtaddr = mono_compile_create_var (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL);

			MONO_INST_NEW (cfg, load, OP_LDADDR);
			load->inst_p0 = vtaddr;
			/* The local is address-taken, so it must not be turned into a register. */
			vtaddr->flags |= MONO_INST_INDIRECT;
			load->type = STACK_MP;
			load->klass = vtaddr->klass;
			load->dreg = mono_alloc_ireg (cfg);
			MONO_ADD_INS (cfg->cbb, load);
			mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, ainfo->size, 8);
		}

		if (ainfo->storage == ArgVtypeByRef) {
			/* Address travels in an integer argument register. */
			MONO_INST_NEW (cfg, arg, OP_MOVE);
			arg->dreg = mono_alloc_preg (cfg);
			arg->sreg1 = load->dreg;
			MONO_ADD_INS (cfg->cbb, arg);
			add_outarg_reg (cfg, call, ArgInIReg, ainfo->reg, arg);
		} else {
			/* Address is stored into the outgoing-args area on the stack. */
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, load->dreg);
		}
		break;
	}
	case ArgVtypeOnStack:
		/* Copy the vtype contents 8 bytes at a time into the outgoing stack slot. */
		for (i = 0; i < ainfo->size / 8; ++i) {
			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->dreg = mono_alloc_ireg (cfg);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * 8;
			MONO_ADD_INS (cfg->cbb, load);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset + (i * 8), load->dreg);
		}
		break;
	default:
		g_assert_not_reached ();
		break;
	}
}
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR moving VAL into the method's return location, as dictated by the
 * return-value storage class in the cached CallInfo.
 *
 * NOTE(review): reconstructed from a line-mangled extraction; the case labels
 * and break statements were lost and have been restored — verify upstream.
 */
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature_internal (cfg->method);
	/* Lazily compute and cache the call info for this method. */
	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	switch (cinfo->ret.storage) {
	case ArgNone: /* NOTE(review): label lost in extraction — presumed ArgNone (void return); confirm. */
		break;
	case ArgInIReg: /* NOTE(review): label lost in extraction — presumed; confirm. */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFReg: /* NOTE(review): label lost in extraction — presumed; confirm. */
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFRegR4: /* NOTE(review): label lost in extraction — presumed; confirm. */
		if (COMPILE_LLVM (cfg))
			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		else if (cfg->r4fp) /* NOTE(review): this guard was lost in extraction — presumed cfg->r4fp; verify. */
			MONO_EMIT_NEW_UNALU (cfg, OP_RMOVE, cfg->ret->dreg, val->dreg);
		else
			/* Without r4fp, R4 values live in D regs; use the arch-specific setter. */
			MONO_EMIT_NEW_UNALU (cfg, OP_ARM_SETFREG_R4, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
		break;
	}
}
/*
 * mono_arch_tailcall_supported:
 *
 *   Decide whether a call from CALLER_SIG to CALLEE_SIG may be compiled as a
 * tail call on arm64. Requirements checked:
 *   - the callee's outgoing stack area fits within the caller's;
 *   - both return values use the same storage class;
 *   - stack usage stays below 1 GB (emit_ldrx/strx offsets are 32-bit);
 *   - no by-ref valuetype parameters (their address points at a caller local,
 *     which dies when the caller's frame is reused).
 *
 * NOTE(review): reconstructed from a line-mangled extraction; the trailing
 * `return res;` was lost and has been restored — verify upstream.
 */
gboolean
mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
{
	g_assert (caller_sig);
	g_assert (callee_sig);

	CallInfo *caller_info = get_call_info (NULL, caller_sig);
	CallInfo *callee_info = get_call_info (NULL, callee_sig);

	gboolean res = IS_SUPPORTED_TAILCALL (callee_info->stack_usage <= caller_info->stack_usage)
		  && IS_SUPPORTED_TAILCALL (caller_info->ret.storage == callee_info->ret.storage);

	// FIXME Limit stack_usage to 1G. emit_ldrx / strx has 32bit limits.
	res &= IS_SUPPORTED_TAILCALL (callee_info->stack_usage < (1 << 30));
	res &= IS_SUPPORTED_TAILCALL (caller_info->stack_usage < (1 << 30));

	// valuetype parameters are the address of a local
	const ArgInfo *ainfo;
	ainfo = callee_info->args + callee_sig->hasthis;
	for (int i = 0; res && i < callee_sig->param_count; ++i) {
		res = IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRef)
			&& IS_SUPPORTED_TAILCALL (ainfo [i].storage != ArgVtypeByRefOnStack);
	}

	/* get_call_info (NULL, ...) allocates with g_malloc — caller owns and frees. */
	g_free (caller_info);
	g_free (callee_info);

	return res;
}
2864 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
2866 return (imm
>= -((gint64
)1<<31) && imm
<= (((gint64
)1<<31)-1));
/*
 * mono_arch_peephole_pass_1:
 *
 *   First arch-specific peephole pass. Intentionally empty on arm64 — the
 * backend does all its instruction rewriting in mono_arch_lowering_pass ().
 */
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
/*
 * mono_arch_peephole_pass_2:
 *
 *   Second arch-specific peephole pass. Intentionally empty on arm64.
 */
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
/*
 * Allocate a new instruction DEST with opcode OP and insert it before the
 * current instruction. Implicitly captures the `bb` and `ins` locals of the
 * enclosing loop in mono_arch_lowering_pass () — only usable there.
 */
#define ADD_NEW_INS(cfg,dest,op) do {	\
		MONO_INST_NEW ((cfg), (dest), (op));	\
		mono_bblock_insert_before_ins (bb, ins, (dest)); \
	} while (0)
/*
 * mono_arch_lowering_pass:
 *
 *   Rewrite instructions in BB into forms the arm64 code emitter can handle:
 * fix carry-flag polarity after subtracts, decompose div/rem immediates,
 * fuse compare-with-zero + branch into cbz/cbnz, and swap operands of FP
 * compares whose successor condition would falsely match unordered inputs.
 *
 * NOTE(review): reconstructed from a line-mangled extraction; several case
 * labels, NULLIFY_INS calls and break statements were lost and have been
 * restored from the visible structure — verify against the upstream file.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *temp, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS (bb, ins) {
		switch (ins->opcode) {
		case OP_SBB:    /* NOTE(review): these four labels lost in extraction — presumed sub-with-carry ops; confirm. */
		case OP_ISBB:
		case OP_SUBCC:
		case OP_ISUBCC:
			if (ins->next  && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
				/* ARM sets the C flag to 1 if there was _no_ overflow */
				ins->next->opcode = OP_COND_EXC_NC;
			break;
		case OP_IDIV_IMM:   /* NOTE(review): labels partially lost in extraction — presumed full div/rem-imm set; confirm. */
		case OP_IREM_IMM:
		case OP_LDIV_IMM:
		case OP_LREM_IMM:
		case OP_IDIV_UN_IMM:
		case OP_IREM_UN_IMM:
		case OP_LDIV_UN_IMM:
		case OP_LREM_UN_IMM:
			/* arm64 has no div-by-immediate; load the constant into a register first. */
			mono_decompose_op_imm (cfg, bb, ins);
			break;
		case OP_LOCALLOC_IMM:
			/* Small localloc sizes are handled inline by the emitter; larger ones
			 * become the register variant with the size materialized as a constant. */
			if (ins->inst_imm > 32) {
				ADD_NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = temp->dreg;
				ins->opcode = mono_op_imm_to_op (ins->opcode);
			}
			break;
		case OP_ICOMPARE_IMM:
			/* Fuse (cmp reg, 0; beq/bne) into a single cbz/cbnz (32-bit). */
			if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBEQ) {
				ins->next->opcode = OP_ARM64_CBZW;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins); /* NOTE(review): line lost in extraction — restored; confirm. */
			} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_IBNE_UN) {
				ins->next->opcode = OP_ARM64_CBNZW;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins); /* NOTE(review): line lost in extraction — restored; confirm. */
			}
			break;
		case OP_LCOMPARE_IMM:
		case OP_COMPARE_IMM:
			/* Same fusion for the 64-bit compares. */
			if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBEQ) {
				ins->next->opcode = OP_ARM64_CBZX;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins); /* NOTE(review): line lost in extraction — restored; confirm. */
			} else if (ins->inst_imm == 0 && ins->next && ins->next->opcode == OP_LBNE_UN) {
				ins->next->opcode = OP_ARM64_CBNZX;
				ins->next->sreg1 = ins->sreg1;
				NULLIFY_INS (ins); /* NOTE(review): line lost in extraction — restored; confirm. */
			}
			break;
		case OP_FCOMPARE: /* NOTE(review): labels lost in extraction — presumed FP compares; confirm. */
		case OP_RCOMPARE: {
			gboolean swap = FALSE;
			int reg;

			if (!ins->next) {
				/* Optimized away */
				NULLIFY_INS (ins); /* NOTE(review): line lost in extraction — restored; confirm. */
				break;
			}

			/*
			 * FP compares with unordered operands set the flags
			 * to NZCV=0011, which matches some non-unordered compares
			 * as well, like LE, so have to swap the operands.
			 */
			switch (ins->next->opcode) {
			case OP_FBLT: /* NOTE(review): these labels lost in extraction — inferred from the replacement opcodes; confirm. */
				ins->next->opcode = OP_FBGT;
				swap = TRUE;
				break;
			case OP_FBLE:
				ins->next->opcode = OP_FBGE;
				swap = TRUE;
				break;
			case OP_RBLT:
				ins->next->opcode = OP_RBGT;
				swap = TRUE;
				break;
			case OP_RBLE:
				ins->next->opcode = OP_RBGE;
				swap = TRUE;
				break;
			default:
				break;
			}
			if (swap) {
				reg = ins->sreg1;
				ins->sreg1 = ins->sreg2;
				ins->sreg2 = reg; /* NOTE(review): swap tail lost in extraction — restored; confirm. */
			}
			break;
		}
		default:
			break;
		}

		last_ins = ins;
	}
	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
}
/*
 * mono_arch_decompose_long_opts:
 *
 *   Intentionally empty: arm64 is a 64-bit target, so long (64-bit) opcodes
 * need no decomposition into 32-bit pairs.
 */
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
}
/*
 * opcode_to_armcond:
 *
 *   Map a Mono branch / compare / cond-exception opcode to the ARM condition
 * code the emitters should encode.
 *
 * NOTE(review): this block was reconstructed from a line-mangled extraction;
 * the majority of the case labels and every `return ARMCOND_*` line were lost
 * and have been restored from the visible label groupings — verify every
 * group against the upstream file before relying on this text.
 */
static int
opcode_to_armcond (int opcode)
{
	switch (opcode) {
	case OP_IBEQ:
	case OP_LBEQ:
	case OP_FBEQ:
	case OP_CEQ:
	case OP_ICEQ:
	case OP_LCEQ:
	case OP_FCEQ:
	case OP_RCEQ:
	case OP_COND_EXC_IEQ:
	case OP_COND_EXC_EQ:
		return ARMCOND_EQ;
	case OP_IBGE:
	case OP_LBGE:
	case OP_FBGE:
	case OP_ICGE:
	case OP_FCGE:
	case OP_RCGE:
		return ARMCOND_GE;
	case OP_IBGT:
	case OP_LBGT:
	case OP_FBGT:
	case OP_CGT:
	case OP_ICGT:
	case OP_LCGT:
	case OP_FCGT:
	case OP_RCGT:
	case OP_COND_EXC_IGT:
	case OP_COND_EXC_GT:
		return ARMCOND_GT;
	case OP_IBLE:
	case OP_LBLE:
	case OP_FBLE:
	case OP_ICLE:
	case OP_FCLE:
	case OP_RCLE:
		return ARMCOND_LE;
	case OP_IBLT:
	case OP_LBLT:
	case OP_CLT:
	case OP_ICLT:
	case OP_COND_EXC_ILT:
	case OP_COND_EXC_LT:
		return ARMCOND_LT;
	case OP_IBNE_UN:
	case OP_LBNE_UN:
	case OP_ICNEQ:
	case OP_FBNE_UN:
	case OP_COND_EXC_INE_UN:
	case OP_COND_EXC_NE_UN:
		return ARMCOND_NE;
	case OP_IBGE_UN:
	case OP_LBGE_UN:
	case OP_FBGE_UN:
	case OP_ICGE_UN:
	case OP_COND_EXC_IGE_UN:
	case OP_COND_EXC_GE_UN:
		return ARMCOND_HS;
	case OP_IBGT_UN:
	case OP_LBGT_UN:
	case OP_FBGT_UN:
	case OP_CGT_UN:
	case OP_ICGT_UN:
	case OP_LCGT_UN:
	case OP_FCGT_UN:
	case OP_RCGT_UN:
	case OP_COND_EXC_IGT_UN:
	case OP_COND_EXC_GT_UN:
		return ARMCOND_HI;
	case OP_IBLE_UN:
	case OP_LBLE_UN:
	case OP_FBLE_UN:
	case OP_ICLE_UN:
	case OP_COND_EXC_ILE_UN:
	case OP_COND_EXC_LE_UN:
		return ARMCOND_LS;
	case OP_IBLT_UN:
	case OP_LBLT_UN:
	case OP_FBLT_UN:
	case OP_ICLT_UN:
	case OP_COND_EXC_ILT_UN:
	case OP_COND_EXC_LT_UN:
		return ARMCOND_LO;
	/*
	 * FCMP sets the NZCV condition bits as follows:
	 * eq = 0110
	 * < = 1000
	 * > = 0010
	 * unordered = 0011
	 * ARMCOND_LT is N!=V, so it matches unordered too, so
	 * fclt and fclt_un need to be special cased.
	 */
	case OP_FCLT:
	case OP_RCLT:
		/* N == 1 — strictly less-than, excludes unordered. */
		return ARMCOND_MI;
	case OP_FCLT_UN:
	case OP_RCLT_UN:
		/* Less-than OR unordered — plain ARMCOND_LT covers both. */
		return ARMCOND_LT;
	case OP_COND_EXC_C:
	case OP_COND_EXC_IC:
		return ARMCOND_CS;
	case OP_COND_EXC_OV:
	case OP_COND_EXC_IOV:
		return ARMCOND_VS;
	case OP_COND_EXC_NC:
	case OP_COND_EXC_INC:
		return ARMCOND_CC;
	case OP_COND_EXC_NO:
	case OP_COND_EXC_INO:
		return ARMCOND_VC;
	default:
		printf ("%s\n", mono_inst_name (opcode));
		g_assert_not_reached ();
		return -1;
	}
}
/*
 * emit_cond_exc:
 *
 *   Emit a conditional branch to the exception-throwing code for EXC_NAME,
 * taken when the condition derived from OPCODE holds. The branch target is
 * patched in later through the MONO_PATCH_INFO_EXC entry recorded here.
 * This clobbers LR.
 */
static __attribute__ ((__warn_unused_result__)) guint8*
emit_cond_exc (MonoCompile *cfg, guint8 *code, int opcode, const char *exc_name)
{
	int cond;

	cond = opcode_to_armcond (opcode);
	/* Capture PC into ip1 so the throw code knows the faulting location. */
	arm_adrx (code, ARMREG_IP1, code);

	mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, exc_name, MONO_R_ARM64_BCC);
	/* Displacement 0 placeholder — rewritten when the patch info is applied. */
	arm_bcc (code, cond, 0);
	return code;
}
/*
 * emit_move_return_value:
 *
 *   After a call instruction INS, move the hardware return value (registers
 * per the callee's CallInfo) into the vreg the JIT expects it in, or store a
 * register-returned vtype through the saved vret address.
 *
 * NOTE(review): reconstructed from a line-mangled extraction; case labels and
 * break statements were lost and have been restored — verify upstream.
 */
static guint8*
emit_move_return_value (MonoCompile *cfg, guint8 * code, MonoInst *ins)
{
	MonoCallInst *call;
	CallInfo *cinfo;

	call = (MonoCallInst*)ins;
	cinfo = call->call_info;

	switch (cinfo->ret.storage) {
	case ArgNone: /* NOTE(review): label lost in extraction — presumed ArgNone; confirm. */
		break;
	case ArgInIReg: /* NOTE(review): label lost in extraction — presumed; confirm. */
		/* LLVM compiled code might only set the bottom bits */
		if (call->signature && mini_get_underlying_type (call->signature->ret)->type == MONO_TYPE_I4)
			arm_sxtwx (code, call->inst.dreg, cinfo->ret.reg);
		else if (call->inst.dreg != cinfo->ret.reg)
			arm_movx (code, call->inst.dreg, cinfo->ret.reg);
		break;
	case ArgInFReg: /* NOTE(review): label lost in extraction — presumed; confirm. */
		if (call->inst.dreg != cinfo->ret.reg)
			arm_fmovd (code, call->inst.dreg, cinfo->ret.reg);
		break;
	case ArgInFRegR4: /* NOTE(review): label lost in extraction — presumed; confirm. */
		if (cfg->r4fp) /* NOTE(review): guard lost in extraction — presumed cfg->r4fp; verify. */
			arm_fmovs (code, call->inst.dreg, cinfo->ret.reg);
		else
			arm_fcvt_sd (code, call->inst.dreg, cinfo->ret.reg);
		break;
	case ArgVtypeInIRegs: {
		MonoInst *loc = cfg->arch.vret_addr_loc;
		int i;

		/* Load the destination address */
		g_assert (loc && loc->opcode == OP_REGOFFSET);
		code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
		for (i = 0; i < cinfo->ret.nregs; ++i)
			arm_strx (code, cinfo->ret.reg + i, ARMREG_LR, i * 8);
		break;
	}
	case ArgHFA: { /* NOTE(review): label lost in extraction — presumed ArgHFA (per-field FP stores below); confirm. */
		MonoInst *loc = cfg->arch.vret_addr_loc;
		int i;

		/* Load the destination address */
		g_assert (loc && loc->opcode == OP_REGOFFSET);
		code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, loc->inst_offset);
		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				arm_strfpw (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
			else
				arm_strfpx (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
		}
		break;
	}
	case ArgVtypeByRef: /* NOTE(review): label lost in extraction — presumed (callee wrote through the ref); confirm. */
		break;
	default:
		g_assert_not_reached ();
		break;
	}
	return code;
}
/*
 * emit_branch_island:
 *
 *   Emit a branch island for the conditional branches from cfg->native_code + start_offset to code.
 * Conditional branches on arm64 have limited range; for large basic blocks
 * each cond branch is redirected to an unconditional branch placed here,
 * which can reach anywhere.
 *
 * NOTE(review): reconstructed from a line-mangled extraction; the
 * `if (island_size)` guard braces and the per-slot `arm_b` emission were
 * lost and have been restored — verify upstream.
 */
static guint8*
emit_branch_island (MonoCompile *cfg, guint8 *code, int start_offset)
{
	MonoJumpInfo *ji;

	/* Iterate over the patch infos added so far by this bb */
	int island_size = 0;
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->ip.i < start_offset)
			/* The patch infos are in reverse order, so this means the end */
			break;
		if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ)
			island_size += 4; /* NOTE(review): increment lost in extraction — one 4-byte branch per entry; confirm. */
	}

	if (island_size) {
		code = realloc_code (cfg, island_size);

		/* Branch over the island */
		arm_b (code, code + 4 + island_size);

		for (ji = cfg->patch_info; ji; ji = ji->next) {
			if (ji->ip.i < start_offset)
				break;
			if (ji->relocation == MONO_R_ARM64_BCC || ji->relocation == MONO_R_ARM64_CBZ) {
				/* Rewrite the cond branch so it branches to an unconditional branch in the branch island */
				arm_patch_rel (cfg->native_code + ji->ip.i, code, ji->relocation);
				/* Rewrite the patch so it points to the unconditional branch */
				ji->ip.i = code - cfg->native_code;
				ji->relocation = MONO_R_ARM64_B;
				/* NOTE(review): emission of the island's placeholder `arm_b` was lost in extraction — restored; confirm. */
				arm_b (code, code);
			}
		}
		set_code_cursor (cfg, code);
	}
	return code;
}
3247 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3251 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
3252 int start_offset
, max_len
, dreg
, sreg1
, sreg2
;
3255 if (cfg
->verbose_level
> 2)
3256 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
3258 start_offset
= code
- cfg
->native_code
;
3259 g_assert (start_offset
<= cfg
->code_size
);
3261 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3262 guint offset
= code
- cfg
->native_code
;
3263 set_code_cursor (cfg
, code
);
3264 max_len
= ins_get_size (ins
->opcode
);
3265 code
= realloc_code (cfg
, max_len
);
3267 if (G_UNLIKELY (cfg
->arch
.cond_branch_islands
&& offset
- start_offset
> 4 * 0x1ffff)) {
3268 /* Emit a branch island for large basic blocks */
3269 code
= emit_branch_island (cfg
, code
, start_offset
);
3270 offset
= code
- cfg
->native_code
;
3271 start_offset
= offset
;
3274 mono_debug_record_line_number (cfg
, ins
, offset
);
3279 imm
= ins
->inst_imm
;
3281 switch (ins
->opcode
) {
3283 code
= emit_imm (code
, dreg
, ins
->inst_c0
);
3286 code
= emit_imm64 (code
, dreg
, ins
->inst_c0
);
3290 arm_movx (code
, dreg
, sreg1
);
3293 case OP_RELAXED_NOP
:
3296 mono_add_patch_info_rel (cfg
, offset
, (MonoJumpInfoType
)(gsize
)ins
->inst_i1
, ins
->inst_p0
, MONO_R_ARM64_IMM
);
3297 code
= emit_imm64_template (code
, dreg
);
3301 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3302 * So instead of emitting a trap, we emit a call a C function and place a
3305 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
3310 arm_addx_imm (code
, ARMREG_IP0
, sreg1
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
3311 // FIXME: andx_imm doesn't work yet
3312 code
= emit_imm (code
, ARMREG_IP1
, -MONO_ARCH_FRAME_ALIGNMENT
);
3313 arm_andx (code
, ARMREG_IP0
, ARMREG_IP0
, ARMREG_IP1
);
3314 //arm_andx_imm (code, ARMREG_IP0, sreg1, - MONO_ARCH_FRAME_ALIGNMENT);
3315 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
3316 arm_subx (code
, ARMREG_IP1
, ARMREG_IP1
, ARMREG_IP0
);
3317 arm_movspx (code
, ARMREG_SP
, ARMREG_IP1
);
3320 /* ip1 = pointer, ip0 = end */
3321 arm_addx (code
, ARMREG_IP0
, ARMREG_IP1
, ARMREG_IP0
);
3323 arm_cmpx (code
, ARMREG_IP1
, ARMREG_IP0
);
3325 arm_bcc (code
, ARMCOND_EQ
, 0);
3326 arm_stpx (code
, ARMREG_RZR
, ARMREG_RZR
, ARMREG_IP1
, 0);
3327 arm_addx_imm (code
, ARMREG_IP1
, ARMREG_IP1
, 16);
3328 arm_b (code
, buf
[0]);
3329 arm_patch_rel (buf
[1], code
, MONO_R_ARM64_BCC
);
3331 arm_movspx (code
, dreg
, ARMREG_SP
);
3332 if (cfg
->param_area
)
3333 code
= emit_subx_sp_imm (code
, cfg
->param_area
);
3336 case OP_LOCALLOC_IMM
: {
3339 imm
= ALIGN_TO (ins
->inst_imm
, MONO_ARCH_FRAME_ALIGNMENT
);
3340 g_assert (arm_is_arith_imm (imm
));
3341 arm_subx_imm (code
, ARMREG_SP
, ARMREG_SP
, imm
);
3344 g_assert (MONO_ARCH_FRAME_ALIGNMENT
== 16);
3346 while (offset
< imm
) {
3347 arm_stpx (code
, ARMREG_RZR
, ARMREG_RZR
, ARMREG_SP
, offset
);
3350 arm_movspx (code
, dreg
, ARMREG_SP
);
3351 if (cfg
->param_area
)
3352 code
= emit_subx_sp_imm (code
, cfg
->param_area
);
3356 code
= emit_aotconst (cfg
, code
, dreg
, (MonoJumpInfoType
)(gsize
)ins
->inst_i1
, ins
->inst_p0
);
3358 case OP_OBJC_GET_SELECTOR
:
3359 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_OBJC_SELECTOR_REF
, ins
->inst_p0
);
3360 /* See arch_emit_objc_selector_ref () in aot-compiler.c */
3361 arm_ldrx_lit (code
, ins
->dreg
, 0);
3365 case OP_SEQ_POINT
: {
3366 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
3369 * For AOT, we use one got slot per method, which will point to a
3370 * SeqPointInfo structure, containing all the information required
3371 * by the code below.
3373 if (cfg
->compile_aot
) {
3374 g_assert (info_var
);
3375 g_assert (info_var
->opcode
== OP_REGOFFSET
);
3378 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
3379 MonoInst
*var
= cfg
->arch
.ss_tramp_var
;
3382 g_assert (var
->opcode
== OP_REGOFFSET
);
3383 /* Load ss_tramp_var */
3384 /* This is equal to &ss_trampoline */
3385 arm_ldrx (code
, ARMREG_IP1
, var
->inst_basereg
, var
->inst_offset
);
3386 /* Load the trampoline address */
3387 arm_ldrx (code
, ARMREG_IP1
, ARMREG_IP1
, 0);
3388 /* Call it if it is non-null */
3389 arm_cbzx (code
, ARMREG_IP1
, code
+ 8);
3390 code
= mono_arm_emit_blrx (code
, ARMREG_IP1
);
3393 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3395 if (cfg
->compile_aot
) {
3396 const guint32 offset
= code
- cfg
->native_code
;
3399 arm_ldrx (code
, ARMREG_IP1
, info_var
->inst_basereg
, info_var
->inst_offset
);
3400 /* Add the offset */
3401 val
= ((offset
/ 4) * sizeof (target_mgreg_t
)) + MONO_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
3402 /* Load the info->bp_addrs [offset], which is either 0 or the address of the bp trampoline */
3403 code
= emit_ldrx (code
, ARMREG_IP1
, ARMREG_IP1
, val
);
3404 /* Skip the load if its 0 */
3405 arm_cbzx (code
, ARMREG_IP1
, code
+ 8);
3406 /* Call the breakpoint trampoline */
3407 code
= mono_arm_emit_blrx (code
, ARMREG_IP1
);
3409 MonoInst
*var
= cfg
->arch
.bp_tramp_var
;
3412 g_assert (var
->opcode
== OP_REGOFFSET
);
3413 /* Load the address of the bp trampoline into IP0 */
3414 arm_ldrx (code
, ARMREG_IP0
, var
->inst_basereg
, var
->inst_offset
);
3416 * A placeholder for a possible breakpoint inserted by
3417 * mono_arch_set_breakpoint ().
3426 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
, MONO_R_ARM64_B
);
3430 arm_brx (code
, sreg1
);
3462 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_BCC
);
3463 cond
= opcode_to_armcond (ins
->opcode
);
3464 arm_bcc (code
, cond
, 0);
3468 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_BCC
);
3469 /* For fp compares, ARMCOND_LT is lt or unordered */
3470 arm_bcc (code
, ARMCOND_LT
, 0);
3473 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_BCC
);
3474 arm_bcc (code
, ARMCOND_EQ
, 0);
3475 mono_add_patch_info_rel (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_BCC
);
3476 /* For fp compares, ARMCOND_LT is lt or unordered */
3477 arm_bcc (code
, ARMCOND_LT
, 0);
3480 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_CBZ
);
3481 arm_cbzw (code
, sreg1
, 0);
3484 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_CBZ
);
3485 arm_cbzx (code
, sreg1
, 0);
3487 case OP_ARM64_CBNZW
:
3488 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_CBZ
);
3489 arm_cbnzw (code
, sreg1
, 0);
3491 case OP_ARM64_CBNZX
:
3492 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_true_bb
, MONO_R_ARM64_CBZ
);
3493 arm_cbnzx (code
, sreg1
, 0);
3497 arm_addw (code
, dreg
, sreg1
, sreg2
);
3500 arm_addx (code
, dreg
, sreg1
, sreg2
);
3503 arm_subw (code
, dreg
, sreg1
, sreg2
);
3506 arm_subx (code
, dreg
, sreg1
, sreg2
);
3509 arm_andw (code
, dreg
, sreg1
, sreg2
);
3512 arm_andx (code
, dreg
, sreg1
, sreg2
);
3515 arm_orrw (code
, dreg
, sreg1
, sreg2
);
3518 arm_orrx (code
, dreg
, sreg1
, sreg2
);
3521 arm_eorw (code
, dreg
, sreg1
, sreg2
);
3524 arm_eorx (code
, dreg
, sreg1
, sreg2
);
3527 arm_negw (code
, dreg
, sreg1
);
3530 arm_negx (code
, dreg
, sreg1
);
3533 arm_mvnw (code
, dreg
, sreg1
);
3536 arm_mvnx (code
, dreg
, sreg1
);
3539 arm_addsw (code
, dreg
, sreg1
, sreg2
);
3543 arm_addsx (code
, dreg
, sreg1
, sreg2
);
3546 arm_subsw (code
, dreg
, sreg1
, sreg2
);
3550 arm_subsx (code
, dreg
, sreg1
, sreg2
);
3553 arm_cmpw (code
, sreg1
, sreg2
);
3557 arm_cmpx (code
, sreg1
, sreg2
);
3560 code
= emit_addw_imm (code
, dreg
, sreg1
, imm
);
3564 code
= emit_addx_imm (code
, dreg
, sreg1
, imm
);
3567 code
= emit_subw_imm (code
, dreg
, sreg1
, imm
);
3570 code
= emit_subx_imm (code
, dreg
, sreg1
, imm
);
3573 code
= emit_andw_imm (code
, dreg
, sreg1
, imm
);
3577 code
= emit_andx_imm (code
, dreg
, sreg1
, imm
);
3580 code
= emit_orrw_imm (code
, dreg
, sreg1
, imm
);
3583 code
= emit_orrx_imm (code
, dreg
, sreg1
, imm
);
3586 code
= emit_eorw_imm (code
, dreg
, sreg1
, imm
);
3589 code
= emit_eorx_imm (code
, dreg
, sreg1
, imm
);
3591 case OP_ICOMPARE_IMM
:
3592 code
= emit_cmpw_imm (code
, sreg1
, imm
);
3594 case OP_LCOMPARE_IMM
:
3595 case OP_COMPARE_IMM
:
3597 arm_cmpx (code
, sreg1
, ARMREG_RZR
);
3599 // FIXME: 32 vs 64 bit issues for 0xffffffff
3600 code
= emit_imm64 (code
, ARMREG_LR
, imm
);
3601 arm_cmpx (code
, sreg1
, ARMREG_LR
);
3605 arm_lslvw (code
, dreg
, sreg1
, sreg2
);
3608 arm_lslvx (code
, dreg
, sreg1
, sreg2
);
3611 arm_asrvw (code
, dreg
, sreg1
, sreg2
);
3614 arm_asrvx (code
, dreg
, sreg1
, sreg2
);
3617 arm_lsrvw (code
, dreg
, sreg1
, sreg2
);
3620 arm_lsrvx (code
, dreg
, sreg1
, sreg2
);
3624 arm_movx (code
, dreg
, sreg1
);
3626 arm_lslw (code
, dreg
, sreg1
, imm
);
3631 arm_movx (code
, dreg
, sreg1
);
3633 arm_lslx (code
, dreg
, sreg1
, imm
);
3637 arm_movx (code
, dreg
, sreg1
);
3639 arm_asrw (code
, dreg
, sreg1
, imm
);
3644 arm_movx (code
, dreg
, sreg1
);
3646 arm_asrx (code
, dreg
, sreg1
, imm
);
3648 case OP_ISHR_UN_IMM
:
3650 arm_movx (code
, dreg
, sreg1
);
3652 arm_lsrw (code
, dreg
, sreg1
, imm
);
3655 case OP_LSHR_UN_IMM
:
3657 arm_movx (code
, dreg
, sreg1
);
3659 arm_lsrx (code
, dreg
, sreg1
, imm
);
3664 arm_sxtwx (code
, dreg
, sreg1
);
3667 /* Clean out the upper word */
3668 arm_movw (code
, dreg
, sreg1
);
3671 /* MULTIPLY/DIVISION */
3674 // FIXME: Optimize this
3675 /* Check for zero */
3676 arm_cmpx_imm (code
, sreg2
, 0);
3677 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3678 /* Check for INT_MIN/-1 */
3679 code
= emit_imm (code
, ARMREG_IP0
, 0x80000000);
3680 arm_cmpx (code
, sreg1
, ARMREG_IP0
);
3681 arm_cset (code
, ARMCOND_EQ
, ARMREG_IP1
);
3682 code
= emit_imm (code
, ARMREG_IP0
, 0xffffffff);
3683 arm_cmpx (code
, sreg2
, ARMREG_IP0
);
3684 arm_cset (code
, ARMCOND_EQ
, ARMREG_IP0
);
3685 arm_andx (code
, ARMREG_IP0
, ARMREG_IP0
, ARMREG_IP1
);
3686 arm_cmpx_imm (code
, ARMREG_IP0
, 1);
3687 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "OverflowException");
3688 if (ins
->opcode
== OP_IREM
) {
3689 arm_sdivw (code
, ARMREG_LR
, sreg1
, sreg2
);
3690 arm_msubw (code
, dreg
, ARMREG_LR
, sreg2
, sreg1
);
3692 arm_sdivw (code
, dreg
, sreg1
, sreg2
);
3696 arm_cmpx_imm (code
, sreg2
, 0);
3697 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3698 arm_udivw (code
, dreg
, sreg1
, sreg2
);
3701 arm_cmpx_imm (code
, sreg2
, 0);
3702 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3703 arm_udivw (code
, ARMREG_LR
, sreg1
, sreg2
);
3704 arm_msubw (code
, dreg
, ARMREG_LR
, sreg2
, sreg1
);
3708 // FIXME: Optimize this
3709 /* Check for zero */
3710 arm_cmpx_imm (code
, sreg2
, 0);
3711 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3712 /* Check for INT64_MIN/-1 */
3713 code
= emit_imm64 (code
, ARMREG_IP0
, 0x8000000000000000);
3714 arm_cmpx (code
, sreg1
, ARMREG_IP0
);
3715 arm_cset (code
, ARMCOND_EQ
, ARMREG_IP1
);
3716 code
= emit_imm64 (code
, ARMREG_IP0
, 0xffffffffffffffff);
3717 arm_cmpx (code
, sreg2
, ARMREG_IP0
);
3718 arm_cset (code
, ARMCOND_EQ
, ARMREG_IP0
);
3719 arm_andx (code
, ARMREG_IP0
, ARMREG_IP0
, ARMREG_IP1
);
3720 arm_cmpx_imm (code
, ARMREG_IP0
, 1);
3721 /* 64 bit uses OverflowException */
3722 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "OverflowException");
3723 if (ins
->opcode
== OP_LREM
) {
3724 arm_sdivx (code
, ARMREG_LR
, sreg1
, sreg2
);
3725 arm_msubx (code
, dreg
, ARMREG_LR
, sreg2
, sreg1
);
3727 arm_sdivx (code
, dreg
, sreg1
, sreg2
);
3731 arm_cmpx_imm (code
, sreg2
, 0);
3732 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3733 arm_udivx (code
, dreg
, sreg1
, sreg2
);
3736 arm_cmpx_imm (code
, sreg2
, 0);
3737 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_IEQ
, "DivideByZeroException");
3738 arm_udivx (code
, ARMREG_LR
, sreg1
, sreg2
);
3739 arm_msubx (code
, dreg
, ARMREG_LR
, sreg2
, sreg1
);
3742 arm_mulw (code
, dreg
, sreg1
, sreg2
);
3745 arm_mulx (code
, dreg
, sreg1
, sreg2
);
3748 code
= emit_imm (code
, ARMREG_LR
, imm
);
3749 arm_mulw (code
, dreg
, sreg1
, ARMREG_LR
);
3753 code
= emit_imm (code
, ARMREG_LR
, imm
);
3754 arm_mulx (code
, dreg
, sreg1
, ARMREG_LR
);
3758 case OP_ICONV_TO_I1
:
3759 case OP_LCONV_TO_I1
:
3760 arm_sxtbx (code
, dreg
, sreg1
);
3762 case OP_ICONV_TO_I2
:
3763 case OP_LCONV_TO_I2
:
3764 arm_sxthx (code
, dreg
, sreg1
);
3766 case OP_ICONV_TO_U1
:
3767 case OP_LCONV_TO_U1
:
3768 arm_uxtbw (code
, dreg
, sreg1
);
3770 case OP_ICONV_TO_U2
:
3771 case OP_LCONV_TO_U2
:
3772 arm_uxthw (code
, dreg
, sreg1
);
3798 cond
= opcode_to_armcond (ins
->opcode
);
3799 arm_cset (code
, cond
, dreg
);
3812 cond
= opcode_to_armcond (ins
->opcode
);
3813 arm_fcmpd (code
, sreg1
, sreg2
);
3814 arm_cset (code
, cond
, dreg
);
3819 case OP_LOADI1_MEMBASE
:
3820 code
= emit_ldrsbx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3822 case OP_LOADU1_MEMBASE
:
3823 code
= emit_ldrb (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3825 case OP_LOADI2_MEMBASE
:
3826 code
= emit_ldrshx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3828 case OP_LOADU2_MEMBASE
:
3829 code
= emit_ldrh (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3831 case OP_LOADI4_MEMBASE
:
3832 code
= emit_ldrswx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3834 case OP_LOADU4_MEMBASE
:
3835 code
= emit_ldrw (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3837 case OP_LOAD_MEMBASE
:
3838 case OP_LOADI8_MEMBASE
:
3839 code
= emit_ldrx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3841 case OP_STOREI1_MEMBASE_IMM
:
3842 case OP_STOREI2_MEMBASE_IMM
:
3843 case OP_STOREI4_MEMBASE_IMM
:
3844 case OP_STORE_MEMBASE_IMM
:
3845 case OP_STOREI8_MEMBASE_IMM
: {
3849 code
= emit_imm (code
, ARMREG_LR
, imm
);
3852 immreg
= ARMREG_RZR
;
3855 switch (ins
->opcode
) {
3856 case OP_STOREI1_MEMBASE_IMM
:
3857 code
= emit_strb (code
, immreg
, ins
->inst_destbasereg
, ins
->inst_offset
);
3859 case OP_STOREI2_MEMBASE_IMM
:
3860 code
= emit_strh (code
, immreg
, ins
->inst_destbasereg
, ins
->inst_offset
);
3862 case OP_STOREI4_MEMBASE_IMM
:
3863 code
= emit_strw (code
, immreg
, ins
->inst_destbasereg
, ins
->inst_offset
);
3865 case OP_STORE_MEMBASE_IMM
:
3866 case OP_STOREI8_MEMBASE_IMM
:
3867 code
= emit_strx (code
, immreg
, ins
->inst_destbasereg
, ins
->inst_offset
);
3870 g_assert_not_reached ();
3875 case OP_STOREI1_MEMBASE_REG
:
3876 code
= emit_strb (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3878 case OP_STOREI2_MEMBASE_REG
:
3879 code
= emit_strh (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3881 case OP_STOREI4_MEMBASE_REG
:
3882 code
= emit_strw (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3884 case OP_STORE_MEMBASE_REG
:
3885 case OP_STOREI8_MEMBASE_REG
:
3886 code
= emit_strx (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3889 code
= emit_tls_get (code
, dreg
, ins
->inst_offset
);
3892 code
= emit_tls_set (code
, sreg1
, ins
->inst_offset
);
3895 case OP_MEMORY_BARRIER
:
3896 arm_dmb (code
, ARM_DMB_ISH
);
3898 case OP_ATOMIC_ADD_I4
: {
3902 arm_ldxrw (code
, ARMREG_IP0
, sreg1
);
3903 arm_addx (code
, ARMREG_IP0
, ARMREG_IP0
, sreg2
);
3904 arm_stlxrw (code
, ARMREG_IP1
, ARMREG_IP0
, sreg1
);
3905 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3907 arm_dmb (code
, ARM_DMB_ISH
);
3908 arm_movx (code
, dreg
, ARMREG_IP0
);
3911 case OP_ATOMIC_ADD_I8
: {
3915 arm_ldxrx (code
, ARMREG_IP0
, sreg1
);
3916 arm_addx (code
, ARMREG_IP0
, ARMREG_IP0
, sreg2
);
3917 arm_stlxrx (code
, ARMREG_IP1
, ARMREG_IP0
, sreg1
);
3918 arm_cbnzx (code
, ARMREG_IP1
, buf
[0]);
3920 arm_dmb (code
, ARM_DMB_ISH
);
3921 arm_movx (code
, dreg
, ARMREG_IP0
);
3924 case OP_ATOMIC_EXCHANGE_I4
: {
3928 arm_ldxrw (code
, ARMREG_IP0
, sreg1
);
3929 arm_stlxrw (code
, ARMREG_IP1
, sreg2
, sreg1
);
3930 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3932 arm_dmb (code
, ARM_DMB_ISH
);
3933 arm_movx (code
, dreg
, ARMREG_IP0
);
3936 case OP_ATOMIC_EXCHANGE_I8
: {
3940 arm_ldxrx (code
, ARMREG_IP0
, sreg1
);
3941 arm_stlxrx (code
, ARMREG_IP1
, sreg2
, sreg1
);
3942 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3944 arm_dmb (code
, ARM_DMB_ISH
);
3945 arm_movx (code
, dreg
, ARMREG_IP0
);
3948 case OP_ATOMIC_CAS_I4
: {
3951 /* sreg2 is the value, sreg3 is the comparand */
3953 arm_ldxrw (code
, ARMREG_IP0
, sreg1
);
3954 arm_cmpw (code
, ARMREG_IP0
, ins
->sreg3
);
3956 arm_bcc (code
, ARMCOND_NE
, 0);
3957 arm_stlxrw (code
, ARMREG_IP1
, sreg2
, sreg1
);
3958 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3959 arm_patch_rel (buf
[1], code
, MONO_R_ARM64_BCC
);
3961 arm_dmb (code
, ARM_DMB_ISH
);
3962 arm_movx (code
, dreg
, ARMREG_IP0
);
3965 case OP_ATOMIC_CAS_I8
: {
3969 arm_ldxrx (code
, ARMREG_IP0
, sreg1
);
3970 arm_cmpx (code
, ARMREG_IP0
, ins
->sreg3
);
3972 arm_bcc (code
, ARMCOND_NE
, 0);
3973 arm_stlxrx (code
, ARMREG_IP1
, sreg2
, sreg1
);
3974 arm_cbnzw (code
, ARMREG_IP1
, buf
[0]);
3975 arm_patch_rel (buf
[1], code
, MONO_R_ARM64_BCC
);
3977 arm_dmb (code
, ARM_DMB_ISH
);
3978 arm_movx (code
, dreg
, ARMREG_IP0
);
3981 case OP_ATOMIC_LOAD_I1
: {
3982 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3983 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3984 arm_dmb (code
, ARM_DMB_ISH
);
3985 arm_ldarb (code
, ins
->dreg
, ARMREG_LR
);
3986 arm_sxtbx (code
, ins
->dreg
, ins
->dreg
);
3989 case OP_ATOMIC_LOAD_U1
: {
3990 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3991 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
3992 arm_dmb (code
, ARM_DMB_ISH
);
3993 arm_ldarb (code
, ins
->dreg
, ARMREG_LR
);
3994 arm_uxtbx (code
, ins
->dreg
, ins
->dreg
);
3997 case OP_ATOMIC_LOAD_I2
: {
3998 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
3999 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4000 arm_dmb (code
, ARM_DMB_ISH
);
4001 arm_ldarh (code
, ins
->dreg
, ARMREG_LR
);
4002 arm_sxthx (code
, ins
->dreg
, ins
->dreg
);
4005 case OP_ATOMIC_LOAD_U2
: {
4006 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
4007 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4008 arm_dmb (code
, ARM_DMB_ISH
);
4009 arm_ldarh (code
, ins
->dreg
, ARMREG_LR
);
4010 arm_uxthx (code
, ins
->dreg
, ins
->dreg
);
4013 case OP_ATOMIC_LOAD_I4
: {
4014 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
4015 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4016 arm_dmb (code
, ARM_DMB_ISH
);
4017 arm_ldarw (code
, ins
->dreg
, ARMREG_LR
);
4018 arm_sxtwx (code
, ins
->dreg
, ins
->dreg
);
4021 case OP_ATOMIC_LOAD_U4
: {
4022 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
4023 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4024 arm_dmb (code
, ARM_DMB_ISH
);
4025 arm_ldarw (code
, ins
->dreg
, ARMREG_LR
);
4026 arm_movw (code
, ins
->dreg
, ins
->dreg
); /* Clear upper half of the register. */
4029 case OP_ATOMIC_LOAD_I8
:
4030 case OP_ATOMIC_LOAD_U8
: {
4031 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
4032 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4033 arm_dmb (code
, ARM_DMB_ISH
);
4034 arm_ldarx (code
, ins
->dreg
, ARMREG_LR
);
4037 case OP_ATOMIC_LOAD_R4
: {
4038 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
4039 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4040 arm_dmb (code
, ARM_DMB_ISH
);
4042 arm_ldarw (code
, ARMREG_LR
, ARMREG_LR
);
4043 arm_fmov_rx_to_double (code
, ins
->dreg
, ARMREG_LR
);
4045 arm_ldarw (code
, ARMREG_LR
, ARMREG_LR
);
4046 arm_fmov_rx_to_double (code
, FP_TEMP_REG
, ARMREG_LR
);
4047 arm_fcvt_sd (code
, ins
->dreg
, FP_TEMP_REG
);
4051 case OP_ATOMIC_LOAD_R8
: {
4052 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_basereg
, ins
->inst_offset
);
4053 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4054 arm_dmb (code
, ARM_DMB_ISH
);
4055 arm_ldarx (code
, ARMREG_LR
, ARMREG_LR
);
4056 arm_fmov_rx_to_double (code
, ins
->dreg
, ARMREG_LR
);
4059 case OP_ATOMIC_STORE_I1
:
4060 case OP_ATOMIC_STORE_U1
: {
4061 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4062 arm_stlrb (code
, ARMREG_LR
, ins
->sreg1
);
4063 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4064 arm_dmb (code
, ARM_DMB_ISH
);
4067 case OP_ATOMIC_STORE_I2
:
4068 case OP_ATOMIC_STORE_U2
: {
4069 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4070 arm_stlrh (code
, ARMREG_LR
, ins
->sreg1
);
4071 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4072 arm_dmb (code
, ARM_DMB_ISH
);
4075 case OP_ATOMIC_STORE_I4
:
4076 case OP_ATOMIC_STORE_U4
: {
4077 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4078 arm_stlrw (code
, ARMREG_LR
, ins
->sreg1
);
4079 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4080 arm_dmb (code
, ARM_DMB_ISH
);
4083 case OP_ATOMIC_STORE_I8
:
4084 case OP_ATOMIC_STORE_U8
: {
4085 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4086 arm_stlrx (code
, ARMREG_LR
, ins
->sreg1
);
4087 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4088 arm_dmb (code
, ARM_DMB_ISH
);
4091 case OP_ATOMIC_STORE_R4
: {
4092 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4094 arm_fmov_double_to_rx (code
, ARMREG_IP0
, ins
->sreg1
);
4095 arm_stlrw (code
, ARMREG_LR
, ARMREG_IP0
);
4097 arm_fcvt_ds (code
, FP_TEMP_REG
, ins
->sreg1
);
4098 arm_fmov_double_to_rx (code
, ARMREG_IP0
, FP_TEMP_REG
);
4099 arm_stlrw (code
, ARMREG_LR
, ARMREG_IP0
);
4101 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4102 arm_dmb (code
, ARM_DMB_ISH
);
4105 case OP_ATOMIC_STORE_R8
: {
4106 code
= emit_addx_imm (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4107 arm_fmov_double_to_rx (code
, ARMREG_IP0
, ins
->sreg1
);
4108 arm_stlrx (code
, ARMREG_LR
, ARMREG_IP0
);
4109 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4110 arm_dmb (code
, ARM_DMB_ISH
);
4116 guint64 imm
= *(guint64
*)ins
->inst_p0
;
4119 arm_fmov_rx_to_double (code
, dreg
, ARMREG_RZR
);
4121 code
= emit_imm64 (code
, ARMREG_LR
, imm
);
4122 arm_fmov_rx_to_double (code
, ins
->dreg
, ARMREG_LR
);
4127 guint64 imm
= *(guint32
*)ins
->inst_p0
;
4129 code
= emit_imm64 (code
, ARMREG_LR
, imm
);
4131 arm_fmov_rx_to_double (code
, dreg
, ARMREG_LR
);
4133 arm_fmov_rx_to_double (code
, FP_TEMP_REG
, ARMREG_LR
);
4134 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4138 case OP_LOADR8_MEMBASE
:
4139 code
= emit_ldrfpx (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4141 case OP_LOADR4_MEMBASE
:
4143 code
= emit_ldrfpw (code
, dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4145 code
= emit_ldrfpw (code
, FP_TEMP_REG
, ins
->inst_basereg
, ins
->inst_offset
);
4146 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4149 case OP_STORER8_MEMBASE_REG
:
4150 code
= emit_strfpx (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4152 case OP_STORER4_MEMBASE_REG
:
4154 code
= emit_strfpw (code
, sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4156 arm_fcvt_ds (code
, FP_TEMP_REG
, sreg1
);
4157 code
= emit_strfpw (code
, FP_TEMP_REG
, ins
->inst_destbasereg
, ins
->inst_offset
);
4162 arm_fmovd (code
, dreg
, sreg1
);
4166 arm_fmovs (code
, dreg
, sreg1
);
4168 case OP_MOVE_F_TO_I4
:
4170 arm_fmov_double_to_rx (code
, ins
->dreg
, ins
->sreg1
);
4172 arm_fcvt_ds (code
, ins
->dreg
, ins
->sreg1
);
4173 arm_fmov_double_to_rx (code
, ins
->dreg
, ins
->dreg
);
4176 case OP_MOVE_I4_TO_F
:
4178 arm_fmov_rx_to_double (code
, ins
->dreg
, ins
->sreg1
);
4180 arm_fmov_rx_to_double (code
, ins
->dreg
, ins
->sreg1
);
4181 arm_fcvt_sd (code
, ins
->dreg
, ins
->dreg
);
4184 case OP_MOVE_F_TO_I8
:
4185 arm_fmov_double_to_rx (code
, ins
->dreg
, ins
->sreg1
);
4187 case OP_MOVE_I8_TO_F
:
4188 arm_fmov_rx_to_double (code
, ins
->dreg
, ins
->sreg1
);
4191 arm_fcmpd (code
, sreg1
, sreg2
);
4194 arm_fcmps (code
, sreg1
, sreg2
);
4196 case OP_FCONV_TO_I1
:
4197 arm_fcvtzs_dx (code
, dreg
, sreg1
);
4198 arm_sxtbx (code
, dreg
, dreg
);
4200 case OP_FCONV_TO_U1
:
4201 arm_fcvtzu_dx (code
, dreg
, sreg1
);
4202 arm_uxtbw (code
, dreg
, dreg
);
4204 case OP_FCONV_TO_I2
:
4205 arm_fcvtzs_dx (code
, dreg
, sreg1
);
4206 arm_sxthx (code
, dreg
, dreg
);
4208 case OP_FCONV_TO_U2
:
4209 arm_fcvtzu_dx (code
, dreg
, sreg1
);
4210 arm_uxthw (code
, dreg
, dreg
);
4212 case OP_FCONV_TO_I4
:
4214 arm_fcvtzs_dx (code
, dreg
, sreg1
);
4215 arm_sxtwx (code
, dreg
, dreg
);
4217 case OP_FCONV_TO_U4
:
4218 arm_fcvtzu_dx (code
, dreg
, sreg1
);
4220 case OP_FCONV_TO_I8
:
4221 arm_fcvtzs_dx (code
, dreg
, sreg1
);
4223 case OP_FCONV_TO_U8
:
4224 arm_fcvtzu_dx (code
, dreg
, sreg1
);
4226 case OP_FCONV_TO_R4
:
4228 arm_fcvt_ds (code
, dreg
, sreg1
);
4230 arm_fcvt_ds (code
, FP_TEMP_REG
, sreg1
);
4231 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4234 case OP_ICONV_TO_R4
:
4236 arm_scvtf_rw_to_s (code
, dreg
, sreg1
);
4238 arm_scvtf_rw_to_s (code
, FP_TEMP_REG
, sreg1
);
4239 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4242 case OP_LCONV_TO_R4
:
4244 arm_scvtf_rx_to_s (code
, dreg
, sreg1
);
4246 arm_scvtf_rx_to_s (code
, FP_TEMP_REG
, sreg1
);
4247 arm_fcvt_sd (code
, dreg
, FP_TEMP_REG
);
4250 case OP_ICONV_TO_R8
:
4251 arm_scvtf_rw_to_d (code
, dreg
, sreg1
);
4253 case OP_LCONV_TO_R8
:
4254 arm_scvtf_rx_to_d (code
, dreg
, sreg1
);
4256 case OP_ICONV_TO_R_UN
:
4257 arm_ucvtf_rw_to_d (code
, dreg
, sreg1
);
4259 case OP_LCONV_TO_R_UN
:
4260 arm_ucvtf_rx_to_d (code
, dreg
, sreg1
);
4263 arm_fadd_d (code
, dreg
, sreg1
, sreg2
);
4266 arm_fsub_d (code
, dreg
, sreg1
, sreg2
);
4269 arm_fmul_d (code
, dreg
, sreg1
, sreg2
);
4272 arm_fdiv_d (code
, dreg
, sreg1
, sreg2
);
4276 g_assert_not_reached ();
4279 arm_fneg_d (code
, dreg
, sreg1
);
4281 case OP_ARM_SETFREG_R4
:
4282 arm_fcvt_ds (code
, dreg
, sreg1
);
4285 /* Check for infinity */
4286 code
= emit_imm64 (code
, ARMREG_LR
, 0x7fefffffffffffffLL
);
4287 arm_fmov_rx_to_double (code
, FP_TEMP_REG
, ARMREG_LR
);
4288 arm_fabs_d (code
, FP_TEMP_REG2
, sreg1
);
4289 arm_fcmpd (code
, FP_TEMP_REG2
, FP_TEMP_REG
);
4290 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_GT
, "ArithmeticException");
4291 /* Check for nans */
4292 arm_fcmpd (code
, FP_TEMP_REG2
, FP_TEMP_REG2
);
4293 code
= emit_cond_exc (cfg
, code
, OP_COND_EXC_OV
, "ArithmeticException");
4294 arm_fmovd (code
, dreg
, sreg1
);
4299 arm_fadd_s (code
, dreg
, sreg1
, sreg2
);
4302 arm_fsub_s (code
, dreg
, sreg1
, sreg2
);
4305 arm_fmul_s (code
, dreg
, sreg1
, sreg2
);
4308 arm_fdiv_s (code
, dreg
, sreg1
, sreg2
);
4311 arm_fneg_s (code
, dreg
, sreg1
);
4313 case OP_RCONV_TO_I1
:
4314 arm_fcvtzs_sx (code
, dreg
, sreg1
);
4315 arm_sxtbx (code
, dreg
, dreg
);
4317 case OP_RCONV_TO_U1
:
4318 arm_fcvtzu_sx (code
, dreg
, sreg1
);
4319 arm_uxtbw (code
, dreg
, dreg
);
4321 case OP_RCONV_TO_I2
:
4322 arm_fcvtzs_sx (code
, dreg
, sreg1
);
4323 arm_sxthx (code
, dreg
, dreg
);
4325 case OP_RCONV_TO_U2
:
4326 arm_fcvtzu_sx (code
, dreg
, sreg1
);
4327 arm_uxthw (code
, dreg
, dreg
);
4329 case OP_RCONV_TO_I4
:
4330 arm_fcvtzs_sx (code
, dreg
, sreg1
);
4331 arm_sxtwx (code
, dreg
, dreg
);
4333 case OP_RCONV_TO_U4
:
4334 arm_fcvtzu_sx (code
, dreg
, sreg1
);
4336 case OP_RCONV_TO_I8
:
4337 arm_fcvtzs_sx (code
, dreg
, sreg1
);
4339 case OP_RCONV_TO_U8
:
4340 arm_fcvtzu_sx (code
, dreg
, sreg1
);
4342 case OP_RCONV_TO_R8
:
4343 arm_fcvt_sd (code
, dreg
, sreg1
);
4345 case OP_RCONV_TO_R4
:
4347 arm_fmovs (code
, dreg
, sreg1
);
4359 cond
= opcode_to_armcond (ins
->opcode
);
4360 arm_fcmps (code
, sreg1
, sreg2
);
4361 arm_cset (code
, cond
, dreg
);
4373 call
= (MonoCallInst
*)ins
;
4374 const MonoJumpInfoTarget patch
= mono_call_to_patch (call
);
4375 code
= emit_call (cfg
, code
, patch
.type
, patch
.target
);
4376 code
= emit_move_return_value (cfg
, code
, ins
);
4379 case OP_VOIDCALL_REG
:
4385 code
= mono_arm_emit_blrx (code
, sreg1
);
4386 code
= emit_move_return_value (cfg
, code
, ins
);
4388 case OP_VOIDCALL_MEMBASE
:
4389 case OP_CALL_MEMBASE
:
4390 case OP_LCALL_MEMBASE
:
4391 case OP_FCALL_MEMBASE
:
4392 case OP_RCALL_MEMBASE
:
4393 case OP_VCALL2_MEMBASE
:
4394 code
= emit_ldrx (code
, ARMREG_IP0
, ins
->inst_basereg
, ins
->inst_offset
);
4395 code
= mono_arm_emit_blrx (code
, ARMREG_IP0
);
4396 code
= emit_move_return_value (cfg
, code
, ins
);
4399 case OP_TAILCALL_PARAMETER
:
4400 // This opcode helps compute sizes, i.e.
4401 // of the subsequent OP_TAILCALL, but contributes no code.
4402 g_assert (ins
->next
);
4406 case OP_TAILCALL_MEMBASE
:
4407 case OP_TAILCALL_REG
: {
4408 int branch_reg
= ARMREG_IP0
;
4409 guint64 free_reg
= 1 << ARMREG_IP1
;
4410 call
= (MonoCallInst
*)ins
;
4412 g_assert (!cfg
->method
->save_lmf
);
4414 max_len
+= call
->stack_usage
/ sizeof (target_mgreg_t
) * ins_get_size (OP_TAILCALL_PARAMETER
);
4415 while (G_UNLIKELY (offset
+ max_len
> cfg
->code_size
)) {
4416 cfg
->code_size
*= 2;
4417 cfg
->native_code
= (unsigned char *)mono_realloc_native_code (cfg
);
4418 code
= cfg
->native_code
+ offset
;
4419 cfg
->stat_code_reallocs
++;
4422 switch (ins
->opcode
) {
4424 free_reg
= (1 << ARMREG_IP0
) | (1 << ARMREG_IP1
);
4427 case OP_TAILCALL_REG
:
4428 g_assert (sreg1
!= -1);
4429 g_assert (sreg1
!= ARMREG_IP0
);
4430 g_assert (sreg1
!= ARMREG_IP1
);
4431 g_assert (sreg1
!= ARMREG_LR
);
4432 g_assert (sreg1
!= ARMREG_SP
);
4433 g_assert (sreg1
!= ARMREG_R28
);
4434 if ((sreg1
<< 1) & MONO_ARCH_CALLEE_SAVED_REGS
) {
4435 arm_movx (code
, branch_reg
, sreg1
);
4437 free_reg
= (1 << ARMREG_IP0
) | (1 << ARMREG_IP1
);
4442 case OP_TAILCALL_MEMBASE
:
4443 g_assert (ins
->inst_basereg
!= -1);
4444 g_assert (ins
->inst_basereg
!= ARMREG_IP0
);
4445 g_assert (ins
->inst_basereg
!= ARMREG_IP1
);
4446 g_assert (ins
->inst_basereg
!= ARMREG_LR
);
4447 g_assert (ins
->inst_basereg
!= ARMREG_SP
);
4448 g_assert (ins
->inst_basereg
!= ARMREG_R28
);
4449 code
= emit_ldrx (code
, branch_reg
, ins
->inst_basereg
, ins
->inst_offset
);
4453 g_assert_not_reached ();
4456 // Copy stack arguments.
4457 // FIXME a fixed size memcpy is desirable here,
4458 // at least for larger values of stack_usage.
4459 for (int i
= 0; i
< call
->stack_usage
; i
+= sizeof (target_mgreg_t
)) {
4460 code
= emit_ldrx (code
, ARMREG_LR
, ARMREG_SP
, i
);
4461 code
= emit_strx (code
, ARMREG_LR
, ARMREG_R28
, i
);
4464 /* Restore registers */
4465 code
= emit_load_regset (code
, MONO_ARCH_CALLEE_SAVED_REGS
& cfg
->used_int_regs
, ARMREG_FP
, cfg
->arch
.saved_gregs_offset
);
4468 code
= mono_arm_emit_destroy_frame (code
, cfg
->stack_offset
, free_reg
);
4471 /* There is no retab to authenticate lr */
4474 switch (ins
->opcode
) {
4476 if (cfg
->compile_aot
) {
4477 /* This is not a PLT patch */
4478 code
= emit_aotconst (cfg
, code
, branch_reg
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
4480 mono_add_patch_info_rel (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
, MONO_R_ARM64_B
);
4482 cfg
->thunk_area
+= THUNK_SIZE
;
4486 case OP_TAILCALL_MEMBASE
:
4487 case OP_TAILCALL_REG
:
4488 code
= mono_arm_emit_brx (code
, branch_reg
);
4492 g_assert_not_reached ();
4495 ins
->flags
|= MONO_INST_GC_CALLSITE
;
4496 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
4500 g_assert (cfg
->arch
.cinfo
);
4501 code
= emit_addx_imm (code
, ARMREG_IP0
, cfg
->arch
.args_reg
, cfg
->arch
.cinfo
->sig_cookie
.offset
);
4502 arm_strx (code
, ARMREG_IP0
, sreg1
, 0);
4505 MonoInst
*var
= cfg
->dyn_call_var
;
4506 guint8
*labels
[16];
4510 * sreg1 points to a DynCallArgs structure initialized by mono_arch_start_dyn_call ().
4511 * sreg2 is the function to call.
4514 g_assert (var
->opcode
== OP_REGOFFSET
);
4516 arm_movx (code
, ARMREG_LR
, sreg1
);
4517 arm_movx (code
, ARMREG_IP1
, sreg2
);
4519 /* Save args buffer */
4520 code
= emit_strx (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
4522 /* Set fp argument regs */
4523 code
= emit_ldrw (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_fpargs
));
4524 arm_cmpw (code
, ARMREG_R0
, ARMREG_RZR
);
4526 arm_bcc (code
, ARMCOND_EQ
, 0);
4527 for (i
= 0; i
< 8; ++i
)
4528 code
= emit_ldrfpx (code
, ARMREG_D0
+ i
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* 8));
4529 arm_patch_rel (labels
[0], code
, MONO_R_ARM64_BCC
);
4531 /* Allocate callee area */
4532 code
= emit_ldrx (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
4533 arm_lslw (code
, ARMREG_R0
, ARMREG_R0
, 3);
4534 arm_movspx (code
, ARMREG_R1
, ARMREG_SP
);
4535 arm_subx (code
, ARMREG_R1
, ARMREG_R1
, ARMREG_R0
);
4536 arm_movspx (code
, ARMREG_SP
, ARMREG_R1
);
4538 /* Set stack args */
4540 code
= emit_ldrx (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
4541 /* R2 = pointer into 'regs' */
4542 code
= emit_imm (code
, ARMREG_R2
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + ((PARAM_REGS
+ 1) * sizeof (target_mgreg_t
)));
4543 arm_addx (code
, ARMREG_R2
, ARMREG_LR
, ARMREG_R2
);
4544 /* R3 = pointer to stack */
4545 arm_movspx (code
, ARMREG_R3
, ARMREG_SP
);
4549 code
= emit_ldrx (code
, ARMREG_R5
, ARMREG_R2
, 0);
4550 code
= emit_strx (code
, ARMREG_R5
, ARMREG_R3
, 0);
4551 code
= emit_addx_imm (code
, ARMREG_R2
, ARMREG_R2
, sizeof (target_mgreg_t
));
4552 code
= emit_addx_imm (code
, ARMREG_R3
, ARMREG_R3
, sizeof (target_mgreg_t
));
4553 code
= emit_subx_imm (code
, ARMREG_R1
, ARMREG_R1
, 1);
4554 arm_patch_rel (labels
[0], code
, MONO_R_ARM64_B
);
4555 arm_cmpw (code
, ARMREG_R1
, ARMREG_RZR
);
4556 arm_bcc (code
, ARMCOND_GT
, labels
[1]);
4558 /* Set argument registers + r8 */
4559 code
= mono_arm_emit_load_regarray (code
, 0x1ff, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
));
4562 code
= mono_arm_emit_blrx (code
, ARMREG_IP1
);
4565 code
= emit_ldrx (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
4566 arm_strx (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, res
));
4567 arm_strx (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, res2
));
4568 /* Save fp result */
4569 code
= emit_ldrw (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_fpret
));
4570 arm_cmpw (code
, ARMREG_R0
, ARMREG_RZR
);
4572 arm_bcc (code
, ARMCOND_EQ
, 0);
4573 for (i
= 0; i
< 8; ++i
)
4574 code
= emit_strfpx (code
, ARMREG_D0
+ i
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* 8));
4575 arm_patch_rel (labels
[1], code
, MONO_R_ARM64_BCC
);
4579 case OP_GENERIC_CLASS_INIT
: {
4583 byte_offset
= MONO_STRUCT_OFFSET (MonoVTable
, initialized
);
4585 /* Load vtable->initialized */
4586 arm_ldrsbx (code
, ARMREG_IP0
, sreg1
, byte_offset
);
4588 arm_cbnzx (code
, ARMREG_IP0
, 0);
4591 g_assert (sreg1
== ARMREG_R0
);
4592 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4593 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init
));
4595 mono_arm_patch (jump
, code
, MONO_R_ARM64_CBZ
);
4600 arm_ldrb (code
, ARMREG_LR
, sreg1
, 0);
4603 case OP_NOT_REACHED
:
4605 case OP_DUMMY_ICONST
:
4606 case OP_DUMMY_I8CONST
:
4607 case OP_DUMMY_R8CONST
:
4608 case OP_DUMMY_R4CONST
:
4610 case OP_IL_SEQ_POINT
:
4611 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4616 case OP_COND_EXC_IC
:
4617 case OP_COND_EXC_OV
:
4618 case OP_COND_EXC_IOV
:
4619 case OP_COND_EXC_NC
:
4620 case OP_COND_EXC_INC
:
4621 case OP_COND_EXC_NO
:
4622 case OP_COND_EXC_INO
:
4623 case OP_COND_EXC_EQ
:
4624 case OP_COND_EXC_IEQ
:
4625 case OP_COND_EXC_NE_UN
:
4626 case OP_COND_EXC_INE_UN
:
4627 case OP_COND_EXC_ILT
:
4628 case OP_COND_EXC_LT
:
4629 case OP_COND_EXC_ILT_UN
:
4630 case OP_COND_EXC_LT_UN
:
4631 case OP_COND_EXC_IGT
:
4632 case OP_COND_EXC_GT
:
4633 case OP_COND_EXC_IGT_UN
:
4634 case OP_COND_EXC_GT_UN
:
4635 case OP_COND_EXC_IGE
:
4636 case OP_COND_EXC_GE
:
4637 case OP_COND_EXC_IGE_UN
:
4638 case OP_COND_EXC_GE_UN
:
4639 case OP_COND_EXC_ILE
:
4640 case OP_COND_EXC_LE
:
4641 case OP_COND_EXC_ILE_UN
:
4642 case OP_COND_EXC_LE_UN
:
4643 code
= emit_cond_exc (cfg
, code
, ins
->opcode
, (const char*)ins
->inst_p1
);
4646 if (sreg1
!= ARMREG_R0
)
4647 arm_movx (code
, ARMREG_R0
, sreg1
);
4648 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4649 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
4652 if (sreg1
!= ARMREG_R0
)
4653 arm_movx (code
, ARMREG_R0
, sreg1
);
4654 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4655 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
4657 case OP_CALL_HANDLER
:
4658 mono_add_patch_info_rel (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
, MONO_R_ARM64_BL
);
4660 cfg
->thunk_area
+= THUNK_SIZE
;
4661 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
4662 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
4664 case OP_START_HANDLER
: {
4665 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
4667 /* Save caller address */
4668 code
= emit_strx (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
4671 * Reserve a param area, see test_0_finally_param_area ().
4672 * This is needed because the param area is not set up when
4673 * we are called from EH code.
4675 if (cfg
->param_area
)
4676 code
= emit_subx_sp_imm (code
, cfg
->param_area
);
4680 case OP_ENDFILTER
: {
4681 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
4683 if (cfg
->param_area
)
4684 code
= emit_addx_sp_imm (code
, cfg
->param_area
);
4686 if (ins
->opcode
== OP_ENDFILTER
&& sreg1
!= ARMREG_R0
)
4687 arm_movx (code
, ARMREG_R0
, sreg1
);
4689 /* Return to either after the branch in OP_CALL_HANDLER, or to the EH code */
4690 code
= emit_ldrx (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
4691 arm_brx (code
, ARMREG_LR
);
4695 if (ins
->dreg
!= ARMREG_R0
)
4696 arm_movx (code
, ins
->dreg
, ARMREG_R0
);
4698 case OP_LIVERANGE_START
: {
4699 if (cfg
->verbose_level
> 1)
4700 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4701 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
4704 case OP_LIVERANGE_END
: {
4705 if (cfg
->verbose_level
> 1)
4706 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4707 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
4710 case OP_GC_SAFE_POINT
: {
4713 arm_ldrx (code
, ARMREG_IP1
, ins
->sreg1
, 0);
4714 /* Call it if it is non-null */
4716 arm_cbzx (code
, ARMREG_IP1
, 0);
4717 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll
));
4718 mono_arm_patch (buf
[0], code
, MONO_R_ARM64_CBZ
);
4721 case OP_FILL_PROF_CALL_CTX
:
4722 for (int i
= 0; i
< MONO_MAX_IREGS
; i
++)
4723 if ((MONO_ARCH_CALLEE_SAVED_REGS
& (1 << i
)) || i
== ARMREG_SP
|| i
== ARMREG_FP
)
4724 arm_strx (code
, i
, ins
->sreg1
, MONO_STRUCT_OFFSET (MonoContext
, regs
) + i
* sizeof (target_mgreg_t
));
4727 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
4728 g_assert_not_reached ();
4731 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
4732 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4733 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
4734 g_assert_not_reached ();
4737 set_code_cursor (cfg
, code
);
4740 * If the compiled code size is larger than the bcc displacement (19 bits signed),
4741 * insert branch islands between/inside basic blocks.
4743 if (cfg
->arch
.cond_branch_islands
)
4744 code
= emit_branch_island (cfg
, code
, start_offset
);
4748 emit_move_args (MonoCompile
*cfg
, guint8
*code
)
4754 MonoMethodSignature
*sig
= mono_method_signature_internal (cfg
->method
);
4756 cinfo
= cfg
->arch
.cinfo
;
4758 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
4759 ainfo
= cinfo
->args
+ i
;
4760 ins
= cfg
->args
[i
];
4762 if (ins
->opcode
== OP_REGVAR
) {
4763 switch (ainfo
->storage
) {
4765 arm_movx (code
, ins
->dreg
, ainfo
->reg
);
4766 if (i
== 0 && sig
->hasthis
) {
4767 mono_add_var_location (cfg
, ins
, TRUE
, ainfo
->reg
, 0, 0, code
- cfg
->native_code
);
4768 mono_add_var_location (cfg
, ins
, TRUE
, ins
->dreg
, 0, code
- cfg
->native_code
, 0);
4772 switch (ainfo
->slot_size
) {
4775 code
= emit_ldrsbx (code
, ins
->dreg
, cfg
->arch
.args_reg
, ainfo
->offset
);
4777 code
= emit_ldrb (code
, ins
->dreg
, cfg
->arch
.args_reg
, ainfo
->offset
);
4781 code
= emit_ldrshx (code
, ins
->dreg
, cfg
->arch
.args_reg
, ainfo
->offset
);
4783 code
= emit_ldrh (code
, ins
->dreg
, cfg
->arch
.args_reg
, ainfo
->offset
);
4787 code
= emit_ldrswx (code
, ins
->dreg
, cfg
->arch
.args_reg
, ainfo
->offset
);
4789 code
= emit_ldrw (code
, ins
->dreg
, cfg
->arch
.args_reg
, ainfo
->offset
);
4792 code
= emit_ldrx (code
, ins
->dreg
, cfg
->arch
.args_reg
, ainfo
->offset
);
4797 g_assert_not_reached ();
4801 if (ainfo
->storage
!= ArgVtypeByRef
&& ainfo
->storage
!= ArgVtypeByRefOnStack
)
4802 g_assert (ins
->opcode
== OP_REGOFFSET
);
4804 switch (ainfo
->storage
) {
4806 /* Stack slots for arguments have size 8 */
4807 code
= emit_strx (code
, ainfo
->reg
, ins
->inst_basereg
, ins
->inst_offset
);
4808 if (i
== 0 && sig
->hasthis
) {
4809 mono_add_var_location (cfg
, ins
, TRUE
, ainfo
->reg
, 0, 0, code
- cfg
->native_code
);
4810 mono_add_var_location (cfg
, ins
, FALSE
, ins
->inst_basereg
, ins
->inst_offset
, code
- cfg
->native_code
, 0);
4814 code
= emit_strfpx (code
, ainfo
->reg
, ins
->inst_basereg
, ins
->inst_offset
);
4817 code
= emit_strfpw (code
, ainfo
->reg
, ins
->inst_basereg
, ins
->inst_offset
);
4822 case ArgVtypeByRefOnStack
:
4823 case ArgVtypeOnStack
:
4825 case ArgVtypeByRef
: {
4826 MonoInst
*addr_arg
= ins
->inst_left
;
4828 if (ainfo
->gsharedvt
) {
4829 g_assert (ins
->opcode
== OP_GSHAREDVT_ARG_REGOFFSET
);
4830 arm_strx (code
, ainfo
->reg
, ins
->inst_basereg
, ins
->inst_offset
);
4832 g_assert (ins
->opcode
== OP_VTARG_ADDR
);
4833 g_assert (addr_arg
->opcode
== OP_REGOFFSET
);
4834 arm_strx (code
, ainfo
->reg
, addr_arg
->inst_basereg
, addr_arg
->inst_offset
);
4838 case ArgVtypeInIRegs
:
4839 for (part
= 0; part
< ainfo
->nregs
; part
++) {
4840 code
= emit_strx (code
, ainfo
->reg
+ part
, ins
->inst_basereg
, ins
->inst_offset
+ (part
* 8));
4844 for (part
= 0; part
< ainfo
->nregs
; part
++) {
4845 if (ainfo
->esize
== 4)
4846 code
= emit_strfpw (code
, ainfo
->reg
+ part
, ins
->inst_basereg
, ins
->inst_offset
+ ainfo
->foffsets
[part
]);
4848 code
= emit_strfpx (code
, ainfo
->reg
+ part
, ins
->inst_basereg
, ins
->inst_offset
+ ainfo
->foffsets
[part
]);
4852 g_assert_not_reached ();
4862 * emit_store_regarray:
4864 * Emit code to store the registers in REGS into the appropriate elements of
4865 * the register array at BASEREG+OFFSET.
4867 static __attribute__ ((__warn_unused_result__
)) guint8
*
4868 emit_store_regarray (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4872 for (i
= 0; i
< 32; ++i
) {
4873 if (regs
& (1 << i
)) {
4874 if (i
+ 1 < 32 && (regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
4875 arm_stpx (code
, i
, i
+ 1, basereg
, offset
+ (i
* 8));
4877 } else if (i
== ARMREG_SP
) {
4878 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
4879 arm_strx (code
, ARMREG_IP1
, basereg
, offset
+ (i
* 8));
4881 arm_strx (code
, i
, basereg
, offset
+ (i
* 8));
4889 * emit_load_regarray:
4891 * Emit code to load the registers in REGS from the appropriate elements of
4892 * the register array at BASEREG+OFFSET.
4894 static __attribute__ ((__warn_unused_result__
)) guint8
*
4895 emit_load_regarray (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4899 for (i
= 0; i
< 32; ++i
) {
4900 if (regs
& (1 << i
)) {
4901 if ((regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
4902 if (offset
+ (i
* 8) < 500)
4903 arm_ldpx (code
, i
, i
+ 1, basereg
, offset
+ (i
* 8));
4905 code
= emit_ldrx (code
, i
, basereg
, offset
+ (i
* 8));
4906 code
= emit_ldrx (code
, i
+ 1, basereg
, offset
+ ((i
+ 1) * 8));
4909 } else if (i
== ARMREG_SP
) {
4910 g_assert_not_reached ();
4912 code
= emit_ldrx (code
, i
, basereg
, offset
+ (i
* 8));
4920 * emit_store_regset:
4922 * Emit code to store the registers in REGS into consecutive memory locations starting
4923 * at BASEREG+OFFSET.
4925 static __attribute__ ((__warn_unused_result__
)) guint8
*
4926 emit_store_regset (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4931 for (i
= 0; i
< 32; ++i
) {
4932 if (regs
& (1 << i
)) {
4933 if ((regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
4934 arm_stpx (code
, i
, i
+ 1, basereg
, offset
+ (pos
* 8));
4937 } else if (i
== ARMREG_SP
) {
4938 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
4939 arm_strx (code
, ARMREG_IP1
, basereg
, offset
+ (pos
* 8));
4941 arm_strx (code
, i
, basereg
, offset
+ (pos
* 8));
4952 * Emit code to load the registers in REGS from consecutive memory locations starting
4953 * at BASEREG+OFFSET.
4955 static __attribute__ ((__warn_unused_result__
)) guint8
*
4956 emit_load_regset (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4961 for (i
= 0; i
< 32; ++i
) {
4962 if (regs
& (1 << i
)) {
4963 if ((regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
4964 arm_ldpx (code
, i
, i
+ 1, basereg
, offset
+ (pos
* 8));
4967 } else if (i
== ARMREG_SP
) {
4968 g_assert_not_reached ();
4970 arm_ldrx (code
, i
, basereg
, offset
+ (pos
* 8));
4978 __attribute__ ((__warn_unused_result__
)) guint8
*
4979 mono_arm_emit_load_regarray (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4981 return emit_load_regarray (code
, regs
, basereg
, offset
);
4984 __attribute__ ((__warn_unused_result__
)) guint8
*
4985 mono_arm_emit_store_regarray (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4987 return emit_store_regarray (code
, regs
, basereg
, offset
);
4990 __attribute__ ((__warn_unused_result__
)) guint8
*
4991 mono_arm_emit_store_regset (guint8
*code
, guint64 regs
, int basereg
, int offset
)
4993 return emit_store_regset (code
, regs
, basereg
, offset
);
4996 /* Same as emit_store_regset, but emit unwind info too */
4997 /* CFA_OFFSET is the offset between the CFA and basereg */
4998 static __attribute__ ((__warn_unused_result__
)) guint8
*
4999 emit_store_regset_cfa (MonoCompile
*cfg
, guint8
*code
, guint64 regs
, int basereg
, int offset
, int cfa_offset
, guint64 no_cfa_regset
)
5001 int i
, j
, pos
, nregs
;
5002 guint32 cfa_regset
= regs
& ~no_cfa_regset
;
5005 for (i
= 0; i
< 32; ++i
) {
5007 if (regs
& (1 << i
)) {
5008 if ((regs
& (1 << (i
+ 1))) && (i
+ 1 != ARMREG_SP
)) {
5010 arm_stpx (code
, i
, i
+ 1, basereg
, offset
+ (pos
* 8));
5012 code
= emit_strx (code
, i
, basereg
, offset
+ (pos
* 8));
5013 code
= emit_strx (code
, i
+ 1, basereg
, offset
+ (pos
* 8) + 8);
5016 } else if (i
== ARMREG_SP
) {
5017 arm_movspx (code
, ARMREG_IP1
, ARMREG_SP
);
5018 code
= emit_strx (code
, ARMREG_IP1
, basereg
, offset
+ (pos
* 8));
5020 code
= emit_strx (code
, i
, basereg
, offset
+ (pos
* 8));
5023 for (j
= 0; j
< nregs
; ++j
) {
5024 if (cfa_regset
& (1 << (i
+ j
)))
5025 mono_emit_unwind_op_offset (cfg
, code
, i
+ j
, (- cfa_offset
) + offset
+ ((pos
+ j
) * 8));
5038 * Emit code to initialize an LMF structure at LMF_OFFSET.
5042 emit_setup_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
, int cfa_offset
)
5045 * The LMF should contain all the state required to be able to reconstruct the machine state
5046 * at the current point of execution. Since the LMF is only read during EH, only callee
5047 * saved etc. registers need to be saved.
5048 * FIXME: Save callee saved fp regs, JITted code doesn't use them, but native code does, and they
5049 * need to be restored during EH.
5053 arm_adrx (code
, ARMREG_LR
, code
);
5054 code
= emit_strx (code
, ARMREG_LR
, ARMREG_FP
, lmf_offset
+ MONO_STRUCT_OFFSET (MonoLMF
, pc
));
5055 /* gregs + fp + sp */
5056 /* Don't emit unwind info for sp/fp, they are already handled in the prolog */
5057 code
= emit_store_regset_cfa (cfg
, code
, MONO_ARCH_LMF_REGS
, ARMREG_FP
, lmf_offset
+ MONO_STRUCT_OFFSET (MonoLMF
, gregs
), cfa_offset
, (1 << ARMREG_FP
) | (1 << ARMREG_SP
));
5063 mono_arch_emit_prolog (MonoCompile
*cfg
)
5065 MonoMethod
*method
= cfg
->method
;
5066 MonoMethodSignature
*sig
;
5069 int cfa_offset
, max_offset
;
5071 sig
= mono_method_signature_internal (method
);
5072 cfg
->code_size
= 256 + sig
->param_count
* 64;
5073 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
5075 /* This can be unaligned */
5076 cfg
->stack_offset
= ALIGN_TO (cfg
->stack_offset
, MONO_ARCH_FRAME_ALIGNMENT
);
5082 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
5088 if (arm_is_ldpx_imm (-cfg
->stack_offset
)) {
5089 arm_stpx_pre (code
, ARMREG_FP
, ARMREG_LR
, ARMREG_SP
, -cfg
->stack_offset
);
5091 /* sp -= cfg->stack_offset */
5092 /* This clobbers ip0/ip1 */
5093 code
= emit_subx_sp_imm (code
, cfg
->stack_offset
);
5094 arm_stpx (code
, ARMREG_FP
, ARMREG_LR
, ARMREG_SP
, 0);
5096 cfa_offset
+= cfg
->stack_offset
;
5097 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, cfa_offset
);
5098 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_FP
, (- cfa_offset
) + 0);
5099 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_LR
, (- cfa_offset
) + 8);
5100 arm_movspx (code
, ARMREG_FP
, ARMREG_SP
);
5101 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, ARMREG_FP
);
5102 if (cfg
->param_area
) {
5103 /* The param area is below the frame pointer */
5104 code
= emit_subx_sp_imm (code
, cfg
->param_area
);
5107 if (cfg
->method
->save_lmf
) {
5108 code
= emit_setup_lmf (cfg
, code
, cfg
->lmf_var
->inst_offset
, cfa_offset
);
5111 code
= emit_store_regset_cfa (cfg
, code
, MONO_ARCH_CALLEE_SAVED_REGS
& cfg
->used_int_regs
, ARMREG_FP
, cfg
->arch
.saved_gregs_offset
, cfa_offset
, 0);
5114 /* Setup args reg */
5115 if (cfg
->arch
.args_reg
) {
5116 /* The register was already saved above */
5117 code
= emit_addx_imm (code
, cfg
->arch
.args_reg
, ARMREG_FP
, cfg
->stack_offset
);
5120 /* Save return area addr received in R8 */
5121 if (cfg
->vret_addr
) {
5122 MonoInst
*ins
= cfg
->vret_addr
;
5124 g_assert (ins
->opcode
== OP_REGOFFSET
);
5125 code
= emit_strx (code
, ARMREG_R8
, ins
->inst_basereg
, ins
->inst_offset
);
5128 /* Save mrgctx received in MONO_ARCH_RGCTX_REG */
5129 if (cfg
->rgctx_var
) {
5130 MonoInst
*ins
= cfg
->rgctx_var
;
5132 g_assert (ins
->opcode
== OP_REGOFFSET
);
5134 code
= emit_strx (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
5136 mono_add_var_location (cfg
, cfg
->rgctx_var
, TRUE
, MONO_ARCH_RGCTX_REG
, 0, 0, code
- cfg
->native_code
);
5137 mono_add_var_location (cfg
, cfg
->rgctx_var
, FALSE
, ins
->inst_basereg
, ins
->inst_offset
, code
- cfg
->native_code
, 0);
5141 * Move arguments to their registers/stack locations.
5143 code
= emit_move_args (cfg
, code
);
5145 /* Initialize seq_point_info_var */
5146 if (cfg
->arch
.seq_point_info_var
) {
5147 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
5149 /* Initialize the variable from a GOT slot */
5150 code
= emit_aotconst (cfg
, code
, ARMREG_IP0
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
5151 g_assert (ins
->opcode
== OP_REGOFFSET
);
5152 code
= emit_strx (code
, ARMREG_IP0
, ins
->inst_basereg
, ins
->inst_offset
);
5154 /* Initialize ss_tramp_var */
5155 ins
= cfg
->arch
.ss_tramp_var
;
5156 g_assert (ins
->opcode
== OP_REGOFFSET
);
5158 code
= emit_ldrx (code
, ARMREG_IP1
, ARMREG_IP0
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_tramp_addr
));
5159 code
= emit_strx (code
, ARMREG_IP1
, ins
->inst_basereg
, ins
->inst_offset
);
5163 if (cfg
->arch
.ss_tramp_var
) {
5164 /* Initialize ss_tramp_var */
5165 ins
= cfg
->arch
.ss_tramp_var
;
5166 g_assert (ins
->opcode
== OP_REGOFFSET
);
5168 code
= emit_imm64 (code
, ARMREG_IP0
, (guint64
)&ss_trampoline
);
5169 code
= emit_strx (code
, ARMREG_IP0
, ins
->inst_basereg
, ins
->inst_offset
);
5172 if (cfg
->arch
.bp_tramp_var
) {
5173 /* Initialize bp_tramp_var */
5174 ins
= cfg
->arch
.bp_tramp_var
;
5175 g_assert (ins
->opcode
== OP_REGOFFSET
);
5177 code
= emit_imm64 (code
, ARMREG_IP0
, (guint64
)bp_trampoline
);
5178 code
= emit_strx (code
, ARMREG_IP0
, ins
->inst_basereg
, ins
->inst_offset
);
5183 if (cfg
->opt
& MONO_OPT_BRANCH
) {
5184 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
5186 bb
->max_offset
= max_offset
;
5188 MONO_BB_FOR_EACH_INS (bb
, ins
) {
5189 max_offset
+= ins_get_size (ins
->opcode
);
5193 if (max_offset
> 0x3ffff * 4)
5194 cfg
->arch
.cond_branch_islands
= TRUE
;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: restore the callee saved registers, load a
 * returned vtype into the return registers if needed, destroy the stack
 * frame and return to the caller.
 */
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	CallInfo *cinfo;
	int max_epilog_size;
	guint8 *code;
	int i;

	/* Worst-case size estimate for the code emitted below */
	max_epilog_size = 16 + 20*4;
	code = realloc_code (cfg, max_epilog_size);

	if (cfg->method->save_lmf) {
		/* The callee saved gregs were stored into the LMF gregs area in the prolog */
		code = mono_arm_emit_load_regarray (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, gregs) - (MONO_ARCH_FIRST_LMF_REG * 8));
	} else {
		/* Restore gregs from the normal save area */
		code = emit_load_regset (code, MONO_ARCH_CALLEE_SAVED_REGS & cfg->used_int_regs, ARMREG_FP, cfg->arch.saved_gregs_offset);
	}

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	switch (cinfo->ret.storage) {
	case ArgVtypeInIRegs: {
		MonoInst *ins = cfg->ret;

		/* One 8 byte slot per integer return register */
		for (i = 0; i < cinfo->ret.nregs; ++i)
			code = emit_ldrx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + (i * 8));
		break;
	}
	case ArgHFA: {
		MonoInst *ins = cfg->ret;

		/* Homogeneous FP aggregate: load each field into its own fp register */
		for (i = 0; i < cinfo->ret.nregs; ++i) {
			if (cinfo->ret.esize == 4)
				code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
			else
				code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, ins->inst_offset + cinfo->ret.foffsets [i]);
		}
		break;
	}
	default:
		break;
	}

	/* Destroy the frame; ip0/ip1 are available as scratch registers */
	code = mono_arm_emit_destroy_frame (code, cfg->stack_offset, (1 << ARMREG_IP0) | (1 << ARMREG_IP1));

	arm_retx (code, ARMREG_LR);

	g_assert (code - (cfg->native_code + cfg->code_len) < max_epilog_size);

	set_code_cursor (cfg, code);
}
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line code which raises corlib exceptions, and patch the
 * conditional branches emitted for OP_COND_EXC_... opcodes to point at it.
 * Multiple throw sites of the same exception type share one throw sequence.
 */
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *ji;
	MonoClass *exc_class;
	guint8 *code, *ip;
	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
	int i, id, size = 0;

	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
		exc_throw_pos [i] = NULL;
		exc_throw_found [i] = 0;
	}

	/* Size estimate: one throw sequence per distinct exception type */
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->type == MONO_PATCH_INFO_EXC) {
			i = mini_exception_id_by_name ((const char*)ji->data.target);
			if (!exc_throw_found [i]) {
				size += 32;
				exc_throw_found [i] = TRUE;
			}
		}
	}

	code = realloc_code (cfg, size);

	/* Emit code to raise corlib exceptions */
	for (ji = cfg->patch_info; ji; ji = ji->next) {
		if (ji->type != MONO_PATCH_INFO_EXC)
			continue;

		ip = cfg->native_code + ji->ip.i;

		id = mini_exception_id_by_name ((const char*)ji->data.target);

		if (exc_throw_pos [id]) {
			/* Already emitted for this type; ip points to the bcc () in OP_COND_EXC_... */
			arm_patch_rel (ip, exc_throw_pos [id], ji->relocation);
			ji->type = MONO_PATCH_INFO_NONE;
			continue;
		}

		exc_throw_pos [id] = code;
		arm_patch_rel (ip, code, ji->relocation);

		/* We are being branched to from the code generated by emit_cond_exc (), the pc is in ip1 */

		/* r0 = type token */
		exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", ji->data.name);
		code = emit_imm (code, ARMREG_R0, m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF);
		/* r1 = throw ip (saved in ip1 by the throwing site) */
		arm_movx (code, ARMREG_R1, ARMREG_IP1);
		/* Branch to the corlib exception throwing trampoline */
		ji->ip.i = code - cfg->native_code;
		ji->type = MONO_PATCH_INFO_JIT_ICALL_ID;
		ji->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
		ji->relocation = MONO_R_ARM64_BL;
		/* Placeholder bl, patched later through the jump info above */
		arm_bl (code, 0);
		cfg->thunk_area += THUNK_SIZE;
		set_code_cursor (cfg, code);
	}

	set_code_cursor (cfg, code);
}
/*
 * mono_arch_emit_inst_for_method:
 *
 *   Give the backend a chance to intrinsify CMETHOD into a special IR
 * instruction sequence.
 * NOTE(review): the body of this function was not visible in this
 * extraction; the stock implementation returns NULL (no method-level
 * intrinsics on this backend) — confirm against the full file.
 */
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	return NULL;
}
/*
 * mono_arch_get_patch_offset:
 *
 *   Return the offset inside CODE where the value to patch is located.
 * NOTE(review): body not visible in this extraction; the stock
 * implementation returns 0 since arm64 patches whole instructions —
 * confirm against the full file.
 */
guint32
mono_arch_get_patch_offset (guint8 *code)
{
	return 0;
}
/*
 * mono_arch_build_imt_trampoline:
 *
 *   Build the IMT/interface dispatch thunk for VTABLE. The generated code
 * compares the IMT argument (passed in MONO_ARCH_RGCTX_REG) against the
 * keys of IMT_ENTRIES and branches to the matching target (either
 * item->value.target_code or the vtable slot), or to FAIL_TRAMP when set
 * and no entry matches.
 *
 * NOTE(review): several bookkeeping lines (the buf_len size accounting and
 * the DEBUG_IMT guard) were dropped by the extraction and restored from
 * the surrounding visible code — confirm against the full file.
 */
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
								gpointer fail_tramp)
{
	int i, buf_len, imt_reg;
	guint8 *buf, *code;

#if DEBUG_IMT
	printf ("building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p\n", m_class_get_name_space (vtable->klass), m_class_get_name (vtable->klass), count, size, start, ((guint8*)start) + size, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif

	/* Compute a worst-case size estimate for the emitted code */
	buf_len = 0;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case)
					buf_len += 4 * 4 + 4;
				buf_len += 4;
				if (item->has_target_code)
					buf_len += 5 * 4;
				else
					buf_len += 6 * 4;
				if (fail_case)
					buf_len += 5 * 4;
			} else {
				buf_len += 6 * 4;
			}
		} else {
			buf_len += 6 * 4;
		}
	}

	if (fail_tramp) {
		/* Generic virtual thunks have their own allocator */
		buf = (guint8*)mono_method_alloc_generic_virtual_trampoline (mono_domain_ambient_memory_manager (domain), buf_len);
	} else {
		MonoMemoryManager *mem_manager = m_class_get_mem_manager (domain, vtable->klass);
		buf = mono_mem_manager_code_reserve (mem_manager, buf_len);
	}
	code = buf;

	MINI_BEGIN_CODEGEN ();

	/*
	 * We are called by JITted code, which passes in the IMT argument in
	 * MONO_ARCH_RGCTX_REG (r27). We need to preserve all caller saved regs
	 * except ip0/ip1.
	 */
	imt_reg = MONO_ARCH_RGCTX_REG;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];

		item->code_target = code;

		if (item->is_equals) {
			/*
			 * Check the imt argument against item->key, if equals, jump to either
			 * item->value.target_code or to vtable [item->value.vtable_slot].
			 * If fail_tramp is set, jump to it if not-equals.
			 */
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				/* Compare imt_reg with item->key */
				if (!item->compare_done || fail_case) {
					// FIXME: Optimize this
					code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key);
					arm_cmpx (code, imt_reg, ARMREG_IP0);
				}
				item->jmp_code = code;
				arm_bcc (code, ARMCOND_NE, 0);
				/* Jump to target if equals */
				if (item->has_target_code) {
					code = emit_imm64 (code, ARMREG_IP0, (guint64)item->value.target_code);
					code = mono_arm_emit_brx (code, ARMREG_IP0);
				} else {
					guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]);

					code = emit_imm64 (code, ARMREG_IP0, imm);
					arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0);
					code = mono_arm_emit_brx (code, ARMREG_IP0);
				}

				if (fail_case) {
					/* Not-equals falls through to the fail trampoline */
					arm_patch_rel (item->jmp_code, code, MONO_R_ARM64_BCC);
					item->jmp_code = NULL;
					code = emit_imm64 (code, ARMREG_IP0, (guint64)fail_tramp);
					code = mono_arm_emit_brx (code, ARMREG_IP0);
				}
			} else {
				/* Unconditional dispatch through the vtable slot */
				guint64 imm = (guint64)&(vtable->vtable [item->value.vtable_slot]);

				code = emit_imm64 (code, ARMREG_IP0, imm);
				arm_ldrx (code, ARMREG_IP0, ARMREG_IP0, 0);
				code = mono_arm_emit_brx (code, ARMREG_IP0);
			}
		} else {
			/* Bsearch pivot: branch forward if imt_reg >= item->key */
			code = emit_imm64 (code, ARMREG_IP0, (guint64)item->key);
			arm_cmpx (code, imt_reg, ARMREG_IP0);
			item->jmp_code = code;
			arm_bcc (code, ARMCOND_HS, 0);
		}
	}
	/* Patch the branches */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code && item->check_target_idx)
			arm_patch_rel (item->jmp_code, imt_entries [item->check_target_idx]->code_target, MONO_R_ARM64_BCC);
	}

	g_assert ((code - buf) <= buf_len);

	MINI_END_CODEGEN (buf, code - buf, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);

	return MINI_ADDR_TO_FTNPTR (buf);
}
5460 mono_arch_get_trampolines (gboolean aot
)
5462 return mono_arm_get_exception_trampolines (aot
);
5465 #else /* DISABLE_JIT */
/*
 * mono_arch_build_imt_trampoline:
 *
 *   DISABLE_JIT stub: IMT trampolines cannot be built without the JIT,
 * so reaching this is a bug.
 */
gpointer
mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
								gpointer fail_tramp)
{
	g_assert_not_reached ();
	return NULL;
}
5475 #endif /* !DISABLE_JIT */
5477 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code offset corresponding to IP inside
 * JI. For AOTed code, the breakpoint trampoline address is stored into the
 * per-method bp_addrs table which the generated seq point code loads from;
 * for JITted code, the nop after the seq point ldrx is patched into a call.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = MINI_FTNPTR_TO_ADDR (ip);
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		/* Instructions are 4 bytes, so bp_addrs is indexed by offset / 4 */
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = (guint8*)mini_get_breakpoint_trampoline ();
	} else {
		/* ip points to an ldrx */
		code += 4;
		mono_codeman_enable_write ();
		code = mono_arm_emit_blrx (code, ARMREG_IP0);
		mono_codeman_disable_write ();
		mono_arch_flush_icache (ip, code - ip);
	}
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint previously set at IP inside JI: for AOTed code,
 * reset the bp_addrs table entry; for JITted code, turn the patched call
 * back into a nop.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = MINI_FTNPTR_TO_ADDR (ip);

	if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		g_assert (native_offset % 4 == 0);
		info->bp_addrs [native_offset / 4] = NULL;
	} else {
		/* ip points to an ldrx */
		code += 4;
		mono_codeman_enable_write ();
		/* Overwrite the call emitted by mono_arch_set_breakpoint () */
		arm_nop (code);
		mono_codeman_disable_write ();
		mono_arch_flush_icache (ip, code - ip);
	}
}
/*
 * mono_arch_start_single_stepping:
 *
 *   Enable single stepping: JITted code loads the trampoline address
 * indirectly through ss_tramp_var, so setting the global activates it.
 */
void
mono_arch_start_single_stepping (void)
{
	ss_trampoline = mini_get_single_step_trampoline ();
}
/*
 * mono_arch_stop_single_stepping:
 *
 *   Disable single stepping by clearing the trampoline pointer that
 * JITted seq point code loads through ss_tramp_var.
 */
void
mono_arch_stop_single_stepping (void)
{
	ss_trampoline = NULL;
}
/*
 * mono_arch_is_single_step_event:
 *
 *   Hardware single step events never occur on this backend.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
	/* We use soft breakpoints on arm64 */
	return FALSE;
}
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Hardware breakpoint events never occur on this backend.
 */
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
	/* We use soft breakpoints on arm64 */
	return FALSE;
}
/*
 * mono_arch_skip_breakpoint:
 *
 *   Unreachable on arm64: soft breakpoints are calls, so there is no
 * faulting instruction to skip over.
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	g_assert_not_reached ();
}
/*
 * mono_arch_skip_single_step:
 *
 *   Unreachable on arm64: single stepping is implemented via the soft
 * single step trampoline, not via hardware traps.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
	g_assert_not_reached ();
}
/*
 * mono_arch_get_seq_point_info:
 *
 *   Return (creating and caching on first use) the per-method SeqPointInfo
 * used by AOTed code for soft breakpoints/single stepping. The structure
 * holds one bp_addrs slot per 4-byte instruction plus the address of the
 * single step trampoline pointer.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;

	// FIXME: Add a free function

	mono_domain_lock (domain);
	info = (SeqPointInfo *)g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
												code);
	mono_domain_unlock (domain);

	if (!info) {
		ji = mono_jit_info_table_find (domain, code);

		/* One bp_addrs entry per instruction */
		info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size / 4) * sizeof(guint8*));

		info->ss_tramp_addr = &ss_trampoline;

		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}
5595 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
 * mono_arch_opcode_supported:
 *
 *   Return whether OPCODE can be implemented natively by this backend.
 * All the listed atomic add/exchange/cas/load/store opcodes are supported
 * on arm64.
 */
gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_ADD_I8:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_EXCHANGE_I8:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_CAS_I8:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_I8:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_LOAD_U8:
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_I8:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
	case OP_ATOMIC_STORE_U8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return TRUE;
	default:
		return FALSE;
	}
}
5634 mono_arch_get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
5636 return get_call_info (mp
, sig
);
/*
 * mono_arch_load_function:
 *
 *   Map JIT_ICALL_ID to the address of its arch specific implementation,
 * or NULL if this backend does not provide one.
 */
gpointer
mono_arch_load_function (MonoJitICallId jit_icall_id)
{
	gpointer target = NULL;
	switch (jit_icall_id) {
#undef MONO_AOT_ICALL
#define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
	MONO_AOT_ICALL (mono_arm_resume_unwind)
	MONO_AOT_ICALL (mono_arm_start_gsharedvt_call)
	MONO_AOT_ICALL (mono_arm_throw_exception)
	}
	return target;
}
/*
 * emit_blrx:
 *
 *   Emit an indirect call through REG. When pointer authentication is
 * enabled, emit the authenticating blraaz form instead of a plain blr.
 * Returns the updated code pointer.
 */
static guint8*
emit_blrx (guint8 *code, int reg)
{
	if (enable_ptrauth)
		arm_blraaz (code, reg);
	else
		arm_blrx (code, reg);
	return code;
}
/*
 * emit_brx:
 *
 *   Emit an indirect branch through REG. When pointer authentication is
 * enabled, emit the authenticating braaz form instead of a plain br.
 * Returns the updated code pointer.
 */
static guint8*
emit_brx (guint8 *code, int reg)
{
	if (enable_ptrauth)
		arm_braaz (code, reg);
	else
		arm_brx (code, reg);
	return code;
}
5674 mono_arm_emit_blrx (guint8
*code
, int reg
)
5676 return emit_blrx (code
, reg
);
5680 mono_arm_emit_brx (guint8
*code
, int reg
)
5682 return emit_brx (code
, reg
);