/*
 * mini-ppc.c: PowerPC backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Andreas Faerber <andreas.faerber@web.de>
 *
 * (C) 2003 Ximian, Inc.
 * (C) 2007-2008 Andreas Faerber
 */
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/utils/mono-proclib.h>
20 #include <mono/utils/mono-mmap.h>
21 #include <mono/utils/mono-hwcap.h>
22 #include <mono/utils/unlocked.h>
23 #include "mono/utils/mono-tls-inline.h"
26 #ifdef TARGET_POWERPC64
27 #include "cpu-ppc64.h"
32 #include "aot-runtime.h"
33 #include "mini-runtime.h"
35 #include <sys/sysctl.h>
41 #include <sys/systemcfg.h>
44 static GENERATE_TRY_GET_CLASS_WITH_CACHE (math
, "System", "Math")
45 static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf
, "System", "MathF")
47 #define FORCE_INDIR_CALL 1
58 /* cpu_hw_caps contains the flags defined below */
59 static int cpu_hw_caps
= 0;
60 static int cachelinesize
= 0;
61 static int cachelineinc
= 0;
63 PPC_ICACHE_SNOOP
= 1 << 0,
64 PPC_MULTIPLE_LS_UNITS
= 1 << 1,
65 PPC_SMP_CAPABLE
= 1 << 2,
68 PPC_MOVE_FPR_GPR
= 1 << 5,
69 PPC_ISA_2_03
= 1 << 6,
73 #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
75 /* This mutex protects architecture specific caches */
76 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
77 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
78 static mono_mutex_t mini_arch_mutex
;
81 * The code generated for sequence points reads from this location, which is
82 * made read-only when single stepping is enabled.
84 static gpointer ss_trigger_page
;
86 /* Enabled breakpoints read from this trigger page */
87 static gpointer bp_trigger_page
;
89 #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \
91 MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \
92 inst->type = STACK_R8; \
94 inst->inst_p0 = (void*)(addr); \
95 mono_bblock_add_inst (cfg->cbb, inst); \
/*
 * mono_arch_regname:
 * @reg: integer register number
 *
 * Returns the symbolic name of general-purpose register @reg.
 * r1 is reported as "sp" because it serves as the stack pointer on PPC.
 * Out-of-range values yield "unknown".
 */
const char*
mono_arch_regname (int reg)
{
	/* 4-byte entries: longest name ("r31") plus the NUL terminator. */
	static const char names[][4] = {
		"r0", "sp", "r2", "r3", "r4",
		"r5", "r6", "r7", "r8", "r9",
		"r10", "r11", "r12", "r13", "r14",
		"r15", "r16", "r17", "r18", "r19",
		"r20", "r21", "r22", "r23", "r24",
		"r25", "r26", "r27", "r28", "r29",
		"r30", "r31"
	};

	if (reg < 0 || reg >= 32)
		return "unknown";
	return names [reg];
}
/*
 * mono_arch_fregname:
 * @reg: floating-point register number
 *
 * Returns the symbolic name ("f0".."f31") of floating-point register @reg,
 * or "unknown" when @reg is outside the 0..31 range.
 */
const char*
mono_arch_fregname (int reg)
{
	/* 4-byte entries: longest name ("f31") plus the NUL terminator. */
	static const char names[][4] = {
		"f0", "f1", "f2", "f3", "f4",
		"f5", "f6", "f7", "f8", "f9",
		"f10", "f11", "f12", "f13", "f14",
		"f15", "f16", "f17", "f18", "f19",
		"f20", "f21", "f22", "f23", "f24",
		"f25", "f26", "f27", "f28", "f29",
		"f30", "f31"
	};

	if (reg < 0 || reg >= 32)
		return "unknown";
	return names [reg];
}
130 /* this function overwrites r0, r11, r12 */
132 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
134 /* unrolled, use the counter in big */
135 if (size
> sizeof (target_mgreg_t
) * 5) {
136 long shifted
= size
/ TARGET_SIZEOF_VOID_P
;
137 guint8
*copy_loop_start
, *copy_loop_jump
;
139 ppc_load (code
, ppc_r0
, shifted
);
140 ppc_mtctr (code
, ppc_r0
);
141 //g_assert (sreg == ppc_r12);
142 ppc_addi (code
, ppc_r11
, dreg
, (doffset
- sizeof (target_mgreg_t
)));
143 ppc_addi (code
, ppc_r12
, sreg
, (soffset
- sizeof (target_mgreg_t
)));
144 copy_loop_start
= code
;
145 ppc_ldptr_update (code
, ppc_r0
, (unsigned int)sizeof (target_mgreg_t
), ppc_r12
);
146 ppc_stptr_update (code
, ppc_r0
, (unsigned int)sizeof (target_mgreg_t
), ppc_r11
);
147 copy_loop_jump
= code
;
148 ppc_bc (code
, PPC_BR_DEC_CTR_NONZERO
, 0, 0);
149 ppc_patch (copy_loop_jump
, copy_loop_start
);
150 size
-= shifted
* sizeof (target_mgreg_t
);
151 doffset
= soffset
= 0;
154 #ifdef __mono_ppc64__
155 /* the hardware has multiple load/store units and the move is long
156 enough to use more then one register, then use load/load/store/store
157 to execute 2 instructions per cycle. */
158 if ((cpu_hw_caps
& PPC_MULTIPLE_LS_UNITS
) && (dreg
!= ppc_r11
) && (sreg
!= ppc_r11
)) {
160 ppc_ldptr (code
, ppc_r0
, soffset
, sreg
);
161 ppc_ldptr (code
, ppc_r11
, soffset
+8, sreg
);
162 ppc_stptr (code
, ppc_r0
, doffset
, dreg
);
163 ppc_stptr (code
, ppc_r11
, doffset
+8, dreg
);
170 ppc_ldr (code
, ppc_r0
, soffset
, sreg
);
171 ppc_str (code
, ppc_r0
, doffset
, dreg
);
177 if ((cpu_hw_caps
& PPC_MULTIPLE_LS_UNITS
) && (dreg
!= ppc_r11
) && (sreg
!= ppc_r11
)) {
179 ppc_lwz (code
, ppc_r0
, soffset
, sreg
);
180 ppc_lwz (code
, ppc_r11
, soffset
+4, sreg
);
181 ppc_stw (code
, ppc_r0
, doffset
, dreg
);
182 ppc_stw (code
, ppc_r11
, doffset
+4, dreg
);
190 ppc_lwz (code
, ppc_r0
, soffset
, sreg
);
191 ppc_stw (code
, ppc_r0
, doffset
, dreg
);
197 ppc_lhz (code
, ppc_r0
, soffset
, sreg
);
198 ppc_sth (code
, ppc_r0
, doffset
, dreg
);
204 ppc_lbz (code
, ppc_r0
, soffset
, sreg
);
205 ppc_stb (code
, ppc_r0
, doffset
, dreg
);
214 * mono_arch_get_argument_info:
215 * @csig: a method signature
216 * @param_count: the number of parameters to consider
217 * @arg_info: an array to store the result infos
219 * Gathers information on parameters such as size, alignment and
220 * padding. arg_info should be large enought to hold param_count + 1 entries.
222 * Returns the size of the activation frame.
225 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
227 #ifdef __mono_ppc64__
231 int k
, frame_size
= 0;
232 int size
, align
, pad
;
235 if (MONO_TYPE_ISSTRUCT (csig
->ret
)) {
236 frame_size
+= sizeof (target_mgreg_t
);
240 arg_info
[0].offset
= offset
;
243 frame_size
+= sizeof (target_mgreg_t
);
247 arg_info
[0].size
= frame_size
;
249 for (k
= 0; k
< param_count
; k
++) {
252 size
= mono_type_native_stack_size (csig
->params
[k
], (guint32
*)&align
);
254 size
= mini_type_stack_size (csig
->params
[k
], &align
);
256 /* ignore alignment for now */
259 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
260 arg_info
[k
].pad
= pad
;
262 arg_info
[k
+ 1].pad
= 0;
263 arg_info
[k
+ 1].size
= size
;
265 arg_info
[k
+ 1].offset
= offset
;
269 align
= MONO_ARCH_FRAME_ALIGNMENT
;
270 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
271 arg_info
[k
].pad
= pad
;
277 #ifdef __mono_ppc64__
279 is_load_sequence (guint32
*seq
)
281 return ppc_opcode (seq
[0]) == 15 && /* lis */
282 ppc_opcode (seq
[1]) == 24 && /* ori */
283 ppc_opcode (seq
[2]) == 30 && /* sldi */
284 ppc_opcode (seq
[3]) == 25 && /* oris */
285 ppc_opcode (seq
[4]) == 24; /* ori */
288 #define ppc_load_get_dest(l) (((l)>>21) & 0x1f)
289 #define ppc_load_get_off(l) ((gint16)((l) & 0xffff))
293 #define ppc_is_load_op(opcode) (ppc_opcode ((opcode)) == 58 || ppc_opcode ((opcode)) == 32)
295 /* code must point to the blrl */
297 mono_ppc_is_direct_call_sequence (guint32
*code
)
299 #ifdef __mono_ppc64__
300 g_assert(*code
== 0x4e800021 || *code
== 0x4e800020 || *code
== 0x4e800420);
302 /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */
303 if (ppc_opcode (code
[-1]) == 31) { /* mtlr */
304 if (ppc_is_load_op (code
[-2]) && ppc_is_load_op (code
[-3])) { /* ld/ld */
305 if (!is_load_sequence (&code
[-8]))
307 /* one of the loads must be "ld r2,8(rX)" or "ld r2,4(rX) for ilp32 */
308 return (ppc_load_get_dest (code
[-2]) == ppc_r2
&& ppc_load_get_off (code
[-2]) == sizeof (target_mgreg_t
)) ||
309 (ppc_load_get_dest (code
[-3]) == ppc_r2
&& ppc_load_get_off (code
[-3]) == sizeof (target_mgreg_t
));
311 if (ppc_opcode (code
[-2]) == 24 && ppc_opcode (code
[-3]) == 31) /* mr/nop */
312 return is_load_sequence (&code
[-8]);
314 return is_load_sequence (&code
[-6]);
318 g_assert(*code
== 0x4e800021);
320 /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */
321 return ppc_opcode (code
[-1]) == 31 &&
322 ppc_opcode (code
[-2]) == 24 &&
323 ppc_opcode (code
[-3]) == 15;
327 #define MAX_ARCH_DELEGATE_PARAMS 7
330 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, guint32 param_count
, gboolean aot
)
332 guint8
*code
, *start
;
335 int size
= MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE
;
337 start
= code
= mono_global_codeman_reserve (size
);
339 code
= mono_ppc_create_pre_code_ftnptr (code
);
341 /* Replace the this argument with the target */
342 ppc_ldptr (code
, ppc_r0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
), ppc_r3
);
343 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
344 /* it's a function descriptor */
345 /* Can't use ldptr as it doesn't work with r0 */
346 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
348 ppc_mtctr (code
, ppc_r0
);
349 ppc_ldptr (code
, ppc_r3
, MONO_STRUCT_OFFSET (MonoDelegate
, target
), ppc_r3
);
350 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
352 g_assert ((code
- start
) <= size
);
354 mono_arch_flush_icache (start
, size
);
355 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
359 size
= MONO_PPC_32_64_CASE (32, 32) + param_count
* 4 + PPC_FTNPTR_SIZE
;
360 start
= code
= mono_global_codeman_reserve (size
);
362 code
= mono_ppc_create_pre_code_ftnptr (code
);
364 ppc_ldptr (code
, ppc_r0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
), ppc_r3
);
365 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
366 /* it's a function descriptor */
367 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
369 ppc_mtctr (code
, ppc_r0
);
370 /* slide down the arguments */
371 for (i
= 0; i
< param_count
; ++i
) {
372 ppc_mr (code
, (ppc_r3
+ i
), (ppc_r3
+ i
+ 1));
374 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
376 g_assert ((code
- start
) <= size
);
378 mono_arch_flush_icache (start
, size
);
379 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
383 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, NULL
);
385 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
386 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, NULL
);
394 mono_arch_get_delegate_invoke_impls (void)
400 get_delegate_invoke_impl (&info
, TRUE
, 0, TRUE
);
401 res
= g_slist_prepend (res
, info
);
403 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
404 get_delegate_invoke_impl (&info
, FALSE
, i
, TRUE
);
405 res
= g_slist_prepend (res
, info
);
412 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
414 guint8
*code
, *start
;
416 /* FIXME: Support more cases */
417 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
421 static guint8
* cached
= NULL
;
426 if (mono_ee_features
.use_aot_trampolines
) {
427 start
= mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
430 start
= get_delegate_invoke_impl (&info
, TRUE
, 0, FALSE
);
431 mono_tramp_info_register (info
, NULL
);
433 mono_memory_barrier ();
437 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
440 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
442 for (i
= 0; i
< sig
->param_count
; ++i
)
443 if (!mono_is_regsize_var (sig
->params
[i
]))
447 code
= cache
[sig
->param_count
];
451 if (mono_ee_features
.use_aot_trampolines
) {
452 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
453 start
= mono_aot_get_trampoline (name
);
457 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
, FALSE
);
458 mono_tramp_info_register (info
, NULL
);
461 mono_memory_barrier ();
463 cache
[sig
->param_count
] = start
;
469 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
475 mono_arch_get_this_arg_from_call (host_mgreg_t
*r
, guint8
*code
)
477 return (gpointer
)(gsize
)r
[ppc_r3
];
485 #define MAX_AUX_ENTRIES 128
487 * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL,
488 * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features
490 #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000)
492 /* define PPC_FEATURE_64 HWCAP for 64-bit category. */
493 #define ISA_64 0x40000000
495 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */
496 #define ISA_MOVE_FPR_GPR 0x00000200
498 * Initialize the cpu to execute managed code.
501 mono_arch_cpu_init (void)
506 * Initialize architecture specific code.
509 mono_arch_init (void)
511 #if defined(MONO_CROSS_COMPILE)
512 #elif defined(__APPLE__)
514 size_t len
= sizeof (cachelinesize
);
517 mib
[1] = HW_CACHELINE
;
519 if (sysctl (mib
, 2, &cachelinesize
, &len
, NULL
, 0) == -1) {
523 cachelineinc
= cachelinesize
;
525 #elif defined(__linux__)
526 AuxVec vec
[MAX_AUX_ENTRIES
];
527 int i
, vec_entries
= 0;
528 /* sadly this will work only with 2.6 kernels... */
529 FILE* f
= fopen ("/proc/self/auxv", "rb");
532 vec_entries
= fread (&vec
, sizeof (AuxVec
), MAX_AUX_ENTRIES
, f
);
536 for (i
= 0; i
< vec_entries
; i
++) {
537 int type
= vec
[i
].type
;
539 if (type
== 19) { /* AT_DCACHEBSIZE */
540 cachelinesize
= vec
[i
].value
;
544 #elif defined(G_COMPILER_CODEWARRIOR)
548 /* FIXME: use block instead? */
549 cachelinesize
= _system_configuration
.icache_line
;
550 cachelineinc
= _system_configuration
.icache_line
;
552 //#error Need a way to get cache line size
555 if (mono_hwcap_ppc_has_icache_snoop
)
556 cpu_hw_caps
|= PPC_ICACHE_SNOOP
;
558 if (mono_hwcap_ppc_is_isa_2x
)
559 cpu_hw_caps
|= PPC_ISA_2X
;
561 if (mono_hwcap_ppc_is_isa_2_03
)
562 cpu_hw_caps
|= PPC_ISA_2_03
;
564 if (mono_hwcap_ppc_is_isa_64
)
565 cpu_hw_caps
|= PPC_ISA_64
;
567 if (mono_hwcap_ppc_has_move_fpr_gpr
)
568 cpu_hw_caps
|= PPC_MOVE_FPR_GPR
;
570 if (mono_hwcap_ppc_has_multiple_ls_units
)
571 cpu_hw_caps
|= PPC_MULTIPLE_LS_UNITS
;
577 cachelineinc
= cachelinesize
;
579 if (mono_cpu_count () > 1)
580 cpu_hw_caps
|= PPC_SMP_CAPABLE
;
582 mono_os_mutex_init_recursive (&mini_arch_mutex
);
584 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
585 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
586 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
588 // FIXME: Fix partial sharing for power and remove this
589 mono_set_partial_sharing_supported (FALSE
);
593 * Cleanup architecture specific code.
596 mono_arch_cleanup (void)
598 mono_os_mutex_destroy (&mini_arch_mutex
);
602 mono_arch_have_fast_tls (void)
608 * This function returns the optimizations supported on this cpu.
611 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
615 /* no ppc-specific optimizations yet */
621 * This function test for all SIMD functions supported.
623 * Returns a bitmask corresponding to all supported versions.
627 mono_arch_cpu_enumerate_simd_versions (void)
629 /* SIMD is currently unimplemented */
633 #ifdef __mono_ppc64__
634 #define CASE_PPC32(c)
635 #define CASE_PPC64(c) case c:
637 #define CASE_PPC32(c) case c:
638 #define CASE_PPC64(c)
642 is_regsize_var (MonoType
*t
) {
645 t
= mini_get_underlying_type (t
);
649 CASE_PPC64 (MONO_TYPE_I8
)
650 CASE_PPC64 (MONO_TYPE_U8
)
654 case MONO_TYPE_FNPTR
:
656 case MONO_TYPE_OBJECT
:
657 case MONO_TYPE_STRING
:
658 case MONO_TYPE_CLASS
:
659 case MONO_TYPE_SZARRAY
:
660 case MONO_TYPE_ARRAY
:
662 case MONO_TYPE_GENERICINST
:
663 if (!mono_type_generic_inst_is_valuetype (t
))
666 case MONO_TYPE_VALUETYPE
:
674 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
679 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
680 MonoInst
*ins
= cfg
->varinfo
[i
];
681 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
684 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
687 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
690 /* we can only allocate 32 bit values */
691 if (is_regsize_var (ins
->inst_vtype
)) {
692 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
693 g_assert (i
== vmv
->idx
);
694 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
700 #endif /* ifndef DISABLE_JIT */
703 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
707 if (cfg
->frame_reg
!= ppc_sp
)
709 /* ppc_r13 is used by the system on PPC EABI */
710 for (i
= 14; i
< top
; ++i
) {
712 * Reserve r29 for holding the vtable address for virtual calls in AOT mode,
713 * since the trampolines can clobber r12.
715 if (!(cfg
->compile_aot
&& i
== 29))
716 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (i
));
723 * mono_arch_regalloc_cost:
725 * Return the cost, in number of memory references, of the action of
726 * allocating the variable VMV into a register during global register
730 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
737 mono_arch_flush_icache (guint8
*code
, gint size
)
739 #ifdef MONO_CROSS_COMPILE
743 guint8
*endp
, *start
;
747 start
= (guint8
*)((gsize
)start
& ~(cachelinesize
- 1));
748 /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */
749 #if defined(G_COMPILER_CODEWARRIOR)
750 if (cpu_hw_caps
& PPC_SMP_CAPABLE
) {
751 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
755 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
761 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
772 /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required.
773 * The sync is required to insure that the store queue is completely empty.
774 * While the icbi performs no cache operations, icbi/isync is required to
775 * kill local prefetch.
777 if (cpu_hw_caps
& PPC_ICACHE_SNOOP
) {
779 asm ("icbi 0,%0;" : : "r"(code
) : "memory");
783 /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */
784 if (cpu_hw_caps
& PPC_SMP_CAPABLE
) {
785 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
786 asm ("dcbf 0,%0;" : : "r"(p
) : "memory");
789 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
790 asm ("dcbst 0,%0;" : : "r"(p
) : "memory");
795 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
796 /* for ISA2.0+ implementations we should not need any extra sync between the
797 * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this.
798 * So I am not sure which chip had this problem but its not an issue on
799 * of the ISA V2 chips.
801 if (cpu_hw_caps
& PPC_ISA_2X
)
802 asm ("icbi 0,%0;" : : "r"(p
) : "memory");
804 asm ("icbi 0,%0; sync;" : : "r"(p
) : "memory");
806 if (!(cpu_hw_caps
& PPC_ISA_2X
))
814 mono_arch_flush_register_windows (void)
819 #define ALWAYS_ON_STACK(s) s
820 #define FP_ALSO_IN_REG(s) s
822 #ifdef __mono_ppc64__
823 #define ALWAYS_ON_STACK(s) s
824 #define FP_ALSO_IN_REG(s) s
826 #define ALWAYS_ON_STACK(s)
827 #define FP_ALSO_IN_REG(s)
829 #define ALIGN_DOUBLES
838 RegTypeFPStructByVal
, // For the v2 ABI, floats should be passed in FRs instead of GRs. Only valid for ABI v2!
843 guint32 vtsize
; /* in param area */
845 guint8 vtregs
; /* number of registers used to pass a RegTypeStructByVal/RegTypeFPStructByVal */
846 guint8 regtype
: 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
847 guint8 size
: 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal/RegTypeFPStructByVal */
848 guint8 bytes
: 4; /* size in bytes - only valid for
849 RegTypeStructByVal/RegTypeFPStructByVal if the struct fits
850 in one word, otherwise it's 0*/
859 gboolean vtype_retaddr
;
867 #if PPC_RETURN_SMALL_FLOAT_STRUCTS_IN_FR_REGS
869 // Test if a structure is completely composed of either float XOR double fields and has fewer than
870 // PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTER members.
871 // If this is true the structure can be returned directly via float registers instead of by a hidden parameter
872 // pointing to where the return value should be stored.
873 // This is as per the ELF ABI v2.
876 is_float_struct_returnable_via_regs (MonoType
*type
, int* member_cnt
, int* member_size
)
878 int local_member_cnt
, local_member_size
;
880 member_cnt
= &local_member_cnt
;
883 member_size
= &local_member_size
;
886 gboolean is_all_floats
= mini_type_is_hfa(type
, member_cnt
, member_size
);
887 return is_all_floats
&& (*member_cnt
<= PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS
);
891 #define is_float_struct_returnable_via_regs(a,b,c) (FALSE)
895 #if PPC_RETURN_SMALL_STRUCTS_IN_REGS
897 // Test if a structure is smaller in size than 2 doublewords (PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) and is
898 // completely composed of fields all of basic types.
899 // If this is true the structure can be returned directly via registers r3/r4 instead of by a hidden parameter
900 // pointing to where the return value should be stored.
901 // This is as per the ELF ABI v2.
904 is_struct_returnable_via_regs (MonoClass
*klass
, gboolean is_pinvoke
)
906 gboolean has_a_field
= FALSE
;
909 gpointer iter
= NULL
;
912 size
= mono_type_native_stack_size (m_class_get_byval_arg (klass
), 0);
914 size
= mini_type_stack_size (m_class_get_byval_arg (klass
), 0);
917 if (size
> PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS
)
919 while ((f
= mono_class_get_fields_internal (klass
, &iter
))) {
920 if (!(f
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)) {
921 // TBD: Is there a better way to check for the basic types?
922 if (f
->type
->byref
) {
924 } else if ((f
->type
->type
>= MONO_TYPE_BOOLEAN
) && (f
->type
->type
<= MONO_TYPE_R8
)) {
926 } else if (MONO_TYPE_ISSTRUCT (f
->type
)) {
927 MonoClass
*klass
= mono_class_from_mono_type_internal (f
->type
);
928 if (is_struct_returnable_via_regs(klass
, is_pinvoke
)) {
943 #define is_struct_returnable_via_regs(a,b) (FALSE)
948 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
950 #ifdef __mono_ppc64__
955 if (*gr
>= 3 + PPC_NUM_REG_ARGS
) {
956 ainfo
->offset
= PPC_STACK_PARAM_OFFSET
+ *stack_size
;
957 ainfo
->reg
= ppc_sp
; /* in the caller */
958 ainfo
->regtype
= RegTypeBase
;
959 *stack_size
+= sizeof (target_mgreg_t
);
961 ALWAYS_ON_STACK (*stack_size
+= sizeof (target_mgreg_t
));
965 if (*gr
>= 3 + PPC_NUM_REG_ARGS
- 1) {
967 //*stack_size += (*stack_size % 8);
969 ainfo
->offset
= PPC_STACK_PARAM_OFFSET
+ *stack_size
;
970 ainfo
->reg
= ppc_sp
; /* in the caller */
971 ainfo
->regtype
= RegTypeBase
;
978 ALWAYS_ON_STACK (*stack_size
+= 8);
986 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
988 has_only_a_r48_field (MonoClass
*klass
)
992 gboolean have_field
= FALSE
;
994 while ((f
= mono_class_get_fields_internal (klass
, &iter
))) {
995 if (!(f
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)) {
998 if (!f
->type
->byref
&& (f
->type
->type
== MONO_TYPE_R4
|| f
->type
->type
== MONO_TYPE_R8
))
1009 get_call_info (MonoMethodSignature
*sig
)
1011 guint i
, fr
, gr
, pstart
;
1012 int n
= sig
->hasthis
+ sig
->param_count
;
1013 MonoType
*simpletype
;
1014 guint32 stack_size
= 0;
1015 CallInfo
*cinfo
= g_malloc0 (sizeof (CallInfo
) + sizeof (ArgInfo
) * n
);
1016 gboolean is_pinvoke
= sig
->pinvoke
;
1018 fr
= PPC_FIRST_FPARG_REG
;
1019 gr
= PPC_FIRST_ARG_REG
;
1021 if (mini_type_is_vtype (sig
->ret
)) {
1022 cinfo
->vtype_retaddr
= TRUE
;
1028 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1029 * the first argument, allowing 'this' to be always passed in the first arg reg.
1030 * Also do this if the first argument is a reference type, since virtual calls
1031 * are sometimes made using calli without sig->hasthis set, like in the delegate
1034 if (cinfo
->vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1036 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1039 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1043 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
1044 cinfo
->struct_ret
= cinfo
->ret
.reg
;
1045 cinfo
->vret_arg_index
= 1;
1049 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1053 if (cinfo
->vtype_retaddr
) {
1054 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
1055 cinfo
->struct_ret
= cinfo
->ret
.reg
;
1059 DEBUG(printf("params: %d\n", sig
->param_count
));
1060 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1061 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1062 /* Prevent implicit arguments and sig_cookie from
1063 being passed in registers */
1064 gr
= PPC_LAST_ARG_REG
+ 1;
1065 /* FIXME: don't we have to set fr, too? */
1066 /* Emit the signature cookie just before the implicit arguments */
1067 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1069 DEBUG(printf("param %d: ", i
));
1070 if (sig
->params
[i
]->byref
) {
1071 DEBUG(printf("byref\n"));
1072 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1076 simpletype
= mini_get_underlying_type (sig
->params
[i
]);
1077 switch (simpletype
->type
) {
1078 case MONO_TYPE_BOOLEAN
:
1081 cinfo
->args
[n
].size
= 1;
1082 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1085 case MONO_TYPE_CHAR
:
1088 cinfo
->args
[n
].size
= 2;
1089 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1094 cinfo
->args
[n
].size
= 4;
1095 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1101 case MONO_TYPE_FNPTR
:
1102 case MONO_TYPE_CLASS
:
1103 case MONO_TYPE_OBJECT
:
1104 case MONO_TYPE_STRING
:
1105 case MONO_TYPE_SZARRAY
:
1106 case MONO_TYPE_ARRAY
:
1107 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1108 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1111 case MONO_TYPE_GENERICINST
:
1112 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
1113 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1114 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1119 case MONO_TYPE_VALUETYPE
:
1120 case MONO_TYPE_TYPEDBYREF
: {
1122 MonoClass
*klass
= mono_class_from_mono_type_internal (sig
->params
[i
]);
1123 if (simpletype
->type
== MONO_TYPE_TYPEDBYREF
)
1124 size
= MONO_ABI_SIZEOF (MonoTypedRef
);
1125 else if (is_pinvoke
)
1126 size
= mono_class_native_size (klass
, NULL
);
1128 size
= mono_class_value_size (klass
, NULL
);
1130 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
1131 if ((size
== 4 || size
== 8) && has_only_a_r48_field (klass
)) {
1132 cinfo
->args
[n
].size
= size
;
1134 /* It was 7, now it is 8 in LinuxPPC */
1135 if (fr
<= PPC_LAST_FPARG_REG
) {
1136 cinfo
->args
[n
].regtype
= RegTypeFP
;
1137 cinfo
->args
[n
].reg
= fr
;
1139 FP_ALSO_IN_REG (gr
++);
1140 #if !defined(__mono_ppc64__)
1142 FP_ALSO_IN_REG (gr
++);
1144 ALWAYS_ON_STACK (stack_size
+= size
);
1146 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1147 cinfo
->args
[n
].regtype
= RegTypeBase
;
1148 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1155 DEBUG(printf ("load %d bytes struct\n",
1156 mono_class_native_size (sig
->params
[i
]->data
.klass
, NULL
)));
1158 #if PPC_PASS_STRUCTS_BY_VALUE
1160 int align_size
= size
;
1162 int rest
= PPC_LAST_ARG_REG
- gr
+ 1;
1165 #if PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS
1168 gboolean is_all_floats
= is_float_struct_returnable_via_regs (sig
->params
[i
], &mbr_cnt
, &mbr_size
);
1170 if (is_all_floats
) {
1171 rest
= PPC_LAST_FPARG_REG
- fr
+ 1;
1173 // Pass small (<= 8 member) structures entirely made up of either float or double members
1174 // in FR registers. There have to be at least mbr_cnt registers left.
1175 if (is_all_floats
&&
1176 (rest
>= mbr_cnt
)) {
1178 n_in_regs
= MIN (rest
, nregs
);
1179 cinfo
->args
[n
].regtype
= RegTypeFPStructByVal
;
1180 cinfo
->args
[n
].vtregs
= n_in_regs
;
1181 cinfo
->args
[n
].size
= mbr_size
;
1182 cinfo
->args
[n
].vtsize
= nregs
- n_in_regs
;
1183 cinfo
->args
[n
].reg
= fr
;
1185 if (mbr_size
== 4) {
1187 FP_ALSO_IN_REG (gr
+= (n_in_regs
+1)/2);
1190 FP_ALSO_IN_REG (gr
+= (n_in_regs
));
1195 align_size
+= (sizeof (target_mgreg_t
) - 1);
1196 align_size
&= ~(sizeof (target_mgreg_t
) - 1);
1197 nregs
= (align_size
+ sizeof (target_mgreg_t
) -1 ) / sizeof (target_mgreg_t
);
1198 n_in_regs
= MIN (rest
, nregs
);
1202 /* FIXME: check this */
1203 if (size
>= 3 && size
% 4 != 0)
1206 cinfo
->args
[n
].regtype
= RegTypeStructByVal
;
1207 cinfo
->args
[n
].vtregs
= n_in_regs
;
1208 cinfo
->args
[n
].size
= n_in_regs
;
1209 cinfo
->args
[n
].vtsize
= nregs
- n_in_regs
;
1210 cinfo
->args
[n
].reg
= gr
;
1214 #ifdef __mono_ppc64__
1215 if (nregs
== 1 && is_pinvoke
)
1216 cinfo
->args
[n
].bytes
= size
;
1219 cinfo
->args
[n
].bytes
= 0;
1220 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1221 /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/
1222 stack_size
+= nregs
* sizeof (target_mgreg_t
);
1225 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1226 cinfo
->args
[n
].regtype
= RegTypeStructByAddr
;
1227 cinfo
->args
[n
].vtsize
= size
;
1234 cinfo
->args
[n
].size
= 8;
1235 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, SIZEOF_REGISTER
== 8);
1239 cinfo
->args
[n
].size
= 4;
1241 /* It was 7, now it is 8 in LinuxPPC */
1242 if (fr
<= PPC_LAST_FPARG_REG
1243 // For non-native vararg calls the parms must go in storage
1244 && !(!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1246 cinfo
->args
[n
].regtype
= RegTypeFP
;
1247 cinfo
->args
[n
].reg
= fr
;
1249 FP_ALSO_IN_REG (gr
++);
1250 ALWAYS_ON_STACK (stack_size
+= SIZEOF_REGISTER
);
1252 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
+ MONO_PPC_32_64_CASE (0, 4);
1253 cinfo
->args
[n
].regtype
= RegTypeBase
;
1254 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1255 stack_size
+= SIZEOF_REGISTER
;
1260 cinfo
->args
[n
].size
= 8;
1261 /* It was 7, now it is 8 in LinuxPPC */
1262 if (fr
<= PPC_LAST_FPARG_REG
1263 // For non-native vararg calls the parms must go in storage
1264 && !(!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1266 cinfo
->args
[n
].regtype
= RegTypeFP
;
1267 cinfo
->args
[n
].reg
= fr
;
1269 FP_ALSO_IN_REG (gr
+= sizeof (double) / SIZEOF_REGISTER
);
1270 ALWAYS_ON_STACK (stack_size
+= 8);
1272 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1273 cinfo
->args
[n
].regtype
= RegTypeBase
;
1274 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1280 g_error ("Can't trampoline 0x%x", sig
->params
[i
]->type
);
1285 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1286 /* Prevent implicit arguments and sig_cookie from
1287 being passed in registers */
1288 gr
= PPC_LAST_ARG_REG
+ 1;
1289 /* Emit the signature cookie just before the implicit arguments */
1290 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1294 simpletype
= mini_get_underlying_type (sig
->ret
);
1295 switch (simpletype
->type
) {
1296 case MONO_TYPE_BOOLEAN
:
1301 case MONO_TYPE_CHAR
:
1307 case MONO_TYPE_FNPTR
:
1308 case MONO_TYPE_CLASS
:
1309 case MONO_TYPE_OBJECT
:
1310 case MONO_TYPE_SZARRAY
:
1311 case MONO_TYPE_ARRAY
:
1312 case MONO_TYPE_STRING
:
1313 cinfo
->ret
.reg
= ppc_r3
;
1317 cinfo
->ret
.reg
= ppc_r3
;
1321 cinfo
->ret
.reg
= ppc_f1
;
1322 cinfo
->ret
.regtype
= RegTypeFP
;
1324 case MONO_TYPE_GENERICINST
:
1325 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
1326 cinfo
->ret
.reg
= ppc_r3
;
1330 case MONO_TYPE_VALUETYPE
:
1332 case MONO_TYPE_TYPEDBYREF
:
1333 case MONO_TYPE_VOID
:
1336 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1340 /* align stack size to 16 */
1341 DEBUG (printf (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1342 stack_size
= (stack_size
+ 15) & ~15;
1344 cinfo
->stack_usage
= stack_size
;
1351 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
1353 CallInfo
*caller_info
= get_call_info (caller_sig
);
1354 CallInfo
*callee_info
= get_call_info (callee_sig
);
1356 gboolean res
= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
<= caller_info
->stack_usage
)
1357 && IS_SUPPORTED_TAILCALL (memcmp (&callee_info
->ret
, &caller_info
->ret
, sizeof (caller_info
->ret
)) == 0);
1359 // FIXME ABIs vary as to if this local is in the parameter area or not,
1360 // so this check might not be needed.
1361 for (int i
= 0; res
&& i
< callee_info
->nargs
; ++i
) {
1362 res
= IS_SUPPORTED_TAILCALL (callee_info
->args
[i
].regtype
!= RegTypeStructByAddr
);
1363 /* An address on the callee's stack is passed as the argument */
1366 g_free (caller_info
);
1367 g_free (callee_info
);
1375 * Set var information according to the calling convention. ppc version.
1376 * The locals var stuff should most likely be split in another method.
1379 mono_arch_allocate_vars (MonoCompile
*m
)
1381 MonoMethodSignature
*sig
;
1382 MonoMethodHeader
*header
;
1384 int i
, offset
, size
, align
, curinst
;
1385 int frame_reg
= ppc_sp
;
1387 guint32 locals_stack_size
, locals_stack_align
;
1389 m
->flags
|= MONO_CFG_HAS_SPILLUP
;
1391 /* this is bug #60332: remove when #59509 is fixed, so no weird vararg
1392 * call convs needs to be handled this way.
1394 if (m
->flags
& MONO_CFG_HAS_VARARGS
)
1395 m
->param_area
= MAX (m
->param_area
, sizeof (target_mgreg_t
)*8);
1396 /* gtk-sharp and other broken code will dllimport vararg functions even with
1397 * non-varargs signatures. Since there is little hope people will get this right
1398 * we assume they won't.
1400 if (m
->method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
)
1401 m
->param_area
= MAX (m
->param_area
, sizeof (target_mgreg_t
)*8);
1406 * We use the frame register also for any method that has
1407 * exception clauses. This way, when the handlers are called,
1408 * the code will reference local variables using the frame reg instead of
1409 * the stack pointer: if we had to restore the stack pointer, we'd
1410 * corrupt the method frames that are already on the stack (since
1411 * filters get called before stack unwinding happens) when the filter
1412 * code would call any method (this also applies to finally etc.).
1414 if ((m
->flags
& MONO_CFG_HAS_ALLOCA
) || header
->num_clauses
)
1415 frame_reg
= ppc_r31
;
1416 m
->frame_reg
= frame_reg
;
1417 if (frame_reg
!= ppc_sp
) {
1418 m
->used_int_regs
|= 1 << frame_reg
;
1421 sig
= mono_method_signature_internal (m
->method
);
1425 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1426 m
->ret
->opcode
= OP_REGVAR
;
1427 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_r3
;
1429 /* FIXME: handle long values? */
1430 switch (mini_get_underlying_type (sig
->ret
)->type
) {
1431 case MONO_TYPE_VOID
:
1435 m
->ret
->opcode
= OP_REGVAR
;
1436 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_f1
;
1439 m
->ret
->opcode
= OP_REGVAR
;
1440 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_r3
;
1444 /* local vars are at a positive offset from the stack pointer */
1446 * also note that if the function uses alloca, we use ppc_r31
1447 * to point at the local variables.
1449 offset
= PPC_MINIMAL_STACK_SIZE
; /* linkage area */
1450 /* align the offset to 16 bytes: not sure this is needed here */
1452 //offset &= ~(16 - 1);
1454 /* add parameter area size for called functions */
1455 offset
+= m
->param_area
;
1457 offset
&= ~(16 - 1);
1459 /* the MonoLMF structure is stored just below the stack pointer */
1460 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1461 offset
+= sizeof(gpointer
) - 1;
1462 offset
&= ~(sizeof(gpointer
) - 1);
1464 m
->vret_addr
->opcode
= OP_REGOFFSET
;
1465 m
->vret_addr
->inst_basereg
= frame_reg
;
1466 m
->vret_addr
->inst_offset
= offset
;
1468 if (G_UNLIKELY (m
->verbose_level
> 1)) {
1469 printf ("vret_addr =");
1470 mono_print_ins (m
->vret_addr
);
1473 offset
+= sizeof(gpointer
);
1476 offsets
= mono_allocate_stack_slots (m
, FALSE
, &locals_stack_size
, &locals_stack_align
);
1477 if (locals_stack_align
) {
1478 offset
+= (locals_stack_align
- 1);
1479 offset
&= ~(locals_stack_align
- 1);
1481 for (i
= m
->locals_start
; i
< m
->num_varinfo
; i
++) {
1482 if (offsets
[i
] != -1) {
1483 MonoInst
*inst
= m
->varinfo
[i
];
1484 inst
->opcode
= OP_REGOFFSET
;
1485 inst
->inst_basereg
= frame_reg
;
1486 inst
->inst_offset
= offset
+ offsets
[i
];
1488 g_print ("allocating local %d (%s) to %d\n",
1489 i, mono_type_get_name (inst->inst_vtype), inst->inst_offset);
1493 offset
+= locals_stack_size
;
1497 inst
= m
->args
[curinst
];
1498 if (inst
->opcode
!= OP_REGVAR
) {
1499 inst
->opcode
= OP_REGOFFSET
;
1500 inst
->inst_basereg
= frame_reg
;
1501 offset
+= sizeof (target_mgreg_t
) - 1;
1502 offset
&= ~(sizeof (target_mgreg_t
) - 1);
1503 inst
->inst_offset
= offset
;
1504 offset
+= sizeof (target_mgreg_t
);
1509 for (i
= 0; i
< sig
->param_count
; ++i
) {
1510 inst
= m
->args
[curinst
];
1511 if (inst
->opcode
!= OP_REGVAR
) {
1512 inst
->opcode
= OP_REGOFFSET
;
1513 inst
->inst_basereg
= frame_reg
;
1515 size
= mono_type_native_stack_size (sig
->params
[i
], (guint32
*)&align
);
1516 inst
->backend
.is_pinvoke
= 1;
1518 size
= mono_type_size (sig
->params
[i
], &align
);
1520 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]) && size
< sizeof (target_mgreg_t
))
1521 size
= align
= sizeof (target_mgreg_t
);
1523 * Use at least 4/8 byte alignment, since these might be passed in registers, and
1524 * they are saved using std in the prolog.
1526 align
= sizeof (target_mgreg_t
);
1527 offset
+= align
- 1;
1528 offset
&= ~(align
- 1);
1529 inst
->inst_offset
= offset
;
1535 /* some storage for fp conversions */
1538 m
->arch
.fp_conv_var_offset
= offset
;
1541 /* align the offset to 16 bytes */
1543 offset
&= ~(16 - 1);
1546 m
->stack_offset
= offset
;
1548 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1549 CallInfo
*cinfo
= get_call_info (m
->method
->signature
);
1551 m
->sig_cookie
= cinfo
->sig_cookie
.offset
;
1558 mono_arch_create_vars (MonoCompile
*cfg
)
1560 MonoMethodSignature
*sig
= mono_method_signature_internal (cfg
->method
);
1562 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1563 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_ARG
);
1567 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
1568 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
1572 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1574 int sig_reg
= mono_alloc_ireg (cfg
);
1576 /* FIXME: Add support for signature tokens to AOT */
1577 cfg
->disable_aot
= TRUE
;
1579 MONO_EMIT_NEW_ICONST (cfg
, sig_reg
, (gulong
)call
->signature
);
1580 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
,
1581 ppc_r1
, cinfo
->sig_cookie
.offset
, sig_reg
);
1585 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1588 MonoMethodSignature
*sig
;
1592 sig
= call
->signature
;
1593 n
= sig
->param_count
+ sig
->hasthis
;
1595 cinfo
= get_call_info (sig
);
1597 for (i
= 0; i
< n
; ++i
) {
1598 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1601 if (i
>= sig
->hasthis
)
1602 t
= sig
->params
[i
- sig
->hasthis
];
1604 t
= mono_get_int_type ();
1605 t
= mini_get_underlying_type (t
);
1607 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
))
1608 emit_sig_cookie (cfg
, call
, cinfo
);
1610 in
= call
->args
[i
];
1612 if (ainfo
->regtype
== RegTypeGeneral
) {
1613 #ifndef __mono_ppc64__
1614 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1615 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1616 ins
->dreg
= mono_alloc_ireg (cfg
);
1617 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
1618 MONO_ADD_INS (cfg
->cbb
, ins
);
1619 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1621 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1622 ins
->dreg
= mono_alloc_ireg (cfg
);
1623 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
1624 MONO_ADD_INS (cfg
->cbb
, ins
);
1625 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1629 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1630 ins
->dreg
= mono_alloc_ireg (cfg
);
1631 ins
->sreg1
= in
->dreg
;
1632 MONO_ADD_INS (cfg
->cbb
, ins
);
1634 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1636 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
1637 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1638 ins
->opcode
= OP_OUTARG_VT
;
1639 ins
->sreg1
= in
->dreg
;
1640 ins
->klass
= in
->klass
;
1641 ins
->inst_p0
= call
;
1642 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1643 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1644 MONO_ADD_INS (cfg
->cbb
, ins
);
1645 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
1646 /* this is further handled in mono_arch_emit_outarg_vt () */
1647 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1648 ins
->opcode
= OP_OUTARG_VT
;
1649 ins
->sreg1
= in
->dreg
;
1650 ins
->klass
= in
->klass
;
1651 ins
->inst_p0
= call
;
1652 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1653 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1654 MONO_ADD_INS (cfg
->cbb
, ins
);
1655 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
1656 /* this is further handled in mono_arch_emit_outarg_vt () */
1657 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1658 ins
->opcode
= OP_OUTARG_VT
;
1659 ins
->sreg1
= in
->dreg
;
1660 ins
->klass
= in
->klass
;
1661 ins
->inst_p0
= call
;
1662 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1663 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1664 MONO_ADD_INS (cfg
->cbb
, ins
);
1665 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1666 } else if (ainfo
->regtype
== RegTypeBase
) {
1667 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1669 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
1670 if (t
->type
== MONO_TYPE_R8
)
1671 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1673 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1675 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1677 } else if (ainfo
->regtype
== RegTypeFP
) {
1678 if (t
->type
== MONO_TYPE_VALUETYPE
) {
1679 /* this is further handled in mono_arch_emit_outarg_vt () */
1680 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1681 ins
->opcode
= OP_OUTARG_VT
;
1682 ins
->sreg1
= in
->dreg
;
1683 ins
->klass
= in
->klass
;
1684 ins
->inst_p0
= call
;
1685 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1686 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1687 MONO_ADD_INS (cfg
->cbb
, ins
);
1689 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1691 int dreg
= mono_alloc_freg (cfg
);
1693 if (ainfo
->size
== 4) {
1694 MONO_EMIT_NEW_UNALU (cfg
, OP_FCONV_TO_R4
, dreg
, in
->dreg
);
1696 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
1698 ins
->sreg1
= in
->dreg
;
1699 MONO_ADD_INS (cfg
->cbb
, ins
);
1702 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, TRUE
);
1703 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1706 g_assert_not_reached ();
1710 /* Emit the signature cookie in the case that there is no
1711 additional argument */
1712 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
1713 emit_sig_cookie (cfg
, call
, cinfo
);
1715 if (cinfo
->struct_ret
) {
1718 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1719 vtarg
->sreg1
= call
->vret_var
->dreg
;
1720 vtarg
->dreg
= mono_alloc_preg (cfg
);
1721 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1723 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->struct_ret
, FALSE
);
1726 call
->stack_usage
= cinfo
->stack_usage
;
1727 cfg
->param_area
= MAX (PPC_MINIMAL_PARAM_AREA_SIZE
, MAX (cfg
->param_area
, cinfo
->stack_usage
));
1728 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
1736 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1738 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1739 ArgInfo
*ainfo
= ins
->inst_p1
;
1740 int ovf_size
= ainfo
->vtsize
;
1741 int doffset
= ainfo
->offset
;
1742 int i
, soffset
, dreg
;
1744 if (ainfo
->regtype
== RegTypeStructByVal
) {
1751 * Darwin pinvokes needs some special handling for 1
1752 * and 2 byte arguments
1754 g_assert (ins
->klass
);
1755 if (call
->signature
->pinvoke
)
1756 size
= mono_class_native_size (ins
->klass
, NULL
);
1757 if (size
== 2 || size
== 1) {
1758 int tmpr
= mono_alloc_ireg (cfg
);
1760 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1762 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1763 dreg
= mono_alloc_ireg (cfg
);
1764 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, dreg
, tmpr
);
1765 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, FALSE
);
1768 for (i
= 0; i
< ainfo
->vtregs
; ++i
) {
1769 dreg
= mono_alloc_ireg (cfg
);
1770 #if G_BYTE_ORDER == G_BIG_ENDIAN
1771 int antipadding
= 0;
1774 antipadding
= sizeof (target_mgreg_t
) - ainfo
->bytes
;
1776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1778 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, dreg
, dreg
, antipadding
* 8);
1780 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1782 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
1783 soffset
+= sizeof (target_mgreg_t
);
1786 mini_emit_memcpy (cfg
, ppc_r1
, doffset
+ soffset
, src
->dreg
, soffset
, ovf_size
* sizeof (target_mgreg_t
), TARGET_SIZEOF_VOID_P
);
1787 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
1789 for (i
= 0; i
< ainfo
->vtregs
; ++i
) {
1790 int tmpr
= mono_alloc_freg (cfg
);
1791 if (ainfo
->size
== 4)
1792 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR4_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1794 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1795 dreg
= mono_alloc_freg (cfg
);
1796 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, dreg
, tmpr
);
1797 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+i
, TRUE
);
1798 soffset
+= ainfo
->size
;
1801 mini_emit_memcpy (cfg
, ppc_r1
, doffset
+ soffset
, src
->dreg
, soffset
, ovf_size
* sizeof (target_mgreg_t
), TARGET_SIZEOF_VOID_P
);
1802 } else if (ainfo
->regtype
== RegTypeFP
) {
1803 int tmpr
= mono_alloc_freg (cfg
);
1804 if (ainfo
->size
== 4)
1805 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR4_MEMBASE
, tmpr
, src
->dreg
, 0);
1807 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmpr
, src
->dreg
, 0);
1808 dreg
= mono_alloc_freg (cfg
);
1809 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, dreg
, tmpr
);
1810 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, TRUE
);
1812 MonoInst
*vtcopy
= mono_compile_create_var (cfg
, m_class_get_byval_arg (src
->klass
), OP_LOCAL
);
1816 /* FIXME: alignment? */
1817 if (call
->signature
->pinvoke
) {
1818 size
= mono_type_native_stack_size (m_class_get_byval_arg (src
->klass
), NULL
);
1819 vtcopy
->backend
.is_pinvoke
= 1;
1821 size
= mini_type_stack_size (m_class_get_byval_arg (src
->klass
), NULL
);
1824 g_assert (ovf_size
> 0);
1826 EMIT_NEW_VARLOADA (cfg
, load
, vtcopy
, vtcopy
->inst_vtype
);
1827 mini_emit_memcpy (cfg
, load
->dreg
, 0, src
->dreg
, 0, size
, TARGET_SIZEOF_VOID_P
);
1830 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, load
->dreg
);
1832 mono_call_inst_add_outarg_reg (cfg
, call
, load
->dreg
, ainfo
->reg
, FALSE
);
1837 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1839 MonoType
*ret
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
1841 #ifndef __mono_ppc64__
1842 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1845 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
1846 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
1847 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
1848 MONO_ADD_INS (cfg
->cbb
, ins
);
1852 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
1853 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1857 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1861 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
1866 #endif /* DISABLE_JIT */
1869 * Conditional branches have a small offset, so if it is likely overflowed,
1870 * we do a branch to the end of the method (uncond branches have much larger
1871 * offsets) where we perform the conditional and jump back unconditionally.
1872 * It's slightly slower, since we add two uncond branches, but it's very simple
1873 * with the current patch implementation and such large methods are likely not
1874 * going to be perf critical anyway.
1879 const char *exception
;
1886 #define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \
1887 if (0 && ins->inst_true_bb->native_offset) { \
1888 ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \
1890 int br_disp = ins->inst_true_bb->max_offset - offset; \
1891 if (!ppc_is_imm16 (br_disp + 8 * 1024) || !ppc_is_imm16 (br_disp - 8 * 1024)) { \
1892 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1893 ovfj->data.bb = ins->inst_true_bb; \
1894 ovfj->ip_offset = 0; \
1895 ovfj->b0_cond = (b0); \
1896 ovfj->b1_cond = (b1); \
1897 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \
1900 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1901 ppc_bc (code, (b0), (b1), 0); \
1905 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)])
1907 /* emit an exception if condition is fail
1909 * We assign the extra code used to throw the implicit exceptions
1910 * to cfg->bb_exit as far as the big branch handling is concerned
1912 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \
1914 int br_disp = cfg->bb_exit->max_offset - offset; \
1915 if (!ppc_is_imm16 (br_disp + 1024) || ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))) { \
1916 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1917 ovfj->data.exception = (exc_name); \
1918 ovfj->ip_offset = code - cfg->native_code; \
1919 ovfj->b0_cond = (b0); \
1920 ovfj->b1_cond = (b1); \
1921 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
1923 cfg->bb_exit->max_offset += 24; \
1925 mono_add_patch_info (cfg, code - cfg->native_code, \
1926 MONO_PATCH_INFO_EXC, exc_name); \
1927 ppc_bcl (code, (b0), (b1), 0); \
1931 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
1934 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1939 normalize_opcode (int opcode
)
1942 #ifndef MONO_ARCH_ILP32
1943 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE
, OP_LOADI8_MEMBASE
):
1944 return OP_LOAD_MEMBASE
;
1945 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX
, OP_LOADI8_MEMINDEX
):
1946 return OP_LOAD_MEMINDEX
;
1947 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG
, OP_STOREI8_MEMBASE_REG
):
1948 return OP_STORE_MEMBASE_REG
;
1949 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM
, OP_STOREI8_MEMBASE_IMM
):
1950 return OP_STORE_MEMBASE_IMM
;
1951 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX
, OP_STOREI8_MEMINDEX
):
1952 return OP_STORE_MEMINDEX
;
1954 case MONO_PPC_32_64_CASE (OP_ISHR_IMM
, OP_LSHR_IMM
):
1956 case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM
, OP_LSHR_UN_IMM
):
1957 return OP_SHR_UN_IMM
;
1964 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1966 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1968 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1969 switch (normalize_opcode (ins
->opcode
)) {
1971 /* remove unnecessary multiplication with 1 */
1972 if (ins
->inst_imm
== 1) {
1973 if (ins
->dreg
!= ins
->sreg1
) {
1974 ins
->opcode
= OP_MOVE
;
1976 MONO_DELETE_INS (bb
, ins
);
1980 int power2
= mono_is_power_of_two (ins
->inst_imm
);
1982 ins
->opcode
= OP_SHL_IMM
;
1983 ins
->inst_imm
= power2
;
1987 case OP_LOAD_MEMBASE
:
1989 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1990 * OP_LOAD_MEMBASE offset(basereg), reg
1992 if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_STORE_MEMBASE_REG
&&
1993 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1994 ins
->inst_offset
== last_ins
->inst_offset
) {
1995 if (ins
->dreg
== last_ins
->sreg1
) {
1996 MONO_DELETE_INS (bb
, ins
);
1999 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2000 ins
->opcode
= OP_MOVE
;
2001 ins
->sreg1
= last_ins
->sreg1
;
2005 * Note: reg1 must be different from the basereg in the second load
2006 * OP_LOAD_MEMBASE offset(basereg), reg1
2007 * OP_LOAD_MEMBASE offset(basereg), reg2
2009 * OP_LOAD_MEMBASE offset(basereg), reg1
2010 * OP_MOVE reg1, reg2
2012 } else if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_LOAD_MEMBASE
&&
2013 ins
->inst_basereg
!= last_ins
->dreg
&&
2014 ins
->inst_basereg
== last_ins
->inst_basereg
&&
2015 ins
->inst_offset
== last_ins
->inst_offset
) {
2017 if (ins
->dreg
== last_ins
->dreg
) {
2018 MONO_DELETE_INS (bb
, ins
);
2021 ins
->opcode
= OP_MOVE
;
2022 ins
->sreg1
= last_ins
->dreg
;
2025 //g_assert_not_reached ();
2029 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2030 * OP_LOAD_MEMBASE offset(basereg), reg
2032 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2033 * OP_ICONST reg, imm
2035 } else if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_STORE_MEMBASE_IMM
&&
2036 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2037 ins
->inst_offset
== last_ins
->inst_offset
) {
2038 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2039 ins
->opcode
= OP_ICONST
;
2040 ins
->inst_c0
= last_ins
->inst_imm
;
2041 g_assert_not_reached (); // check this rule
2045 case OP_LOADU1_MEMBASE
:
2046 case OP_LOADI1_MEMBASE
:
2047 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
2048 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2049 ins
->inst_offset
== last_ins
->inst_offset
) {
2050 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
2051 ins
->sreg1
= last_ins
->sreg1
;
2054 case OP_LOADU2_MEMBASE
:
2055 case OP_LOADI2_MEMBASE
:
2056 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
2057 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2058 ins
->inst_offset
== last_ins
->inst_offset
) {
2059 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
2060 ins
->sreg1
= last_ins
->sreg1
;
2063 #ifdef __mono_ppc64__
2064 case OP_LOADU4_MEMBASE
:
2065 case OP_LOADI4_MEMBASE
:
2066 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
) &&
2067 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2068 ins
->inst_offset
== last_ins
->inst_offset
) {
2069 ins
->opcode
= (ins
->opcode
== OP_LOADI4_MEMBASE
) ? OP_ICONV_TO_I4
: OP_ICONV_TO_U4
;
2070 ins
->sreg1
= last_ins
->sreg1
;
2075 ins
->opcode
= OP_MOVE
;
2079 if (ins
->dreg
== ins
->sreg1
) {
2080 MONO_DELETE_INS (bb
, ins
);
2084 * OP_MOVE sreg, dreg
2085 * OP_MOVE dreg, sreg
2087 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
2088 ins
->sreg1
== last_ins
->dreg
&&
2089 ins
->dreg
== last_ins
->sreg1
) {
2090 MONO_DELETE_INS (bb
, ins
);
2098 bb
->last_ins
= last_ins
;
2102 mono_arch_decompose_opts (MonoCompile
*cfg
, MonoInst
*ins
)
2104 switch (ins
->opcode
) {
2105 case OP_ICONV_TO_R_UN
: {
2106 // This value is OK as-is for both big and little endian because of how it is stored
2107 static const guint64 adjust_val
= 0x4330000000000000ULL
;
2108 int msw_reg
= mono_alloc_ireg (cfg
);
2109 int adj_reg
= mono_alloc_freg (cfg
);
2110 int tmp_reg
= mono_alloc_freg (cfg
);
2111 int basereg
= ppc_sp
;
2113 MONO_EMIT_NEW_ICONST (cfg
, msw_reg
, 0x43300000);
2114 if (!ppc_is_imm16 (offset
+ 4)) {
2115 basereg
= mono_alloc_ireg (cfg
);
2116 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2118 #if G_BYTE_ORDER == G_BIG_ENDIAN
2119 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, msw_reg
);
2120 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, ins
->sreg1
);
2122 // For little endian the words are reversed
2123 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, msw_reg
);
2124 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, ins
->sreg1
);
2126 MONO_EMIT_NEW_LOAD_R8 (cfg
, adj_reg
, &adjust_val
);
2127 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmp_reg
, basereg
, offset
);
2128 MONO_EMIT_NEW_BIALU (cfg
, OP_FSUB
, ins
->dreg
, tmp_reg
, adj_reg
);
2129 ins
->opcode
= OP_NOP
;
2132 #ifndef __mono_ppc64__
2133 case OP_ICONV_TO_R4
:
2134 case OP_ICONV_TO_R8
: {
2135 /* If we have a PPC_FEATURE_64 machine we can avoid
2136 this and use the fcfid instruction. Otherwise
2137 on an old 32-bit chip and we have to do this the
2139 if (!(cpu_hw_caps
& PPC_ISA_64
)) {
2140 /* FIXME: change precision for CEE_CONV_R4 */
2141 static const guint64 adjust_val
= 0x4330000080000000ULL
;
2142 int msw_reg
= mono_alloc_ireg (cfg
);
2143 int xored
= mono_alloc_ireg (cfg
);
2144 int adj_reg
= mono_alloc_freg (cfg
);
2145 int tmp_reg
= mono_alloc_freg (cfg
);
2146 int basereg
= ppc_sp
;
2148 if (!ppc_is_imm16 (offset
+ 4)) {
2149 basereg
= mono_alloc_ireg (cfg
);
2150 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2152 MONO_EMIT_NEW_ICONST (cfg
, msw_reg
, 0x43300000);
2153 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, msw_reg
);
2154 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_XOR_IMM
, xored
, ins
->sreg1
, 0x80000000);
2155 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, xored
);
2156 MONO_EMIT_NEW_LOAD_R8 (cfg
, adj_reg
, (gpointer
)&adjust_val
);
2157 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmp_reg
, basereg
, offset
);
2158 MONO_EMIT_NEW_BIALU (cfg
, OP_FSUB
, ins
->dreg
, tmp_reg
, adj_reg
);
2159 if (ins
->opcode
== OP_ICONV_TO_R4
)
2160 MONO_EMIT_NEW_UNALU (cfg
, OP_FCONV_TO_R4
, ins
->dreg
, ins
->dreg
);
2161 ins
->opcode
= OP_NOP
;
2167 int msw_reg
= mono_alloc_ireg (cfg
);
2168 int basereg
= ppc_sp
;
2170 if (!ppc_is_imm16 (offset
+ 4)) {
2171 basereg
= mono_alloc_ireg (cfg
);
2172 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2174 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, basereg
, offset
, ins
->sreg1
);
2175 #if G_BYTE_ORDER == G_BIG_ENDIAN
2176 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, msw_reg
, basereg
, offset
);
2178 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, msw_reg
, basereg
, offset
+4);
2180 MONO_EMIT_NEW_UNALU (cfg
, OP_PPC_CHECK_FINITE
, -1, msw_reg
);
2181 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, ins
->dreg
, ins
->sreg1
);
2182 ins
->opcode
= OP_NOP
;
2185 #ifdef __mono_ppc64__
2187 case OP_IADD_OVF_UN
:
2189 int shifted1_reg
= mono_alloc_ireg (cfg
);
2190 int shifted2_reg
= mono_alloc_ireg (cfg
);
2191 int result_shifted_reg
= mono_alloc_ireg (cfg
);
2193 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, shifted1_reg
, ins
->sreg1
, 32);
2194 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, shifted2_reg
, ins
->sreg2
, 32);
2195 MONO_EMIT_NEW_BIALU (cfg
, ins
->opcode
, result_shifted_reg
, shifted1_reg
, shifted2_reg
);
2196 if (ins
->opcode
== OP_IADD_OVF_UN
)
2197 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, ins
->dreg
, result_shifted_reg
, 32);
2199 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_IMM
, ins
->dreg
, result_shifted_reg
, 32);
2200 ins
->opcode
= OP_NOP
;
2210 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*ins
)
2212 switch (ins
->opcode
) {
2214 /* ADC sets the condition code */
2215 MONO_EMIT_NEW_BIALU (cfg
, OP_ADDCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2216 MONO_EMIT_NEW_BIALU (cfg
, OP_ADD_OVF_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2219 case OP_LADD_OVF_UN
:
2220 /* ADC sets the condition code */
2221 MONO_EMIT_NEW_BIALU (cfg
, OP_ADDCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2222 MONO_EMIT_NEW_BIALU (cfg
, OP_ADD_OVF_UN_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2226 /* SBB sets the condition code */
2227 MONO_EMIT_NEW_BIALU (cfg
, OP_SUBCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2228 MONO_EMIT_NEW_BIALU (cfg
, OP_SUB_OVF_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2231 case OP_LSUB_OVF_UN
:
2232 /* SBB sets the condition code */
2233 MONO_EMIT_NEW_BIALU (cfg
, OP_SUBCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2234 MONO_EMIT_NEW_BIALU (cfg
, OP_SUB_OVF_UN_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2238 /* From gcc generated code */
2239 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PPC_SUBFIC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
2240 MONO_EMIT_NEW_UNALU (cfg
, OP_PPC_SUBFZE
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
));
2249 * the branch_b0_table should maintain the order of these
2263 branch_b0_table
[] = {
2278 branch_b1_table
[] = {
2292 #define NEW_INS(cfg,dest,op) do { \
2293 MONO_INST_NEW((cfg), (dest), (op)); \
2294 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
2298 map_to_reg_reg_op (int op
)
2307 case OP_COMPARE_IMM
:
2309 case OP_ICOMPARE_IMM
:
2311 case OP_LCOMPARE_IMM
:
2329 case OP_LOAD_MEMBASE
:
2330 return OP_LOAD_MEMINDEX
;
2331 case OP_LOADI4_MEMBASE
:
2332 return OP_LOADI4_MEMINDEX
;
2333 case OP_LOADU4_MEMBASE
:
2334 return OP_LOADU4_MEMINDEX
;
2335 case OP_LOADI8_MEMBASE
:
2336 return OP_LOADI8_MEMINDEX
;
2337 case OP_LOADU1_MEMBASE
:
2338 return OP_LOADU1_MEMINDEX
;
2339 case OP_LOADI2_MEMBASE
:
2340 return OP_LOADI2_MEMINDEX
;
2341 case OP_LOADU2_MEMBASE
:
2342 return OP_LOADU2_MEMINDEX
;
2343 case OP_LOADI1_MEMBASE
:
2344 return OP_LOADI1_MEMINDEX
;
2345 case OP_LOADR4_MEMBASE
:
2346 return OP_LOADR4_MEMINDEX
;
2347 case OP_LOADR8_MEMBASE
:
2348 return OP_LOADR8_MEMINDEX
;
2349 case OP_STOREI1_MEMBASE_REG
:
2350 return OP_STOREI1_MEMINDEX
;
2351 case OP_STOREI2_MEMBASE_REG
:
2352 return OP_STOREI2_MEMINDEX
;
2353 case OP_STOREI4_MEMBASE_REG
:
2354 return OP_STOREI4_MEMINDEX
;
2355 case OP_STOREI8_MEMBASE_REG
:
2356 return OP_STOREI8_MEMINDEX
;
2357 case OP_STORE_MEMBASE_REG
:
2358 return OP_STORE_MEMINDEX
;
2359 case OP_STORER4_MEMBASE_REG
:
2360 return OP_STORER4_MEMINDEX
;
2361 case OP_STORER8_MEMBASE_REG
:
2362 return OP_STORER8_MEMINDEX
;
2363 case OP_STORE_MEMBASE_IMM
:
2364 return OP_STORE_MEMBASE_REG
;
2365 case OP_STOREI1_MEMBASE_IMM
:
2366 return OP_STOREI1_MEMBASE_REG
;
2367 case OP_STOREI2_MEMBASE_IMM
:
2368 return OP_STOREI2_MEMBASE_REG
;
2369 case OP_STOREI4_MEMBASE_IMM
:
2370 return OP_STOREI4_MEMBASE_REG
;
2371 case OP_STOREI8_MEMBASE_IMM
:
2372 return OP_STOREI8_MEMBASE_REG
;
2374 if (mono_op_imm_to_op (op
) == -1)
2375 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op
));
2376 return mono_op_imm_to_op (op
);
2379 //#define map_to_reg_reg_op(op) (cfg->new_ir? mono_op_imm_to_op (op): map_to_reg_reg_op (op))
/*
 * True when `opcode` is one of the unsigned compare/branch/set-condition
 * opcodes, so the code generator must emit a logical (unsigned) comparison
 * instead of an arithmetic (signed) one.
 */
#define compare_opcode_is_unsigned(opcode) \
		(((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) ||	\
		((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) ||	\
		((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) ||	\
		((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) ||	\
		((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) ||	\
		((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN ||	\
		(opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN ||	\
		(opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN))
2392 * Remove from the instruction list the instructions that can't be
2393 * represented with very simple instructions with no register
2397 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2399 MonoInst
*ins
, *next
, *temp
, *last_ins
= NULL
;
2402 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2404 switch (ins
->opcode
) {
2405 case OP_IDIV_UN_IMM
:
2408 case OP_IREM_UN_IMM
:
2409 CASE_PPC64 (OP_LREM_IMM
) {
2410 NEW_INS (cfg
, temp
, OP_ICONST
);
2411 temp
->inst_c0
= ins
->inst_imm
;
2412 temp
->dreg
= mono_alloc_ireg (cfg
);
2413 ins
->sreg2
= temp
->dreg
;
2414 if (ins
->opcode
== OP_IDIV_IMM
)
2415 ins
->opcode
= OP_IDIV
;
2416 else if (ins
->opcode
== OP_IREM_IMM
)
2417 ins
->opcode
= OP_IREM
;
2418 else if (ins
->opcode
== OP_IDIV_UN_IMM
)
2419 ins
->opcode
= OP_IDIV_UN
;
2420 else if (ins
->opcode
== OP_IREM_UN_IMM
)
2421 ins
->opcode
= OP_IREM_UN
;
2422 else if (ins
->opcode
== OP_LREM_IMM
)
2423 ins
->opcode
= OP_LREM
;
2425 /* handle rem separately */
2430 CASE_PPC64 (OP_LREM
)
2431 CASE_PPC64 (OP_LREM_UN
) {
2433 /* we change a rem dest, src1, src2 to
2434 * div temp1, src1, src2
2435 * mul temp2, temp1, src2
2436 * sub dest, src1, temp2
2438 if (ins
->opcode
== OP_IREM
|| ins
->opcode
== OP_IREM_UN
) {
2439 NEW_INS (cfg
, mul
, OP_IMUL
);
2440 NEW_INS (cfg
, temp
, ins
->opcode
== OP_IREM
? OP_IDIV
: OP_IDIV_UN
);
2441 ins
->opcode
= OP_ISUB
;
2443 NEW_INS (cfg
, mul
, OP_LMUL
);
2444 NEW_INS (cfg
, temp
, ins
->opcode
== OP_LREM
? OP_LDIV
: OP_LDIV_UN
);
2445 ins
->opcode
= OP_LSUB
;
2447 temp
->sreg1
= ins
->sreg1
;
2448 temp
->sreg2
= ins
->sreg2
;
2449 temp
->dreg
= mono_alloc_ireg (cfg
);
2450 mul
->sreg1
= temp
->dreg
;
2451 mul
->sreg2
= ins
->sreg2
;
2452 mul
->dreg
= mono_alloc_ireg (cfg
);
2453 ins
->sreg2
= mul
->dreg
;
2457 CASE_PPC64 (OP_LADD_IMM
)
2460 if (!ppc_is_imm16 (ins
->inst_imm
)) {
2461 NEW_INS (cfg
, temp
, OP_ICONST
);
2462 temp
->inst_c0
= ins
->inst_imm
;
2463 temp
->dreg
= mono_alloc_ireg (cfg
);
2464 ins
->sreg2
= temp
->dreg
;
2465 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2469 CASE_PPC64 (OP_LSUB_IMM
)
2471 if (!ppc_is_imm16 (-ins
->inst_imm
)) {
2472 NEW_INS (cfg
, temp
, OP_ICONST
);
2473 temp
->inst_c0
= ins
->inst_imm
;
2474 temp
->dreg
= mono_alloc_ireg (cfg
);
2475 ins
->sreg2
= temp
->dreg
;
2476 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2488 gboolean is_imm
= ((ins
->inst_imm
& 0xffff0000) && (ins
->inst_imm
& 0xffff));
2489 #ifdef __mono_ppc64__
2490 if (ins
->inst_imm
& 0xffffffff00000000ULL
)
2494 NEW_INS (cfg
, temp
, OP_ICONST
);
2495 temp
->inst_c0
= ins
->inst_imm
;
2496 temp
->dreg
= mono_alloc_ireg (cfg
);
2497 ins
->sreg2
= temp
->dreg
;
2498 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2507 NEW_INS (cfg
, temp
, OP_ICONST
);
2508 temp
->inst_c0
= ins
->inst_imm
;
2509 temp
->dreg
= mono_alloc_ireg (cfg
);
2510 ins
->sreg2
= temp
->dreg
;
2511 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2513 case OP_COMPARE_IMM
:
2514 case OP_ICOMPARE_IMM
:
2515 CASE_PPC64 (OP_LCOMPARE_IMM
)
2517 /* Branch opts can eliminate the branch */
2518 if (!next
|| (!(MONO_IS_COND_BRANCH_OP (next
) || MONO_IS_COND_EXC (next
) || MONO_IS_SETCC (next
)))) {
2519 ins
->opcode
= OP_NOP
;
2523 if (compare_opcode_is_unsigned (next
->opcode
)) {
2524 if (!ppc_is_uimm16 (ins
->inst_imm
)) {
2525 NEW_INS (cfg
, temp
, OP_ICONST
);
2526 temp
->inst_c0
= ins
->inst_imm
;
2527 temp
->dreg
= mono_alloc_ireg (cfg
);
2528 ins
->sreg2
= temp
->dreg
;
2529 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2532 if (!ppc_is_imm16 (ins
->inst_imm
)) {
2533 NEW_INS (cfg
, temp
, OP_ICONST
);
2534 temp
->inst_c0
= ins
->inst_imm
;
2535 temp
->dreg
= mono_alloc_ireg (cfg
);
2536 ins
->sreg2
= temp
->dreg
;
2537 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2543 CASE_PPC64 (OP_LMUL_IMM
)
2544 if (ins
->inst_imm
== 1) {
2545 ins
->opcode
= OP_MOVE
;
2548 if (ins
->inst_imm
== 0) {
2549 ins
->opcode
= OP_ICONST
;
2553 imm
= mono_is_power_of_two (ins
->inst_imm
);
2555 ins
->opcode
= OP_SHL_IMM
;
2556 ins
->inst_imm
= imm
;
2559 if (!ppc_is_imm16 (ins
->inst_imm
)) {
2560 NEW_INS (cfg
, temp
, OP_ICONST
);
2561 temp
->inst_c0
= ins
->inst_imm
;
2562 temp
->dreg
= mono_alloc_ireg (cfg
);
2563 ins
->sreg2
= temp
->dreg
;
2564 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2567 case OP_LOCALLOC_IMM
:
2568 NEW_INS (cfg
, temp
, OP_ICONST
);
2569 temp
->inst_c0
= ins
->inst_imm
;
2570 temp
->dreg
= mono_alloc_ireg (cfg
);
2571 ins
->sreg1
= temp
->dreg
;
2572 ins
->opcode
= OP_LOCALLOC
;
2574 case OP_LOAD_MEMBASE
:
2575 case OP_LOADI4_MEMBASE
:
2576 CASE_PPC64 (OP_LOADI8_MEMBASE
)
2577 case OP_LOADU4_MEMBASE
:
2578 case OP_LOADI2_MEMBASE
:
2579 case OP_LOADU2_MEMBASE
:
2580 case OP_LOADI1_MEMBASE
:
2581 case OP_LOADU1_MEMBASE
:
2582 case OP_LOADR4_MEMBASE
:
2583 case OP_LOADR8_MEMBASE
:
2584 case OP_STORE_MEMBASE_REG
:
2585 CASE_PPC64 (OP_STOREI8_MEMBASE_REG
)
2586 case OP_STOREI4_MEMBASE_REG
:
2587 case OP_STOREI2_MEMBASE_REG
:
2588 case OP_STOREI1_MEMBASE_REG
:
2589 case OP_STORER4_MEMBASE_REG
:
2590 case OP_STORER8_MEMBASE_REG
:
2591 /* we can do two things: load the immed in a register
2592 * and use an indexed load, or see if the immed can be
2593 * represented as an ad_imm + a load with a smaller offset
2594 * that fits. We just do the first for now, optimize later.
2596 if (ppc_is_imm16 (ins
->inst_offset
))
2598 NEW_INS (cfg
, temp
, OP_ICONST
);
2599 temp
->inst_c0
= ins
->inst_offset
;
2600 temp
->dreg
= mono_alloc_ireg (cfg
);
2601 ins
->sreg2
= temp
->dreg
;
2602 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2604 case OP_STORE_MEMBASE_IMM
:
2605 case OP_STOREI1_MEMBASE_IMM
:
2606 case OP_STOREI2_MEMBASE_IMM
:
2607 case OP_STOREI4_MEMBASE_IMM
:
2608 CASE_PPC64 (OP_STOREI8_MEMBASE_IMM
)
2609 NEW_INS (cfg
, temp
, OP_ICONST
);
2610 temp
->inst_c0
= ins
->inst_imm
;
2611 temp
->dreg
= mono_alloc_ireg (cfg
);
2612 ins
->sreg1
= temp
->dreg
;
2613 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2615 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
2618 if (cfg
->compile_aot
) {
2619 /* Keep these in the aot case */
2622 NEW_INS (cfg
, temp
, OP_ICONST
);
2623 temp
->inst_c0
= (gulong
)ins
->inst_p0
;
2624 temp
->dreg
= mono_alloc_ireg (cfg
);
2625 ins
->inst_basereg
= temp
->dreg
;
2626 ins
->inst_offset
= 0;
2627 ins
->opcode
= ins
->opcode
== OP_R4CONST
? OP_LOADR4_MEMBASE
: OP_LOADR8_MEMBASE
;
2629 /* make it handle the possibly big ins->inst_offset
2630 * later optimize to use lis + load_membase
2636 bb
->last_ins
= last_ins
;
2637 bb
->max_vreg
= cfg
->next_vreg
;
2641 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
2643 long offset
= cfg
->arch
.fp_conv_var_offset
;
2645 /* sreg is a float, dreg is an integer reg. ppc_f0 is used a scratch */
2646 #ifdef __mono_ppc64__
2648 ppc_fctidz (code
, ppc_f0
, sreg
);
2653 ppc_fctiwz (code
, ppc_f0
, sreg
);
2656 if (ppc_is_imm16 (offset
+ sub_offset
)) {
2657 ppc_stfd (code
, ppc_f0
, offset
, cfg
->frame_reg
);
2659 ppc_ldr (code
, dreg
, offset
+ sub_offset
, cfg
->frame_reg
);
2661 ppc_lwz (code
, dreg
, offset
+ sub_offset
, cfg
->frame_reg
);
2663 ppc_load (code
, dreg
, offset
);
2664 ppc_add (code
, dreg
, dreg
, cfg
->frame_reg
);
2665 ppc_stfd (code
, ppc_f0
, 0, dreg
);
2667 ppc_ldr (code
, dreg
, sub_offset
, dreg
);
2669 ppc_lwz (code
, dreg
, sub_offset
, dreg
);
2673 ppc_andid (code
, dreg
, dreg
, 0xff);
2675 ppc_andid (code
, dreg
, dreg
, 0xffff);
2676 #ifdef __mono_ppc64__
2678 ppc_clrldi (code
, dreg
, dreg
, 32);
2682 ppc_extsb (code
, dreg
, dreg
);
2684 ppc_extsh (code
, dreg
, dreg
);
2685 #ifdef __mono_ppc64__
2687 ppc_extsw (code
, dreg
, dreg
);
2694 emit_thunk (guint8
*code
, gconstpointer target
)
2698 /* 2 bytes on 32bit, 5 bytes on 64bit */
2699 ppc_load_sequence (code
, ppc_r0
, target
);
2701 ppc_mtctr (code
, ppc_r0
);
2702 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
2704 mono_arch_flush_icache (p
, code
- p
);
2708 handle_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
2710 MonoJitInfo
*ji
= NULL
;
2711 MonoThunkJitInfo
*info
;
2714 guint8
*orig_target
;
2715 guint8
*target_thunk
;
2718 domain
= mono_domain_get ();
2722 * This can be called multiple times during JITting,
2723 * save the current position in cfg->arch to avoid
2724 * doing a O(n^2) search.
2726 if (!cfg
->arch
.thunks
) {
2727 cfg
->arch
.thunks
= cfg
->thunks
;
2728 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
2730 thunks
= cfg
->arch
.thunks
;
2731 thunks_size
= cfg
->arch
.thunks_size
;
2733 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
2734 g_assert_not_reached ();
2737 g_assert (*(guint32
*)thunks
== 0);
2738 emit_thunk (thunks
, target
);
2739 ppc_patch (code
, thunks
);
2741 cfg
->arch
.thunks
+= THUNK_SIZE
;
2742 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
2744 ji
= mini_jit_info_table_find (domain
, (char *) code
, NULL
);
2746 info
= mono_jit_info_get_thunk_info (ji
);
2749 thunks
= (guint8
*) ji
->code_start
+ info
->thunks_offset
;
2750 thunks_size
= info
->thunks_size
;
2752 orig_target
= mono_arch_get_call_target (code
+ 4);
2754 mono_mini_arch_lock ();
2756 target_thunk
= NULL
;
2757 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
2758 /* The call already points to a thunk, because of trampolines etc. */
2759 target_thunk
= orig_target
;
2761 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
2762 if (((guint32
*) p
) [0] == 0) {
2767 /* ppc64 requires 5 instructions, 32bit two instructions */
2768 #ifdef __mono_ppc64__
2769 const int const_load_size
= 5;
2771 const int const_load_size
= 2;
2773 guint32 load
[const_load_size
];
2774 guchar
*templ
= (guchar
*) load
;
2775 ppc_load_sequence (templ
, ppc_r0
, target
);
2776 if (!memcmp (p
, load
, const_load_size
)) {
2777 /* Thunk already points to target */
2785 // g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
2787 if (!target_thunk
) {
2788 mono_mini_arch_unlock ();
2789 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
2790 g_assert_not_reached ();
2793 emit_thunk (target_thunk
, target
);
2794 ppc_patch (code
, target_thunk
);
2796 mono_mini_arch_unlock ();
2801 patch_ins (guint8
*code
, guint32 ins
)
2803 *(guint32
*)code
= ins
;
2804 mono_arch_flush_icache (code
, 4);
2808 ppc_patch_full (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
, gboolean is_fd
)
2810 guint32 ins
= *(guint32
*)code
;
2811 guint32 prim
= ins
>> 26;
2814 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2816 // prefer relative branches, they are more position independent (e.g. for AOT compilation).
2817 gint diff
= target
- code
;
2820 if (diff
<= 33554431){
2821 ins
= (18 << 26) | (diff
) | (ins
& 1);
2822 patch_ins (code
, ins
);
2826 /* diff between 0 and -33554432 */
2827 if (diff
>= -33554432){
2828 ins
= (18 << 26) | (diff
& ~0xfc000000) | (ins
& 1);
2829 patch_ins (code
, ins
);
2834 if ((glong
)target
>= 0){
2835 if ((glong
)target
<= 33554431){
2836 ins
= (18 << 26) | ((gulong
) target
) | (ins
& 1) | 2;
2837 patch_ins (code
, ins
);
2841 if ((glong
)target
>= -33554432){
2842 ins
= (18 << 26) | (((gulong
)target
) & ~0xfc000000) | (ins
& 1) | 2;
2843 patch_ins (code
, ins
);
2848 handle_thunk (cfg
, domain
, code
, target
);
2851 g_assert_not_reached ();
2859 guint32 li
= (gulong
)target
;
2860 ins
= (ins
& 0xffff0000) | (ins
& 3);
2861 ovf
= li
& 0xffff0000;
2862 if (ovf
!= 0 && ovf
!= 0xffff0000)
2863 g_assert_not_reached ();
2866 // FIXME: assert the top bits of li are 0
2868 gint diff
= target
- code
;
2869 ins
= (ins
& 0xffff0000) | (ins
& 3);
2870 ovf
= diff
& 0xffff0000;
2871 if (ovf
!= 0 && ovf
!= 0xffff0000)
2872 g_assert_not_reached ();
2876 patch_ins (code
, ins
);
2880 if (prim
== 15 || ins
== 0x4e800021 || ins
== 0x4e800020 || ins
== 0x4e800420) {
2881 #ifdef __mono_ppc64__
2882 guint32
*seq
= (guint32
*)code
;
2883 guint32
*branch_ins
;
2885 /* the trampoline code will try to patch the blrl, blr, bcctr */
2886 if (ins
== 0x4e800021 || ins
== 0x4e800020 || ins
== 0x4e800420) {
2888 if (ppc_is_load_op (seq
[-3]) || ppc_opcode (seq
[-3]) == 31) /* ld || lwz || mr */
2893 if (ppc_is_load_op (seq
[5])
2894 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
2895 /* With function descs we need to do more careful
2897 || ppc_opcode (seq
[5]) == 31 /* ld || lwz || mr */
2900 branch_ins
= seq
+ 8;
2902 branch_ins
= seq
+ 6;
2905 seq
= (guint32
*)code
;
2906 /* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */
2907 g_assert (mono_ppc_is_direct_call_sequence (branch_ins
));
2909 if (ppc_is_load_op (seq
[5])) {
2910 g_assert (ppc_is_load_op (seq
[6]));
2913 guint8
*buf
= (guint8
*)&seq
[5];
2914 ppc_mr (buf
, PPC_CALL_REG
, ppc_r12
);
2919 target
= mono_get_addr_from_ftnptr ((gpointer
)target
);
2922 /* FIXME: make this thread safe */
2923 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
2924 /* FIXME: we're assuming we're using r12 here */
2925 ppc_load_ptr_sequence (code
, ppc_r12
, target
);
2927 ppc_load_ptr_sequence (code
, PPC_CALL_REG
, target
);
2929 mono_arch_flush_icache ((guint8
*)seq
, 28);
2932 /* the trampoline code will try to patch the blrl, blr, bcctr */
2933 if (ins
== 0x4e800021 || ins
== 0x4e800020 || ins
== 0x4e800420) {
2936 /* this is the lis/ori/mtlr/blrl sequence */
2937 seq
= (guint32
*)code
;
2938 g_assert ((seq
[0] >> 26) == 15);
2939 g_assert ((seq
[1] >> 26) == 24);
2940 g_assert ((seq
[2] >> 26) == 31);
2941 g_assert (seq
[3] == 0x4e800021 || seq
[3] == 0x4e800020 || seq
[3] == 0x4e800420);
2942 /* FIXME: make this thread safe */
2943 ppc_lis (code
, PPC_CALL_REG
, (guint32
)(target
) >> 16);
2944 ppc_ori (code
, PPC_CALL_REG
, PPC_CALL_REG
, (guint32
)(target
) & 0xffff);
2945 mono_arch_flush_icache (code
- 8, 8);
2948 g_assert_not_reached ();
2950 // g_print ("patched with 0x%08x\n", ins);
/*
 * ppc_patch:
 *
 *   Convenience wrapper around ppc_patch_full () for call sites that have
 * no MonoCompile/MonoDomain context available and are not patching a
 * function descriptor (is_fd == FALSE).
 */
static void
ppc_patch (guchar *code, const guchar *target)
{
	ppc_patch_full (NULL, NULL, code, target, FALSE);
}
/*
 * mono_ppc_patch:
 *
 *   Externally visible entry point for patching a branch/call site at CODE
 * to point at TARGET; simply forwards to ppc_patch ().
 */
void
mono_ppc_patch (guchar *code, const guchar *target)
{
	ppc_patch (code, target);
}
2966 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
2968 switch (ins
->opcode
) {
2971 case OP_FCALL_MEMBASE
:
2972 if (ins
->dreg
!= ppc_f1
)
2973 ppc_fmr (code
, ins
->dreg
, ppc_f1
);
2981 emit_reserve_param_area (MonoCompile
*cfg
, guint8
*code
)
2983 long size
= cfg
->param_area
;
2985 size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
2986 size
&= -MONO_ARCH_FRAME_ALIGNMENT
;
2991 ppc_ldptr (code
, ppc_r0
, 0, ppc_sp
);
2992 if (ppc_is_imm16 (-size
)) {
2993 ppc_stptr_update (code
, ppc_r0
, -size
, ppc_sp
);
2995 ppc_load (code
, ppc_r12
, -size
);
2996 ppc_stptr_update_indexed (code
, ppc_r0
, ppc_sp
, ppc_r12
);
3003 emit_unreserve_param_area (MonoCompile
*cfg
, guint8
*code
)
3005 long size
= cfg
->param_area
;
3007 size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
3008 size
&= -MONO_ARCH_FRAME_ALIGNMENT
;
3013 ppc_ldptr (code
, ppc_r0
, 0, ppc_sp
);
3014 if (ppc_is_imm16 (size
)) {
3015 ppc_stptr_update (code
, ppc_r0
, size
, ppc_sp
);
3017 ppc_load (code
, ppc_r12
, size
);
3018 ppc_stptr_update_indexed (code
, ppc_r0
, ppc_sp
, ppc_r12
);
3024 #define MASK_SHIFT_IMM(i) ((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f))
3028 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3030 MonoInst
*ins
, *next
;
3032 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
3033 MonoInst
*last_ins
= NULL
;
3037 /* we don't align basic blocks of loops on ppc */
3039 if (cfg
->verbose_level
> 2)
3040 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
3042 cpos
= bb
->max_offset
;
3044 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3045 const guint offset
= code
- cfg
->native_code
;
3046 set_code_cursor (cfg
, code
);
3047 max_len
= ins_get_size (ins
->opcode
);
3048 code
= realloc_code (cfg
, max_len
);
3049 // if (ins->cil_code)
3050 // g_print ("cil code\n");
3051 mono_debug_record_line_number (cfg
, ins
, offset
);
3053 switch (normalize_opcode (ins
->opcode
)) {
3054 case OP_RELAXED_NOP
:
3057 case OP_DUMMY_ICONST
:
3058 case OP_DUMMY_I8CONST
:
3059 case OP_DUMMY_R8CONST
:
3060 case OP_DUMMY_R4CONST
:
3061 case OP_NOT_REACHED
:
3064 case OP_IL_SEQ_POINT
:
3065 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3067 case OP_SEQ_POINT
: {
3070 if (cfg
->compile_aot
)
3074 * Read from the single stepping trigger page. This will cause a
3075 * SIGSEGV when single stepping is enabled.
3076 * We do this _before_ the breakpoint, so single stepping after
3077 * a breakpoint is hit will step to the next IL offset.
3079 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
3080 ppc_load (code
, ppc_r12
, (gsize
)ss_trigger_page
);
3081 ppc_ldptr (code
, ppc_r12
, 0, ppc_r12
);
3084 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3087 * A placeholder for a possible breakpoint inserted by
3088 * mono_arch_set_breakpoint ().
3090 for (i
= 0; i
< BREAKPOINT_SIZE
/ 4; ++i
)
3095 ppc_mullw (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3096 ppc_mulhw (code
, ppc_r3
, ins
->sreg1
, ins
->sreg2
);
3097 ppc_mr (code
, ppc_r4
, ppc_r0
);
3100 ppc_mullw (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3101 ppc_mulhwu (code
, ppc_r3
, ins
->sreg1
, ins
->sreg2
);
3102 ppc_mr (code
, ppc_r4
, ppc_r0
);
3104 case OP_MEMORY_BARRIER
:
3107 case OP_STOREI1_MEMBASE_REG
:
3108 if (ppc_is_imm16 (ins
->inst_offset
)) {
3109 ppc_stb (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3111 if (ppc_is_imm32 (ins
->inst_offset
)) {
3112 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3113 ppc_stb (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3115 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3116 ppc_stbx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3120 case OP_STOREI2_MEMBASE_REG
:
3121 if (ppc_is_imm16 (ins
->inst_offset
)) {
3122 ppc_sth (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3124 if (ppc_is_imm32 (ins
->inst_offset
)) {
3125 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3126 ppc_sth (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3128 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3129 ppc_sthx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3133 case OP_STORE_MEMBASE_REG
:
3134 if (ppc_is_imm16 (ins
->inst_offset
)) {
3135 ppc_stptr (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3137 if (ppc_is_imm32 (ins
->inst_offset
)) {
3138 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3139 ppc_stptr (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3141 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3142 ppc_stptr_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3146 #ifdef MONO_ARCH_ILP32
3147 case OP_STOREI8_MEMBASE_REG
:
3148 if (ppc_is_imm16 (ins
->inst_offset
)) {
3149 ppc_str (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3151 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3152 ppc_str_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3156 case OP_STOREI1_MEMINDEX
:
3157 ppc_stbx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3159 case OP_STOREI2_MEMINDEX
:
3160 ppc_sthx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3162 case OP_STORE_MEMINDEX
:
3163 ppc_stptr_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3166 g_assert_not_reached ();
3168 case OP_LOAD_MEMBASE
:
3169 if (ppc_is_imm16 (ins
->inst_offset
)) {
3170 ppc_ldptr (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3172 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3173 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3174 ppc_ldptr (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3176 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3177 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3181 case OP_LOADI4_MEMBASE
:
3182 #ifdef __mono_ppc64__
3183 if (ppc_is_imm16 (ins
->inst_offset
)) {
3184 ppc_lwa (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3186 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3187 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3188 ppc_lwa (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3190 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3191 ppc_lwax (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3196 case OP_LOADU4_MEMBASE
:
3197 if (ppc_is_imm16 (ins
->inst_offset
)) {
3198 ppc_lwz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3200 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3201 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3202 ppc_lwz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3204 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3205 ppc_lwzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3209 case OP_LOADI1_MEMBASE
:
3210 case OP_LOADU1_MEMBASE
:
3211 if (ppc_is_imm16 (ins
->inst_offset
)) {
3212 ppc_lbz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3214 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3215 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3216 ppc_lbz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3218 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3219 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3222 if (ins
->opcode
== OP_LOADI1_MEMBASE
)
3223 ppc_extsb (code
, ins
->dreg
, ins
->dreg
);
3225 case OP_LOADU2_MEMBASE
:
3226 if (ppc_is_imm16 (ins
->inst_offset
)) {
3227 ppc_lhz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3229 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3230 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3231 ppc_lhz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3233 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3234 ppc_lhzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3238 case OP_LOADI2_MEMBASE
:
3239 if (ppc_is_imm16 (ins
->inst_offset
)) {
3240 ppc_lha (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3242 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3243 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3244 ppc_lha (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3246 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3247 ppc_lhax (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3251 #ifdef MONO_ARCH_ILP32
3252 case OP_LOADI8_MEMBASE
:
3253 if (ppc_is_imm16 (ins
->inst_offset
)) {
3254 ppc_ldr (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3256 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3257 ppc_ldr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3261 case OP_LOAD_MEMINDEX
:
3262 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3264 case OP_LOADI4_MEMINDEX
:
3265 #ifdef __mono_ppc64__
3266 ppc_lwax (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3269 case OP_LOADU4_MEMINDEX
:
3270 ppc_lwzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3272 case OP_LOADU2_MEMINDEX
:
3273 ppc_lhzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3275 case OP_LOADI2_MEMINDEX
:
3276 ppc_lhax (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3278 case OP_LOADU1_MEMINDEX
:
3279 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3281 case OP_LOADI1_MEMINDEX
:
3282 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3283 ppc_extsb (code
, ins
->dreg
, ins
->dreg
);
3285 case OP_ICONV_TO_I1
:
3286 CASE_PPC64 (OP_LCONV_TO_I1
)
3287 ppc_extsb (code
, ins
->dreg
, ins
->sreg1
);
3289 case OP_ICONV_TO_I2
:
3290 CASE_PPC64 (OP_LCONV_TO_I2
)
3291 ppc_extsh (code
, ins
->dreg
, ins
->sreg1
);
3293 case OP_ICONV_TO_U1
:
3294 CASE_PPC64 (OP_LCONV_TO_U1
)
3295 ppc_clrlwi (code
, ins
->dreg
, ins
->sreg1
, 24);
3297 case OP_ICONV_TO_U2
:
3298 CASE_PPC64 (OP_LCONV_TO_U2
)
3299 ppc_clrlwi (code
, ins
->dreg
, ins
->sreg1
, 16);
3303 CASE_PPC64 (OP_LCOMPARE
)
3304 L
= (sizeof (target_mgreg_t
) == 4 || ins
->opcode
== OP_ICOMPARE
) ? 0 : 1;
3306 if (next
&& compare_opcode_is_unsigned (next
->opcode
))
3307 ppc_cmpl (code
, 0, L
, ins
->sreg1
, ins
->sreg2
);
3309 ppc_cmp (code
, 0, L
, ins
->sreg1
, ins
->sreg2
);
3311 case OP_COMPARE_IMM
:
3312 case OP_ICOMPARE_IMM
:
3313 CASE_PPC64 (OP_LCOMPARE_IMM
)
3314 L
= (sizeof (target_mgreg_t
) == 4 || ins
->opcode
== OP_ICOMPARE_IMM
) ? 0 : 1;
3316 if (next
&& compare_opcode_is_unsigned (next
->opcode
)) {
3317 if (ppc_is_uimm16 (ins
->inst_imm
)) {
3318 ppc_cmpli (code
, 0, L
, ins
->sreg1
, (ins
->inst_imm
& 0xffff));
3320 g_assert_not_reached ();
3323 if (ppc_is_imm16 (ins
->inst_imm
)) {
3324 ppc_cmpi (code
, 0, L
, ins
->sreg1
, (ins
->inst_imm
& 0xffff));
3326 g_assert_not_reached ();
3332 * gdb does not like encountering a trap in the debugged code. So
3333 * instead of emitting a trap, we emit a call a C function and place a
3337 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3338 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
3339 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3340 ppc_load_func (code
, PPC_CALL_REG
, 0);
3341 ppc_mtlr (code
, PPC_CALL_REG
);
3349 ppc_addco (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3352 CASE_PPC64 (OP_LADD
)
3353 ppc_add (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3357 ppc_adde (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3360 if (ppc_is_imm16 (ins
->inst_imm
)) {
3361 ppc_addic (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3363 g_assert_not_reached ();
3368 CASE_PPC64 (OP_LADD_IMM
)
3369 if (ppc_is_imm16 (ins
->inst_imm
)) {
3370 ppc_addi (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3372 g_assert_not_reached ();
3376 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3378 ppc_addo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3379 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3380 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3381 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3383 case OP_IADD_OVF_UN
:
3384 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3386 ppc_addco (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3387 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3388 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3389 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3392 CASE_PPC64 (OP_LSUB_OVF
)
3393 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3395 ppc_subfo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3396 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3397 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3398 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3400 case OP_ISUB_OVF_UN
:
3401 CASE_PPC64 (OP_LSUB_OVF_UN
)
3402 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3404 ppc_subfc (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3405 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3406 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3407 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3409 case OP_ADD_OVF_CARRY
:
3410 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3412 ppc_addeo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3413 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3414 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3415 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3417 case OP_ADD_OVF_UN_CARRY
:
3418 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3420 ppc_addeo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3421 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3422 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3423 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3425 case OP_SUB_OVF_CARRY
:
3426 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3428 ppc_subfeo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3429 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3430 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3431 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3433 case OP_SUB_OVF_UN_CARRY
:
3434 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3436 ppc_subfeo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3437 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3438 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3439 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3443 ppc_subfco (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3446 CASE_PPC64 (OP_LSUB
)
3447 ppc_subf (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3451 ppc_subfe (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3455 CASE_PPC64 (OP_LSUB_IMM
)
3456 // we add the negated value
3457 if (ppc_is_imm16 (-ins
->inst_imm
))
3458 ppc_addi (code
, ins
->dreg
, ins
->sreg1
, -ins
->inst_imm
);
3460 g_assert_not_reached ();
3464 g_assert (ppc_is_imm16 (ins
->inst_imm
));
3465 ppc_subfic (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3468 ppc_subfze (code
, ins
->dreg
, ins
->sreg1
);
3471 CASE_PPC64 (OP_LAND
)
3472 /* FIXME: the ppc macros as inconsistent here: put dest as the first arg! */
3473 ppc_and (code
, ins
->sreg1
, ins
->dreg
, ins
->sreg2
);
3477 CASE_PPC64 (OP_LAND_IMM
)
3478 if (!(ins
->inst_imm
& 0xffff0000)) {
3479 ppc_andid (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3480 } else if (!(ins
->inst_imm
& 0xffff)) {
3481 ppc_andisd (code
, ins
->sreg1
, ins
->dreg
, ((guint32
)ins
->inst_imm
>> 16));
3483 g_assert_not_reached ();
3487 CASE_PPC64 (OP_LDIV
) {
3488 guint8
*divisor_is_m1
;
3489 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3491 ppc_compare_reg_imm (code
, 0, ins
->sreg2
, -1);
3492 divisor_is_m1
= code
;
3493 ppc_bc (code
, PPC_BR_FALSE
| PPC_BR_LIKELY
, PPC_BR_EQ
, 0);
3494 ppc_lis (code
, ppc_r0
, 0x8000);
3495 #ifdef __mono_ppc64__
3496 if (ins
->opcode
== OP_LDIV
)
3497 ppc_sldi (code
, ppc_r0
, ppc_r0
, 32);
3499 ppc_compare (code
, 0, ins
->sreg1
, ppc_r0
);
3500 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3501 ppc_patch (divisor_is_m1
, code
);
3502 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3504 if (ins
->opcode
== OP_IDIV
)
3505 ppc_divwod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3506 #ifdef __mono_ppc64__
3508 ppc_divdod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3510 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3511 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3512 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "DivideByZeroException");
3516 CASE_PPC64 (OP_LDIV_UN
)
3517 if (ins
->opcode
== OP_IDIV_UN
)
3518 ppc_divwuod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3519 #ifdef __mono_ppc64__
3521 ppc_divduod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3523 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3524 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3525 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "DivideByZeroException");
3531 g_assert_not_reached ();
3534 ppc_or (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3538 CASE_PPC64 (OP_LOR_IMM
)
3539 if (!(ins
->inst_imm
& 0xffff0000)) {
3540 ppc_ori (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3541 } else if (!(ins
->inst_imm
& 0xffff)) {
3542 ppc_oris (code
, ins
->dreg
, ins
->sreg1
, ((guint32
)(ins
->inst_imm
) >> 16));
3544 g_assert_not_reached ();
3548 CASE_PPC64 (OP_LXOR
)
3549 ppc_xor (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3553 CASE_PPC64 (OP_LXOR_IMM
)
3554 if (!(ins
->inst_imm
& 0xffff0000)) {
3555 ppc_xori (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3556 } else if (!(ins
->inst_imm
& 0xffff)) {
3557 ppc_xoris (code
, ins
->sreg1
, ins
->dreg
, ((guint32
)(ins
->inst_imm
) >> 16));
3559 g_assert_not_reached ();
3563 CASE_PPC64 (OP_LSHL
)
3564 ppc_shift_left (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3568 CASE_PPC64 (OP_LSHL_IMM
)
3569 ppc_shift_left_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3572 ppc_sraw (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3575 ppc_shift_right_arith_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3578 if (MASK_SHIFT_IMM (ins
->inst_imm
))
3579 ppc_shift_right_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3581 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
3584 ppc_srw (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3587 CASE_PPC64 (OP_LNOT
)
3588 ppc_not (code
, ins
->dreg
, ins
->sreg1
);
3591 CASE_PPC64 (OP_LNEG
)
3592 ppc_neg (code
, ins
->dreg
, ins
->sreg1
);
3595 CASE_PPC64 (OP_LMUL
)
3596 ppc_multiply (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3600 CASE_PPC64 (OP_LMUL_IMM
)
3601 if (ppc_is_imm16 (ins
->inst_imm
)) {
3602 ppc_mulli (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3604 g_assert_not_reached ();
3608 CASE_PPC64 (OP_LMUL_OVF
)
3609 /* we annot use mcrxr, since it's not implemented on some processors
3610 * XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3612 if (ins
->opcode
== OP_IMUL_OVF
)
3613 ppc_mullwo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3614 #ifdef __mono_ppc64__
3616 ppc_mulldo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3618 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3619 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3620 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3622 case OP_IMUL_OVF_UN
:
3623 CASE_PPC64 (OP_LMUL_OVF_UN
)
3624 /* we first multiply to get the high word and compare to 0
3625 * to set the flags, then the result is discarded and then
3626 * we multiply to get the lower * bits result
3628 if (ins
->opcode
== OP_IMUL_OVF_UN
)
3629 ppc_mulhwu (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3630 #ifdef __mono_ppc64__
3632 ppc_mulhdu (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3634 ppc_cmpi (code
, 0, 0, ppc_r0
, 0);
3635 EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN
- CEE_BEQ
, "OverflowException");
3636 ppc_multiply (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3639 ppc_load (code
, ins
->dreg
, ins
->inst_c0
);
3642 ppc_load (code
, ins
->dreg
, ins
->inst_l
);
3645 case OP_LOAD_GOTADDR
:
3646 /* The PLT implementation depends on this */
3647 g_assert (ins
->dreg
== ppc_r30
);
3649 code
= mono_arch_emit_load_got_addr (cfg
->native_code
, code
, cfg
, NULL
);
3652 // FIXME: Fix max instruction length
3653 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_right
->inst_i1
, ins
->inst_right
->inst_p0
);
3654 /* arch_emit_got_access () patches this */
3655 ppc_load32 (code
, ppc_r0
, 0);
3656 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3659 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
3660 ppc_load_sequence (code
, ins
->dreg
, 0);
3662 CASE_PPC32 (OP_ICONV_TO_I4
)
3663 CASE_PPC32 (OP_ICONV_TO_U4
)
3665 if (ins
->dreg
!= ins
->sreg1
)
3666 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
3669 int saved
= ins
->sreg1
;
3670 if (ins
->sreg1
== ppc_r3
) {
3671 ppc_mr (code
, ppc_r0
, ins
->sreg1
);
3674 if (ins
->sreg2
!= ppc_r3
)
3675 ppc_mr (code
, ppc_r3
, ins
->sreg2
);
3676 if (saved
!= ppc_r4
)
3677 ppc_mr (code
, ppc_r4
, saved
);
3681 if (ins
->dreg
!= ins
->sreg1
)
3682 ppc_fmr (code
, ins
->dreg
, ins
->sreg1
);
3684 case OP_MOVE_F_TO_I4
:
3685 ppc_stfs (code
, ins
->sreg1
, -4, ppc_r1
);
3686 ppc_ldptr (code
, ins
->dreg
, -4, ppc_r1
);
3688 case OP_MOVE_I4_TO_F
:
3689 ppc_stw (code
, ins
->sreg1
, -4, ppc_r1
);
3690 ppc_lfs (code
, ins
->dreg
, -4, ppc_r1
);
3692 #ifdef __mono_ppc64__
3693 case OP_MOVE_F_TO_I8
:
3694 ppc_stfd (code
, ins
->sreg1
, -8, ppc_r1
);
3695 ppc_ldptr (code
, ins
->dreg
, -8, ppc_r1
);
3697 case OP_MOVE_I8_TO_F
:
3698 ppc_stptr (code
, ins
->sreg1
, -8, ppc_r1
);
3699 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
3702 case OP_FCONV_TO_R4
:
3703 ppc_frsp (code
, ins
->dreg
, ins
->sreg1
);
3706 case OP_TAILCALL_PARAMETER
:
3707 // This opcode helps compute sizes, i.e.
3708 // of the subsequent OP_TAILCALL, but contributes no code.
3709 g_assert (ins
->next
);
3714 MonoCallInst
*call
= (MonoCallInst
*)ins
;
3717 * Keep in sync with mono_arch_emit_epilog
3719 g_assert (!cfg
->method
->save_lmf
);
3721 * Note: we can use ppc_r12 here because it is dead anyway:
3722 * we're leaving the method.
3724 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
3725 long ret_offset
= cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
;
3726 if (ppc_is_imm16 (ret_offset
)) {
3727 ppc_ldptr (code
, ppc_r0
, ret_offset
, cfg
->frame_reg
);
3729 ppc_load (code
, ppc_r12
, ret_offset
);
3730 ppc_ldptr_indexed (code
, ppc_r0
, cfg
->frame_reg
, ppc_r12
);
3732 ppc_mtlr (code
, ppc_r0
);
3735 if (ppc_is_imm16 (cfg
->stack_usage
)) {
3736 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
);
3738 /* cfg->stack_usage is an int, so we can use
3739 * an addis/addi sequence here even in 64-bit. */
3740 ppc_addis (code
, ppc_r12
, cfg
->frame_reg
, ppc_ha(cfg
->stack_usage
));
3741 ppc_addi (code
, ppc_r12
, ppc_r12
, cfg
->stack_usage
);
3743 if (!cfg
->method
->save_lmf
) {
3745 for (i
= 31; i
>= 13; --i
) {
3746 if (cfg
->used_int_regs
& (1 << i
)) {
3747 pos
+= sizeof (target_mgreg_t
);
3748 ppc_ldptr (code
, i
, -pos
, ppc_r12
);
3752 /* FIXME restore from MonoLMF: though this can't happen yet */
3755 /* Copy arguments on the stack to our argument area */
3756 if (call
->stack_usage
) {
3757 code
= emit_memcpy (code
, call
->stack_usage
, ppc_r12
, PPC_STACK_PARAM_OFFSET
, ppc_sp
, PPC_STACK_PARAM_OFFSET
);
3758 /* r12 was clobbered */
3759 g_assert (cfg
->frame_reg
== ppc_sp
);
3760 if (ppc_is_imm16 (cfg
->stack_usage
)) {
3761 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
);
3763 /* cfg->stack_usage is an int, so we can use
3764 * an addis/addi sequence here even in 64-bit. */
3765 ppc_addis (code
, ppc_r12
, cfg
->frame_reg
, ppc_ha(cfg
->stack_usage
));
3766 ppc_addi (code
, ppc_r12
, ppc_r12
, cfg
->stack_usage
);
3770 ppc_mr (code
, ppc_sp
, ppc_r12
);
3771 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
3772 cfg
->thunk_area
+= THUNK_SIZE
;
3773 if (cfg
->compile_aot
) {
3774 /* arch_emit_got_access () patches this */
3775 ppc_load32 (code
, ppc_r0
, 0);
3776 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3777 ppc_ldptr_indexed (code
, ppc_r12
, ppc_r30
, ppc_r0
);
3778 ppc_ldptr (code
, ppc_r0
, 0, ppc_r12
);
3780 ppc_ldptr_indexed (code
, ppc_r0
, ppc_r30
, ppc_r0
);
3782 ppc_mtctr (code
, ppc_r0
);
3783 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
3790 /* ensure ins->sreg1 is not NULL */
3791 ppc_ldptr (code
, ppc_r0
, 0, ins
->sreg1
);
3794 long cookie_offset
= cfg
->sig_cookie
+ cfg
->stack_usage
;
3795 if (ppc_is_imm16 (cookie_offset
)) {
3796 ppc_addi (code
, ppc_r0
, cfg
->frame_reg
, cookie_offset
);
3798 ppc_load (code
, ppc_r0
, cookie_offset
);
3799 ppc_add (code
, ppc_r0
, cfg
->frame_reg
, ppc_r0
);
3801 ppc_stptr (code
, ppc_r0
, 0, ins
->sreg1
);
3810 call
= (MonoCallInst
*)ins
;
3811 mono_call_add_patch_info (cfg
, call
, offset
);
3812 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3813 ppc_load_func (code
, PPC_CALL_REG
, 0);
3814 ppc_mtlr (code
, PPC_CALL_REG
);
3819 /* FIXME: this should be handled somewhere else in the new jit */
3820 code
= emit_move_return_value (cfg
, ins
, code
);
3826 case OP_VOIDCALL_REG
:
3828 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3829 ppc_ldptr (code
, ppc_r0
, 0, ins
->sreg1
);
3830 /* FIXME: if we know that this is a method, we
3831 can omit this load */
3832 ppc_ldptr (code
, ppc_r2
, 8, ins
->sreg1
);
3833 ppc_mtlr (code
, ppc_r0
);
3835 #if (_CALL_ELF == 2)
3836 if (ins
->flags
& MONO_INST_HAS_METHOD
) {
3837 // Not a global entry point
3839 // Need to set up r12 with function entry address for global entry point
3840 if (ppc_r12
!= ins
->sreg1
) {
3841 ppc_mr(code
,ppc_r12
,ins
->sreg1
);
3845 ppc_mtlr (code
, ins
->sreg1
);
3848 /* FIXME: this should be handled somewhere else in the new jit */
3849 code
= emit_move_return_value (cfg
, ins
, code
);
3851 case OP_FCALL_MEMBASE
:
3852 case OP_LCALL_MEMBASE
:
3853 case OP_VCALL_MEMBASE
:
3854 case OP_VCALL2_MEMBASE
:
3855 case OP_VOIDCALL_MEMBASE
:
3856 case OP_CALL_MEMBASE
:
3857 if (cfg
->compile_aot
&& ins
->sreg1
== ppc_r12
) {
3858 /* The trampolines clobber this */
3859 ppc_mr (code
, ppc_r29
, ins
->sreg1
);
3860 ppc_ldptr (code
, ppc_r0
, ins
->inst_offset
, ppc_r29
);
3862 ppc_ldptr (code
, ppc_r0
, ins
->inst_offset
, ins
->sreg1
);
3864 ppc_mtlr (code
, ppc_r0
);
3866 /* FIXME: this should be handled somewhere else in the new jit */
3867 code
= emit_move_return_value (cfg
, ins
, code
);
3870 guint8
* zero_loop_jump
, * zero_loop_start
;
3871 /* keep alignment */
3872 int alloca_waste
= PPC_STACK_PARAM_OFFSET
+ cfg
->param_area
+ 31;
3873 int area_offset
= alloca_waste
;
3875 ppc_addi (code
, ppc_r12
, ins
->sreg1
, alloca_waste
+ 31);
3876 /* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
3877 ppc_clear_right_imm (code
, ppc_r12
, ppc_r12
, 4);
3878 /* use ctr to store the number of words to 0 if needed */
3879 if (ins
->flags
& MONO_INST_INIT
) {
3880 /* we zero 4 bytes at a time:
3881 * we add 7 instead of 3 so that we set the counter to
3882 * at least 1, otherwise the bdnz instruction will make
3883 * it negative and iterate billions of times.
3885 ppc_addi (code
, ppc_r0
, ins
->sreg1
, 7);
3886 ppc_shift_right_arith_imm (code
, ppc_r0
, ppc_r0
, 2);
3887 ppc_mtctr (code
, ppc_r0
);
3889 ppc_ldptr (code
, ppc_r0
, 0, ppc_sp
);
3890 ppc_neg (code
, ppc_r12
, ppc_r12
);
3891 ppc_stptr_update_indexed (code
, ppc_r0
, ppc_sp
, ppc_r12
);
3893 /* FIXME: make this loop work in 8 byte
3894 increments on PPC64 */
3895 if (ins
->flags
& MONO_INST_INIT
) {
3896 /* adjust the dest reg by -4 so we can use stwu */
3897 /* we actually adjust -8 because we let the loop
3900 ppc_addi (code
, ins
->dreg
, ppc_sp
, (area_offset
- 8));
3901 ppc_li (code
, ppc_r12
, 0);
3902 zero_loop_start
= code
;
3903 ppc_stwu (code
, ppc_r12
, 4, ins
->dreg
);
3904 zero_loop_jump
= code
;
3905 ppc_bc (code
, PPC_BR_DEC_CTR_NONZERO
, 0, 0);
3906 ppc_patch (zero_loop_jump
, zero_loop_start
);
3908 ppc_addi (code
, ins
->dreg
, ppc_sp
, area_offset
);
3913 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3914 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
3915 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3916 ppc_load_func (code
, PPC_CALL_REG
, 0);
3917 ppc_mtlr (code
, PPC_CALL_REG
);
3926 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3927 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
3928 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
3929 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3930 ppc_load_func (code
, PPC_CALL_REG
, 0);
3931 ppc_mtlr (code
, PPC_CALL_REG
);
3938 case OP_START_HANDLER
: {
3939 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3940 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3941 code
= emit_reserve_param_area (cfg
, code
);
3942 ppc_mflr (code
, ppc_r0
);
3943 if (ppc_is_imm16 (spvar
->inst_offset
)) {
3944 ppc_stptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3946 ppc_load (code
, ppc_r12
, spvar
->inst_offset
);
3947 ppc_stptr_indexed (code
, ppc_r0
, ppc_r12
, spvar
->inst_basereg
);
3951 case OP_ENDFILTER
: {
3952 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3953 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3954 code
= emit_unreserve_param_area (cfg
, code
);
3955 if (ins
->sreg1
!= ppc_r3
)
3956 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3957 if (ppc_is_imm16 (spvar
->inst_offset
)) {
3958 ppc_ldptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3960 ppc_load (code
, ppc_r12
, spvar
->inst_offset
);
3961 ppc_ldptr_indexed (code
, ppc_r0
, spvar
->inst_basereg
, ppc_r12
);
3963 ppc_mtlr (code
, ppc_r0
);
3967 case OP_ENDFINALLY
: {
3968 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3969 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3970 code
= emit_unreserve_param_area (cfg
, code
);
3971 ppc_ldptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3972 ppc_mtlr (code
, ppc_r0
);
3976 case OP_CALL_HANDLER
:
3977 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3979 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
3980 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
3983 ins
->inst_c0
= code
- cfg
->native_code
;
3986 /*if (ins->inst_target_bb->native_offset) {
3988 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3990 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3995 ppc_mtctr (code
, ins
->sreg1
);
3996 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
3999 ppc_li (code
, ins
->dreg
, 0);
4000 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_EQ
, 2);
4001 ppc_li (code
, ins
->dreg
, 1);
4005 CASE_PPC64 (OP_LCEQ
)
4006 ppc_li (code
, ins
->dreg
, 0);
4007 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 2);
4008 ppc_li (code
, ins
->dreg
, 1);
4014 CASE_PPC64 (OP_LCLT
)
4015 CASE_PPC64 (OP_LCLT_UN
)
4016 ppc_li (code
, ins
->dreg
, 1);
4017 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 2);
4018 ppc_li (code
, ins
->dreg
, 0);
4022 ppc_li (code
, ins
->dreg
, 1);
4023 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_LT
, 2);
4024 ppc_li (code
, ins
->dreg
, 0);
4030 CASE_PPC64 (OP_LCGT
)
4031 CASE_PPC64 (OP_LCGT_UN
)
4032 ppc_li (code
, ins
->dreg
, 1);
4033 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_GT
, 2);
4034 ppc_li (code
, ins
->dreg
, 0);
4038 ppc_li (code
, ins
->dreg
, 1);
4039 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_GT
, 2);
4040 ppc_li (code
, ins
->dreg
, 0);
4042 case OP_COND_EXC_EQ
:
4043 case OP_COND_EXC_NE_UN
:
4044 case OP_COND_EXC_LT
:
4045 case OP_COND_EXC_LT_UN
:
4046 case OP_COND_EXC_GT
:
4047 case OP_COND_EXC_GT_UN
:
4048 case OP_COND_EXC_GE
:
4049 case OP_COND_EXC_GE_UN
:
4050 case OP_COND_EXC_LE
:
4051 case OP_COND_EXC_LE_UN
:
4052 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
4054 case OP_COND_EXC_IEQ
:
4055 case OP_COND_EXC_INE_UN
:
4056 case OP_COND_EXC_ILT
:
4057 case OP_COND_EXC_ILT_UN
:
4058 case OP_COND_EXC_IGT
:
4059 case OP_COND_EXC_IGT_UN
:
4060 case OP_COND_EXC_IGE
:
4061 case OP_COND_EXC_IGE_UN
:
4062 case OP_COND_EXC_ILE
:
4063 case OP_COND_EXC_ILE_UN
:
4064 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
4076 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
4079 /* floating point opcodes */
4081 g_assert (cfg
->compile_aot
);
4083 /* FIXME: Optimize this */
4085 ppc_mflr (code
, ppc_r12
);
4087 *(double*)code
= *(double*)ins
->inst_p0
;
4089 ppc_lfd (code
, ins
->dreg
, 8, ppc_r12
);
4092 g_assert_not_reached ();
4094 case OP_STORER8_MEMBASE_REG
:
4095 if (ppc_is_imm16 (ins
->inst_offset
)) {
4096 ppc_stfd (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4098 if (ppc_is_imm32 (ins
->inst_offset
)) {
4099 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4100 ppc_stfd (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
4102 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4103 ppc_stfdx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4107 case OP_LOADR8_MEMBASE
:
4108 if (ppc_is_imm16 (ins
->inst_offset
)) {
4109 ppc_lfd (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
4111 if (ppc_is_imm32 (ins
->inst_offset
)) {
4112 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4113 ppc_lfd (code
, ins
->dreg
, ins
->inst_offset
, ppc_r11
);
4115 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4116 ppc_lfdx (code
, ins
->dreg
, ins
->inst_destbasereg
, ppc_r0
);
4120 case OP_STORER4_MEMBASE_REG
:
4121 ppc_frsp (code
, ins
->sreg1
, ins
->sreg1
);
4122 if (ppc_is_imm16 (ins
->inst_offset
)) {
4123 ppc_stfs (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4125 if (ppc_is_imm32 (ins
->inst_offset
)) {
4126 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4127 ppc_stfs (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
4129 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4130 ppc_stfsx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4134 case OP_LOADR4_MEMBASE
:
4135 if (ppc_is_imm16 (ins
->inst_offset
)) {
4136 ppc_lfs (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
4138 if (ppc_is_imm32 (ins
->inst_offset
)) {
4139 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4140 ppc_lfs (code
, ins
->dreg
, ins
->inst_offset
, ppc_r11
);
4142 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4143 ppc_lfsx (code
, ins
->dreg
, ins
->inst_destbasereg
, ppc_r0
);
4147 case OP_LOADR4_MEMINDEX
:
4148 ppc_lfsx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4150 case OP_LOADR8_MEMINDEX
:
4151 ppc_lfdx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4153 case OP_STORER4_MEMINDEX
:
4154 ppc_frsp (code
, ins
->sreg1
, ins
->sreg1
);
4155 ppc_stfsx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4157 case OP_STORER8_MEMINDEX
:
4158 ppc_stfdx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4161 case CEE_CONV_R4
: /* FIXME: change precision */
4163 g_assert_not_reached ();
4164 case OP_FCONV_TO_I1
:
4165 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
4167 case OP_FCONV_TO_U1
:
4168 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
4170 case OP_FCONV_TO_I2
:
4171 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
4173 case OP_FCONV_TO_U2
:
4174 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
4176 case OP_FCONV_TO_I4
:
4178 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
4180 case OP_FCONV_TO_U4
:
4182 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
4184 case OP_LCONV_TO_R_UN
:
4185 g_assert_not_reached ();
4186 /* Implemented as helper calls */
4188 case OP_LCONV_TO_OVF_I4_2
:
4189 case OP_LCONV_TO_OVF_I
: {
4190 #ifdef __mono_ppc64__
4193 guint8
*negative_branch
, *msword_positive_branch
, *msword_negative_branch
, *ovf_ex_target
;
4194 // Check if its negative
4195 ppc_cmpi (code
, 0, 0, ins
->sreg1
, 0);
4196 negative_branch
= code
;
4197 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 0);
4198 // Its positive msword == 0
4199 ppc_cmpi (code
, 0, 0, ins
->sreg2
, 0);
4200 msword_positive_branch
= code
;
4201 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_EQ
, 0);
4203 ovf_ex_target
= code
;
4204 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS
, 0, "OverflowException");
4206 ppc_patch (negative_branch
, code
);
4207 ppc_cmpi (code
, 0, 0, ins
->sreg2
, -1);
4208 msword_negative_branch
= code
;
4209 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4210 ppc_patch (msword_negative_branch
, ovf_ex_target
);
4212 ppc_patch (msword_positive_branch
, code
);
4213 if (ins
->dreg
!= ins
->sreg1
)
4214 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
4219 ppc_frind (code
, ins
->dreg
, ins
->sreg1
);
4222 ppc_frizd (code
, ins
->dreg
, ins
->sreg1
);
4225 ppc_fripd (code
, ins
->dreg
, ins
->sreg1
);
4228 ppc_frimd (code
, ins
->dreg
, ins
->sreg1
);
4231 ppc_fabsd (code
, ins
->dreg
, ins
->sreg1
);
4234 ppc_fsqrtsd (code
, ins
->dreg
, ins
->sreg1
);
4237 ppc_fsqrtd (code
, ins
->dreg
, ins
->sreg1
);
4240 ppc_fadd (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4243 ppc_fsub (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4246 ppc_fmul (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4249 ppc_fdiv (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4252 ppc_fneg (code
, ins
->dreg
, ins
->sreg1
);
4256 g_assert_not_reached ();
4258 /* These min/max require POWER5 */
4260 ppc_cmp (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4261 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4264 ppc_cmpl (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4265 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4268 ppc_cmp (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4269 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4272 ppc_cmpl (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4273 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4275 CASE_PPC64 (OP_LMIN
)
4276 ppc_cmp (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4277 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4279 CASE_PPC64 (OP_LMIN_UN
)
4280 ppc_cmpl (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4281 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4283 CASE_PPC64 (OP_LMAX
)
4284 ppc_cmp (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4285 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4287 CASE_PPC64 (OP_LMAX_UN
)
4288 ppc_cmpl (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4289 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4292 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4296 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4297 ppc_li (code
, ins
->dreg
, 1);
4298 ppc_bc (code
, ins
->opcode
== OP_FCEQ
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_EQ
, 2);
4299 ppc_li (code
, ins
->dreg
, 0);
4303 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4304 ppc_li (code
, ins
->dreg
, 1);
4305 ppc_bc (code
, ins
->opcode
== OP_FCLT
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_LT
, 2);
4306 ppc_li (code
, ins
->dreg
, 0);
4309 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4310 ppc_li (code
, ins
->dreg
, 1);
4311 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 3);
4312 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 2);
4313 ppc_li (code
, ins
->dreg
, 0);
4317 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4318 ppc_li (code
, ins
->dreg
, 1);
4319 ppc_bc (code
, ins
->opcode
== OP_FCGT
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_GT
, 2);
4320 ppc_li (code
, ins
->dreg
, 0);
4323 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4324 ppc_li (code
, ins
->dreg
, 1);
4325 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 3);
4326 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_GT
, 2);
4327 ppc_li (code
, ins
->dreg
, 0);
4330 EMIT_COND_BRANCH (ins
, CEE_BEQ
- CEE_BEQ
);
4333 EMIT_COND_BRANCH (ins
, CEE_BNE_UN
- CEE_BEQ
);
4336 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4337 EMIT_COND_BRANCH (ins
, CEE_BLT
- CEE_BEQ
);
4340 EMIT_COND_BRANCH_FLAGS (ins
, PPC_BR_TRUE
, PPC_BR_SO
);
4341 EMIT_COND_BRANCH (ins
, CEE_BLT_UN
- CEE_BEQ
);
4344 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4345 EMIT_COND_BRANCH (ins
, CEE_BGT
- CEE_BEQ
);
4348 EMIT_COND_BRANCH_FLAGS (ins
, PPC_BR_TRUE
, PPC_BR_SO
);
4349 EMIT_COND_BRANCH (ins
, CEE_BGT_UN
- CEE_BEQ
);
4352 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4353 EMIT_COND_BRANCH (ins
, CEE_BGE
- CEE_BEQ
);
4356 EMIT_COND_BRANCH (ins
, CEE_BGE_UN
- CEE_BEQ
);
4359 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4360 EMIT_COND_BRANCH (ins
, CEE_BLE
- CEE_BEQ
);
4363 EMIT_COND_BRANCH (ins
, CEE_BLE_UN
- CEE_BEQ
);
4366 g_assert_not_reached ();
4367 case OP_PPC_CHECK_FINITE
: {
4368 ppc_rlwinm (code
, ins
->sreg1
, ins
->sreg1
, 0, 1, 31);
4369 ppc_addis (code
, ins
->sreg1
, ins
->sreg1
, -32752);
4370 ppc_rlwinmd (code
, ins
->sreg1
, ins
->sreg1
, 1, 31, 31);
4371 EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ
- CEE_BEQ
, "ArithmeticException");
4374 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_c1
, ins
->inst_p0
);
4375 #ifdef __mono_ppc64__
4376 ppc_load_sequence (code
, ins
->dreg
, (guint64
)0x0f0f0f0f0f0f0f0fLL
);
4378 ppc_load_sequence (code
, ins
->dreg
, (gulong
)0x0f0f0f0fL
);
4383 #ifdef __mono_ppc64__
4384 case OP_ICONV_TO_I4
:
4386 ppc_extsw (code
, ins
->dreg
, ins
->sreg1
);
4388 case OP_ICONV_TO_U4
:
4390 ppc_clrldi (code
, ins
->dreg
, ins
->sreg1
, 32);
4392 case OP_ICONV_TO_R4
:
4393 case OP_ICONV_TO_R8
:
4394 case OP_LCONV_TO_R4
:
4395 case OP_LCONV_TO_R8
: {
4397 if (ins
->opcode
== OP_ICONV_TO_R4
|| ins
->opcode
== OP_ICONV_TO_R8
) {
4398 ppc_extsw (code
, ppc_r0
, ins
->sreg1
);
4403 if (cpu_hw_caps
& PPC_MOVE_FPR_GPR
) {
4404 ppc_mffgpr (code
, ins
->dreg
, tmp
);
4406 ppc_str (code
, tmp
, -8, ppc_r1
);
4407 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
4409 ppc_fcfid (code
, ins
->dreg
, ins
->dreg
);
4410 if (ins
->opcode
== OP_ICONV_TO_R4
|| ins
->opcode
== OP_LCONV_TO_R4
)
4411 ppc_frsp (code
, ins
->dreg
, ins
->dreg
);
4415 ppc_srad (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4418 ppc_srd (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4421 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
4423 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
4424 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1 << 13)); /* CA */
4425 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, ins
->inst_p1
);
4427 case OP_COND_EXC_OV
:
4428 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
4429 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1 << 14)); /* OV */
4430 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, ins
->inst_p1
);
4442 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_LBEQ
);
4444 case OP_FCONV_TO_I8
:
4445 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 8, TRUE
);
4447 case OP_FCONV_TO_U8
:
4448 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 8, FALSE
);
4450 case OP_STOREI4_MEMBASE_REG
:
4451 if (ppc_is_imm16 (ins
->inst_offset
)) {
4452 ppc_stw (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4454 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4455 ppc_stwx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4458 case OP_STOREI4_MEMINDEX
:
4459 ppc_stwx (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_destbasereg
);
4462 ppc_srawi (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4464 case OP_ISHR_UN_IMM
:
4465 if (ins
->inst_imm
& 0x1f)
4466 ppc_srwi (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4468 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
4471 case OP_ICONV_TO_R4
:
4472 case OP_ICONV_TO_R8
: {
4473 if (cpu_hw_caps
& PPC_ISA_64
) {
4474 ppc_srawi(code
, ppc_r0
, ins
->sreg1
, 31);
4475 ppc_stw (code
, ppc_r0
, -8, ppc_r1
);
4476 ppc_stw (code
, ins
->sreg1
, -4, ppc_r1
);
4477 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
4478 ppc_fcfid (code
, ins
->dreg
, ins
->dreg
);
4479 if (ins
->opcode
== OP_ICONV_TO_R4
)
4480 ppc_frsp (code
, ins
->dreg
, ins
->dreg
);
4486 case OP_ATOMIC_ADD_I4
:
4487 CASE_PPC64 (OP_ATOMIC_ADD_I8
) {
4488 int location
= ins
->inst_basereg
;
4489 int addend
= ins
->sreg2
;
4490 guint8
*loop
, *branch
;
4491 g_assert (ins
->inst_offset
== 0);
4495 if (ins
->opcode
== OP_ATOMIC_ADD_I4
)
4496 ppc_lwarx (code
, ppc_r0
, 0, location
);
4497 #ifdef __mono_ppc64__
4499 ppc_ldarx (code
, ppc_r0
, 0, location
);
4502 ppc_add (code
, ppc_r0
, ppc_r0
, addend
);
4504 if (ins
->opcode
== OP_ATOMIC_ADD_I4
)
4505 ppc_stwcxd (code
, ppc_r0
, 0, location
);
4506 #ifdef __mono_ppc64__
4508 ppc_stdcxd (code
, ppc_r0
, 0, location
);
4512 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4513 ppc_patch (branch
, loop
);
4516 ppc_mr (code
, ins
->dreg
, ppc_r0
);
4519 case OP_ATOMIC_CAS_I4
:
4520 CASE_PPC64 (OP_ATOMIC_CAS_I8
) {
4521 int location
= ins
->sreg1
;
4522 int value
= ins
->sreg2
;
4523 int comparand
= ins
->sreg3
;
4524 guint8
*start
, *not_equal
, *lost_reservation
;
4528 if (ins
->opcode
== OP_ATOMIC_CAS_I4
)
4529 ppc_lwarx (code
, ppc_r0
, 0, location
);
4530 #ifdef __mono_ppc64__
4532 ppc_ldarx (code
, ppc_r0
, 0, location
);
4535 ppc_cmp (code
, 0, ins
->opcode
== OP_ATOMIC_CAS_I4
? 0 : 1, ppc_r0
, comparand
);
4537 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4539 if (ins
->opcode
== OP_ATOMIC_CAS_I4
)
4540 ppc_stwcxd (code
, value
, 0, location
);
4541 #ifdef __mono_ppc64__
4543 ppc_stdcxd (code
, value
, 0, location
);
4546 lost_reservation
= code
;
4547 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4548 ppc_patch (lost_reservation
, start
);
4549 ppc_patch (not_equal
, code
);
4552 ppc_mr (code
, ins
->dreg
, ppc_r0
);
4555 case OP_LIVERANGE_START
: {
4556 if (cfg
->verbose_level
> 1)
4557 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4558 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
4561 case OP_LIVERANGE_END
: {
4562 if (cfg
->verbose_level
> 1)
4563 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4564 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
4567 case OP_GC_SAFE_POINT
:
4571 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
4572 g_assert_not_reached ();
4575 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
4576 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4577 mono_inst_name (ins
->opcode
), max_len
, (glong
)(code
- cfg
->native_code
- offset
));
4578 g_assert_not_reached ();
4586 set_code_cursor (cfg
, code
);
4588 #endif /* !DISABLE_JIT */
4591 mono_arch_register_lowlevel_calls (void)
4593 /* The signature doesn't matter */
4594 mono_register_jit_icall (mono_ppc_throw_exception
, mono_icall_sig_void
, TRUE
);
#ifdef __mono_ppc64__
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
/*
 * patch_load_sequence:
 *
 *   Patch an already-emitted ppc64 immediate-load instruction sequence in
 * place so that it loads the pointer-sized value 'val'.  The value is split
 * into four 16-bit immediates written directly into the instruction stream.
 * The guint16 indices differ by endianness (on LE the immediate field of each
 * instruction is at the lower address, on BE at the higher one); the gap in
 * the indices skips the instruction in the middle of the sequence that
 * carries no immediate — NOTE(review): index layout reconstructed from the
 * two variants, confirm against ppc_load_sequence in the emitter.
 */
#define patch_load_sequence(ip,val) do {\
		guint16 *__load = (guint16*)(ip);	\
		g_assert (sizeof (val) == sizeof (gsize)); \
		__load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff;	\
		__load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff;	\
		__load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff;	\
		__load [8] = ((guint64)(gsize)(val)) & 0xffff;	\
	} while (0)
#elif G_BYTE_ORDER == G_BIG_ENDIAN
#define patch_load_sequence(ip,val) do {\
		guint16 *__load = (guint16*)(ip);	\
		g_assert (sizeof (val) == sizeof (gsize)); \
		__load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff;	\
		__load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff;	\
		__load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff;	\
		__load [9] = ((guint64)(gsize)(val)) & 0xffff;	\
	} while (0)
#else
#error huh? No endianess defined by compiler
#endif
#else
/* 32-bit: patch the two 16-bit immediates of a lis/ori pair with the
 * high and low half-words of 'val'. */
#define patch_load_sequence(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip);	\
		__lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff;	\
		__lis_ori [3] = ((gulong)(val)) & 0xffff;	\
	} while (0)
#endif
/*
 * Patch one MonoJumpInfo entry into already-emitted native code.
 * `ip` is computed from ji->ip.i + code; depending on the patch type the
 * lis/ori (load-sequence) immediates at `ip` are rewritten in place, a
 * switch table is filled with absolute addresses, or the generic
 * ppc_patch_full () path is taken.
 * NOTE(review): this extracted text is missing structural lines (the
 * `switch (ji->type)` head, `break;`s and closing braces) — compare with
 * the upstream file before editing.
 */
4629 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
4631 unsigned char *ip
= ji
->ip
.i
+ code
;
/* is_fd: whether the target is a function descriptor (PPC64 ELFv1) — set
 * elsewhere in the missing lines; forwarded to ppc_patch_full below. */
4632 gboolean is_fd
= FALSE
;
4635 case MONO_PATCH_INFO_IP
:
/* the load sequence at ip is patched to load ip itself */
4636 patch_load_sequence (ip
, ip
);
4638 case MONO_PATCH_INFO_SWITCH
: {
4639 gpointer
*table
= (gpointer
*)ji
->data
.table
->table
;
4642 patch_load_sequence (ip
, table
);
4644 for (i
= 0; i
< ji
->data
.table
->table_size
; i
++) {
/* entries are stored relative; rebase them onto the method's code */
4645 table
[i
] = (glong
)ji
->data
.table
->table
[i
] + code
;
4647 /* we put into the table the absolute address, no need for ppc_patch in this case */
4650 case MONO_PATCH_INFO_METHODCONST
:
4651 case MONO_PATCH_INFO_CLASS
:
4652 case MONO_PATCH_INFO_IMAGE
:
4653 case MONO_PATCH_INFO_FIELD
:
4654 case MONO_PATCH_INFO_VTABLE
:
4655 case MONO_PATCH_INFO_IID
:
4656 case MONO_PATCH_INFO_SFLDA
:
4657 case MONO_PATCH_INFO_LDSTR
:
4658 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
4659 case MONO_PATCH_INFO_LDTOKEN
:
4660 /* from OP_AOTCONST : lis + ori */
4661 patch_load_sequence (ip
, target
);
4663 case MONO_PATCH_INFO_R4
:
4664 case MONO_PATCH_INFO_R8
:
/* float constants are not patched this way on PPC; the store below is dead */
4665 g_assert_not_reached ();
4666 *((gconstpointer
*)(ip
+ 2)) = ji
->data
.target
;
4668 case MONO_PATCH_INFO_EXC_NAME
:
4669 g_assert_not_reached ();
4670 *((gconstpointer
*)(ip
+ 1)) = ji
->data
.name
;
4672 case MONO_PATCH_INFO_NONE
:
4673 case MONO_PATCH_INFO_BB_OVF
:
4674 case MONO_PATCH_INFO_EXC_OVF
:
4675 /* everything is dealt with at epilog output time */
4677 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
4678 case MONO_PATCH_INFO_JIT_ICALL_ID
:
4679 case MONO_PATCH_INFO_ABS
:
4680 case MONO_PATCH_INFO_RGCTX_FETCH
:
4681 case MONO_PATCH_INFO_JIT_ICALL_ADDR
:
4682 case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR
:
/* default path: generic branch/immediate patching, honouring descriptors */
4687 ppc_patch_full (cfg
, domain
, ip
, target
, is_fd
);
4693 * Emit code to save the registers in used_int_regs or the registers in the MonoLMF
4694 * structure at positive offset pos from register base_reg. pos is guaranteed to fit into
4695 * the instruction offset immediate for all the registers.
/*
 * Emit stores for the callee-saved state at prolog time.
 * Non-LMF path: store each register in used_int_regs at increasing `pos`
 * from base_reg, emitting a matching unwind offset op. LMF path: fill the
 * iregs (r13-r31) and fregs (f14-f31) arrays of the MonoLMF at `pos`.
 * Returns the advanced code pointer (return lines missing in this extract).
 * NOTE(review): the `if (!save_lmf) { ... } else { ... }` skeleton is
 * missing from this extracted text.
 */
4698 save_registers (MonoCompile
*cfg
, guint8
* code
, int pos
, int base_reg
, gboolean save_lmf
, guint32 used_int_regs
, int cfa_offset
)
4702 for (i
= 13; i
<= 31; i
++) {
4703 if (used_int_regs
& (1 << i
)) {
4704 ppc_str (code
, i
, pos
, base_reg
);
/* unwind info records offsets relative to the CFA */
4705 mono_emit_unwind_op_offset (cfg
, code
, i
, pos
- cfa_offset
);
4706 pos
+= sizeof (target_mgreg_t
);
4710 /* pos is the start of the MonoLMF structure */
4711 int offset
= pos
+ G_STRUCT_OFFSET (MonoLMF
, iregs
);
4712 for (i
= 13; i
<= 31; i
++) {
4713 ppc_str (code
, i
, offset
, base_reg
);
4714 mono_emit_unwind_op_offset (cfg
, code
, i
, offset
- cfa_offset
);
4715 offset
+= sizeof (target_mgreg_t
);
4717 offset
= pos
+ G_STRUCT_OFFSET (MonoLMF
, fregs
);
/* f14-f31 are the PPC callee-saved FP registers */
4718 for (i
= 14; i
< 32; i
++) {
4719 ppc_stfd (code
, i
, offset
, base_reg
);
4720 offset
+= sizeof (gdouble
);
4727 * Stack frame layout:
4729 * ------------------- sp
4730 * MonoLMF structure or saved registers
4731 * -------------------
4733 * -------------------
4735 * -------------------
4736 * param area size is cfg->param_area
4737 * -------------------
4738 * linkage area size is PPC_STACK_PARAM_OFFSET
4739 * ------------------- sp
/*
 * Emit the method prolog: save the link register, allocate the frame
 * (alloc_size, aligned to MONO_ARCH_FRAME_ALIGNMENT), save callee-saved
 * registers or build a MonoLMF, set up the frame register and rgctx var,
 * compute bb->max_offset for short forward jumps, and spill/copy incoming
 * arguments from their ABI locations into their home stack slots or regs.
 * NOTE(review): this extracted text is missing many structural lines
 * (braces, `else`, `case`/`break`, declarations like `int i; guint8 *code;`,
 * `#else`/`#endif`, and the final `return code;`) — do not compile as-is.
 */
4743 mono_arch_emit_prolog (MonoCompile
*cfg
)
4745 MonoMethod
*method
= cfg
->method
;
4747 MonoMethodSignature
*sig
;
4749 long alloc_size
, pos
, max_offset
, cfa_offset
;
4754 int tailcall_struct_index
;
4756 sig
= mono_method_signature_internal (method
);
/* initial code buffer estimate; grown later via realloc_code/set_code_cursor */
4757 cfg
->code_size
= 512 + sig
->param_count
* 32;
4758 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
4762 /* We currently emit unwind info for aot, but don't use it */
4763 mono_emit_unwind_op_def_cfa (cfg
, code
, ppc_r1
, 0);
/* "1 ||": always save LR, even for leaf methods */
4765 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
4766 ppc_mflr (code
, ppc_r0
);
4767 ppc_str (code
, ppc_r0
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
4768 mono_emit_unwind_op_offset (cfg
, code
, ppc_lr
, PPC_RET_ADDR_OFFSET
);
4771 alloc_size
= cfg
->stack_offset
;
/* count the space needed above the locals: either the used callee-saved
 * registers, or a whole MonoLMF when save_lmf is set */
4774 if (!method
->save_lmf
) {
4775 for (i
= 31; i
>= 13; --i
) {
4776 if (cfg
->used_int_regs
& (1 << i
)) {
4777 pos
+= sizeof (target_mgreg_t
);
4781 pos
+= sizeof (MonoLMF
);
4785 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4786 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
4787 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
4788 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
4791 cfg
->stack_usage
= alloc_size
;
4792 g_assert ((alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
-1)) == 0);
/* stwu/stdu with an imm16 displacement when possible, otherwise go through
 * r0 with a full load + indexed store-update */
4794 if (ppc_is_imm16 (-alloc_size
)) {
4795 ppc_str_update (code
, ppc_sp
, -alloc_size
, ppc_sp
);
4796 cfa_offset
= alloc_size
;
4797 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, alloc_size
);
4798 code
= save_registers (cfg
, code
, alloc_size
- pos
, ppc_sp
, method
->save_lmf
, cfg
->used_int_regs
, cfa_offset
);
4801 ppc_addi (code
, ppc_r12
, ppc_sp
, -pos
);
4802 ppc_load (code
, ppc_r0
, -alloc_size
);
4803 ppc_str_update_indexed (code
, ppc_sp
, ppc_sp
, ppc_r0
);
4804 cfa_offset
= alloc_size
;
4805 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, alloc_size
);
4806 code
= save_registers (cfg
, code
, 0, ppc_r12
, method
->save_lmf
, cfg
->used_int_regs
, cfa_offset
);
4809 if (cfg
->frame_reg
!= ppc_sp
) {
4810 ppc_mr (code
, cfg
->frame_reg
, ppc_sp
);
4811 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
4814 /* store runtime generic context */
4815 if (cfg
->rgctx_var
) {
4816 g_assert (cfg
->rgctx_var
->opcode
== OP_REGOFFSET
&&
4817 (cfg
->rgctx_var
->inst_basereg
== ppc_r1
|| cfg
->rgctx_var
->inst_basereg
== ppc_r31
));
4819 ppc_stptr (code
, MONO_ARCH_RGCTX_REG
, cfg
->rgctx_var
->inst_offset
, cfg
->rgctx_var
->inst_basereg
);
4822 /* compute max_offset in order to use short forward jumps
4823 * we always do it on ppc because the immediate displacement
4824 * for jumps is too small
4827 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
4829 bb
->max_offset
= max_offset
;
4831 MONO_BB_FOR_EACH_INS (bb
, ins
)
4832 max_offset
+= ins_get_size (ins
->opcode
);
4835 /* load arguments allocated to register from the stack */
4838 cinfo
= get_call_info (sig
);
/* valuetype returns: store the hidden return-address argument into vret_addr */
4840 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
4841 ArgInfo
*ainfo
= &cinfo
->ret
;
4843 inst
= cfg
->vret_addr
;
4846 if (ppc_is_imm16 (inst
->inst_offset
)) {
4847 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4849 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4850 ppc_stptr_indexed (code
, ainfo
->reg
, ppc_r12
, inst
->inst_basereg
);
/* spill each incoming argument (this + params) to its home location */
4854 tailcall_struct_index
= 0;
4855 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4856 ArgInfo
*ainfo
= cinfo
->args
+ i
;
4857 inst
= cfg
->args
[pos
];
4859 if (cfg
->verbose_level
> 2)
4860 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->regtype
);
4861 if (inst
->opcode
== OP_REGVAR
) {
4862 if (ainfo
->regtype
== RegTypeGeneral
)
4863 ppc_mr (code
, inst
->dreg
, ainfo
->reg
);
4864 else if (ainfo
->regtype
== RegTypeFP
)
4865 ppc_fmr (code
, inst
->dreg
, ainfo
->reg
);
4866 else if (ainfo
->regtype
== RegTypeBase
) {
/* argument lives in the caller frame: reload caller sp first */
4867 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
4868 ppc_ldptr (code
, inst
->dreg
, ainfo
->offset
, ppc_r12
);
4870 g_assert_not_reached ();
4872 if (cfg
->verbose_level
> 2)
4873 g_print ("Argument %ld assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
4875 /* the argument should be put on the stack: FIXME handle size != word */
4876 if (ainfo
->regtype
== RegTypeGeneral
) {
/* size-dispatched store; each case picks imm16 / imm32(addis+ha) /
 * full-load+indexed addressing depending on inst_offset range */
4877 switch (ainfo
->size
) {
4879 if (ppc_is_imm16 (inst
->inst_offset
)) {
4880 ppc_stb (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4882 if (ppc_is_imm32 (inst
->inst_offset
)) {
4883 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4884 ppc_stb (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4886 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4887 ppc_stbx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4892 if (ppc_is_imm16 (inst
->inst_offset
)) {
4893 ppc_sth (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4895 if (ppc_is_imm32 (inst
->inst_offset
)) {
4896 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4897 ppc_sth (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4899 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4900 ppc_sthx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4904 #ifdef __mono_ppc64__
4906 if (ppc_is_imm16 (inst
->inst_offset
)) {
4907 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4909 if (ppc_is_imm32 (inst
->inst_offset
)) {
4910 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4911 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4913 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4914 ppc_stwx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4919 if (ppc_is_imm16 (inst
->inst_offset
)) {
4920 ppc_str (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4922 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4923 ppc_str_indexed (code
, ainfo
->reg
, ppc_r12
, inst
->inst_basereg
);
/* 64-bit value in a register pair on 32-bit PPC: two word stores */
4928 if (ppc_is_imm16 (inst
->inst_offset
+ 4)) {
4929 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4930 ppc_stw (code
, ainfo
->reg
+ 1, inst
->inst_offset
+ 4, inst
->inst_basereg
);
4932 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4933 ppc_addi (code
, ppc_r12
, ppc_r12
, inst
->inst_offset
);
4934 ppc_stw (code
, ainfo
->reg
, 0, ppc_r12
);
4935 ppc_stw (code
, ainfo
->reg
+ 1, 4, ppc_r12
);
4940 if (ppc_is_imm16 (inst
->inst_offset
)) {
4941 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4943 if (ppc_is_imm32 (inst
->inst_offset
)) {
4944 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4945 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4947 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4948 ppc_stptr_indexed (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4953 } else if (ainfo
->regtype
== RegTypeBase
) {
4954 g_assert (ppc_is_imm16 (ainfo
->offset
));
4955 /* load the previous stack pointer in r12 */
4956 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
4957 ppc_ldptr (code
, ppc_r0
, ainfo
->offset
, ppc_r12
);
4958 switch (ainfo
->size
) {
4960 if (ppc_is_imm16 (inst
->inst_offset
)) {
4961 ppc_stb (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4963 if (ppc_is_imm32 (inst
->inst_offset
)) {
4964 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4965 ppc_stb (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4967 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4968 ppc_stbx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
4973 if (ppc_is_imm16 (inst
->inst_offset
)) {
4974 ppc_sth (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4976 if (ppc_is_imm32 (inst
->inst_offset
)) {
4977 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4978 ppc_sth (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4980 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4981 ppc_sthx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
4985 #ifdef __mono_ppc64__
4987 if (ppc_is_imm16 (inst
->inst_offset
)) {
4988 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4990 if (ppc_is_imm32 (inst
->inst_offset
)) {
4991 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4992 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4994 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4995 ppc_stwx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
5000 if (ppc_is_imm16 (inst
->inst_offset
)) {
5001 ppc_str (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
5003 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
5004 ppc_str_indexed (code
, ppc_r0
, ppc_r12
, inst
->inst_basereg
);
5009 g_assert (ppc_is_imm16 (ainfo
->offset
+ 4));
5010 if (ppc_is_imm16 (inst
->inst_offset
+ 4)) {
5011 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
5012 ppc_lwz (code
, ppc_r0
, ainfo
->offset
+ 4, ppc_r12
);
5013 ppc_stw (code
, ppc_r0
, inst
->inst_offset
+ 4, inst
->inst_basereg
);
5015 /* use r11 to load the 2nd half of the long before we clobber r12. */
5016 ppc_lwz (code
, ppc_r11
, ainfo
->offset
+ 4, ppc_r12
);
5017 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
5018 ppc_addi (code
, ppc_r12
, ppc_r12
, inst
->inst_offset
);
5019 ppc_stw (code
, ppc_r0
, 0, ppc_r12
);
5020 ppc_stw (code
, ppc_r11
, 4, ppc_r12
);
5025 if (ppc_is_imm16 (inst
->inst_offset
)) {
5026 ppc_stptr (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
5028 if (ppc_is_imm32 (inst
->inst_offset
)) {
5029 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
5030 ppc_stptr (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
5032 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
5033 ppc_stptr_indexed (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
5038 } else if (ainfo
->regtype
== RegTypeFP
) {
5039 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5040 if (ainfo
->size
== 8)
5041 ppc_stfd (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
5042 else if (ainfo
->size
== 4)
5043 ppc_stfs (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
5045 g_assert_not_reached ();
5046 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
/* homogeneous FP struct passed in consecutive FP registers */
5047 int doffset
= inst
->inst_offset
;
5051 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5052 g_assert (ppc_is_imm16 (inst
->inst_offset
+ ainfo
->vtregs
* sizeof (target_mgreg_t
)));
5053 /* FIXME: what if there is no class? */
5054 if (sig
->pinvoke
&& mono_class_from_mono_type_internal (inst
->inst_vtype
))
5055 size
= mono_class_native_size (mono_class_from_mono_type_internal (inst
->inst_vtype
), NULL
);
5056 for (cur_reg
= 0; cur_reg
< ainfo
->vtregs
; ++cur_reg
) {
5057 if (ainfo
->size
== 4) {
5058 ppc_stfs (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5060 ppc_stfd (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5062 soffset
+= ainfo
->size
;
5063 doffset
+= ainfo
->size
;
5065 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
/* struct passed by value in GPRs (plus possible stack tail) */
5066 int doffset
= inst
->inst_offset
;
5070 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5071 g_assert (ppc_is_imm16 (inst
->inst_offset
+ ainfo
->vtregs
* sizeof (target_mgreg_t
)));
5072 /* FIXME: what if there is no class? */
5073 if (sig
->pinvoke
&& mono_class_from_mono_type_internal (inst
->inst_vtype
))
5074 size
= mono_class_native_size (mono_class_from_mono_type_internal (inst
->inst_vtype
), NULL
);
5075 for (cur_reg
= 0; cur_reg
< ainfo
->vtregs
; ++cur_reg
) {
5078 * Darwin handles 1 and 2 byte
5079 * structs specially by
5080 * loading h/b into the arg
5081 * register. Only done for
5085 ppc_sth (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5087 ppc_stb (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5091 #ifdef __mono_ppc64__
5093 g_assert (cur_reg
== 0);
5094 #if G_BYTE_ORDER == G_BIG_ENDIAN
/* BE: small struct sits in the high-order bytes; shift left before storing */
5095 ppc_sldi (code
, ppc_r0
, ainfo
->reg
,
5096 (sizeof (target_mgreg_t
) - ainfo
->bytes
) * 8);
5097 ppc_stptr (code
, ppc_r0
, doffset
, inst
->inst_basereg
);
5099 if (mono_class_native_size (inst
->klass
, NULL
) == 1) {
5100 ppc_stb (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5101 } else if (mono_class_native_size (inst
->klass
, NULL
) == 2) {
5102 ppc_sth (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5103 } else if (mono_class_native_size (inst
->klass
, NULL
) == 4) { // WDS -- maybe <=4?
5104 ppc_stw (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5106 ppc_stptr (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
); // WDS -- Better way?
5112 ppc_stptr (code
, ainfo
->reg
+ cur_reg
, doffset
,
5113 inst
->inst_basereg
);
5116 soffset
+= sizeof (target_mgreg_t
);
5117 doffset
+= sizeof (target_mgreg_t
);
5119 if (ainfo
->vtsize
) {
5120 /* FIXME: we need to do the shifting here, too */
5123 /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */
5124 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
5125 if ((size
& MONO_PPC_32_64_CASE (3, 7)) != 0) {
5126 code
= emit_memcpy (code
, size
- soffset
,
5127 inst
->inst_basereg
, doffset
,
5128 ppc_r12
, ainfo
->offset
+ soffset
);
5130 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (target_mgreg_t
),
5131 inst
->inst_basereg
, doffset
,
5132 ppc_r12
, ainfo
->offset
+ soffset
);
5135 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
5136 /* if it was originally a RegTypeBase */
5137 if (ainfo
->offset
) {
5138 /* load the previous stack pointer in r12 */
5139 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
5140 ppc_ldptr (code
, ppc_r12
, ainfo
->offset
, ppc_r12
);
5142 ppc_mr (code
, ppc_r12
, ainfo
->reg
);
5145 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5146 code
= emit_memcpy (code
, ainfo
->vtsize
, inst
->inst_basereg
, inst
->inst_offset
, ppc_r12
, 0);
5147 /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
5149 g_assert_not_reached ();
/* optionally build and link the MonoLMF for this frame */
5154 if (method
->save_lmf
) {
5155 if (cfg
->compile_aot
) {
5156 /* Compute the got address which is needed by the PLT entry */
5157 code
= mono_arch_emit_load_got_addr (cfg
->native_code
, code
, cfg
, NULL
);
5159 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5160 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern
));
5161 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
5162 ppc_load_func (code
, PPC_CALL_REG
, 0);
5163 ppc_mtlr (code
, PPC_CALL_REG
);
5168 /* we build the MonoLMF structure on the stack - see mini-ppc.h */
5169 /* lmf_offset is the offset from the previous stack pointer,
5170 * alloc_size is the total stack space allocated, so the offset
5171 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
5172 * The pointer to the struct is put in ppc_r12 (new_lmf).
5173 * The callee-saved registers are already in the MonoLMF structure
5175 ppc_addi (code
, ppc_r12
, ppc_sp
, alloc_size
- lmf_offset
);
5176 /* ppc_r3 is the result from mono_get_lmf_addr () */
5177 ppc_stptr (code
, ppc_r3
, G_STRUCT_OFFSET(MonoLMF
, lmf_addr
), ppc_r12
);
5178 /* new_lmf->previous_lmf = *lmf_addr */
5179 ppc_ldptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r3
);
5180 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r12
);
5181 /* *(lmf_addr) = r12 */
5182 ppc_stptr (code
, ppc_r12
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r3
);
5183 /* save method info */
5184 if (cfg
->compile_aot
)
5186 ppc_load (code
, ppc_r0
, 0);
5188 ppc_load_ptr (code
, ppc_r0
, method
);
5189 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, method
), ppc_r12
);
5190 ppc_stptr (code
, ppc_sp
, G_STRUCT_OFFSET(MonoLMF
, ebp
), ppc_r12
);
5191 /* save the current IP */
5192 if (cfg
->compile_aot
) {
5194 ppc_mflr (code
, ppc_r0
);
5196 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_IP
, NULL
);
/* placeholder immediate; patched later via MONO_PATCH_INFO_IP */
5197 #ifdef __mono_ppc64__
5198 ppc_load_sequence (code
, ppc_r0
, (guint64
)0x0101010101010101LL
);
5200 ppc_load_sequence (code
, ppc_r0
, (gulong
)0x01010101L
);
5203 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, eip
), ppc_r12
);
5206 set_code_cursor (cfg
, code
);
/*
 * Emit the method epilog: unlink the MonoLMF (if save_lmf), restore LR,
 * restore the callee-saved registers, pop the frame and return.
 * The imm16 path restores via displacement loads; the large-frame path
 * computes the old sp into r12 first.
 * NOTE(review): braces, `else`, `int i;`, `pos = 0;` and the final
 * `ppc_blr (code); return;`-style lines are missing from this extract.
 */
5213 mono_arch_emit_epilog (MonoCompile
*cfg
)
5215 MonoMethod
*method
= cfg
->method
;
/* worst-case estimate for the epilog sequence */
5217 int max_epilog_size
= 16 + 20*4;
5220 if (cfg
->method
->save_lmf
)
5221 max_epilog_size
+= 128;
5223 code
= realloc_code (cfg
, max_epilog_size
);
5227 if (method
->save_lmf
) {
5229 pos
+= sizeof (MonoLMF
);
5231 /* save the frame reg in r8 */
5232 ppc_mr (code
, ppc_r8
, cfg
->frame_reg
);
5233 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
);
5234 /* r5 = previous_lmf */
5235 ppc_ldptr (code
, ppc_r5
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r12
);
5237 ppc_ldptr (code
, ppc_r6
, G_STRUCT_OFFSET(MonoLMF
, lmf_addr
), ppc_r12
);
5238 /* *(lmf_addr) = previous_lmf */
5239 ppc_stptr (code
, ppc_r5
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r6
);
5240 /* FIXME: speedup: there is no actual need to restore the registers if
5241 * we didn't actually change them (idea from Zoltan).
/* reload r13-r31 in one lmw/ld sequence from the LMF iregs array */
5244 ppc_ldr_multiple (code
, ppc_r13
, G_STRUCT_OFFSET(MonoLMF
, iregs
), ppc_r12
);
5246 /*for (i = 14; i < 32; i++) {
5247 ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12);
5249 g_assert (ppc_is_imm16 (cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
));
5250 /* use the saved copy of the frame reg in r8 */
5251 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
5252 ppc_ldr (code
, ppc_r0
, cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
, ppc_r8
);
5253 ppc_mtlr (code
, ppc_r0
);
5255 ppc_addic (code
, ppc_sp
, ppc_r8
, cfg
->stack_usage
);
/* non-LMF path: restore LR from the caller frame slot */
5257 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
5258 long return_offset
= cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
;
5259 if (ppc_is_imm16 (return_offset
)) {
5260 ppc_ldr (code
, ppc_r0
, return_offset
, cfg
->frame_reg
);
5262 ppc_load (code
, ppc_r12
, return_offset
);
5263 ppc_ldr_indexed (code
, ppc_r0
, cfg
->frame_reg
, ppc_r12
);
5265 ppc_mtlr (code
, ppc_r0
);
5267 if (ppc_is_imm16 (cfg
->stack_usage
)) {
/* first walk: compute the offset of the register save area top */
5268 int offset
= cfg
->stack_usage
;
5269 for (i
= 13; i
<= 31; i
++) {
5270 if (cfg
->used_int_regs
& (1 << i
))
5271 offset
-= sizeof (target_mgreg_t
);
5273 if (cfg
->frame_reg
!= ppc_sp
)
5274 ppc_mr (code
, ppc_r12
, cfg
->frame_reg
);
5275 /* note r31 (possibly the frame register) is restored last */
5276 for (i
= 13; i
<= 31; i
++) {
5277 if (cfg
->used_int_regs
& (1 << i
)) {
5278 ppc_ldr (code
, i
, offset
, cfg
->frame_reg
);
5279 offset
+= sizeof (target_mgreg_t
);
5282 if (cfg
->frame_reg
!= ppc_sp
)
5283 ppc_addi (code
, ppc_sp
, ppc_r12
, cfg
->stack_usage
);
5285 ppc_addi (code
, ppc_sp
, ppc_sp
, cfg
->stack_usage
);
/* large frame: materialize stack_usage in r12, restore below old sp */
5287 ppc_load32 (code
, ppc_r12
, cfg
->stack_usage
);
5288 if (cfg
->used_int_regs
) {
5289 ppc_add (code
, ppc_r12
, cfg
->frame_reg
, ppc_r12
);
5290 for (i
= 31; i
>= 13; --i
) {
5291 if (cfg
->used_int_regs
& (1 << i
)) {
5292 pos
+= sizeof (target_mgreg_t
);
5293 ppc_ldr (code
, i
, -pos
, ppc_r12
);
5296 ppc_mr (code
, ppc_sp
, ppc_r12
);
5298 ppc_add (code
, ppc_sp
, cfg
->frame_reg
, ppc_r12
);
5304 set_code_cursor (cfg
, code
);
5307 #endif /* ifndef DISABLE_JIT */
5309 /* remove once throw_exception_by_name is eliminated */
5311 exception_id_by_name (const char *name
)
5313 if (strcmp (name
, "IndexOutOfRangeException") == 0)
5314 return MONO_EXC_INDEX_OUT_OF_RANGE
;
5315 if (strcmp (name
, "OverflowException") == 0)
5316 return MONO_EXC_OVERFLOW
;
5317 if (strcmp (name
, "ArithmeticException") == 0)
5318 return MONO_EXC_ARITHMETIC
;
5319 if (strcmp (name
, "DivideByZeroException") == 0)
5320 return MONO_EXC_DIVIDE_BY_ZERO
;
5321 if (strcmp (name
, "InvalidCastException") == 0)
5322 return MONO_EXC_INVALID_CAST
;
5323 if (strcmp (name
, "NullReferenceException") == 0)
5324 return MONO_EXC_NULL_REF
;
5325 if (strcmp (name
, "ArrayTypeMismatchException") == 0)
5326 return MONO_EXC_ARRAY_TYPE_MISMATCH
;
5327 if (strcmp (name
, "ArgumentException") == 0)
5328 return MONO_EXC_ARGUMENT
;
5329 g_error ("Unknown intrinsic exception %s\n", name
);
/*
 * Emit the out-of-line exception-raising stubs after the method body.
 * First pass sizes the buffer from the patch list; second pass resolves
 * BB_OVF / EXC_OVF overflow jumps and emits one shared throw stub per
 * distinct intrinsic exception, reusing a stub while it stays within
 * conditional-branch reach (the 50000-byte window below).
 * NOTE(review): `break;`s, closing braces, `int i;` and `default:` lines
 * are missing from this extracted text.
 */
5335 mono_arch_emit_exceptions (MonoCompile
*cfg
)
5337 MonoJumpInfo
*patch_info
;
5340 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
5341 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
5342 int max_epilog_size
= 50;
5344 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
5345 exc_throw_pos
[i
] = NULL
;
5346 exc_throw_found
[i
] = 0;
5349 /* count the number of exception infos */
5352 * make sure we have enough space for exceptions
5354 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5355 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
5356 i
= exception_id_by_name (patch_info
->data
.target
);
5357 if (!exc_throw_found
[i
]) {
5358 max_epilog_size
+= (2 * PPC_LOAD_SEQUENCE_LENGTH
) + 5 * 4;
5359 exc_throw_found
[i
] = TRUE
;
5361 } else if (patch_info
->type
== MONO_PATCH_INFO_BB_OVF
)
5362 max_epilog_size
+= 12;
5363 else if (patch_info
->type
== MONO_PATCH_INFO_EXC_OVF
) {
5364 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5365 i
= exception_id_by_name (ovfj
->data
.exception
);
5366 if (!exc_throw_found
[i
]) {
5367 max_epilog_size
+= (2 * PPC_LOAD_SEQUENCE_LENGTH
) + 5 * 4;
5368 exc_throw_found
[i
] = TRUE
;
5370 max_epilog_size
+= 8;
5374 code
= realloc_code (cfg
, max_epilog_size
);
5376 /* add code to raise exceptions */
5377 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5378 switch (patch_info
->type
) {
5379 case MONO_PATCH_INFO_BB_OVF
: {
5380 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5381 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5382 /* patch the initial jump */
5383 ppc_patch (ip
, code
);
5384 ppc_bc (code
, ovfj
->b0_cond
, ovfj
->b1_cond
, 2);
5386 ppc_patch (code
- 4, ip
+ 4); /* jump back after the initiali branch */
5387 /* jump back to the true target */
5389 ip
= ovfj
->data
.bb
->native_offset
+ cfg
->native_code
;
5390 ppc_patch (code
- 4, ip
);
/* consumed: neutralize so later passes skip it */
5391 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5394 case MONO_PATCH_INFO_EXC_OVF
: {
5395 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5396 MonoJumpInfo
*newji
;
5397 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5398 unsigned char *bcl
= code
;
5399 /* patch the initial jump: we arrived here with a call */
5400 ppc_patch (ip
, code
);
5401 ppc_bc (code
, ovfj
->b0_cond
, ovfj
->b1_cond
, 0);
5403 ppc_patch (code
- 4, ip
+ 4); /* jump back after the initiali branch */
5404 /* patch the conditional jump to the right handler */
5405 /* make it processed next */
5406 newji
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfo
));
5407 newji
->type
= MONO_PATCH_INFO_EXC
;
5408 newji
->ip
.i
= bcl
- cfg
->native_code
;
5409 newji
->data
.target
= ovfj
->data
.exception
;
/* splice the synthesized EXC entry right after the current one */
5410 newji
->next
= patch_info
->next
;
5411 patch_info
->next
= newji
;
5412 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5415 case MONO_PATCH_INFO_EXC
: {
5416 MonoClass
*exc_class
;
5418 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5419 i
= exception_id_by_name (patch_info
->data
.target
);
/* reuse an existing stub while it is within branch displacement range */
5420 if (exc_throw_pos
[i
] && !(ip
> exc_throw_pos
[i
] && ip
- exc_throw_pos
[i
] > 50000)) {
5421 ppc_patch (ip
, exc_throw_pos
[i
]);
5422 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5425 exc_throw_pos
[i
] = code
;
5428 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
5430 ppc_patch (ip
, code
);
5431 /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
/* r3 = exception type token, r4 = throwing ip (from LR) */
5432 ppc_load (code
, ppc_r3
, m_class_get_type_token (exc_class
));
5433 /* we got here from a conditional call, so the calling ip is set in lr */
5434 ppc_mflr (code
, ppc_r4
);
/* retarget this patch entry at the throw-corlib-exception icall */
5435 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
5436 patch_info
->data
.jit_icall_id
= MONO_JIT_ICALL_mono_arch_throw_corlib_exception
;
5437 patch_info
->ip
.i
= code
- cfg
->native_code
;
5438 if (FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) {
5439 ppc_load_func (code
, PPC_CALL_REG
, 0);
5440 ppc_mtctr (code
, PPC_CALL_REG
);
5441 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5453 set_code_cursor (cfg
, code
);
/*
 * Probe helper: checks whether `value` can be found at a fixed,
 * hard-coded offset (284) from the r2 base register via a two-level
 * table indexed by idx/32 and idx%32.
 * NOTE(review): r2 is the TOC/small-data base on common PPC ABIs and 284
 * looks like an ABI-specific magic offset — confirm against the target
 * ABI before relying on this. Guard/return lines are missing from this
 * extracted text.
 */
5459 try_offset_access (void *value
, guint32 idx
)
5461 register void* me
__asm__ ("r2");
5462 void ***p
= (void***)((char*)me
+ 284);
5463 int idx1
= idx
/ 32;
5464 int idx2
= idx
% 32;
5467 if (value
!= p
[idx1
][idx2
])
/* Per-architecture late initialization hook; body not visible in this
 * extract (presumably empty on PPC — confirm against upstream). */
5474 mono_arch_finish_init (void)
/* Byte-size constants (per emitted chunk) used below to pre-compute the
 * IMT trampoline buffer size before emitting it. */
5478 #define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
5480 #define LOADSTORE_SIZE 4
5481 #define JUMP_IMM_SIZE 12
5482 #define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8)
/* set to 1 to emit an assertion chunk for unmatched IMT keys */
5483 #define ENABLE_WRONG_METHOD_CHECK 0
5486 * LOCKING: called with the domain lock held
/*
 * Build the IMT/interface dispatch trampoline: a compare-and-branch
 * decision tree over the sorted imt_entries keys. Pass 1 sizes each
 * chunk; pass 2 emits compare/branch/indirect-jump code; pass 3 patches
 * the intra-tree branches. Returns the start of the emitted code.
 * NOTE(review): several structural lines (braces, `else`, `#endif`,
 * declarations, `return start;`) are missing from this extracted text.
 */
5489 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
5490 gpointer fail_tramp
)
5494 guint8
*code
, *start
;
/* pass 1: accumulate per-item chunk sizes into `size` */
5496 for (i
= 0; i
< count
; ++i
) {
5497 MonoIMTCheckItem
*item
= imt_entries
[i
];
5498 if (item
->is_equals
) {
5499 if (item
->check_target_idx
) {
5500 if (!item
->compare_done
)
5501 item
->chunk_size
+= CMP_SIZE
;
5502 if (item
->has_target_code
)
5503 item
->chunk_size
+= BR_SIZE
+ JUMP_IMM32_SIZE
;
5505 item
->chunk_size
+= LOADSTORE_SIZE
+ BR_SIZE
+ JUMP_IMM_SIZE
;
5508 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
+ JUMP_IMM32_SIZE
* 2;
5509 if (!item
->has_target_code
)
5510 item
->chunk_size
+= LOADSTORE_SIZE
;
5512 item
->chunk_size
+= LOADSTORE_SIZE
+ JUMP_IMM_SIZE
;
5513 #if ENABLE_WRONG_METHOD_CHECK
5514 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
+ 4;
/* non-equals node: just a compare and a less-than branch */
5519 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
;
5520 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
5522 size
+= item
->chunk_size
;
5524 /* the initial load of the vtable address */
5525 size
+= PPC_LOAD_SEQUENCE_LENGTH
+ LOADSTORE_SIZE
;
5527 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
);
5529 code
= mono_domain_code_reserve (domain
, size
);
5534 * We need to save and restore r12 because it might be
5535 * used by the caller as the vtable register, so
5536 * clobbering it will trip up the magic trampoline.
5538 * FIXME: Get rid of this by making sure that r12 is
5539 * not used as the vtable register in interface calls.
5541 ppc_stptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5542 ppc_load (code
, ppc_r12
, (gsize
)(& (vtable
->vtable
[0])));
/* pass 2: emit the chunks */
5544 for (i
= 0; i
< count
; ++i
) {
5545 MonoIMTCheckItem
*item
= imt_entries
[i
];
5546 item
->code_target
= code
;
5547 if (item
->is_equals
) {
5548 if (item
->check_target_idx
) {
5549 if (!item
->compare_done
) {
5550 ppc_load (code
, ppc_r0
, (gsize
)item
->key
);
5551 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5553 item
->jmp_code
= code
;
5554 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5555 if (item
->has_target_code
) {
5556 ppc_load_ptr (code
, ppc_r0
, item
->value
.target_code
);
5558 ppc_ldptr (code
, ppc_r0
, (sizeof (target_mgreg_t
) * item
->value
.vtable_slot
), ppc_r12
);
/* restore the caller's r12 before jumping to the target */
5559 ppc_ldptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5561 ppc_mtctr (code
, ppc_r0
);
5562 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
/* fail_tramp variant: mismatch falls through to the fail trampoline */
5565 ppc_load (code
, ppc_r0
, (gulong
)item
->key
);
5566 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5567 item
->jmp_code
= code
;
5568 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5569 if (item
->has_target_code
) {
5570 ppc_load_ptr (code
, ppc_r0
, item
->value
.target_code
);
5573 ppc_load_ptr (code
, ppc_r0
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
5574 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
5576 ppc_mtctr (code
, ppc_r0
);
5577 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5578 ppc_patch (item
->jmp_code
, code
);
5579 ppc_load_ptr (code
, ppc_r0
, fail_tramp
);
5580 ppc_mtctr (code
, ppc_r0
);
5581 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5582 item
->jmp_code
= NULL
;
5584 /* enable the commented code to assert on wrong method */
5585 #if ENABLE_WRONG_METHOD_CHECK
5586 ppc_load (code
, ppc_r0
, (guint32
)item
->key
);
5587 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5588 item
->jmp_code
= code
;
5589 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5591 ppc_ldptr (code
, ppc_r0
, (sizeof (target_mgreg_t
) * item
->value
.vtable_slot
), ppc_r12
);
5592 ppc_ldptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5593 ppc_mtctr (code
, ppc_r0
);
5594 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5595 #if ENABLE_WRONG_METHOD_CHECK
5596 ppc_patch (item
->jmp_code
, code
);
5598 item
->jmp_code
= NULL
;
/* non-equals (range split) node: branch on key < split */
5603 ppc_load (code
, ppc_r0
, (gulong
)item
->key
);
5604 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5605 item
->jmp_code
= code
;
5606 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_LT
, 0);
5609 /* patch the branches to get to the target items */
5610 for (i
= 0; i
< count
; ++i
) {
5611 MonoIMTCheckItem
*item
= imt_entries
[i
];
5612 if (item
->jmp_code
) {
5613 if (item
->check_target_idx
) {
5614 ppc_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
5620 UnlockedAdd (&mono_stats
.imt_trampolines_size
, code
- start
);
5621 g_assert (code
- start
<= size
);
5622 mono_arch_flush_icache (start
, size
);
5623 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
5625 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, NULL
), domain
);
5631 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
5633 host_mgreg_t
*r
= (host_mgreg_t
*)regs
;
5635 return (MonoMethod
*)(gsize
) r
[MONO_ARCH_IMT_REG
];
5639 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
5641 return (MonoVTable
*)(gsize
) regs
[MONO_ARCH_RGCTX_REG
];
/*
 * Build the CIE unwind program shared by all PPC methods: the initial
 * CFA rule is r1 + 0 (the stack pointer). The list variable `l` is
 * declared/returned on lines missing from this extracted text.
 */
5645 mono_arch_get_cie_program (void)
5649 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ppc_r1
, 0);
/*
 * mono_arch_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (signature FSIG, arguments ARGS) with a
 * single inlined MonoInst — PPC intrinsics for System.Math and System.MathF.
 * NOTE(review): several original lines are elided in this extraction (e.g. the
 * declaration/initialization of `opcode`, the OP_* assignments for Sqrt/Abs
 * and for the signed Min/Max cases, #else/#endif lines, and the returns), so
 * only the visible fragments are annotated below.
 */
5655 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
5657 MonoInst
*ins
= NULL
;
/* System.Math intrinsics (double precision): Sqrt, and Abs on R8. */
5660 if (cmethod
->klass
== mono_class_try_get_math_class ()) {
5661 if (strcmp (cmethod
->name
, "Sqrt") == 0) {
5663 } else if (strcmp (cmethod
->name
, "Abs") == 0 && fsig
->params
[0]->type
== MONO_TYPE_R8
) {
/* Single-argument R8 intrinsic: build the instruction with a fresh FP dreg. */
5667 if (opcode
&& fsig
->param_count
== 1) {
5668 MONO_INST_NEW (cfg
, ins
, opcode
);
5669 ins
->type
= STACK_R8
;
5670 ins
->dreg
= mono_alloc_freg (cfg
);
5671 ins
->sreg1
= args
[0]->dreg
;
5672 MONO_ADD_INS (cfg
->cbb
, ins
);
5675 /* Check for Min/Max for (u)int(32|64) */
/* Only when the CPU implements ISA 2.03 features (see cpu_hw_caps probing);
 * unsigned variants select the _UN opcodes, 64-bit ones are ppc64-only. */
5677 if (cpu_hw_caps
& PPC_ISA_2_03
) {
5678 if (strcmp (cmethod
->name
, "Min") == 0) {
5679 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
5681 if (fsig
->params
[0]->type
== MONO_TYPE_U4
)
5682 opcode
= OP_IMIN_UN
;
5683 #ifdef __mono_ppc64__
5684 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
5686 else if (fsig
->params
[0]->type
== MONO_TYPE_U8
)
5687 opcode
= OP_LMIN_UN
;
5689 } else if (strcmp (cmethod
->name
, "Max") == 0) {
5690 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
5692 if (fsig
->params
[0]->type
== MONO_TYPE_U4
)
5693 opcode
= OP_IMAX_UN
;
5694 #ifdef __mono_ppc64__
5695 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
5697 else if (fsig
->params
[0]->type
== MONO_TYPE_U8
)
5698 opcode
= OP_LMAX_UN
;
5702 * TODO: Floating point version with fsel, but fsel has
5703 * some peculiarities (need a scratch reg unless
5704 * comparing with 0, NaN/Inf behaviour (then MathF too)
/* Two-argument integer intrinsic: stack type follows the operand width. */
5708 if (opcode
&& fsig
->param_count
== 2) {
5709 MONO_INST_NEW (cfg
, ins
, opcode
);
5710 ins
->type
= fsig
->params
[0]->type
== MONO_TYPE_I4
? STACK_I4
: STACK_I8
;
5711 ins
->dreg
= mono_alloc_ireg (cfg
);
5712 ins
->sreg1
= args
[0]->dreg
;
5713 ins
->sreg2
= args
[1]->dreg
;
5714 MONO_ADD_INS (cfg
->cbb
, ins
);
5717 /* Rounding instructions */
/* Floor/Ceiling/Truncate on a single R8 argument, gated on ISA 2.x caps. */
5719 if ((cpu_hw_caps
& PPC_ISA_2X
) && (fsig
->param_count
== 1) && (fsig
->params
[0]->type
== MONO_TYPE_R8
)) {
5721 * XXX: sysmath.c and the POWER ISA documentation for
5722 * frin[.] imply rounding is a little more complicated
5723 * than expected; the semantics are slightly different,
5724 * so just "frin." isn't a drop-in replacement. Floor,
5725 * Truncate, and Ceiling seem to work normally though.
5726 * (also, no float versions of these ops, but frsp
5727 * could be preprended?)
5729 //if (!strcmp (cmethod->name, "Round"))
5730 // opcode = OP_ROUND;
5731 if (!strcmp (cmethod
->name
, "Floor"))
5732 opcode
= OP_PPC_FLOOR
;
5733 else if (!strcmp (cmethod
->name
, "Ceiling"))
5734 opcode
= OP_PPC_CEIL
;
5735 else if (!strcmp (cmethod
->name
, "Truncate"))
5736 opcode
= OP_PPC_TRUNC
;
5738 MONO_INST_NEW (cfg
, ins
, opcode
);
5739 ins
->type
= STACK_R8
;
5740 ins
->dreg
= mono_alloc_freg (cfg
);
5741 ins
->sreg1
= args
[0]->dreg
;
5742 MONO_ADD_INS (cfg
->cbb
, ins
);
/* System.MathF intrinsics (single precision): Sqrt only. */
5746 if (cmethod
->klass
== mono_class_try_get_mathf_class ()) {
5747 if (strcmp (cmethod
->name
, "Sqrt") == 0) {
5749 } /* XXX: POWER has no single-precision normal FPU abs? */
5751 if (opcode
&& fsig
->param_count
== 1) {
5752 MONO_INST_NEW (cfg
, ins
, opcode
);
5753 ins
->type
= STACK_R4
;
5754 ins
->dreg
= mono_alloc_freg (cfg
);
5755 ins
->sreg1
= args
[0]->dreg
;
5756 MONO_ADD_INS (cfg
->cbb
, ins
);
/*
 * mono_arch_context_get_int_reg:
 *
 *   Return the value of integer register REG from the saved context CTX.
 * NOTE(review): the guard before the first return is elided in this
 * extraction — presumably `if (reg == ppc_r1)` since the stack pointer is
 * stored separately in the context (TODO confirm).
 */
5763 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
5766 return (host_mgreg_t
)(gsize
)MONO_CONTEXT_GET_SP (ctx
);
/* General case: read from the saved GPR array. */
5768 return ctx
->regs
[reg
];
/*
 * mono_arch_get_patch_offset:
 *
 *   Offset (in bytes) of the patchable part of the instruction at CODE.
 * NOTE(review): the return type and the entire body are elided in this
 * extraction; only the signature fragments are visible.
 */
5772 mono_arch_get_patch_offset (guint8
*code
)
5778 * mono_arch_emit_load_got_addr:
5780 * Emit code to load the got address.
5781 * On PPC, the result is placed into r30.
/*
 * mono_arch_emit_load_got_addr:
 *
 *   Emit code that materializes the GOT address into r30: capture the link
 * register value (set by a preceding bl) with mflr, then add a 32-bit
 * displacement that arch_emit_got_address () patches in later.
 * NOTE(review): the selection between the JIT path (mono_add_patch_info) and
 * the AOT path (*ji prepend) — presumably `if (cfg) ... else ...` — plus the
 * 64-bit branch of the #if, the matching #else/#endif, and the final return
 * are elided in this extraction.
 */
5784 mono_arch_emit_load_got_addr (guint8
*start
, guint8
*code
, MonoCompile
*cfg
, MonoJumpInfo
**ji
)
5787 ppc_mflr (code
, ppc_r30
);
5789 mono_add_patch_info (cfg
, code
- start
, MONO_PATCH_INFO_GOT_OFFSET
, NULL
);
5791 *ji
= mono_patch_info_list_prepend (*ji
, code
- start
, MONO_PATCH_INFO_GOT_OFFSET
, NULL
);
5792 /* arch_emit_got_address () patches this */
5793 #if defined(TARGET_POWERPC64)
5799 ppc_load32 (code
, ppc_r0
, 0);
5800 ppc_add (code
, ppc_r30
, ppc_r30
, ppc_r0
);
5803 set_code_cursor (cfg
, code
);
5808 * mono_arch_emit_load_aotconst:
5810 * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
5811 * TARGET from the mscorlib GOT in full-aot code.
5812 * On PPC, the GOT address is assumed to be in r30, and the result is placed into r12.
/*
 * mono_arch_emit_load_aotconst:
 *
 *   Emit code that loads the GOT slot identified by TRAMP_TYPE/TARGET from
 * the mscorlib GOT: fetch the mscorlib GOT address (stored one pointer past
 * the GOT base in r30) into r12, then load from it at a 32-bit offset that
 * arch_emit_got_access () patches in later. The result ends up in r12.
 * NOTE(review): the return type, braces and final return are elided in this
 * extraction.
 */
5816 mono_arch_emit_load_aotconst (guint8
*start
, guint8
*code
, MonoJumpInfo
**ji
, MonoJumpInfoType tramp_type
, gconstpointer target
)
5818 /* Load the mscorlib got address */
5819 ppc_ldptr (code
, ppc_r12
, sizeof (target_mgreg_t
), ppc_r30
);
5820 *ji
= mono_patch_info_list_prepend (*ji
, code
- start
, tramp_type
, target
);
5821 /* arch_emit_got_access () patches this */
5822 ppc_load32 (code
, ppc_r0
, 0);
5823 ppc_ldptr_indexed (code
, ppc_r12
, ppc_r12
, ppc_r0
);
5828 /* Soft Debug support */
5829 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
5836 * mono_arch_set_breakpoint:
5838 * See mini-amd64.c for docs.
/*
 * mono_arch_set_breakpoint:
 *
 *   Patch the sequence point at IP with code that loads from
 * bp_trigger_page, so enabled breakpoints fault when executed.
 * See mini-amd64.c for docs.
 * NOTE(review): the initialization of `code` (presumably `guint8 *code = ip;`
 * — TODO confirm) is elided in this extraction.
 */
5841 mono_arch_set_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5844 guint8
*orig_code
= code
;
/* Load the trigger page address into r12 and read through it; together the
 * two emissions must be exactly BREAKPOINT_SIZE bytes. */
5846 ppc_load_sequence (code
, ppc_r12
, (gsize
)bp_trigger_page
);
5847 ppc_ldptr (code
, ppc_r12
, 0, ppc_r12
);
5849 g_assert (code
- orig_code
== BREAKPOINT_SIZE
);
/* Make the patched instructions visible to the instruction stream. */
5851 mono_arch_flush_icache (orig_code
, code
- orig_code
);
5855 * mono_arch_clear_breakpoint:
5857 * See mini-amd64.c for docs.
/*
 * mono_arch_clear_breakpoint:
 *
 *   Undo mono_arch_set_breakpoint: overwrite the BREAKPOINT_SIZE bytes at IP
 * (one 4-byte instruction per loop iteration) and flush the icache.
 * See mini-amd64.c for docs.
 * NOTE(review): the initialization of `code` and the loop body that emits the
 * replacement instructions (presumably nops — TODO confirm) are elided in
 * this extraction.
 */
5860 mono_arch_clear_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5865 for (i
= 0; i
< BREAKPOINT_SIZE
/ 4; ++i
)
5868 mono_arch_flush_icache (ip
, code
- ip
);
5872 * mono_arch_is_breakpoint_event:
5874 * See mini-amd64.c for docs.
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Decide whether the signal described by INFO was caused by a breakpoint
 * read of bp_trigger_page, by checking whether the faulting address falls
 * within the page's first 128 bytes (with slack for the reported address
 * being off by 4). See mini-amd64.c for docs.
 * NOTE(review): the return type and the TRUE/FALSE returns are elided in this
 * extraction.
 */
5877 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
5879 siginfo_t
* sinfo
= (siginfo_t
*) info
;
5880 /* Sometimes the address is off by 4 */
5881 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
5888 * mono_arch_skip_breakpoint:
5890 * See mini-amd64.c for docs.
5893 mono_arch_skip_breakpoint (MonoContext
*ctx
, MonoJitInfo
*ji
)
5895 /* skip the ldptr */
5896 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5904 * mono_arch_start_single_stepping:
5906 * See mini-amd64.c for docs.
5909 mono_arch_start_single_stepping (void)
5911 mono_mprotect (ss_trigger_page
, mono_pagesize (), 0);
5915 * mono_arch_stop_single_stepping:
5917 * See mini-amd64.c for docs.
5920 mono_arch_stop_single_stepping (void)
5922 mono_mprotect (ss_trigger_page
, mono_pagesize (), MONO_MMAP_READ
);
5926 * mono_arch_is_single_step_event:
5928 * See mini-amd64.c for docs.
/*
 * mono_arch_is_single_step_event:
 *
 *   Decide whether the signal described by INFO was caused by a
 * single-stepping read of ss_trigger_page, by checking whether the faulting
 * address falls within the page's first 128 bytes (with slack for the
 * reported address being off by 4). See mini-amd64.c for docs.
 * NOTE(review): the return type and the TRUE/FALSE returns are elided in this
 * extraction.
 */
5931 mono_arch_is_single_step_event (void *info
, void *sigctx
)
5933 siginfo_t
* sinfo
= (siginfo_t
*) info
;
5934 /* Sometimes the address is off by 4 */
5935 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
5942 * mono_arch_skip_single_step:
5944 * See mini-amd64.c for docs.
5947 mono_arch_skip_single_step (MonoContext
*ctx
)
5949 /* skip the ldptr */
5950 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5954 * mono_arch_create_seq_point_info:
5956 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs.
 * NOTE(review): the return type and the entire body are elided in this
 * extraction; only the signature fragments are visible.
 */
5959 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
/*
 * mono_arch_opcode_supported:
 *
 *   Report whether OPCODE has an architecture-specific implementation on PPC;
 * the visible cases are the 32-bit (and, on ppc64, 64-bit) atomic add and
 * compare-and-swap opcodes.
 * NOTE(review): the return type, the `switch` header, the returns, and the
 * default case are elided in this extraction.
 */
5968 mono_arch_opcode_supported (int opcode
)
5971 case OP_ATOMIC_ADD_I4
:
5972 case OP_ATOMIC_CAS_I4
:
5973 #ifdef TARGET_POWERPC64
5974 case OP_ATOMIC_ADD_I8
:
5975 case OP_ATOMIC_CAS_I8
:
/*
 * mono_arch_load_function:
 *
 *   Map JIT_ICALL_ID to the address of its C implementation; the
 * MONO_AOT_ICALL macro expands each listed icall into a `case` that stores
 * the function pointer into `target`.
 * NOTE(review): this definition continues past the end of this chunk; only
 * its opening fragments are visible here.
 */
5984 mono_arch_load_function (MonoJitICallId jit_icall_id
)
5986 gpointer target
= NULL
;
5987 switch (jit_icall_id
) {
5988 #undef MONO_AOT_ICALL
5989 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
5990 MONO_AOT_ICALL (mono_ppc_throw_exception
)