3 * PowerPC backend for the Mono code generator
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
8 * Andreas Faerber <andreas.faerber@web.de>
10 * (C) 2003 Ximian, Inc.
11 * (C) 2007-2008 Andreas Faerber
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/utils/mono-proclib.h>
20 #include <mono/utils/mono-mmap.h>
21 #include <mono/utils/mono-hwcap.h>
22 #include <mono/utils/unlocked.h>
25 #ifdef TARGET_POWERPC64
26 #include "cpu-ppc64.h"
31 #include "aot-runtime.h"
32 #include "mini-runtime.h"
34 #include <sys/sysctl.h>
40 #include <sys/systemcfg.h>
43 static GENERATE_TRY_GET_CLASS_WITH_CACHE (math
, "System", "Math")
44 static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf
, "System", "MathF")
46 #define FORCE_INDIR_CALL 1
57 /* cpu_hw_caps contains the flags defined below */
58 static int cpu_hw_caps
= 0;
59 static int cachelinesize
= 0;
60 static int cachelineinc
= 0;
62 PPC_ICACHE_SNOOP
= 1 << 0,
63 PPC_MULTIPLE_LS_UNITS
= 1 << 1,
64 PPC_SMP_CAPABLE
= 1 << 2,
67 PPC_MOVE_FPR_GPR
= 1 << 5,
68 PPC_ISA_2_03
= 1 << 6,
/* Byte size of a breakpoint site: the constant-load sequence plus one more 4-byte instruction. */
72 #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
74 /* This mutex protects architecture specific caches */
75 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
76 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
77 static mono_mutex_t mini_arch_mutex
;
80 * The code generated for sequence points reads from this location, which is
81 * made read-only when single stepping is enabled.
83 static gpointer ss_trigger_page
;
85 /* Enabled breakpoints read from this trigger page */
86 static gpointer bp_trigger_page
;
88 #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \
90 MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \
91 inst->type = STACK_R8; \
93 inst->inst_p0 = (void*)(addr); \
94 mono_bblock_add_inst (cfg->cbb, inst); \
/*
 * mono_arch_regname:
 * Map an integer register number to its symbolic name; index 1 is
 * reported as "sp" (r1 is the stack pointer).
 * NOTE(review): the table entries past r29 and the return statements fall
 * on lines not visible in this extraction — only the 0..31 bounds check
 * can be confirmed here.
 */
98 mono_arch_regname (int reg
) {
99 static const char rnames
[][4] = {
100 "r0", "sp", "r2", "r3", "r4",
101 "r5", "r6", "r7", "r8", "r9",
102 "r10", "r11", "r12", "r13", "r14",
103 "r15", "r16", "r17", "r18", "r19",
104 "r20", "r21", "r22", "r23", "r24",
105 "r25", "r26", "r27", "r28", "r29",
/* only valid register numbers index the name table */
108 if (reg
>= 0 && reg
< 32)
/*
 * mono_arch_fregname:
 * Map a floating-point register number to its symbolic name ("f0".."f31").
 * NOTE(review): the table entries past f29 and the return statements fall
 * on lines not visible in this extraction — only the 0..31 bounds check
 * can be confirmed here.
 */
114 mono_arch_fregname (int reg
) {
115 static const char rnames
[][4] = {
116 "f0", "f1", "f2", "f3", "f4",
117 "f5", "f6", "f7", "f8", "f9",
118 "f10", "f11", "f12", "f13", "f14",
119 "f15", "f16", "f17", "f18", "f19",
120 "f20", "f21", "f22", "f23", "f24",
121 "f25", "f26", "f27", "f28", "f29",
/* only valid register numbers index the name table */
124 if (reg
>= 0 && reg
< 32)
129 /* this function overwrites r0, r11, r12 */
131 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
133 /* unrolled, use the counter in big */
134 if (size
> sizeof (target_mgreg_t
) * 5) {
135 long shifted
= size
/ TARGET_SIZEOF_VOID_P
;
136 guint8
*copy_loop_start
, *copy_loop_jump
;
138 ppc_load (code
, ppc_r0
, shifted
);
139 ppc_mtctr (code
, ppc_r0
);
140 //g_assert (sreg == ppc_r12);
141 ppc_addi (code
, ppc_r11
, dreg
, (doffset
- sizeof (target_mgreg_t
)));
142 ppc_addi (code
, ppc_r12
, sreg
, (soffset
- sizeof (target_mgreg_t
)));
143 copy_loop_start
= code
;
144 ppc_ldptr_update (code
, ppc_r0
, (unsigned int)sizeof (target_mgreg_t
), ppc_r12
);
145 ppc_stptr_update (code
, ppc_r0
, (unsigned int)sizeof (target_mgreg_t
), ppc_r11
);
146 copy_loop_jump
= code
;
147 ppc_bc (code
, PPC_BR_DEC_CTR_NONZERO
, 0, 0);
148 ppc_patch (copy_loop_jump
, copy_loop_start
);
149 size
-= shifted
* sizeof (target_mgreg_t
);
150 doffset
= soffset
= 0;
153 #ifdef __mono_ppc64__
154 /* the hardware has multiple load/store units and the move is long
155 enough to use more then one register, then use load/load/store/store
156 to execute 2 instructions per cycle. */
157 if ((cpu_hw_caps
& PPC_MULTIPLE_LS_UNITS
) && (dreg
!= ppc_r11
) && (sreg
!= ppc_r11
)) {
159 ppc_ldptr (code
, ppc_r0
, soffset
, sreg
);
160 ppc_ldptr (code
, ppc_r11
, soffset
+8, sreg
);
161 ppc_stptr (code
, ppc_r0
, doffset
, dreg
);
162 ppc_stptr (code
, ppc_r11
, doffset
+8, dreg
);
169 ppc_ldr (code
, ppc_r0
, soffset
, sreg
);
170 ppc_str (code
, ppc_r0
, doffset
, dreg
);
176 if ((cpu_hw_caps
& PPC_MULTIPLE_LS_UNITS
) && (dreg
!= ppc_r11
) && (sreg
!= ppc_r11
)) {
178 ppc_lwz (code
, ppc_r0
, soffset
, sreg
);
179 ppc_lwz (code
, ppc_r11
, soffset
+4, sreg
);
180 ppc_stw (code
, ppc_r0
, doffset
, dreg
);
181 ppc_stw (code
, ppc_r11
, doffset
+4, dreg
);
189 ppc_lwz (code
, ppc_r0
, soffset
, sreg
);
190 ppc_stw (code
, ppc_r0
, doffset
, dreg
);
196 ppc_lhz (code
, ppc_r0
, soffset
, sreg
);
197 ppc_sth (code
, ppc_r0
, doffset
, dreg
);
203 ppc_lbz (code
, ppc_r0
, soffset
, sreg
);
204 ppc_stb (code
, ppc_r0
, doffset
, dreg
);
213 * mono_arch_get_argument_info:
214 * @csig: a method signature
215 * @param_count: the number of parameters to consider
216 * @arg_info: an array to store the result infos
218 * Gathers information on parameters such as size, alignment and
219 * padding. arg_info should be large enought to hold param_count + 1 entries.
221 * Returns the size of the activation frame.
224 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
226 #ifdef __mono_ppc64__
230 int k
, frame_size
= 0;
231 int size
, align
, pad
;
234 if (MONO_TYPE_ISSTRUCT (csig
->ret
)) {
235 frame_size
+= sizeof (target_mgreg_t
);
239 arg_info
[0].offset
= offset
;
242 frame_size
+= sizeof (target_mgreg_t
);
246 arg_info
[0].size
= frame_size
;
248 for (k
= 0; k
< param_count
; k
++) {
251 size
= mono_type_native_stack_size (csig
->params
[k
], (guint32
*)&align
);
253 size
= mini_type_stack_size (csig
->params
[k
], &align
);
255 /* ignore alignment for now */
258 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
259 arg_info
[k
].pad
= pad
;
261 arg_info
[k
+ 1].pad
= 0;
262 arg_info
[k
+ 1].size
= size
;
264 arg_info
[k
+ 1].offset
= offset
;
268 align
= MONO_ARCH_FRAME_ALIGNMENT
;
269 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
270 arg_info
[k
].pad
= pad
;
276 #ifdef __mono_ppc64__
/*
 * is_load_sequence:
 * Return whether the five instruction words at SEQ form the ppc64
 * constant-materialization sequence lis/ori/sldi/oris/ori, by comparing
 * the primary opcode field of each word (see the per-word comments).
 * Used below to recognize thunk-less direct-call sites.
 */
278 is_load_sequence (guint32
*seq
)
280 return ppc_opcode (seq
[0]) == 15 && /* lis */
281 ppc_opcode (seq
[1]) == 24 && /* ori */
282 ppc_opcode (seq
[2]) == 30 && /* sldi */
283 ppc_opcode (seq
[3]) == 25 && /* oris */
284 ppc_opcode (seq
[4]) == 24; /* ori */
/* Extract the destination (RT) register field (bits 21..25 of the word) from a load instruction. */
287 #define ppc_load_get_dest(l) (((l)>>21) & 0x1f)
/* Extract the signed 16-bit displacement from a D-form load instruction word. */
288 #define ppc_load_get_off(l) ((gint16)((l) & 0xffff))
/* TRUE if the word's primary opcode is a pointer-sized load: ld (58) or lwz (32). */
292 #define ppc_is_load_op(opcode) (ppc_opcode ((opcode)) == 58 || ppc_opcode ((opcode)) == 32)
294 /* code must point to the blrl */
/*
 * mono_ppc_is_direct_call_sequence:
 * Given CODE pointing at the branch instruction that performs a call,
 * decide whether the preceding instructions form a direct (non-thunk)
 * call sequence whose target address was materialized inline, so that
 * a patcher may rewrite the embedded constant in place.
 */
296 mono_ppc_is_direct_call_sequence (guint32
*code
)
298 #ifdef __mono_ppc64__
/* Sanity check: we must be sitting on a branch-to-LR/CTR call instruction
 * (0x4e800021 = blrl, 0x4e800020 = blr, 0x4e800420 = bctr). */
299 g_assert(*code
== 0x4e800021 || *code
== 0x4e800020 || *code
== 0x4e800420);
301 /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */
302 if (ppc_opcode (code
[-1]) == 31) { /* mtlr */
/* Function-descriptor variant: two loads (entry point + TOC) precede the mtlr. */
303 if (ppc_is_load_op (code
[-2]) && ppc_is_load_op (code
[-3])) { /* ld/ld */
304 if (!is_load_sequence (&code
[-8]))
306 /* one of the loads must be "ld r2,8(rX)" or "ld r2,4(rX) for ilp32 */
307 return (ppc_load_get_dest (code
[-2]) == ppc_r2
&& ppc_load_get_off (code
[-2]) == sizeof (target_mgreg_t
)) ||
308 (ppc_load_get_dest (code
[-3]) == ppc_r2
&& ppc_load_get_off (code
[-3]) == sizeof (target_mgreg_t
));
310 if (ppc_opcode (code
[-2]) == 24 && ppc_opcode (code
[-3]) == 31) /* mr/nop */
311 return is_load_sequence (&code
[-8]);
/* Plain case: the 5-word load sequence immediately precedes mtlr/blrl. */
313 return is_load_sequence (&code
[-6]);
/* 32-bit path: only blrl (0x4e800021) is accepted here. */
317 g_assert(*code
== 0x4e800021);
319 /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */
320 return ppc_opcode (code
[-1]) == 31 &&
321 ppc_opcode (code
[-2]) == 24 &&
322 ppc_opcode (code
[-3]) == 15;
326 #define MAX_ARCH_DELEGATE_PARAMS 7
329 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, guint32 param_count
, gboolean aot
)
331 guint8
*code
, *start
;
334 int size
= MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE
;
336 start
= code
= mono_global_codeman_reserve (size
);
338 code
= mono_ppc_create_pre_code_ftnptr (code
);
340 /* Replace the this argument with the target */
341 ppc_ldptr (code
, ppc_r0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
), ppc_r3
);
342 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
343 /* it's a function descriptor */
344 /* Can't use ldptr as it doesn't work with r0 */
345 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
347 ppc_mtctr (code
, ppc_r0
);
348 ppc_ldptr (code
, ppc_r3
, MONO_STRUCT_OFFSET (MonoDelegate
, target
), ppc_r3
);
349 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
351 g_assert ((code
- start
) <= size
);
353 mono_arch_flush_icache (start
, size
);
354 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
358 size
= MONO_PPC_32_64_CASE (32, 32) + param_count
* 4 + PPC_FTNPTR_SIZE
;
359 start
= code
= mono_global_codeman_reserve (size
);
361 code
= mono_ppc_create_pre_code_ftnptr (code
);
363 ppc_ldptr (code
, ppc_r0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
), ppc_r3
);
364 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
365 /* it's a function descriptor */
366 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
368 ppc_mtctr (code
, ppc_r0
);
369 /* slide down the arguments */
370 for (i
= 0; i
< param_count
; ++i
) {
371 ppc_mr (code
, (ppc_r3
+ i
), (ppc_r3
+ i
+ 1));
373 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
375 g_assert ((code
- start
) <= size
);
377 mono_arch_flush_icache (start
, size
);
378 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
382 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, NULL
);
384 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
385 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, NULL
);
393 mono_arch_get_delegate_invoke_impls (void)
399 get_delegate_invoke_impl (&info
, TRUE
, 0, TRUE
);
400 res
= g_slist_prepend (res
, info
);
402 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
403 get_delegate_invoke_impl (&info
, FALSE
, i
, TRUE
);
404 res
= g_slist_prepend (res
, info
);
411 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
413 guint8
*code
, *start
;
415 /* FIXME: Support more cases */
416 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
420 static guint8
* cached
= NULL
;
425 if (mono_ee_features
.use_aot_trampolines
) {
426 start
= mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
429 start
= get_delegate_invoke_impl (&info
, TRUE
, 0, FALSE
);
430 mono_tramp_info_register (info
, NULL
);
432 mono_memory_barrier ();
436 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
439 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
441 for (i
= 0; i
< sig
->param_count
; ++i
)
442 if (!mono_is_regsize_var (sig
->params
[i
]))
446 code
= cache
[sig
->param_count
];
450 if (mono_ee_features
.use_aot_trampolines
) {
451 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
452 start
= mono_aot_get_trampoline (name
);
456 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
, FALSE
);
457 mono_tramp_info_register (info
, NULL
);
460 mono_memory_barrier ();
462 cache
[sig
->param_count
] = start
;
468 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
/*
 * mono_arch_get_this_arg_from_call:
 * Recover the managed 'this' argument from the register dump R captured
 * at a call site: on PPC the first integer argument is passed in r3
 * (CODE is unused here).
 */
474 mono_arch_get_this_arg_from_call (host_mgreg_t
*r
, guint8
*code
)
476 return (gpointer
)(gsize
)r
[ppc_r3
];
484 #define MAX_AUX_ENTRIES 128
486 * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL,
487 * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features
489 #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000)
491 /* define PPC_FEATURE_64 HWCAP for 64-bit category. */
492 #define ISA_64 0x40000000
494 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */
495 #define ISA_MOVE_FPR_GPR 0x00000200
497 * Initialize the cpu to execute managed code.
500 mono_arch_cpu_init (void)
505 * Initialize architecture specific code.
508 mono_arch_init (void)
510 #if defined(MONO_CROSS_COMPILE)
511 #elif defined(__APPLE__)
513 size_t len
= sizeof (cachelinesize
);
516 mib
[1] = HW_CACHELINE
;
518 if (sysctl (mib
, 2, &cachelinesize
, &len
, NULL
, 0) == -1) {
522 cachelineinc
= cachelinesize
;
524 #elif defined(__linux__)
525 AuxVec vec
[MAX_AUX_ENTRIES
];
526 int i
, vec_entries
= 0;
527 /* sadly this will work only with 2.6 kernels... */
528 FILE* f
= fopen ("/proc/self/auxv", "rb");
531 vec_entries
= fread (&vec
, sizeof (AuxVec
), MAX_AUX_ENTRIES
, f
);
535 for (i
= 0; i
< vec_entries
; i
++) {
536 int type
= vec
[i
].type
;
538 if (type
== 19) { /* AT_DCACHEBSIZE */
539 cachelinesize
= vec
[i
].value
;
543 #elif defined(G_COMPILER_CODEWARRIOR)
547 /* FIXME: use block instead? */
548 cachelinesize
= _system_configuration
.icache_line
;
549 cachelineinc
= _system_configuration
.icache_line
;
551 //#error Need a way to get cache line size
554 if (mono_hwcap_ppc_has_icache_snoop
)
555 cpu_hw_caps
|= PPC_ICACHE_SNOOP
;
557 if (mono_hwcap_ppc_is_isa_2x
)
558 cpu_hw_caps
|= PPC_ISA_2X
;
560 if (mono_hwcap_ppc_is_isa_2_03
)
561 cpu_hw_caps
|= PPC_ISA_2_03
;
563 if (mono_hwcap_ppc_is_isa_64
)
564 cpu_hw_caps
|= PPC_ISA_64
;
566 if (mono_hwcap_ppc_has_move_fpr_gpr
)
567 cpu_hw_caps
|= PPC_MOVE_FPR_GPR
;
569 if (mono_hwcap_ppc_has_multiple_ls_units
)
570 cpu_hw_caps
|= PPC_MULTIPLE_LS_UNITS
;
576 cachelineinc
= cachelinesize
;
578 if (mono_cpu_count () > 1)
579 cpu_hw_caps
|= PPC_SMP_CAPABLE
;
581 mono_os_mutex_init_recursive (&mini_arch_mutex
);
583 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
584 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
585 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
587 // FIXME: Fix partial sharing for power and remove this
588 mono_set_partial_sharing_supported (FALSE
);
592 * Cleanup architecture specific code.
595 mono_arch_cleanup (void)
/* Tear down the arch-specific lock initialized in mono_arch_init (). */
597 mono_os_mutex_destroy (&mini_arch_mutex
);
601 mono_arch_have_fast_tls (void)
607 * This function returns the optimizations supported on this cpu.
610 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
614 /* no ppc-specific optimizations yet */
620 * This function test for all SIMD functions supported.
622 * Returns a bitmask corresponding to all supported versions.
626 mono_arch_cpu_enumerate_simd_versions (void)
628 /* SIMD is currently unimplemented */
632 #ifdef __mono_ppc64__
633 #define CASE_PPC32(c)
634 #define CASE_PPC64(c) case c:
636 #define CASE_PPC32(c) case c:
637 #define CASE_PPC64(c)
641 is_regsize_var (MonoType
*t
) {
644 t
= mini_get_underlying_type (t
);
648 CASE_PPC64 (MONO_TYPE_I8
)
649 CASE_PPC64 (MONO_TYPE_U8
)
653 case MONO_TYPE_FNPTR
:
655 case MONO_TYPE_OBJECT
:
656 case MONO_TYPE_STRING
:
657 case MONO_TYPE_CLASS
:
658 case MONO_TYPE_SZARRAY
:
659 case MONO_TYPE_ARRAY
:
661 case MONO_TYPE_GENERICINST
:
662 if (!mono_type_generic_inst_is_valuetype (t
))
665 case MONO_TYPE_VALUETYPE
:
673 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
678 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
679 MonoInst
*ins
= cfg
->varinfo
[i
];
680 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
683 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
686 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
689 /* we can only allocate 32 bit values */
690 if (is_regsize_var (ins
->inst_vtype
)) {
691 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
692 g_assert (i
== vmv
->idx
);
693 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
699 #endif /* ifndef DISABLE_JIT */
702 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
706 if (cfg
->frame_reg
!= ppc_sp
)
708 /* ppc_r13 is used by the system on PPC EABI */
709 for (i
= 14; i
< top
; ++i
) {
711 * Reserve r29 for holding the vtable address for virtual calls in AOT mode,
712 * since the trampolines can clobber r12.
714 if (!(cfg
->compile_aot
&& i
== 29))
715 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (i
));
722 * mono_arch_regalloc_cost:
724 * Return the cost, in number of memory references, of the action of
725 * allocating the variable VMV into a register during global register
729 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
736 mono_arch_flush_icache (guint8
*code
, gint size
)
738 #ifdef MONO_CROSS_COMPILE
742 guint8
*endp
, *start
;
746 start
= (guint8
*)((gsize
)start
& ~(cachelinesize
- 1));
747 /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */
748 #if defined(G_COMPILER_CODEWARRIOR)
749 if (cpu_hw_caps
& PPC_SMP_CAPABLE
) {
750 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
754 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
760 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
771 /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required.
772 * The sync is required to insure that the store queue is completely empty.
773 * While the icbi performs no cache operations, icbi/isync is required to
774 * kill local prefetch.
776 if (cpu_hw_caps
& PPC_ICACHE_SNOOP
) {
778 asm ("icbi 0,%0;" : : "r"(code
) : "memory");
782 /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */
783 if (cpu_hw_caps
& PPC_SMP_CAPABLE
) {
784 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
785 asm ("dcbf 0,%0;" : : "r"(p
) : "memory");
788 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
789 asm ("dcbst 0,%0;" : : "r"(p
) : "memory");
794 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
795 /* for ISA2.0+ implementations we should not need any extra sync between the
796 * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this.
797 * So I am not sure which chip had this problem but its not an issue on
798 * of the ISA V2 chips.
800 if (cpu_hw_caps
& PPC_ISA_2X
)
801 asm ("icbi 0,%0;" : : "r"(p
) : "memory");
803 asm ("icbi 0,%0; sync;" : : "r"(p
) : "memory");
805 if (!(cpu_hw_caps
& PPC_ISA_2X
))
813 mono_arch_flush_register_windows (void)
818 #define ALWAYS_ON_STACK(s) s
819 #define FP_ALSO_IN_REG(s) s
821 #ifdef __mono_ppc64__
822 #define ALWAYS_ON_STACK(s) s
823 #define FP_ALSO_IN_REG(s) s
825 #define ALWAYS_ON_STACK(s)
826 #define FP_ALSO_IN_REG(s)
828 #define ALIGN_DOUBLES
837 RegTypeFPStructByVal
, // For the v2 ABI, floats should be passed in FRs instead of GRs. Only valid for ABI v2!
842 guint32 vtsize
; /* in param area */
844 guint8 vtregs
; /* number of registers used to pass a RegTypeStructByVal/RegTypeFPStructByVal */
845 guint8 regtype
: 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
846 guint8 size
: 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal/RegTypeFPStructByVal */
847 guint8 bytes
: 4; /* size in bytes - only valid for
848 RegTypeStructByVal/RegTypeFPStructByVal if the struct fits
849 in one word, otherwise it's 0*/
858 gboolean vtype_retaddr
;
866 #if PPC_RETURN_SMALL_FLOAT_STRUCTS_IN_FR_REGS
868 // Test if a structure is completely composed of either float XOR double fields and has fewer than
869 // PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTER members.
870 // If this is true the structure can be returned directly via float registers instead of by a hidden parameter
871 // pointing to where the return value should be stored.
872 // This is as per the ELF ABI v2.
875 is_float_struct_returnable_via_regs (MonoType
*type
, int* member_cnt
, int* member_size
)
877 int local_member_cnt
, local_member_size
;
879 member_cnt
= &local_member_cnt
;
882 member_size
= &local_member_size
;
885 gboolean is_all_floats
= mini_type_is_hfa(type
, member_cnt
, member_size
);
886 return is_all_floats
&& (*member_cnt
<= PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS
);
890 #define is_float_struct_returnable_via_regs(a,b,c) (FALSE)
894 #if PPC_RETURN_SMALL_STRUCTS_IN_REGS
896 // Test if a structure is smaller in size than 2 doublewords (PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) and is
897 // completely composed of fields all of basic types.
898 // If this is true the structure can be returned directly via registers r3/r4 instead of by a hidden parameter
899 // pointing to where the return value should be stored.
900 // This is as per the ELF ABI v2.
903 is_struct_returnable_via_regs (MonoClass
*klass
, gboolean is_pinvoke
)
905 gboolean has_a_field
= FALSE
;
908 gpointer iter
= NULL
;
911 size
= mono_type_native_stack_size (m_class_get_byval_arg (klass
), 0);
913 size
= mini_type_stack_size (m_class_get_byval_arg (klass
), 0);
916 if (size
> PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS
)
918 while ((f
= mono_class_get_fields_internal (klass
, &iter
))) {
919 if (!(f
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)) {
920 // TBD: Is there a better way to check for the basic types?
921 if (f
->type
->byref
) {
923 } else if ((f
->type
->type
>= MONO_TYPE_BOOLEAN
) && (f
->type
->type
<= MONO_TYPE_R8
)) {
925 } else if (MONO_TYPE_ISSTRUCT (f
->type
)) {
926 MonoClass
*klass
= mono_class_from_mono_type_internal (f
->type
);
927 if (is_struct_returnable_via_regs(klass
, is_pinvoke
)) {
942 #define is_struct_returnable_via_regs(a,b) (FALSE)
947 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
949 #ifdef __mono_ppc64__
954 if (*gr
>= 3 + PPC_NUM_REG_ARGS
) {
955 ainfo
->offset
= PPC_STACK_PARAM_OFFSET
+ *stack_size
;
956 ainfo
->reg
= ppc_sp
; /* in the caller */
957 ainfo
->regtype
= RegTypeBase
;
958 *stack_size
+= sizeof (target_mgreg_t
);
960 ALWAYS_ON_STACK (*stack_size
+= sizeof (target_mgreg_t
));
964 if (*gr
>= 3 + PPC_NUM_REG_ARGS
- 1) {
966 //*stack_size += (*stack_size % 8);
968 ainfo
->offset
= PPC_STACK_PARAM_OFFSET
+ *stack_size
;
969 ainfo
->reg
= ppc_sp
; /* in the caller */
970 ainfo
->regtype
= RegTypeBase
;
977 ALWAYS_ON_STACK (*stack_size
+= 8);
985 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
987 has_only_a_r48_field (MonoClass
*klass
)
991 gboolean have_field
= FALSE
;
993 while ((f
= mono_class_get_fields_internal (klass
, &iter
))) {
994 if (!(f
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)) {
997 if (!f
->type
->byref
&& (f
->type
->type
== MONO_TYPE_R4
|| f
->type
->type
== MONO_TYPE_R8
))
1008 get_call_info (MonoMethodSignature
*sig
)
1010 guint i
, fr
, gr
, pstart
;
1011 int n
= sig
->hasthis
+ sig
->param_count
;
1012 MonoType
*simpletype
;
1013 guint32 stack_size
= 0;
1014 CallInfo
*cinfo
= g_malloc0 (sizeof (CallInfo
) + sizeof (ArgInfo
) * n
);
1015 gboolean is_pinvoke
= sig
->pinvoke
;
1017 fr
= PPC_FIRST_FPARG_REG
;
1018 gr
= PPC_FIRST_ARG_REG
;
1020 if (mini_type_is_vtype (sig
->ret
)) {
1021 cinfo
->vtype_retaddr
= TRUE
;
1027 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1028 * the first argument, allowing 'this' to be always passed in the first arg reg.
1029 * Also do this if the first argument is a reference type, since virtual calls
1030 * are sometimes made using calli without sig->hasthis set, like in the delegate
1033 if (cinfo
->vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1035 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1038 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1042 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
1043 cinfo
->struct_ret
= cinfo
->ret
.reg
;
1044 cinfo
->vret_arg_index
= 1;
1048 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1052 if (cinfo
->vtype_retaddr
) {
1053 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
1054 cinfo
->struct_ret
= cinfo
->ret
.reg
;
1058 DEBUG(printf("params: %d\n", sig
->param_count
));
1059 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1060 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1061 /* Prevent implicit arguments and sig_cookie from
1062 being passed in registers */
1063 gr
= PPC_LAST_ARG_REG
+ 1;
1064 /* FIXME: don't we have to set fr, too? */
1065 /* Emit the signature cookie just before the implicit arguments */
1066 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1068 DEBUG(printf("param %d: ", i
));
1069 if (sig
->params
[i
]->byref
) {
1070 DEBUG(printf("byref\n"));
1071 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1075 simpletype
= mini_get_underlying_type (sig
->params
[i
]);
1076 switch (simpletype
->type
) {
1077 case MONO_TYPE_BOOLEAN
:
1080 cinfo
->args
[n
].size
= 1;
1081 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1084 case MONO_TYPE_CHAR
:
1087 cinfo
->args
[n
].size
= 2;
1088 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1093 cinfo
->args
[n
].size
= 4;
1094 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1100 case MONO_TYPE_FNPTR
:
1101 case MONO_TYPE_CLASS
:
1102 case MONO_TYPE_OBJECT
:
1103 case MONO_TYPE_STRING
:
1104 case MONO_TYPE_SZARRAY
:
1105 case MONO_TYPE_ARRAY
:
1106 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1107 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1110 case MONO_TYPE_GENERICINST
:
1111 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
1112 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1113 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1118 case MONO_TYPE_VALUETYPE
:
1119 case MONO_TYPE_TYPEDBYREF
: {
1121 MonoClass
*klass
= mono_class_from_mono_type_internal (sig
->params
[i
]);
1122 if (simpletype
->type
== MONO_TYPE_TYPEDBYREF
)
1123 size
= MONO_ABI_SIZEOF (MonoTypedRef
);
1124 else if (is_pinvoke
)
1125 size
= mono_class_native_size (klass
, NULL
);
1127 size
= mono_class_value_size (klass
, NULL
);
1129 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
1130 if ((size
== 4 || size
== 8) && has_only_a_r48_field (klass
)) {
1131 cinfo
->args
[n
].size
= size
;
1133 /* It was 7, now it is 8 in LinuxPPC */
1134 if (fr
<= PPC_LAST_FPARG_REG
) {
1135 cinfo
->args
[n
].regtype
= RegTypeFP
;
1136 cinfo
->args
[n
].reg
= fr
;
1138 FP_ALSO_IN_REG (gr
++);
1139 #if !defined(__mono_ppc64__)
1141 FP_ALSO_IN_REG (gr
++);
1143 ALWAYS_ON_STACK (stack_size
+= size
);
1145 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1146 cinfo
->args
[n
].regtype
= RegTypeBase
;
1147 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1154 DEBUG(printf ("load %d bytes struct\n",
1155 mono_class_native_size (sig
->params
[i
]->data
.klass
, NULL
)));
1157 #if PPC_PASS_STRUCTS_BY_VALUE
1159 int align_size
= size
;
1161 int rest
= PPC_LAST_ARG_REG
- gr
+ 1;
1164 #if PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS
1167 gboolean is_all_floats
= is_float_struct_returnable_via_regs (sig
->params
[i
], &mbr_cnt
, &mbr_size
);
1169 if (is_all_floats
) {
1170 rest
= PPC_LAST_FPARG_REG
- fr
+ 1;
1172 // Pass small (<= 8 member) structures entirely made up of either float or double members
1173 // in FR registers. There have to be at least mbr_cnt registers left.
1174 if (is_all_floats
&&
1175 (rest
>= mbr_cnt
)) {
1177 n_in_regs
= MIN (rest
, nregs
);
1178 cinfo
->args
[n
].regtype
= RegTypeFPStructByVal
;
1179 cinfo
->args
[n
].vtregs
= n_in_regs
;
1180 cinfo
->args
[n
].size
= mbr_size
;
1181 cinfo
->args
[n
].vtsize
= nregs
- n_in_regs
;
1182 cinfo
->args
[n
].reg
= fr
;
1184 if (mbr_size
== 4) {
1186 FP_ALSO_IN_REG (gr
+= (n_in_regs
+1)/2);
1189 FP_ALSO_IN_REG (gr
+= (n_in_regs
));
1194 align_size
+= (sizeof (target_mgreg_t
) - 1);
1195 align_size
&= ~(sizeof (target_mgreg_t
) - 1);
1196 nregs
= (align_size
+ sizeof (target_mgreg_t
) -1 ) / sizeof (target_mgreg_t
);
1197 n_in_regs
= MIN (rest
, nregs
);
1201 /* FIXME: check this */
1202 if (size
>= 3 && size
% 4 != 0)
1205 cinfo
->args
[n
].regtype
= RegTypeStructByVal
;
1206 cinfo
->args
[n
].vtregs
= n_in_regs
;
1207 cinfo
->args
[n
].size
= n_in_regs
;
1208 cinfo
->args
[n
].vtsize
= nregs
- n_in_regs
;
1209 cinfo
->args
[n
].reg
= gr
;
1213 #ifdef __mono_ppc64__
1214 if (nregs
== 1 && is_pinvoke
)
1215 cinfo
->args
[n
].bytes
= size
;
1218 cinfo
->args
[n
].bytes
= 0;
1219 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1220 /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/
1221 stack_size
+= nregs
* sizeof (target_mgreg_t
);
1224 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1225 cinfo
->args
[n
].regtype
= RegTypeStructByAddr
;
1226 cinfo
->args
[n
].vtsize
= size
;
1233 cinfo
->args
[n
].size
= 8;
1234 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, SIZEOF_REGISTER
== 8);
1238 cinfo
->args
[n
].size
= 4;
1240 /* It was 7, now it is 8 in LinuxPPC */
1241 if (fr
<= PPC_LAST_FPARG_REG
1242 // For non-native vararg calls the parms must go in storage
1243 && !(!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1245 cinfo
->args
[n
].regtype
= RegTypeFP
;
1246 cinfo
->args
[n
].reg
= fr
;
1248 FP_ALSO_IN_REG (gr
++);
1249 ALWAYS_ON_STACK (stack_size
+= SIZEOF_REGISTER
);
1251 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
+ MONO_PPC_32_64_CASE (0, 4);
1252 cinfo
->args
[n
].regtype
= RegTypeBase
;
1253 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1254 stack_size
+= SIZEOF_REGISTER
;
1259 cinfo
->args
[n
].size
= 8;
1260 /* It was 7, now it is 8 in LinuxPPC */
1261 if (fr
<= PPC_LAST_FPARG_REG
1262 // For non-native vararg calls the parms must go in storage
1263 && !(!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1265 cinfo
->args
[n
].regtype
= RegTypeFP
;
1266 cinfo
->args
[n
].reg
= fr
;
1268 FP_ALSO_IN_REG (gr
+= sizeof (double) / SIZEOF_REGISTER
);
1269 ALWAYS_ON_STACK (stack_size
+= 8);
1271 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1272 cinfo
->args
[n
].regtype
= RegTypeBase
;
1273 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1279 g_error ("Can't trampoline 0x%x", sig
->params
[i
]->type
);
1284 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1285 /* Prevent implicit arguments and sig_cookie from
1286 being passed in registers */
1287 gr
= PPC_LAST_ARG_REG
+ 1;
1288 /* Emit the signature cookie just before the implicit arguments */
1289 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1293 simpletype
= mini_get_underlying_type (sig
->ret
);
1294 switch (simpletype
->type
) {
1295 case MONO_TYPE_BOOLEAN
:
1300 case MONO_TYPE_CHAR
:
1306 case MONO_TYPE_FNPTR
:
1307 case MONO_TYPE_CLASS
:
1308 case MONO_TYPE_OBJECT
:
1309 case MONO_TYPE_SZARRAY
:
1310 case MONO_TYPE_ARRAY
:
1311 case MONO_TYPE_STRING
:
1312 cinfo
->ret
.reg
= ppc_r3
;
1316 cinfo
->ret
.reg
= ppc_r3
;
1320 cinfo
->ret
.reg
= ppc_f1
;
1321 cinfo
->ret
.regtype
= RegTypeFP
;
1323 case MONO_TYPE_GENERICINST
:
1324 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
1325 cinfo
->ret
.reg
= ppc_r3
;
1329 case MONO_TYPE_VALUETYPE
:
1331 case MONO_TYPE_TYPEDBYREF
:
1332 case MONO_TYPE_VOID
:
1335 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1339 /* align stack size to 16 */
1340 DEBUG (printf (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1341 stack_size
= (stack_size
+ 15) & ~15;
1343 cinfo
->stack_usage
= stack_size
;
1350 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
1352 CallInfo
*caller_info
= get_call_info (caller_sig
);
1353 CallInfo
*callee_info
= get_call_info (callee_sig
);
1355 gboolean res
= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
<= caller_info
->stack_usage
)
1356 && IS_SUPPORTED_TAILCALL (memcmp (&callee_info
->ret
, &caller_info
->ret
, sizeof (caller_info
->ret
)) == 0);
1358 // FIXME ABIs vary as to if this local is in the parameter area or not,
1359 // so this check might not be needed.
1360 for (int i
= 0; res
&& i
< callee_info
->nargs
; ++i
) {
1361 res
= IS_SUPPORTED_TAILCALL (callee_info
->args
[i
].regtype
!= RegTypeStructByAddr
);
1362 /* An address on the callee's stack is passed as the argument */
1365 g_free (caller_info
);
1366 g_free (callee_info
);
1374 * Set var information according to the calling convention. ppc version.
1375 * The locals var stuff should most likely be split in another method.
1378 mono_arch_allocate_vars (MonoCompile
*m
)
1380 MonoMethodSignature
*sig
;
1381 MonoMethodHeader
*header
;
1383 int i
, offset
, size
, align
, curinst
;
1384 int frame_reg
= ppc_sp
;
1386 guint32 locals_stack_size
, locals_stack_align
;
1388 m
->flags
|= MONO_CFG_HAS_SPILLUP
;
1390 /* this is bug #60332: remove when #59509 is fixed, so no weird vararg
1391 * call convs needs to be handled this way.
1393 if (m
->flags
& MONO_CFG_HAS_VARARGS
)
1394 m
->param_area
= MAX (m
->param_area
, sizeof (target_mgreg_t
)*8);
1395 /* gtk-sharp and other broken code will dllimport vararg functions even with
1396 * non-varargs signatures. Since there is little hope people will get this right
1397 * we assume they won't.
1399 if (m
->method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
)
1400 m
->param_area
= MAX (m
->param_area
, sizeof (target_mgreg_t
)*8);
1405 * We use the frame register also for any method that has
1406 * exception clauses. This way, when the handlers are called,
1407 * the code will reference local variables using the frame reg instead of
1408 * the stack pointer: if we had to restore the stack pointer, we'd
1409 * corrupt the method frames that are already on the stack (since
1410 * filters get called before stack unwinding happens) when the filter
1411 * code would call any method (this also applies to finally etc.).
1413 if ((m
->flags
& MONO_CFG_HAS_ALLOCA
) || header
->num_clauses
)
1414 frame_reg
= ppc_r31
;
1415 m
->frame_reg
= frame_reg
;
1416 if (frame_reg
!= ppc_sp
) {
1417 m
->used_int_regs
|= 1 << frame_reg
;
1420 sig
= mono_method_signature_internal (m
->method
);
1424 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1425 m
->ret
->opcode
= OP_REGVAR
;
1426 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_r3
;
1428 /* FIXME: handle long values? */
1429 switch (mini_get_underlying_type (sig
->ret
)->type
) {
1430 case MONO_TYPE_VOID
:
1434 m
->ret
->opcode
= OP_REGVAR
;
1435 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_f1
;
1438 m
->ret
->opcode
= OP_REGVAR
;
1439 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_r3
;
1443 /* local vars are at a positive offset from the stack pointer */
1445 * also note that if the function uses alloca, we use ppc_r31
1446 * to point at the local variables.
1448 offset
= PPC_MINIMAL_STACK_SIZE
; /* linkage area */
1449 /* align the offset to 16 bytes: not sure this is needed here */
1451 //offset &= ~(16 - 1);
1453 /* add parameter area size for called functions */
1454 offset
+= m
->param_area
;
1456 offset
&= ~(16 - 1);
1458 /* the MonoLMF structure is stored just below the stack pointer */
1459 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1460 offset
+= sizeof(gpointer
) - 1;
1461 offset
&= ~(sizeof(gpointer
) - 1);
1463 m
->vret_addr
->opcode
= OP_REGOFFSET
;
1464 m
->vret_addr
->inst_basereg
= frame_reg
;
1465 m
->vret_addr
->inst_offset
= offset
;
1467 if (G_UNLIKELY (m
->verbose_level
> 1)) {
1468 printf ("vret_addr =");
1469 mono_print_ins (m
->vret_addr
);
1472 offset
+= sizeof(gpointer
);
1475 offsets
= mono_allocate_stack_slots (m
, FALSE
, &locals_stack_size
, &locals_stack_align
);
1476 if (locals_stack_align
) {
1477 offset
+= (locals_stack_align
- 1);
1478 offset
&= ~(locals_stack_align
- 1);
1480 for (i
= m
->locals_start
; i
< m
->num_varinfo
; i
++) {
1481 if (offsets
[i
] != -1) {
1482 MonoInst
*inst
= m
->varinfo
[i
];
1483 inst
->opcode
= OP_REGOFFSET
;
1484 inst
->inst_basereg
= frame_reg
;
1485 inst
->inst_offset
= offset
+ offsets
[i
];
1487 g_print ("allocating local %d (%s) to %d\n",
1488 i, mono_type_get_name (inst->inst_vtype), inst->inst_offset);
1492 offset
+= locals_stack_size
;
1496 inst
= m
->args
[curinst
];
1497 if (inst
->opcode
!= OP_REGVAR
) {
1498 inst
->opcode
= OP_REGOFFSET
;
1499 inst
->inst_basereg
= frame_reg
;
1500 offset
+= sizeof (target_mgreg_t
) - 1;
1501 offset
&= ~(sizeof (target_mgreg_t
) - 1);
1502 inst
->inst_offset
= offset
;
1503 offset
+= sizeof (target_mgreg_t
);
1508 for (i
= 0; i
< sig
->param_count
; ++i
) {
1509 inst
= m
->args
[curinst
];
1510 if (inst
->opcode
!= OP_REGVAR
) {
1511 inst
->opcode
= OP_REGOFFSET
;
1512 inst
->inst_basereg
= frame_reg
;
1514 size
= mono_type_native_stack_size (sig
->params
[i
], (guint32
*)&align
);
1515 inst
->backend
.is_pinvoke
= 1;
1517 size
= mono_type_size (sig
->params
[i
], &align
);
1519 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]) && size
< sizeof (target_mgreg_t
))
1520 size
= align
= sizeof (target_mgreg_t
);
1522 * Use at least 4/8 byte alignment, since these might be passed in registers, and
1523 * they are saved using std in the prolog.
1525 align
= sizeof (target_mgreg_t
);
1526 offset
+= align
- 1;
1527 offset
&= ~(align
- 1);
1528 inst
->inst_offset
= offset
;
1534 /* some storage for fp conversions */
1537 m
->arch
.fp_conv_var_offset
= offset
;
1540 /* align the offset to 16 bytes */
1542 offset
&= ~(16 - 1);
1545 m
->stack_offset
= offset
;
1547 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1548 CallInfo
*cinfo
= get_call_info (m
->method
->signature
);
1550 m
->sig_cookie
= cinfo
->sig_cookie
.offset
;
1557 mono_arch_create_vars (MonoCompile
*cfg
)
1559 MonoMethodSignature
*sig
= mono_method_signature_internal (cfg
->method
);
1561 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1562 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_ARG
);
1566 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
1567 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
1571 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1573 int sig_reg
= mono_alloc_ireg (cfg
);
1575 /* FIXME: Add support for signature tokens to AOT */
1576 cfg
->disable_aot
= TRUE
;
1578 MONO_EMIT_NEW_ICONST (cfg
, sig_reg
, (gulong
)call
->signature
);
1579 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
,
1580 ppc_r1
, cinfo
->sig_cookie
.offset
, sig_reg
);
1584 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1587 MonoMethodSignature
*sig
;
1591 sig
= call
->signature
;
1592 n
= sig
->param_count
+ sig
->hasthis
;
1594 cinfo
= get_call_info (sig
);
1596 for (i
= 0; i
< n
; ++i
) {
1597 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1600 if (i
>= sig
->hasthis
)
1601 t
= sig
->params
[i
- sig
->hasthis
];
1603 t
= mono_get_int_type ();
1604 t
= mini_get_underlying_type (t
);
1606 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
))
1607 emit_sig_cookie (cfg
, call
, cinfo
);
1609 in
= call
->args
[i
];
1611 if (ainfo
->regtype
== RegTypeGeneral
) {
1612 #ifndef __mono_ppc64__
1613 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1614 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1615 ins
->dreg
= mono_alloc_ireg (cfg
);
1616 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
1617 MONO_ADD_INS (cfg
->cbb
, ins
);
1618 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1620 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1621 ins
->dreg
= mono_alloc_ireg (cfg
);
1622 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
1623 MONO_ADD_INS (cfg
->cbb
, ins
);
1624 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1628 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1629 ins
->dreg
= mono_alloc_ireg (cfg
);
1630 ins
->sreg1
= in
->dreg
;
1631 MONO_ADD_INS (cfg
->cbb
, ins
);
1633 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1635 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
1636 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1637 ins
->opcode
= OP_OUTARG_VT
;
1638 ins
->sreg1
= in
->dreg
;
1639 ins
->klass
= in
->klass
;
1640 ins
->inst_p0
= call
;
1641 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1642 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1643 MONO_ADD_INS (cfg
->cbb
, ins
);
1644 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
1645 /* this is further handled in mono_arch_emit_outarg_vt () */
1646 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1647 ins
->opcode
= OP_OUTARG_VT
;
1648 ins
->sreg1
= in
->dreg
;
1649 ins
->klass
= in
->klass
;
1650 ins
->inst_p0
= call
;
1651 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1652 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1653 MONO_ADD_INS (cfg
->cbb
, ins
);
1654 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
1655 /* this is further handled in mono_arch_emit_outarg_vt () */
1656 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1657 ins
->opcode
= OP_OUTARG_VT
;
1658 ins
->sreg1
= in
->dreg
;
1659 ins
->klass
= in
->klass
;
1660 ins
->inst_p0
= call
;
1661 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1662 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1663 MONO_ADD_INS (cfg
->cbb
, ins
);
1664 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1665 } else if (ainfo
->regtype
== RegTypeBase
) {
1666 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1667 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1668 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
1669 if (t
->type
== MONO_TYPE_R8
)
1670 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1672 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1674 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1676 } else if (ainfo
->regtype
== RegTypeFP
) {
1677 if (t
->type
== MONO_TYPE_VALUETYPE
) {
1678 /* this is further handled in mono_arch_emit_outarg_vt () */
1679 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1680 ins
->opcode
= OP_OUTARG_VT
;
1681 ins
->sreg1
= in
->dreg
;
1682 ins
->klass
= in
->klass
;
1683 ins
->inst_p0
= call
;
1684 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1685 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1686 MONO_ADD_INS (cfg
->cbb
, ins
);
1688 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1690 int dreg
= mono_alloc_freg (cfg
);
1692 if (ainfo
->size
== 4) {
1693 MONO_EMIT_NEW_UNALU (cfg
, OP_FCONV_TO_R4
, dreg
, in
->dreg
);
1695 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
1697 ins
->sreg1
= in
->dreg
;
1698 MONO_ADD_INS (cfg
->cbb
, ins
);
1701 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, TRUE
);
1702 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1705 g_assert_not_reached ();
1709 /* Emit the signature cookie in the case that there is no
1710 additional argument */
1711 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
1712 emit_sig_cookie (cfg
, call
, cinfo
);
1714 if (cinfo
->struct_ret
) {
1717 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1718 vtarg
->sreg1
= call
->vret_var
->dreg
;
1719 vtarg
->dreg
= mono_alloc_preg (cfg
);
1720 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1722 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->struct_ret
, FALSE
);
1725 call
->stack_usage
= cinfo
->stack_usage
;
1726 cfg
->param_area
= MAX (PPC_MINIMAL_PARAM_AREA_SIZE
, MAX (cfg
->param_area
, cinfo
->stack_usage
));
1727 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
1735 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1737 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1738 ArgInfo
*ainfo
= ins
->inst_p1
;
1739 int ovf_size
= ainfo
->vtsize
;
1740 int doffset
= ainfo
->offset
;
1741 int i
, soffset
, dreg
;
1743 if (ainfo
->regtype
== RegTypeStructByVal
) {
1750 * Darwin pinvokes needs some special handling for 1
1751 * and 2 byte arguments
1753 g_assert (ins
->klass
);
1754 if (call
->signature
->pinvoke
)
1755 size
= mono_class_native_size (ins
->klass
, NULL
);
1756 if (size
== 2 || size
== 1) {
1757 int tmpr
= mono_alloc_ireg (cfg
);
1759 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1761 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1762 dreg
= mono_alloc_ireg (cfg
);
1763 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, dreg
, tmpr
);
1764 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, FALSE
);
1767 for (i
= 0; i
< ainfo
->vtregs
; ++i
) {
1768 dreg
= mono_alloc_ireg (cfg
);
1769 #if G_BYTE_ORDER == G_BIG_ENDIAN
1770 int antipadding
= 0;
1773 antipadding
= sizeof (target_mgreg_t
) - ainfo
->bytes
;
1775 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1777 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, dreg
, dreg
, antipadding
* 8);
1779 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1781 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
1782 soffset
+= sizeof (target_mgreg_t
);
1785 mini_emit_memcpy (cfg
, ppc_r1
, doffset
+ soffset
, src
->dreg
, soffset
, ovf_size
* sizeof (target_mgreg_t
), TARGET_SIZEOF_VOID_P
);
1786 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
1788 for (i
= 0; i
< ainfo
->vtregs
; ++i
) {
1789 int tmpr
= mono_alloc_freg (cfg
);
1790 if (ainfo
->size
== 4)
1791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR4_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1793 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1794 dreg
= mono_alloc_freg (cfg
);
1795 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, dreg
, tmpr
);
1796 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+i
, TRUE
);
1797 soffset
+= ainfo
->size
;
1800 mini_emit_memcpy (cfg
, ppc_r1
, doffset
+ soffset
, src
->dreg
, soffset
, ovf_size
* sizeof (target_mgreg_t
), TARGET_SIZEOF_VOID_P
);
1801 } else if (ainfo
->regtype
== RegTypeFP
) {
1802 int tmpr
= mono_alloc_freg (cfg
);
1803 if (ainfo
->size
== 4)
1804 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR4_MEMBASE
, tmpr
, src
->dreg
, 0);
1806 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmpr
, src
->dreg
, 0);
1807 dreg
= mono_alloc_freg (cfg
);
1808 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, dreg
, tmpr
);
1809 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, TRUE
);
1811 MonoInst
*vtcopy
= mono_compile_create_var (cfg
, m_class_get_byval_arg (src
->klass
), OP_LOCAL
);
1815 /* FIXME: alignment? */
1816 if (call
->signature
->pinvoke
) {
1817 size
= mono_type_native_stack_size (m_class_get_byval_arg (src
->klass
), NULL
);
1818 vtcopy
->backend
.is_pinvoke
= 1;
1820 size
= mini_type_stack_size (m_class_get_byval_arg (src
->klass
), NULL
);
1823 g_assert (ovf_size
> 0);
1825 EMIT_NEW_VARLOADA (cfg
, load
, vtcopy
, vtcopy
->inst_vtype
);
1826 mini_emit_memcpy (cfg
, load
->dreg
, 0, src
->dreg
, 0, size
, TARGET_SIZEOF_VOID_P
);
1829 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, load
->dreg
);
1831 mono_call_inst_add_outarg_reg (cfg
, call
, load
->dreg
, ainfo
->reg
, FALSE
);
1836 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1838 MonoType
*ret
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
1840 #ifndef __mono_ppc64__
1841 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1844 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
1845 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
1846 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
1847 MONO_ADD_INS (cfg
->cbb
, ins
);
1851 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
1852 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1856 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1860 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
1865 #endif /* DISABLE_JIT */
1868 * Conditional branches have a small offset, so if it is likely overflowed,
1869 * we do a branch to the end of the method (uncond branches have much larger
1870 * offsets) where we perform the conditional and jump back unconditionally.
1871 * It's slightly slower, since we add two uncond branches, but it's very simple
1872 * with the current patch implementation and such large methods are likely not
1873 * going to be perf critical anyway.
1878 const char *exception
;
1885 #define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \
1886 if (0 && ins->inst_true_bb->native_offset) { \
1887 ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \
1889 int br_disp = ins->inst_true_bb->max_offset - offset; \
1890 if (!ppc_is_imm16 (br_disp + 8 * 1024) || !ppc_is_imm16 (br_disp - 8 * 1024)) { \
1891 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1892 ovfj->data.bb = ins->inst_true_bb; \
1893 ovfj->ip_offset = 0; \
1894 ovfj->b0_cond = (b0); \
1895 ovfj->b1_cond = (b1); \
1896 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \
1899 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1900 ppc_bc (code, (b0), (b1), 0); \
1904 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)])
1906 /* emit an exception if condition is fail
1908 * We assign the extra code used to throw the implicit exceptions
1909 * to cfg->bb_exit as far as the big branch handling is concerned
1911 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \
1913 int br_disp = cfg->bb_exit->max_offset - offset; \
1914 if (!ppc_is_imm16 (br_disp + 1024) || ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))) { \
1915 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1916 ovfj->data.exception = (exc_name); \
1917 ovfj->ip_offset = code - cfg->native_code; \
1918 ovfj->b0_cond = (b0); \
1919 ovfj->b1_cond = (b1); \
1920 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
1922 cfg->bb_exit->max_offset += 24; \
1924 mono_add_patch_info (cfg, code - cfg->native_code, \
1925 MONO_PATCH_INFO_EXC, exc_name); \
1926 ppc_bcl (code, (b0), (b1), 0); \
1930 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
1933 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1938 normalize_opcode (int opcode
)
1941 #ifndef MONO_ARCH_ILP32
1942 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE
, OP_LOADI8_MEMBASE
):
1943 return OP_LOAD_MEMBASE
;
1944 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX
, OP_LOADI8_MEMINDEX
):
1945 return OP_LOAD_MEMINDEX
;
1946 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG
, OP_STOREI8_MEMBASE_REG
):
1947 return OP_STORE_MEMBASE_REG
;
1948 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM
, OP_STOREI8_MEMBASE_IMM
):
1949 return OP_STORE_MEMBASE_IMM
;
1950 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX
, OP_STOREI8_MEMINDEX
):
1951 return OP_STORE_MEMINDEX
;
1953 case MONO_PPC_32_64_CASE (OP_ISHR_IMM
, OP_LSHR_IMM
):
1955 case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM
, OP_LSHR_UN_IMM
):
1956 return OP_SHR_UN_IMM
;
1963 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1965 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1967 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1968 switch (normalize_opcode (ins
->opcode
)) {
1970 /* remove unnecessary multiplication with 1 */
1971 if (ins
->inst_imm
== 1) {
1972 if (ins
->dreg
!= ins
->sreg1
) {
1973 ins
->opcode
= OP_MOVE
;
1975 MONO_DELETE_INS (bb
, ins
);
1979 int power2
= mono_is_power_of_two (ins
->inst_imm
);
1981 ins
->opcode
= OP_SHL_IMM
;
1982 ins
->inst_imm
= power2
;
1986 case OP_LOAD_MEMBASE
:
1988 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1989 * OP_LOAD_MEMBASE offset(basereg), reg
1991 if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_STORE_MEMBASE_REG
&&
1992 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1993 ins
->inst_offset
== last_ins
->inst_offset
) {
1994 if (ins
->dreg
== last_ins
->sreg1
) {
1995 MONO_DELETE_INS (bb
, ins
);
1998 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1999 ins
->opcode
= OP_MOVE
;
2000 ins
->sreg1
= last_ins
->sreg1
;
2004 * Note: reg1 must be different from the basereg in the second load
2005 * OP_LOAD_MEMBASE offset(basereg), reg1
2006 * OP_LOAD_MEMBASE offset(basereg), reg2
2008 * OP_LOAD_MEMBASE offset(basereg), reg1
2009 * OP_MOVE reg1, reg2
2011 } else if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_LOAD_MEMBASE
&&
2012 ins
->inst_basereg
!= last_ins
->dreg
&&
2013 ins
->inst_basereg
== last_ins
->inst_basereg
&&
2014 ins
->inst_offset
== last_ins
->inst_offset
) {
2016 if (ins
->dreg
== last_ins
->dreg
) {
2017 MONO_DELETE_INS (bb
, ins
);
2020 ins
->opcode
= OP_MOVE
;
2021 ins
->sreg1
= last_ins
->dreg
;
2024 //g_assert_not_reached ();
2028 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2029 * OP_LOAD_MEMBASE offset(basereg), reg
2031 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2032 * OP_ICONST reg, imm
2034 } else if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_STORE_MEMBASE_IMM
&&
2035 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2036 ins
->inst_offset
== last_ins
->inst_offset
) {
2037 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2038 ins
->opcode
= OP_ICONST
;
2039 ins
->inst_c0
= last_ins
->inst_imm
;
2040 g_assert_not_reached (); // check this rule
2044 case OP_LOADU1_MEMBASE
:
2045 case OP_LOADI1_MEMBASE
:
2046 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
2047 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2048 ins
->inst_offset
== last_ins
->inst_offset
) {
2049 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
2050 ins
->sreg1
= last_ins
->sreg1
;
2053 case OP_LOADU2_MEMBASE
:
2054 case OP_LOADI2_MEMBASE
:
2055 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
2056 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2057 ins
->inst_offset
== last_ins
->inst_offset
) {
2058 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
2059 ins
->sreg1
= last_ins
->sreg1
;
2062 #ifdef __mono_ppc64__
2063 case OP_LOADU4_MEMBASE
:
2064 case OP_LOADI4_MEMBASE
:
2065 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
) &&
2066 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2067 ins
->inst_offset
== last_ins
->inst_offset
) {
2068 ins
->opcode
= (ins
->opcode
== OP_LOADI4_MEMBASE
) ? OP_ICONV_TO_I4
: OP_ICONV_TO_U4
;
2069 ins
->sreg1
= last_ins
->sreg1
;
2074 ins
->opcode
= OP_MOVE
;
2078 if (ins
->dreg
== ins
->sreg1
) {
2079 MONO_DELETE_INS (bb
, ins
);
2083 * OP_MOVE sreg, dreg
2084 * OP_MOVE dreg, sreg
2086 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
2087 ins
->sreg1
== last_ins
->dreg
&&
2088 ins
->dreg
== last_ins
->sreg1
) {
2089 MONO_DELETE_INS (bb
, ins
);
2097 bb
->last_ins
= last_ins
;
2101 mono_arch_decompose_opts (MonoCompile
*cfg
, MonoInst
*ins
)
2103 switch (ins
->opcode
) {
2104 case OP_ICONV_TO_R_UN
: {
2105 // This value is OK as-is for both big and little endian because of how it is stored
2106 static const guint64 adjust_val
= 0x4330000000000000ULL
;
2107 int msw_reg
= mono_alloc_ireg (cfg
);
2108 int adj_reg
= mono_alloc_freg (cfg
);
2109 int tmp_reg
= mono_alloc_freg (cfg
);
2110 int basereg
= ppc_sp
;
2112 MONO_EMIT_NEW_ICONST (cfg
, msw_reg
, 0x43300000);
2113 if (!ppc_is_imm16 (offset
+ 4)) {
2114 basereg
= mono_alloc_ireg (cfg
);
2115 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2117 #if G_BYTE_ORDER == G_BIG_ENDIAN
2118 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, msw_reg
);
2119 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, ins
->sreg1
);
2121 // For little endian the words are reversed
2122 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, msw_reg
);
2123 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, ins
->sreg1
);
2125 MONO_EMIT_NEW_LOAD_R8 (cfg
, adj_reg
, &adjust_val
);
2126 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmp_reg
, basereg
, offset
);
2127 MONO_EMIT_NEW_BIALU (cfg
, OP_FSUB
, ins
->dreg
, tmp_reg
, adj_reg
);
2128 ins
->opcode
= OP_NOP
;
2131 #ifndef __mono_ppc64__
2132 case OP_ICONV_TO_R4
:
2133 case OP_ICONV_TO_R8
: {
2134 /* If we have a PPC_FEATURE_64 machine we can avoid
2135 this and use the fcfid instruction. Otherwise
2136 on an old 32-bit chip and we have to do this the
2138 if (!(cpu_hw_caps
& PPC_ISA_64
)) {
2139 /* FIXME: change precision for CEE_CONV_R4 */
2140 static const guint64 adjust_val
= 0x4330000080000000ULL
;
2141 int msw_reg
= mono_alloc_ireg (cfg
);
2142 int xored
= mono_alloc_ireg (cfg
);
2143 int adj_reg
= mono_alloc_freg (cfg
);
2144 int tmp_reg
= mono_alloc_freg (cfg
);
2145 int basereg
= ppc_sp
;
2147 if (!ppc_is_imm16 (offset
+ 4)) {
2148 basereg
= mono_alloc_ireg (cfg
);
2149 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2151 MONO_EMIT_NEW_ICONST (cfg
, msw_reg
, 0x43300000);
2152 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, msw_reg
);
2153 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_XOR_IMM
, xored
, ins
->sreg1
, 0x80000000);
2154 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, xored
);
2155 MONO_EMIT_NEW_LOAD_R8 (cfg
, adj_reg
, (gpointer
)&adjust_val
);
2156 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmp_reg
, basereg
, offset
);
2157 MONO_EMIT_NEW_BIALU (cfg
, OP_FSUB
, ins
->dreg
, tmp_reg
, adj_reg
);
2158 if (ins
->opcode
== OP_ICONV_TO_R4
)
2159 MONO_EMIT_NEW_UNALU (cfg
, OP_FCONV_TO_R4
, ins
->dreg
, ins
->dreg
);
2160 ins
->opcode
= OP_NOP
;
2166 int msw_reg
= mono_alloc_ireg (cfg
);
2167 int basereg
= ppc_sp
;
2169 if (!ppc_is_imm16 (offset
+ 4)) {
2170 basereg
= mono_alloc_ireg (cfg
);
2171 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2173 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, basereg
, offset
, ins
->sreg1
);
2174 #if G_BYTE_ORDER == G_BIG_ENDIAN
2175 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, msw_reg
, basereg
, offset
);
2177 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, msw_reg
, basereg
, offset
+4);
2179 MONO_EMIT_NEW_UNALU (cfg
, OP_PPC_CHECK_FINITE
, -1, msw_reg
);
2180 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, ins
->dreg
, ins
->sreg1
);
2181 ins
->opcode
= OP_NOP
;
2184 #ifdef __mono_ppc64__
2186 case OP_IADD_OVF_UN
:
2188 int shifted1_reg
= mono_alloc_ireg (cfg
);
2189 int shifted2_reg
= mono_alloc_ireg (cfg
);
2190 int result_shifted_reg
= mono_alloc_ireg (cfg
);
2192 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, shifted1_reg
, ins
->sreg1
, 32);
2193 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, shifted2_reg
, ins
->sreg2
, 32);
2194 MONO_EMIT_NEW_BIALU (cfg
, ins
->opcode
, result_shifted_reg
, shifted1_reg
, shifted2_reg
);
2195 if (ins
->opcode
== OP_IADD_OVF_UN
)
2196 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, ins
->dreg
, result_shifted_reg
, 32);
2198 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_IMM
, ins
->dreg
, result_shifted_reg
, 32);
2199 ins
->opcode
= OP_NOP
;
2209 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*ins
)
2211 switch (ins
->opcode
) {
2213 /* ADC sets the condition code */
2214 MONO_EMIT_NEW_BIALU (cfg
, OP_ADDCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2215 MONO_EMIT_NEW_BIALU (cfg
, OP_ADD_OVF_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2218 case OP_LADD_OVF_UN
:
2219 /* ADC sets the condition code */
2220 MONO_EMIT_NEW_BIALU (cfg
, OP_ADDCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2221 MONO_EMIT_NEW_BIALU (cfg
, OP_ADD_OVF_UN_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2225 /* SBB sets the condition code */
2226 MONO_EMIT_NEW_BIALU (cfg
, OP_SUBCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2227 MONO_EMIT_NEW_BIALU (cfg
, OP_SUB_OVF_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2230 case OP_LSUB_OVF_UN
:
2231 /* SBB sets the condition code */
2232 MONO_EMIT_NEW_BIALU (cfg
, OP_SUBCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2233 MONO_EMIT_NEW_BIALU (cfg
, OP_SUB_OVF_UN_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2237 /* From gcc generated code */
2238 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PPC_SUBFIC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
2239 MONO_EMIT_NEW_UNALU (cfg
, OP_PPC_SUBFZE
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
));
2248 * the branch_b0_table should maintain the order of these
2262 branch_b0_table
[] = {
2277 branch_b1_table
[] = {
2291 #define NEW_INS(cfg,dest,op) do { \
2292 MONO_INST_NEW((cfg), (dest), (op)); \
2293 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
2297 map_to_reg_reg_op (int op
)
2306 case OP_COMPARE_IMM
:
2308 case OP_ICOMPARE_IMM
:
2310 case OP_LCOMPARE_IMM
:
2328 case OP_LOAD_MEMBASE
:
2329 return OP_LOAD_MEMINDEX
;
2330 case OP_LOADI4_MEMBASE
:
2331 return OP_LOADI4_MEMINDEX
;
2332 case OP_LOADU4_MEMBASE
:
2333 return OP_LOADU4_MEMINDEX
;
2334 case OP_LOADI8_MEMBASE
:
2335 return OP_LOADI8_MEMINDEX
;
2336 case OP_LOADU1_MEMBASE
:
2337 return OP_LOADU1_MEMINDEX
;
2338 case OP_LOADI2_MEMBASE
:
2339 return OP_LOADI2_MEMINDEX
;
2340 case OP_LOADU2_MEMBASE
:
2341 return OP_LOADU2_MEMINDEX
;
2342 case OP_LOADI1_MEMBASE
:
2343 return OP_LOADI1_MEMINDEX
;
2344 case OP_LOADR4_MEMBASE
:
2345 return OP_LOADR4_MEMINDEX
;
2346 case OP_LOADR8_MEMBASE
:
2347 return OP_LOADR8_MEMINDEX
;
2348 case OP_STOREI1_MEMBASE_REG
:
2349 return OP_STOREI1_MEMINDEX
;
2350 case OP_STOREI2_MEMBASE_REG
:
2351 return OP_STOREI2_MEMINDEX
;
2352 case OP_STOREI4_MEMBASE_REG
:
2353 return OP_STOREI4_MEMINDEX
;
2354 case OP_STOREI8_MEMBASE_REG
:
2355 return OP_STOREI8_MEMINDEX
;
2356 case OP_STORE_MEMBASE_REG
:
2357 return OP_STORE_MEMINDEX
;
2358 case OP_STORER4_MEMBASE_REG
:
2359 return OP_STORER4_MEMINDEX
;
2360 case OP_STORER8_MEMBASE_REG
:
2361 return OP_STORER8_MEMINDEX
;
2362 case OP_STORE_MEMBASE_IMM
:
2363 return OP_STORE_MEMBASE_REG
;
2364 case OP_STOREI1_MEMBASE_IMM
:
2365 return OP_STOREI1_MEMBASE_REG
;
2366 case OP_STOREI2_MEMBASE_IMM
:
2367 return OP_STOREI2_MEMBASE_REG
;
2368 case OP_STOREI4_MEMBASE_IMM
:
2369 return OP_STOREI4_MEMBASE_REG
;
2370 case OP_STOREI8_MEMBASE_IMM
:
2371 return OP_STOREI8_MEMBASE_REG
;
2373 if (mono_op_imm_to_op (op
) == -1)
2374 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op
));
2375 return mono_op_imm_to_op (op
);
2378 //#define map_to_reg_reg_op(op) (cfg->new_ir? mono_op_imm_to_op (op): map_to_reg_reg_op (op))
2380 #define compare_opcode_is_unsigned(opcode) \
2381 (((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \
2382 ((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \
2383 ((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) || \
2384 ((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \
2385 ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \
2386 ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || \
2387 (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN || \
2388 (opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN))
2391 * Remove from the instruction list the instructions that can't be
2392 * represented with very simple instructions with no register
2396 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2398 MonoInst
*ins
, *next
, *temp
, *last_ins
= NULL
;
2401 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2403 switch (ins
->opcode
) {
2404 case OP_IDIV_UN_IMM
:
2407 case OP_IREM_UN_IMM
:
2408 CASE_PPC64 (OP_LREM_IMM
) {
2409 NEW_INS (cfg
, temp
, OP_ICONST
);
2410 temp
->inst_c0
= ins
->inst_imm
;
2411 temp
->dreg
= mono_alloc_ireg (cfg
);
2412 ins
->sreg2
= temp
->dreg
;
2413 if (ins
->opcode
== OP_IDIV_IMM
)
2414 ins
->opcode
= OP_IDIV
;
2415 else if (ins
->opcode
== OP_IREM_IMM
)
2416 ins
->opcode
= OP_IREM
;
2417 else if (ins
->opcode
== OP_IDIV_UN_IMM
)
2418 ins
->opcode
= OP_IDIV_UN
;
2419 else if (ins
->opcode
== OP_IREM_UN_IMM
)
2420 ins
->opcode
= OP_IREM_UN
;
2421 else if (ins
->opcode
== OP_LREM_IMM
)
2422 ins
->opcode
= OP_LREM
;
2424 /* handle rem separately */
2429 CASE_PPC64 (OP_LREM
)
2430 CASE_PPC64 (OP_LREM_UN
) {
2432 /* we change a rem dest, src1, src2 to
2433 * div temp1, src1, src2
2434 * mul temp2, temp1, src2
2435 * sub dest, src1, temp2
2437 if (ins
->opcode
== OP_IREM
|| ins
->opcode
== OP_IREM_UN
) {
2438 NEW_INS (cfg
, mul
, OP_IMUL
);
2439 NEW_INS (cfg
, temp
, ins
->opcode
== OP_IREM
? OP_IDIV
: OP_IDIV_UN
);
2440 ins
->opcode
= OP_ISUB
;
2442 NEW_INS (cfg
, mul
, OP_LMUL
);
2443 NEW_INS (cfg
, temp
, ins
->opcode
== OP_LREM
? OP_LDIV
: OP_LDIV_UN
);
2444 ins
->opcode
= OP_LSUB
;
2446 temp
->sreg1
= ins
->sreg1
;
2447 temp
->sreg2
= ins
->sreg2
;
2448 temp
->dreg
= mono_alloc_ireg (cfg
);
2449 mul
->sreg1
= temp
->dreg
;
2450 mul
->sreg2
= ins
->sreg2
;
2451 mul
->dreg
= mono_alloc_ireg (cfg
);
2452 ins
->sreg2
= mul
->dreg
;
2456 CASE_PPC64 (OP_LADD_IMM
)
2459 if (!ppc_is_imm16 (ins
->inst_imm
)) {
2460 NEW_INS (cfg
, temp
, OP_ICONST
);
2461 temp
->inst_c0
= ins
->inst_imm
;
2462 temp
->dreg
= mono_alloc_ireg (cfg
);
2463 ins
->sreg2
= temp
->dreg
;
2464 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2468 CASE_PPC64 (OP_LSUB_IMM
)
2470 if (!ppc_is_imm16 (-ins
->inst_imm
)) {
2471 NEW_INS (cfg
, temp
, OP_ICONST
);
2472 temp
->inst_c0
= ins
->inst_imm
;
2473 temp
->dreg
= mono_alloc_ireg (cfg
);
2474 ins
->sreg2
= temp
->dreg
;
2475 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2487 gboolean is_imm
= ((ins
->inst_imm
& 0xffff0000) && (ins
->inst_imm
& 0xffff));
2488 #ifdef __mono_ppc64__
2489 if (ins
->inst_imm
& 0xffffffff00000000ULL
)
2493 NEW_INS (cfg
, temp
, OP_ICONST
);
2494 temp
->inst_c0
= ins
->inst_imm
;
2495 temp
->dreg
= mono_alloc_ireg (cfg
);
2496 ins
->sreg2
= temp
->dreg
;
2497 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2506 NEW_INS (cfg
, temp
, OP_ICONST
);
2507 temp
->inst_c0
= ins
->inst_imm
;
2508 temp
->dreg
= mono_alloc_ireg (cfg
);
2509 ins
->sreg2
= temp
->dreg
;
2510 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2512 case OP_COMPARE_IMM
:
2513 case OP_ICOMPARE_IMM
:
2514 CASE_PPC64 (OP_LCOMPARE_IMM
)
2516 /* Branch opts can eliminate the branch */
2517 if (!next
|| (!(MONO_IS_COND_BRANCH_OP (next
) || MONO_IS_COND_EXC (next
) || MONO_IS_SETCC (next
)))) {
2518 ins
->opcode
= OP_NOP
;
2522 if (compare_opcode_is_unsigned (next
->opcode
)) {
2523 if (!ppc_is_uimm16 (ins
->inst_imm
)) {
2524 NEW_INS (cfg
, temp
, OP_ICONST
);
2525 temp
->inst_c0
= ins
->inst_imm
;
2526 temp
->dreg
= mono_alloc_ireg (cfg
);
2527 ins
->sreg2
= temp
->dreg
;
2528 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2531 if (!ppc_is_imm16 (ins
->inst_imm
)) {
2532 NEW_INS (cfg
, temp
, OP_ICONST
);
2533 temp
->inst_c0
= ins
->inst_imm
;
2534 temp
->dreg
= mono_alloc_ireg (cfg
);
2535 ins
->sreg2
= temp
->dreg
;
2536 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2542 CASE_PPC64 (OP_LMUL_IMM
)
2543 if (ins
->inst_imm
== 1) {
2544 ins
->opcode
= OP_MOVE
;
2547 if (ins
->inst_imm
== 0) {
2548 ins
->opcode
= OP_ICONST
;
2552 imm
= mono_is_power_of_two (ins
->inst_imm
);
2554 ins
->opcode
= OP_SHL_IMM
;
2555 ins
->inst_imm
= imm
;
2558 if (!ppc_is_imm16 (ins
->inst_imm
)) {
2559 NEW_INS (cfg
, temp
, OP_ICONST
);
2560 temp
->inst_c0
= ins
->inst_imm
;
2561 temp
->dreg
= mono_alloc_ireg (cfg
);
2562 ins
->sreg2
= temp
->dreg
;
2563 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2566 case OP_LOCALLOC_IMM
:
2567 NEW_INS (cfg
, temp
, OP_ICONST
);
2568 temp
->inst_c0
= ins
->inst_imm
;
2569 temp
->dreg
= mono_alloc_ireg (cfg
);
2570 ins
->sreg1
= temp
->dreg
;
2571 ins
->opcode
= OP_LOCALLOC
;
2573 case OP_LOAD_MEMBASE
:
2574 case OP_LOADI4_MEMBASE
:
2575 CASE_PPC64 (OP_LOADI8_MEMBASE
)
2576 case OP_LOADU4_MEMBASE
:
2577 case OP_LOADI2_MEMBASE
:
2578 case OP_LOADU2_MEMBASE
:
2579 case OP_LOADI1_MEMBASE
:
2580 case OP_LOADU1_MEMBASE
:
2581 case OP_LOADR4_MEMBASE
:
2582 case OP_LOADR8_MEMBASE
:
2583 case OP_STORE_MEMBASE_REG
:
2584 CASE_PPC64 (OP_STOREI8_MEMBASE_REG
)
2585 case OP_STOREI4_MEMBASE_REG
:
2586 case OP_STOREI2_MEMBASE_REG
:
2587 case OP_STOREI1_MEMBASE_REG
:
2588 case OP_STORER4_MEMBASE_REG
:
2589 case OP_STORER8_MEMBASE_REG
:
2590 /* we can do two things: load the immed in a register
2591 * and use an indexed load, or see if the immed can be
2592 * represented as an ad_imm + a load with a smaller offset
2593 * that fits. We just do the first for now, optimize later.
2595 if (ppc_is_imm16 (ins
->inst_offset
))
2597 NEW_INS (cfg
, temp
, OP_ICONST
);
2598 temp
->inst_c0
= ins
->inst_offset
;
2599 temp
->dreg
= mono_alloc_ireg (cfg
);
2600 ins
->sreg2
= temp
->dreg
;
2601 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2603 case OP_STORE_MEMBASE_IMM
:
2604 case OP_STOREI1_MEMBASE_IMM
:
2605 case OP_STOREI2_MEMBASE_IMM
:
2606 case OP_STOREI4_MEMBASE_IMM
:
2607 CASE_PPC64 (OP_STOREI8_MEMBASE_IMM
)
2608 NEW_INS (cfg
, temp
, OP_ICONST
);
2609 temp
->inst_c0
= ins
->inst_imm
;
2610 temp
->dreg
= mono_alloc_ireg (cfg
);
2611 ins
->sreg1
= temp
->dreg
;
2612 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2614 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
2617 if (cfg
->compile_aot
) {
2618 /* Keep these in the aot case */
2621 NEW_INS (cfg
, temp
, OP_ICONST
);
2622 temp
->inst_c0
= (gulong
)ins
->inst_p0
;
2623 temp
->dreg
= mono_alloc_ireg (cfg
);
2624 ins
->inst_basereg
= temp
->dreg
;
2625 ins
->inst_offset
= 0;
2626 ins
->opcode
= ins
->opcode
== OP_R4CONST
? OP_LOADR4_MEMBASE
: OP_LOADR8_MEMBASE
;
2628 /* make it handle the possibly big ins->inst_offset
2629 * later optimize to use lis + load_membase
2635 bb
->last_ins
= last_ins
;
2636 bb
->max_vreg
= cfg
->next_vreg
;
2640 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
2642 long offset
= cfg
->arch
.fp_conv_var_offset
;
2644 /* sreg is a float, dreg is an integer reg. ppc_f0 is used a scratch */
2645 #ifdef __mono_ppc64__
2647 ppc_fctidz (code
, ppc_f0
, sreg
);
2652 ppc_fctiwz (code
, ppc_f0
, sreg
);
2655 if (ppc_is_imm16 (offset
+ sub_offset
)) {
2656 ppc_stfd (code
, ppc_f0
, offset
, cfg
->frame_reg
);
2658 ppc_ldr (code
, dreg
, offset
+ sub_offset
, cfg
->frame_reg
);
2660 ppc_lwz (code
, dreg
, offset
+ sub_offset
, cfg
->frame_reg
);
2662 ppc_load (code
, dreg
, offset
);
2663 ppc_add (code
, dreg
, dreg
, cfg
->frame_reg
);
2664 ppc_stfd (code
, ppc_f0
, 0, dreg
);
2666 ppc_ldr (code
, dreg
, sub_offset
, dreg
);
2668 ppc_lwz (code
, dreg
, sub_offset
, dreg
);
2672 ppc_andid (code
, dreg
, dreg
, 0xff);
2674 ppc_andid (code
, dreg
, dreg
, 0xffff);
2675 #ifdef __mono_ppc64__
2677 ppc_clrldi (code
, dreg
, dreg
, 32);
2681 ppc_extsb (code
, dreg
, dreg
);
2683 ppc_extsh (code
, dreg
, dreg
);
2684 #ifdef __mono_ppc64__
2686 ppc_extsw (code
, dreg
, dreg
);
2693 emit_thunk (guint8
*code
, gconstpointer target
)
/* load sequence is 2 instructions on 32bit, 5 instructions on 64bit */
2698 ppc_load_sequence (code
, ppc_r0
, target
);
2700 ppc_mtctr (code
, ppc_r0
);
2701 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
2703 mono_arch_flush_icache (p
, code
- p
);
2707 handle_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
2709 MonoJitInfo
*ji
= NULL
;
2710 MonoThunkJitInfo
*info
;
2713 guint8
*orig_target
;
2714 guint8
*target_thunk
;
2717 domain
= mono_domain_get ();
2721 * This can be called multiple times during JITting,
2722 * save the current position in cfg->arch to avoid
2723 * doing a O(n^2) search.
2725 if (!cfg
->arch
.thunks
) {
2726 cfg
->arch
.thunks
= cfg
->thunks
;
2727 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
2729 thunks
= cfg
->arch
.thunks
;
2730 thunks_size
= cfg
->arch
.thunks_size
;
2732 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
2733 g_assert_not_reached ();
2736 g_assert (*(guint32
*)thunks
== 0);
2737 emit_thunk (thunks
, target
);
2738 ppc_patch (code
, thunks
);
2740 cfg
->arch
.thunks
+= THUNK_SIZE
;
2741 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
2743 ji
= mini_jit_info_table_find (domain
, (char *) code
, NULL
);
2745 info
= mono_jit_info_get_thunk_info (ji
);
2748 thunks
= (guint8
*) ji
->code_start
+ info
->thunks_offset
;
2749 thunks_size
= info
->thunks_size
;
2751 orig_target
= mono_arch_get_call_target (code
+ 4);
2753 mono_mini_arch_lock ();
2755 target_thunk
= NULL
;
2756 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
2757 /* The call already points to a thunk, because of trampolines etc. */
2758 target_thunk
= orig_target
;
2760 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
2761 if (((guint32
*) p
) [0] == 0) {
2766 /* ppc64 requires 5 instructions, 32bit two instructions */
2767 #ifdef __mono_ppc64__
2768 const int const_load_size
= 5;
2770 const int const_load_size
= 2;
2772 guint32 load
[const_load_size
];
2773 guchar
*templ
= (guchar
*) load
;
2774 ppc_load_sequence (templ
, ppc_r0
, target
);
2775 if (!memcmp (p
, load
, const_load_size
)) {
2776 /* Thunk already points to target */
2784 // g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
2786 if (!target_thunk
) {
2787 mono_mini_arch_unlock ();
2788 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
2789 g_assert_not_reached ();
2792 emit_thunk (target_thunk
, target
);
2793 ppc_patch (code
, target_thunk
);
2795 mono_mini_arch_unlock ();
2800 patch_ins (guint8
*code
, guint32 ins
)
2802 *(guint32
*)code
= ins
;
2803 mono_arch_flush_icache (code
, 4);
2807 ppc_patch_full (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
, gboolean is_fd
)
2809 guint32 ins
= *(guint32
*)code
;
2810 guint32 prim
= ins
>> 26;
2813 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2815 // prefer relative branches, they are more position independent (e.g. for AOT compilation).
2816 gint diff
= target
- code
;
2819 if (diff
<= 33554431){
2820 ins
= (18 << 26) | (diff
) | (ins
& 1);
2821 patch_ins (code
, ins
);
2825 /* diff between 0 and -33554432 */
2826 if (diff
>= -33554432){
2827 ins
= (18 << 26) | (diff
& ~0xfc000000) | (ins
& 1);
2828 patch_ins (code
, ins
);
2833 if ((glong
)target
>= 0){
2834 if ((glong
)target
<= 33554431){
2835 ins
= (18 << 26) | ((gulong
) target
) | (ins
& 1) | 2;
2836 patch_ins (code
, ins
);
2840 if ((glong
)target
>= -33554432){
2841 ins
= (18 << 26) | (((gulong
)target
) & ~0xfc000000) | (ins
& 1) | 2;
2842 patch_ins (code
, ins
);
2847 handle_thunk (cfg
, domain
, code
, target
);
2850 g_assert_not_reached ();
2858 guint32 li
= (gulong
)target
;
2859 ins
= (ins
& 0xffff0000) | (ins
& 3);
2860 ovf
= li
& 0xffff0000;
2861 if (ovf
!= 0 && ovf
!= 0xffff0000)
2862 g_assert_not_reached ();
2865 // FIXME: assert the top bits of li are 0
2867 gint diff
= target
- code
;
2868 ins
= (ins
& 0xffff0000) | (ins
& 3);
2869 ovf
= diff
& 0xffff0000;
2870 if (ovf
!= 0 && ovf
!= 0xffff0000)
2871 g_assert_not_reached ();
2875 patch_ins (code
, ins
);
2879 if (prim
== 15 || ins
== 0x4e800021 || ins
== 0x4e800020 || ins
== 0x4e800420) {
2880 #ifdef __mono_ppc64__
2881 guint32
*seq
= (guint32
*)code
;
2882 guint32
*branch_ins
;
2884 /* the trampoline code will try to patch the blrl, blr, bcctr */
2885 if (ins
== 0x4e800021 || ins
== 0x4e800020 || ins
== 0x4e800420) {
2887 if (ppc_is_load_op (seq
[-3]) || ppc_opcode (seq
[-3]) == 31) /* ld || lwz || mr */
2892 if (ppc_is_load_op (seq
[5])
2893 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
2894 /* With function descs we need to do more careful
2896 || ppc_opcode (seq
[5]) == 31 /* ld || lwz || mr */
2899 branch_ins
= seq
+ 8;
2901 branch_ins
= seq
+ 6;
2904 seq
= (guint32
*)code
;
2905 /* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */
2906 g_assert (mono_ppc_is_direct_call_sequence (branch_ins
));
2908 if (ppc_is_load_op (seq
[5])) {
2909 g_assert (ppc_is_load_op (seq
[6]));
2912 guint8
*buf
= (guint8
*)&seq
[5];
2913 ppc_mr (buf
, PPC_CALL_REG
, ppc_r12
);
2918 target
= mono_get_addr_from_ftnptr ((gpointer
)target
);
2921 /* FIXME: make this thread safe */
2922 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
2923 /* FIXME: we're assuming we're using r12 here */
2924 ppc_load_ptr_sequence (code
, ppc_r12
, target
);
2926 ppc_load_ptr_sequence (code
, PPC_CALL_REG
, target
);
2928 mono_arch_flush_icache ((guint8
*)seq
, 28);
2931 /* the trampoline code will try to patch the blrl, blr, bcctr */
2932 if (ins
== 0x4e800021 || ins
== 0x4e800020 || ins
== 0x4e800420) {
2935 /* this is the lis/ori/mtlr/blrl sequence */
2936 seq
= (guint32
*)code
;
2937 g_assert ((seq
[0] >> 26) == 15);
2938 g_assert ((seq
[1] >> 26) == 24);
2939 g_assert ((seq
[2] >> 26) == 31);
2940 g_assert (seq
[3] == 0x4e800021 || seq
[3] == 0x4e800020 || seq
[3] == 0x4e800420);
2941 /* FIXME: make this thread safe */
2942 ppc_lis (code
, PPC_CALL_REG
, (guint32
)(target
) >> 16);
2943 ppc_ori (code
, PPC_CALL_REG
, PPC_CALL_REG
, (guint32
)(target
) & 0xffff);
2944 mono_arch_flush_icache (code
- 8, 8);
2947 g_assert_not_reached ();
2949 // g_print ("patched with 0x%08x\n", ins);
2953 ppc_patch (guchar
*code
, const guchar
*target
)
2955 ppc_patch_full (NULL
, NULL
, code
, target
, FALSE
);
2959 mono_ppc_patch (guchar
*code
, const guchar
*target
)
2961 ppc_patch (code
, target
);
2965 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
2967 switch (ins
->opcode
) {
2970 case OP_FCALL_MEMBASE
:
2971 if (ins
->dreg
!= ppc_f1
)
2972 ppc_fmr (code
, ins
->dreg
, ppc_f1
);
2980 emit_reserve_param_area (MonoCompile
*cfg
, guint8
*code
)
2982 long size
= cfg
->param_area
;
2984 size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
2985 size
&= -MONO_ARCH_FRAME_ALIGNMENT
;
2990 ppc_ldptr (code
, ppc_r0
, 0, ppc_sp
);
2991 if (ppc_is_imm16 (-size
)) {
2992 ppc_stptr_update (code
, ppc_r0
, -size
, ppc_sp
);
2994 ppc_load (code
, ppc_r12
, -size
);
2995 ppc_stptr_update_indexed (code
, ppc_r0
, ppc_sp
, ppc_r12
);
3002 emit_unreserve_param_area (MonoCompile
*cfg
, guint8
*code
)
3004 long size
= cfg
->param_area
;
3006 size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
3007 size
&= -MONO_ARCH_FRAME_ALIGNMENT
;
3012 ppc_ldptr (code
, ppc_r0
, 0, ppc_sp
);
3013 if (ppc_is_imm16 (size
)) {
3014 ppc_stptr_update (code
, ppc_r0
, size
, ppc_sp
);
3016 ppc_load (code
, ppc_r12
, size
);
3017 ppc_stptr_update_indexed (code
, ppc_r0
, ppc_sp
, ppc_r12
);
3023 #define MASK_SHIFT_IMM(i) ((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f))
3027 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3029 MonoInst
*ins
, *next
;
3031 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
3032 MonoInst
*last_ins
= NULL
;
3036 /* we don't align basic blocks of loops on ppc */
3038 if (cfg
->verbose_level
> 2)
3039 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
3041 cpos
= bb
->max_offset
;
3043 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3044 const guint offset
= code
- cfg
->native_code
;
3045 set_code_cursor (cfg
, code
);
3046 max_len
= ins_get_size (ins
->opcode
);
3047 code
= realloc_code (cfg
, max_len
);
3048 // if (ins->cil_code)
3049 // g_print ("cil code\n");
3050 mono_debug_record_line_number (cfg
, ins
, offset
);
3052 switch (normalize_opcode (ins
->opcode
)) {
3053 case OP_RELAXED_NOP
:
3056 case OP_DUMMY_ICONST
:
3057 case OP_DUMMY_I8CONST
:
3058 case OP_DUMMY_R8CONST
:
3059 case OP_DUMMY_R4CONST
:
3060 case OP_NOT_REACHED
:
3063 case OP_IL_SEQ_POINT
:
3064 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3066 case OP_SEQ_POINT
: {
3069 if (cfg
->compile_aot
)
3073 * Read from the single stepping trigger page. This will cause a
3074 * SIGSEGV when single stepping is enabled.
3075 * We do this _before_ the breakpoint, so single stepping after
3076 * a breakpoint is hit will step to the next IL offset.
3078 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
3079 ppc_load (code
, ppc_r12
, (gsize
)ss_trigger_page
);
3080 ppc_ldptr (code
, ppc_r12
, 0, ppc_r12
);
3083 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3086 * A placeholder for a possible breakpoint inserted by
3087 * mono_arch_set_breakpoint ().
3089 for (i
= 0; i
< BREAKPOINT_SIZE
/ 4; ++i
)
3094 ppc_mullw (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3095 ppc_mulhw (code
, ppc_r3
, ins
->sreg1
, ins
->sreg2
);
3096 ppc_mr (code
, ppc_r4
, ppc_r0
);
3099 ppc_mullw (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3100 ppc_mulhwu (code
, ppc_r3
, ins
->sreg1
, ins
->sreg2
);
3101 ppc_mr (code
, ppc_r4
, ppc_r0
);
3103 case OP_MEMORY_BARRIER
:
3106 case OP_STOREI1_MEMBASE_REG
:
3107 if (ppc_is_imm16 (ins
->inst_offset
)) {
3108 ppc_stb (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3110 if (ppc_is_imm32 (ins
->inst_offset
)) {
3111 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3112 ppc_stb (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3114 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3115 ppc_stbx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3119 case OP_STOREI2_MEMBASE_REG
:
3120 if (ppc_is_imm16 (ins
->inst_offset
)) {
3121 ppc_sth (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3123 if (ppc_is_imm32 (ins
->inst_offset
)) {
3124 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3125 ppc_sth (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3127 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3128 ppc_sthx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3132 case OP_STORE_MEMBASE_REG
:
3133 if (ppc_is_imm16 (ins
->inst_offset
)) {
3134 ppc_stptr (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3136 if (ppc_is_imm32 (ins
->inst_offset
)) {
3137 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3138 ppc_stptr (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3140 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3141 ppc_stptr_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3145 #ifdef MONO_ARCH_ILP32
3146 case OP_STOREI8_MEMBASE_REG
:
3147 if (ppc_is_imm16 (ins
->inst_offset
)) {
3148 ppc_str (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3150 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3151 ppc_str_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3155 case OP_STOREI1_MEMINDEX
:
3156 ppc_stbx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3158 case OP_STOREI2_MEMINDEX
:
3159 ppc_sthx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3161 case OP_STORE_MEMINDEX
:
3162 ppc_stptr_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3165 g_assert_not_reached ();
3167 case OP_LOAD_MEMBASE
:
3168 if (ppc_is_imm16 (ins
->inst_offset
)) {
3169 ppc_ldptr (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3171 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3172 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3173 ppc_ldptr (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3175 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3176 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3180 case OP_LOADI4_MEMBASE
:
3181 #ifdef __mono_ppc64__
3182 if (ppc_is_imm16 (ins
->inst_offset
)) {
3183 ppc_lwa (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3185 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3186 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3187 ppc_lwa (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3189 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3190 ppc_lwax (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3195 case OP_LOADU4_MEMBASE
:
3196 if (ppc_is_imm16 (ins
->inst_offset
)) {
3197 ppc_lwz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3199 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3200 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3201 ppc_lwz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3203 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3204 ppc_lwzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3208 case OP_LOADI1_MEMBASE
:
3209 case OP_LOADU1_MEMBASE
:
3210 if (ppc_is_imm16 (ins
->inst_offset
)) {
3211 ppc_lbz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3213 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3214 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3215 ppc_lbz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3217 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3218 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3221 if (ins
->opcode
== OP_LOADI1_MEMBASE
)
3222 ppc_extsb (code
, ins
->dreg
, ins
->dreg
);
3224 case OP_LOADU2_MEMBASE
:
3225 if (ppc_is_imm16 (ins
->inst_offset
)) {
3226 ppc_lhz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3228 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3229 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3230 ppc_lhz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3232 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3233 ppc_lhzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3237 case OP_LOADI2_MEMBASE
:
3238 if (ppc_is_imm16 (ins
->inst_offset
)) {
3239 ppc_lha (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3241 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3242 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3243 ppc_lha (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3245 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3246 ppc_lhax (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3250 #ifdef MONO_ARCH_ILP32
3251 case OP_LOADI8_MEMBASE
:
3252 if (ppc_is_imm16 (ins
->inst_offset
)) {
3253 ppc_ldr (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3255 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3256 ppc_ldr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3260 case OP_LOAD_MEMINDEX
:
3261 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3263 case OP_LOADI4_MEMINDEX
:
3264 #ifdef __mono_ppc64__
3265 ppc_lwax (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3268 case OP_LOADU4_MEMINDEX
:
3269 ppc_lwzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3271 case OP_LOADU2_MEMINDEX
:
3272 ppc_lhzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3274 case OP_LOADI2_MEMINDEX
:
3275 ppc_lhax (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3277 case OP_LOADU1_MEMINDEX
:
3278 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3280 case OP_LOADI1_MEMINDEX
:
3281 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3282 ppc_extsb (code
, ins
->dreg
, ins
->dreg
);
3284 case OP_ICONV_TO_I1
:
3285 CASE_PPC64 (OP_LCONV_TO_I1
)
3286 ppc_extsb (code
, ins
->dreg
, ins
->sreg1
);
3288 case OP_ICONV_TO_I2
:
3289 CASE_PPC64 (OP_LCONV_TO_I2
)
3290 ppc_extsh (code
, ins
->dreg
, ins
->sreg1
);
3292 case OP_ICONV_TO_U1
:
3293 CASE_PPC64 (OP_LCONV_TO_U1
)
3294 ppc_clrlwi (code
, ins
->dreg
, ins
->sreg1
, 24);
3296 case OP_ICONV_TO_U2
:
3297 CASE_PPC64 (OP_LCONV_TO_U2
)
3298 ppc_clrlwi (code
, ins
->dreg
, ins
->sreg1
, 16);
3302 CASE_PPC64 (OP_LCOMPARE
)
3303 L
= (sizeof (target_mgreg_t
) == 4 || ins
->opcode
== OP_ICOMPARE
) ? 0 : 1;
3305 if (next
&& compare_opcode_is_unsigned (next
->opcode
))
3306 ppc_cmpl (code
, 0, L
, ins
->sreg1
, ins
->sreg2
);
3308 ppc_cmp (code
, 0, L
, ins
->sreg1
, ins
->sreg2
);
3310 case OP_COMPARE_IMM
:
3311 case OP_ICOMPARE_IMM
:
3312 CASE_PPC64 (OP_LCOMPARE_IMM
)
3313 L
= (sizeof (target_mgreg_t
) == 4 || ins
->opcode
== OP_ICOMPARE_IMM
) ? 0 : 1;
3315 if (next
&& compare_opcode_is_unsigned (next
->opcode
)) {
3316 if (ppc_is_uimm16 (ins
->inst_imm
)) {
3317 ppc_cmpli (code
, 0, L
, ins
->sreg1
, (ins
->inst_imm
& 0xffff));
3319 g_assert_not_reached ();
3322 if (ppc_is_imm16 (ins
->inst_imm
)) {
3323 ppc_cmpi (code
, 0, L
, ins
->sreg1
, (ins
->inst_imm
& 0xffff));
3325 g_assert_not_reached ();
3331 * gdb does not like encountering a trap in the debugged code. So
3332 * instead of emitting a trap, we emit a call a C function and place a
3336 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3337 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
3338 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3339 ppc_load_func (code
, PPC_CALL_REG
, 0);
3340 ppc_mtlr (code
, PPC_CALL_REG
);
3348 ppc_addco (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3351 CASE_PPC64 (OP_LADD
)
3352 ppc_add (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3356 ppc_adde (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3359 if (ppc_is_imm16 (ins
->inst_imm
)) {
3360 ppc_addic (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3362 g_assert_not_reached ();
3367 CASE_PPC64 (OP_LADD_IMM
)
3368 if (ppc_is_imm16 (ins
->inst_imm
)) {
3369 ppc_addi (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3371 g_assert_not_reached ();
3375 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3377 ppc_addo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3378 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3379 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3380 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3382 case OP_IADD_OVF_UN
:
3383 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3385 ppc_addco (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3386 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3387 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3388 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3391 CASE_PPC64 (OP_LSUB_OVF
)
3392 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3394 ppc_subfo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3395 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3396 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3397 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3399 case OP_ISUB_OVF_UN
:
3400 CASE_PPC64 (OP_LSUB_OVF_UN
)
3401 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3403 ppc_subfc (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3404 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3405 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3406 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3408 case OP_ADD_OVF_CARRY
:
3409 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3411 ppc_addeo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3412 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3413 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3414 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3416 case OP_ADD_OVF_UN_CARRY
:
3417 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3419 ppc_addeo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3420 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3421 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3422 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3424 case OP_SUB_OVF_CARRY
:
3425 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3427 ppc_subfeo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3428 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3429 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3430 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3432 case OP_SUB_OVF_UN_CARRY
:
3433 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3435 ppc_subfeo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3436 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3437 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3438 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3442 ppc_subfco (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3445 CASE_PPC64 (OP_LSUB
)
3446 ppc_subf (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3450 ppc_subfe (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3454 CASE_PPC64 (OP_LSUB_IMM
)
3455 // we add the negated value
3456 if (ppc_is_imm16 (-ins
->inst_imm
))
3457 ppc_addi (code
, ins
->dreg
, ins
->sreg1
, -ins
->inst_imm
);
3459 g_assert_not_reached ();
3463 g_assert (ppc_is_imm16 (ins
->inst_imm
));
3464 ppc_subfic (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3467 ppc_subfze (code
, ins
->dreg
, ins
->sreg1
);
3470 CASE_PPC64 (OP_LAND
)
/* FIXME: the ppc macros are inconsistent here: put dest as the first arg! */
3472 ppc_and (code
, ins
->sreg1
, ins
->dreg
, ins
->sreg2
);
3476 CASE_PPC64 (OP_LAND_IMM
)
3477 if (!(ins
->inst_imm
& 0xffff0000)) {
3478 ppc_andid (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3479 } else if (!(ins
->inst_imm
& 0xffff)) {
3480 ppc_andisd (code
, ins
->sreg1
, ins
->dreg
, ((guint32
)ins
->inst_imm
>> 16));
3482 g_assert_not_reached ();
3486 CASE_PPC64 (OP_LDIV
) {
3487 guint8
*divisor_is_m1
;
3488 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3490 ppc_compare_reg_imm (code
, 0, ins
->sreg2
, -1);
3491 divisor_is_m1
= code
;
3492 ppc_bc (code
, PPC_BR_FALSE
| PPC_BR_LIKELY
, PPC_BR_EQ
, 0);
3493 ppc_lis (code
, ppc_r0
, 0x8000);
3494 #ifdef __mono_ppc64__
3495 if (ins
->opcode
== OP_LDIV
)
3496 ppc_sldi (code
, ppc_r0
, ppc_r0
, 32);
3498 ppc_compare (code
, 0, ins
->sreg1
, ppc_r0
);
3499 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3500 ppc_patch (divisor_is_m1
, code
);
3501 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3503 if (ins
->opcode
== OP_IDIV
)
3504 ppc_divwod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3505 #ifdef __mono_ppc64__
3507 ppc_divdod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3509 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3510 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3511 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "DivideByZeroException");
3515 CASE_PPC64 (OP_LDIV_UN
)
3516 if (ins
->opcode
== OP_IDIV_UN
)
3517 ppc_divwuod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3518 #ifdef __mono_ppc64__
3520 ppc_divduod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3522 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3523 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3524 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "DivideByZeroException");
3530 g_assert_not_reached ();
3533 ppc_or (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3537 CASE_PPC64 (OP_LOR_IMM
)
3538 if (!(ins
->inst_imm
& 0xffff0000)) {
3539 ppc_ori (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3540 } else if (!(ins
->inst_imm
& 0xffff)) {
3541 ppc_oris (code
, ins
->dreg
, ins
->sreg1
, ((guint32
)(ins
->inst_imm
) >> 16));
3543 g_assert_not_reached ();
3547 CASE_PPC64 (OP_LXOR
)
3548 ppc_xor (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3552 CASE_PPC64 (OP_LXOR_IMM
)
3553 if (!(ins
->inst_imm
& 0xffff0000)) {
3554 ppc_xori (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3555 } else if (!(ins
->inst_imm
& 0xffff)) {
3556 ppc_xoris (code
, ins
->sreg1
, ins
->dreg
, ((guint32
)(ins
->inst_imm
) >> 16));
3558 g_assert_not_reached ();
3562 CASE_PPC64 (OP_LSHL
)
3563 ppc_shift_left (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3567 CASE_PPC64 (OP_LSHL_IMM
)
3568 ppc_shift_left_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3571 ppc_sraw (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3574 ppc_shift_right_arith_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3577 if (MASK_SHIFT_IMM (ins
->inst_imm
))
3578 ppc_shift_right_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3580 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
3583 ppc_srw (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3586 CASE_PPC64 (OP_LNOT
)
3587 ppc_not (code
, ins
->dreg
, ins
->sreg1
);
3590 CASE_PPC64 (OP_LNEG
)
3591 ppc_neg (code
, ins
->dreg
, ins
->sreg1
);
3594 CASE_PPC64 (OP_LMUL
)
3595 ppc_multiply (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3599 CASE_PPC64 (OP_LMUL_IMM
)
3600 if (ppc_is_imm16 (ins
->inst_imm
)) {
3601 ppc_mulli (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3603 g_assert_not_reached ();
3607 CASE_PPC64 (OP_LMUL_OVF
)
3608 /* we annot use mcrxr, since it's not implemented on some processors
3609 * XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3611 if (ins
->opcode
== OP_IMUL_OVF
)
3612 ppc_mullwo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3613 #ifdef __mono_ppc64__
3615 ppc_mulldo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3617 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3618 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3619 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3621 case OP_IMUL_OVF_UN
:
3622 CASE_PPC64 (OP_LMUL_OVF_UN
)
3623 /* we first multiply to get the high word and compare to 0
3624 * to set the flags, then the result is discarded and then
3625 * we multiply to get the lower * bits result
3627 if (ins
->opcode
== OP_IMUL_OVF_UN
)
3628 ppc_mulhwu (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3629 #ifdef __mono_ppc64__
3631 ppc_mulhdu (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3633 ppc_cmpi (code
, 0, 0, ppc_r0
, 0);
3634 EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN
- CEE_BEQ
, "OverflowException");
3635 ppc_multiply (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3638 ppc_load (code
, ins
->dreg
, ins
->inst_c0
);
3641 ppc_load (code
, ins
->dreg
, ins
->inst_l
);
3644 case OP_LOAD_GOTADDR
:
3645 /* The PLT implementation depends on this */
3646 g_assert (ins
->dreg
== ppc_r30
);
3648 code
= mono_arch_emit_load_got_addr (cfg
->native_code
, code
, cfg
, NULL
);
3651 // FIXME: Fix max instruction length
3652 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_right
->inst_i1
, ins
->inst_right
->inst_p0
);
3653 /* arch_emit_got_access () patches this */
3654 ppc_load32 (code
, ppc_r0
, 0);
3655 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3658 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
3659 ppc_load_sequence (code
, ins
->dreg
, 0);
3661 CASE_PPC32 (OP_ICONV_TO_I4
)
3662 CASE_PPC32 (OP_ICONV_TO_U4
)
3664 if (ins
->dreg
!= ins
->sreg1
)
3665 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
3668 int saved
= ins
->sreg1
;
3669 if (ins
->sreg1
== ppc_r3
) {
3670 ppc_mr (code
, ppc_r0
, ins
->sreg1
);
3673 if (ins
->sreg2
!= ppc_r3
)
3674 ppc_mr (code
, ppc_r3
, ins
->sreg2
);
3675 if (saved
!= ppc_r4
)
3676 ppc_mr (code
, ppc_r4
, saved
);
3680 if (ins
->dreg
!= ins
->sreg1
)
3681 ppc_fmr (code
, ins
->dreg
, ins
->sreg1
);
3683 case OP_MOVE_F_TO_I4
:
3684 ppc_stfs (code
, ins
->sreg1
, -4, ppc_r1
);
3685 ppc_ldptr (code
, ins
->dreg
, -4, ppc_r1
);
3687 case OP_MOVE_I4_TO_F
:
3688 ppc_stw (code
, ins
->sreg1
, -4, ppc_r1
);
3689 ppc_lfs (code
, ins
->dreg
, -4, ppc_r1
);
3691 #ifdef __mono_ppc64__
3692 case OP_MOVE_F_TO_I8
:
3693 ppc_stfd (code
, ins
->sreg1
, -8, ppc_r1
);
3694 ppc_ldptr (code
, ins
->dreg
, -8, ppc_r1
);
3696 case OP_MOVE_I8_TO_F
:
3697 ppc_stptr (code
, ins
->sreg1
, -8, ppc_r1
);
3698 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
3701 case OP_FCONV_TO_R4
:
3702 ppc_frsp (code
, ins
->dreg
, ins
->sreg1
);
3705 case OP_TAILCALL_PARAMETER
:
3706 // This opcode helps compute sizes, i.e.
3707 // of the subsequent OP_TAILCALL, but contributes no code.
3708 g_assert (ins
->next
);
3713 MonoCallInst
*call
= (MonoCallInst
*)ins
;
3716 * Keep in sync with mono_arch_emit_epilog
3718 g_assert (!cfg
->method
->save_lmf
);
3720 * Note: we can use ppc_r12 here because it is dead anyway:
3721 * we're leaving the method.
3723 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
3724 long ret_offset
= cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
;
3725 if (ppc_is_imm16 (ret_offset
)) {
3726 ppc_ldptr (code
, ppc_r0
, ret_offset
, cfg
->frame_reg
);
3728 ppc_load (code
, ppc_r12
, ret_offset
);
3729 ppc_ldptr_indexed (code
, ppc_r0
, cfg
->frame_reg
, ppc_r12
);
3731 ppc_mtlr (code
, ppc_r0
);
3734 if (ppc_is_imm16 (cfg
->stack_usage
)) {
3735 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
);
3737 /* cfg->stack_usage is an int, so we can use
3738 * an addis/addi sequence here even in 64-bit. */
3739 ppc_addis (code
, ppc_r12
, cfg
->frame_reg
, ppc_ha(cfg
->stack_usage
));
3740 ppc_addi (code
, ppc_r12
, ppc_r12
, cfg
->stack_usage
);
3742 if (!cfg
->method
->save_lmf
) {
3744 for (i
= 31; i
>= 13; --i
) {
3745 if (cfg
->used_int_regs
& (1 << i
)) {
3746 pos
+= sizeof (target_mgreg_t
);
3747 ppc_ldptr (code
, i
, -pos
, ppc_r12
);
3751 /* FIXME restore from MonoLMF: though this can't happen yet */
3754 /* Copy arguments on the stack to our argument area */
3755 if (call
->stack_usage
) {
3756 code
= emit_memcpy (code
, call
->stack_usage
, ppc_r12
, PPC_STACK_PARAM_OFFSET
, ppc_sp
, PPC_STACK_PARAM_OFFSET
);
3757 /* r12 was clobbered */
3758 g_assert (cfg
->frame_reg
== ppc_sp
);
3759 if (ppc_is_imm16 (cfg
->stack_usage
)) {
3760 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
);
3762 /* cfg->stack_usage is an int, so we can use
3763 * an addis/addi sequence here even in 64-bit. */
3764 ppc_addis (code
, ppc_r12
, cfg
->frame_reg
, ppc_ha(cfg
->stack_usage
));
3765 ppc_addi (code
, ppc_r12
, ppc_r12
, cfg
->stack_usage
);
3769 ppc_mr (code
, ppc_sp
, ppc_r12
);
3770 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
3771 cfg
->thunk_area
+= THUNK_SIZE
;
3772 if (cfg
->compile_aot
) {
3773 /* arch_emit_got_access () patches this */
3774 ppc_load32 (code
, ppc_r0
, 0);
3775 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3776 ppc_ldptr_indexed (code
, ppc_r12
, ppc_r30
, ppc_r0
);
3777 ppc_ldptr (code
, ppc_r0
, 0, ppc_r12
);
3779 ppc_ldptr_indexed (code
, ppc_r0
, ppc_r30
, ppc_r0
);
3781 ppc_mtctr (code
, ppc_r0
);
3782 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
3789 /* ensure ins->sreg1 is not NULL */
3790 ppc_ldptr (code
, ppc_r0
, 0, ins
->sreg1
);
3793 long cookie_offset
= cfg
->sig_cookie
+ cfg
->stack_usage
;
3794 if (ppc_is_imm16 (cookie_offset
)) {
3795 ppc_addi (code
, ppc_r0
, cfg
->frame_reg
, cookie_offset
);
3797 ppc_load (code
, ppc_r0
, cookie_offset
);
3798 ppc_add (code
, ppc_r0
, cfg
->frame_reg
, ppc_r0
);
3800 ppc_stptr (code
, ppc_r0
, 0, ins
->sreg1
);
3809 call
= (MonoCallInst
*)ins
;
3810 mono_call_add_patch_info (cfg
, call
, offset
);
3811 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3812 ppc_load_func (code
, PPC_CALL_REG
, 0);
3813 ppc_mtlr (code
, PPC_CALL_REG
);
3818 /* FIXME: this should be handled somewhere else in the new jit */
3819 code
= emit_move_return_value (cfg
, ins
, code
);
3825 case OP_VOIDCALL_REG
:
3827 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3828 ppc_ldptr (code
, ppc_r0
, 0, ins
->sreg1
);
3829 /* FIXME: if we know that this is a method, we
3830 can omit this load */
3831 ppc_ldptr (code
, ppc_r2
, 8, ins
->sreg1
);
3832 ppc_mtlr (code
, ppc_r0
);
3834 #if (_CALL_ELF == 2)
3835 if (ins
->flags
& MONO_INST_HAS_METHOD
) {
3836 // Not a global entry point
3838 // Need to set up r12 with function entry address for global entry point
3839 if (ppc_r12
!= ins
->sreg1
) {
3840 ppc_mr(code
,ppc_r12
,ins
->sreg1
);
3844 ppc_mtlr (code
, ins
->sreg1
);
3847 /* FIXME: this should be handled somewhere else in the new jit */
3848 code
= emit_move_return_value (cfg
, ins
, code
);
3850 case OP_FCALL_MEMBASE
:
3851 case OP_LCALL_MEMBASE
:
3852 case OP_VCALL_MEMBASE
:
3853 case OP_VCALL2_MEMBASE
:
3854 case OP_VOIDCALL_MEMBASE
:
3855 case OP_CALL_MEMBASE
:
3856 if (cfg
->compile_aot
&& ins
->sreg1
== ppc_r12
) {
3857 /* The trampolines clobber this */
3858 ppc_mr (code
, ppc_r29
, ins
->sreg1
);
3859 ppc_ldptr (code
, ppc_r0
, ins
->inst_offset
, ppc_r29
);
3861 ppc_ldptr (code
, ppc_r0
, ins
->inst_offset
, ins
->sreg1
);
3863 ppc_mtlr (code
, ppc_r0
);
3865 /* FIXME: this should be handled somewhere else in the new jit */
3866 code
= emit_move_return_value (cfg
, ins
, code
);
3869 guint8
* zero_loop_jump
, * zero_loop_start
;
3870 /* keep alignment */
3871 int alloca_waste
= PPC_STACK_PARAM_OFFSET
+ cfg
->param_area
+ 31;
3872 int area_offset
= alloca_waste
;
3874 ppc_addi (code
, ppc_r12
, ins
->sreg1
, alloca_waste
+ 31);
3875 /* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
3876 ppc_clear_right_imm (code
, ppc_r12
, ppc_r12
, 4);
3877 /* use ctr to store the number of words to 0 if needed */
3878 if (ins
->flags
& MONO_INST_INIT
) {
3879 /* we zero 4 bytes at a time:
3880 * we add 7 instead of 3 so that we set the counter to
3881 * at least 1, otherwise the bdnz instruction will make
3882 * it negative and iterate billions of times.
3884 ppc_addi (code
, ppc_r0
, ins
->sreg1
, 7);
3885 ppc_shift_right_arith_imm (code
, ppc_r0
, ppc_r0
, 2);
3886 ppc_mtctr (code
, ppc_r0
);
3888 ppc_ldptr (code
, ppc_r0
, 0, ppc_sp
);
3889 ppc_neg (code
, ppc_r12
, ppc_r12
);
3890 ppc_stptr_update_indexed (code
, ppc_r0
, ppc_sp
, ppc_r12
);
3892 /* FIXME: make this loop work in 8 byte
3893 increments on PPC64 */
3894 if (ins
->flags
& MONO_INST_INIT
) {
3895 /* adjust the dest reg by -4 so we can use stwu */
3896 /* we actually adjust -8 because we let the loop
3899 ppc_addi (code
, ins
->dreg
, ppc_sp
, (area_offset
- 8));
3900 ppc_li (code
, ppc_r12
, 0);
3901 zero_loop_start
= code
;
3902 ppc_stwu (code
, ppc_r12
, 4, ins
->dreg
);
3903 zero_loop_jump
= code
;
3904 ppc_bc (code
, PPC_BR_DEC_CTR_NONZERO
, 0, 0);
3905 ppc_patch (zero_loop_jump
, zero_loop_start
);
3907 ppc_addi (code
, ins
->dreg
, ppc_sp
, area_offset
);
3912 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3913 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
3914 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3915 ppc_load_func (code
, PPC_CALL_REG
, 0);
3916 ppc_mtlr (code
, PPC_CALL_REG
);
3925 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3926 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
3927 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
3928 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3929 ppc_load_func (code
, PPC_CALL_REG
, 0);
3930 ppc_mtlr (code
, PPC_CALL_REG
);
3937 case OP_START_HANDLER
: {
3938 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3939 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3940 code
= emit_reserve_param_area (cfg
, code
);
3941 ppc_mflr (code
, ppc_r0
);
3942 if (ppc_is_imm16 (spvar
->inst_offset
)) {
3943 ppc_stptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3945 ppc_load (code
, ppc_r12
, spvar
->inst_offset
);
3946 ppc_stptr_indexed (code
, ppc_r0
, ppc_r12
, spvar
->inst_basereg
);
3950 case OP_ENDFILTER
: {
3951 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3952 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3953 code
= emit_unreserve_param_area (cfg
, code
);
3954 if (ins
->sreg1
!= ppc_r3
)
3955 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3956 if (ppc_is_imm16 (spvar
->inst_offset
)) {
3957 ppc_ldptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3959 ppc_load (code
, ppc_r12
, spvar
->inst_offset
);
3960 ppc_ldptr_indexed (code
, ppc_r0
, spvar
->inst_basereg
, ppc_r12
);
3962 ppc_mtlr (code
, ppc_r0
);
3966 case OP_ENDFINALLY
: {
3967 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3968 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3969 code
= emit_unreserve_param_area (cfg
, code
);
3970 ppc_ldptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3971 ppc_mtlr (code
, ppc_r0
);
3975 case OP_CALL_HANDLER
:
3976 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3978 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
3979 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
3982 ins
->inst_c0
= code
- cfg
->native_code
;
3985 /*if (ins->inst_target_bb->native_offset) {
3987 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3989 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3994 ppc_mtctr (code
, ins
->sreg1
);
3995 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
3998 ppc_li (code
, ins
->dreg
, 0);
3999 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_EQ
, 2);
4000 ppc_li (code
, ins
->dreg
, 1);
4004 CASE_PPC64 (OP_LCEQ
)
4005 ppc_li (code
, ins
->dreg
, 0);
4006 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 2);
4007 ppc_li (code
, ins
->dreg
, 1);
4013 CASE_PPC64 (OP_LCLT
)
4014 CASE_PPC64 (OP_LCLT_UN
)
4015 ppc_li (code
, ins
->dreg
, 1);
4016 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 2);
4017 ppc_li (code
, ins
->dreg
, 0);
4021 ppc_li (code
, ins
->dreg
, 1);
4022 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_LT
, 2);
4023 ppc_li (code
, ins
->dreg
, 0);
4029 CASE_PPC64 (OP_LCGT
)
4030 CASE_PPC64 (OP_LCGT_UN
)
4031 ppc_li (code
, ins
->dreg
, 1);
4032 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_GT
, 2);
4033 ppc_li (code
, ins
->dreg
, 0);
4037 ppc_li (code
, ins
->dreg
, 1);
4038 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_GT
, 2);
4039 ppc_li (code
, ins
->dreg
, 0);
4041 case OP_COND_EXC_EQ
:
4042 case OP_COND_EXC_NE_UN
:
4043 case OP_COND_EXC_LT
:
4044 case OP_COND_EXC_LT_UN
:
4045 case OP_COND_EXC_GT
:
4046 case OP_COND_EXC_GT_UN
:
4047 case OP_COND_EXC_GE
:
4048 case OP_COND_EXC_GE_UN
:
4049 case OP_COND_EXC_LE
:
4050 case OP_COND_EXC_LE_UN
:
4051 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
4053 case OP_COND_EXC_IEQ
:
4054 case OP_COND_EXC_INE_UN
:
4055 case OP_COND_EXC_ILT
:
4056 case OP_COND_EXC_ILT_UN
:
4057 case OP_COND_EXC_IGT
:
4058 case OP_COND_EXC_IGT_UN
:
4059 case OP_COND_EXC_IGE
:
4060 case OP_COND_EXC_IGE_UN
:
4061 case OP_COND_EXC_ILE
:
4062 case OP_COND_EXC_ILE_UN
:
4063 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
4075 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
4078 /* floating point opcodes */
4080 g_assert (cfg
->compile_aot
);
4082 /* FIXME: Optimize this */
4084 ppc_mflr (code
, ppc_r12
);
4086 *(double*)code
= *(double*)ins
->inst_p0
;
4088 ppc_lfd (code
, ins
->dreg
, 8, ppc_r12
);
4091 g_assert_not_reached ();
4093 case OP_STORER8_MEMBASE_REG
:
4094 if (ppc_is_imm16 (ins
->inst_offset
)) {
4095 ppc_stfd (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4097 if (ppc_is_imm32 (ins
->inst_offset
)) {
4098 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4099 ppc_stfd (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
4101 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4102 ppc_stfdx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4106 case OP_LOADR8_MEMBASE
:
4107 if (ppc_is_imm16 (ins
->inst_offset
)) {
4108 ppc_lfd (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
4110 if (ppc_is_imm32 (ins
->inst_offset
)) {
4111 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4112 ppc_lfd (code
, ins
->dreg
, ins
->inst_offset
, ppc_r11
);
4114 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4115 ppc_lfdx (code
, ins
->dreg
, ins
->inst_destbasereg
, ppc_r0
);
4119 case OP_STORER4_MEMBASE_REG
:
4120 ppc_frsp (code
, ins
->sreg1
, ins
->sreg1
);
4121 if (ppc_is_imm16 (ins
->inst_offset
)) {
4122 ppc_stfs (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4124 if (ppc_is_imm32 (ins
->inst_offset
)) {
4125 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4126 ppc_stfs (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
4128 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4129 ppc_stfsx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4133 case OP_LOADR4_MEMBASE
:
4134 if (ppc_is_imm16 (ins
->inst_offset
)) {
4135 ppc_lfs (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
4137 if (ppc_is_imm32 (ins
->inst_offset
)) {
4138 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4139 ppc_lfs (code
, ins
->dreg
, ins
->inst_offset
, ppc_r11
);
4141 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4142 ppc_lfsx (code
, ins
->dreg
, ins
->inst_destbasereg
, ppc_r0
);
4146 case OP_LOADR4_MEMINDEX
:
4147 ppc_lfsx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4149 case OP_LOADR8_MEMINDEX
:
4150 ppc_lfdx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4152 case OP_STORER4_MEMINDEX
:
4153 ppc_frsp (code
, ins
->sreg1
, ins
->sreg1
);
4154 ppc_stfsx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4156 case OP_STORER8_MEMINDEX
:
4157 ppc_stfdx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4160 case CEE_CONV_R4
: /* FIXME: change precision */
4162 g_assert_not_reached ();
4163 case OP_FCONV_TO_I1
:
4164 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
4166 case OP_FCONV_TO_U1
:
4167 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
4169 case OP_FCONV_TO_I2
:
4170 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
4172 case OP_FCONV_TO_U2
:
4173 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
4175 case OP_FCONV_TO_I4
:
4177 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
4179 case OP_FCONV_TO_U4
:
4181 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
4183 case OP_LCONV_TO_R_UN
:
4184 g_assert_not_reached ();
4185 /* Implemented as helper calls */
4187 case OP_LCONV_TO_OVF_I4_2
:
4188 case OP_LCONV_TO_OVF_I
: {
4189 #ifdef __mono_ppc64__
4192 guint8
*negative_branch
, *msword_positive_branch
, *msword_negative_branch
, *ovf_ex_target
;
4193 // Check if its negative
4194 ppc_cmpi (code
, 0, 0, ins
->sreg1
, 0);
4195 negative_branch
= code
;
4196 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 0);
4197 // Its positive msword == 0
4198 ppc_cmpi (code
, 0, 0, ins
->sreg2
, 0);
4199 msword_positive_branch
= code
;
4200 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_EQ
, 0);
4202 ovf_ex_target
= code
;
4203 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS
, 0, "OverflowException");
4205 ppc_patch (negative_branch
, code
);
4206 ppc_cmpi (code
, 0, 0, ins
->sreg2
, -1);
4207 msword_negative_branch
= code
;
4208 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4209 ppc_patch (msword_negative_branch
, ovf_ex_target
);
4211 ppc_patch (msword_positive_branch
, code
);
4212 if (ins
->dreg
!= ins
->sreg1
)
4213 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
4218 ppc_frind (code
, ins
->dreg
, ins
->sreg1
);
4221 ppc_frizd (code
, ins
->dreg
, ins
->sreg1
);
4224 ppc_fripd (code
, ins
->dreg
, ins
->sreg1
);
4227 ppc_frimd (code
, ins
->dreg
, ins
->sreg1
);
4230 ppc_fabsd (code
, ins
->dreg
, ins
->sreg1
);
4233 ppc_fsqrtsd (code
, ins
->dreg
, ins
->sreg1
);
4236 ppc_fsqrtd (code
, ins
->dreg
, ins
->sreg1
);
4239 ppc_fadd (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4242 ppc_fsub (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4245 ppc_fmul (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4248 ppc_fdiv (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4251 ppc_fneg (code
, ins
->dreg
, ins
->sreg1
);
4255 g_assert_not_reached ();
4257 /* These min/max require POWER5 */
4259 ppc_cmp (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4260 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4263 ppc_cmpl (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4264 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4267 ppc_cmp (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4268 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4271 ppc_cmpl (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4272 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4274 CASE_PPC64 (OP_LMIN
)
4275 ppc_cmp (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4276 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4278 CASE_PPC64 (OP_LMIN_UN
)
4279 ppc_cmpl (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4280 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4282 CASE_PPC64 (OP_LMAX
)
4283 ppc_cmp (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4284 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4286 CASE_PPC64 (OP_LMAX_UN
)
4287 ppc_cmpl (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4288 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4291 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4295 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4296 ppc_li (code
, ins
->dreg
, 1);
4297 ppc_bc (code
, ins
->opcode
== OP_FCEQ
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_EQ
, 2);
4298 ppc_li (code
, ins
->dreg
, 0);
4302 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4303 ppc_li (code
, ins
->dreg
, 1);
4304 ppc_bc (code
, ins
->opcode
== OP_FCLT
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_LT
, 2);
4305 ppc_li (code
, ins
->dreg
, 0);
4308 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4309 ppc_li (code
, ins
->dreg
, 1);
4310 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 3);
4311 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 2);
4312 ppc_li (code
, ins
->dreg
, 0);
4316 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4317 ppc_li (code
, ins
->dreg
, 1);
4318 ppc_bc (code
, ins
->opcode
== OP_FCGT
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_GT
, 2);
4319 ppc_li (code
, ins
->dreg
, 0);
4322 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4323 ppc_li (code
, ins
->dreg
, 1);
4324 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 3);
4325 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_GT
, 2);
4326 ppc_li (code
, ins
->dreg
, 0);
4329 EMIT_COND_BRANCH (ins
, CEE_BEQ
- CEE_BEQ
);
4332 EMIT_COND_BRANCH (ins
, CEE_BNE_UN
- CEE_BEQ
);
4335 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4336 EMIT_COND_BRANCH (ins
, CEE_BLT
- CEE_BEQ
);
4339 EMIT_COND_BRANCH_FLAGS (ins
, PPC_BR_TRUE
, PPC_BR_SO
);
4340 EMIT_COND_BRANCH (ins
, CEE_BLT_UN
- CEE_BEQ
);
4343 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4344 EMIT_COND_BRANCH (ins
, CEE_BGT
- CEE_BEQ
);
4347 EMIT_COND_BRANCH_FLAGS (ins
, PPC_BR_TRUE
, PPC_BR_SO
);
4348 EMIT_COND_BRANCH (ins
, CEE_BGT_UN
- CEE_BEQ
);
4351 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4352 EMIT_COND_BRANCH (ins
, CEE_BGE
- CEE_BEQ
);
4355 EMIT_COND_BRANCH (ins
, CEE_BGE_UN
- CEE_BEQ
);
4358 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4359 EMIT_COND_BRANCH (ins
, CEE_BLE
- CEE_BEQ
);
4362 EMIT_COND_BRANCH (ins
, CEE_BLE_UN
- CEE_BEQ
);
4365 g_assert_not_reached ();
4366 case OP_PPC_CHECK_FINITE
: {
4367 ppc_rlwinm (code
, ins
->sreg1
, ins
->sreg1
, 0, 1, 31);
4368 ppc_addis (code
, ins
->sreg1
, ins
->sreg1
, -32752);
4369 ppc_rlwinmd (code
, ins
->sreg1
, ins
->sreg1
, 1, 31, 31);
4370 EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ
- CEE_BEQ
, "ArithmeticException");
4373 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_c1
, ins
->inst_p0
);
4374 #ifdef __mono_ppc64__
4375 ppc_load_sequence (code
, ins
->dreg
, (guint64
)0x0f0f0f0f0f0f0f0fLL
);
4377 ppc_load_sequence (code
, ins
->dreg
, (gulong
)0x0f0f0f0fL
);
4382 #ifdef __mono_ppc64__
4383 case OP_ICONV_TO_I4
:
4385 ppc_extsw (code
, ins
->dreg
, ins
->sreg1
);
4387 case OP_ICONV_TO_U4
:
4389 ppc_clrldi (code
, ins
->dreg
, ins
->sreg1
, 32);
4391 case OP_ICONV_TO_R4
:
4392 case OP_ICONV_TO_R8
:
4393 case OP_LCONV_TO_R4
:
4394 case OP_LCONV_TO_R8
: {
4396 if (ins
->opcode
== OP_ICONV_TO_R4
|| ins
->opcode
== OP_ICONV_TO_R8
) {
4397 ppc_extsw (code
, ppc_r0
, ins
->sreg1
);
4402 if (cpu_hw_caps
& PPC_MOVE_FPR_GPR
) {
4403 ppc_mffgpr (code
, ins
->dreg
, tmp
);
4405 ppc_str (code
, tmp
, -8, ppc_r1
);
4406 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
4408 ppc_fcfid (code
, ins
->dreg
, ins
->dreg
);
4409 if (ins
->opcode
== OP_ICONV_TO_R4
|| ins
->opcode
== OP_LCONV_TO_R4
)
4410 ppc_frsp (code
, ins
->dreg
, ins
->dreg
);
4414 ppc_srad (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4417 ppc_srd (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4420 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
4422 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
4423 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1 << 13)); /* CA */
4424 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, ins
->inst_p1
);
4426 case OP_COND_EXC_OV
:
4427 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
4428 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1 << 14)); /* OV */
4429 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, ins
->inst_p1
);
4441 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_LBEQ
);
4443 case OP_FCONV_TO_I8
:
4444 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 8, TRUE
);
4446 case OP_FCONV_TO_U8
:
4447 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 8, FALSE
);
4449 case OP_STOREI4_MEMBASE_REG
:
4450 if (ppc_is_imm16 (ins
->inst_offset
)) {
4451 ppc_stw (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4453 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4454 ppc_stwx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4457 case OP_STOREI4_MEMINDEX
:
4458 ppc_stwx (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_destbasereg
);
4461 ppc_srawi (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4463 case OP_ISHR_UN_IMM
:
4464 if (ins
->inst_imm
& 0x1f)
4465 ppc_srwi (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4467 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
4470 case OP_ICONV_TO_R4
:
4471 case OP_ICONV_TO_R8
: {
4472 if (cpu_hw_caps
& PPC_ISA_64
) {
4473 ppc_srawi(code
, ppc_r0
, ins
->sreg1
, 31);
4474 ppc_stw (code
, ppc_r0
, -8, ppc_r1
);
4475 ppc_stw (code
, ins
->sreg1
, -4, ppc_r1
);
4476 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
4477 ppc_fcfid (code
, ins
->dreg
, ins
->dreg
);
4478 if (ins
->opcode
== OP_ICONV_TO_R4
)
4479 ppc_frsp (code
, ins
->dreg
, ins
->dreg
);
4485 case OP_ATOMIC_ADD_I4
:
4486 CASE_PPC64 (OP_ATOMIC_ADD_I8
) {
4487 int location
= ins
->inst_basereg
;
4488 int addend
= ins
->sreg2
;
4489 guint8
*loop
, *branch
;
4490 g_assert (ins
->inst_offset
== 0);
4494 if (ins
->opcode
== OP_ATOMIC_ADD_I4
)
4495 ppc_lwarx (code
, ppc_r0
, 0, location
);
4496 #ifdef __mono_ppc64__
4498 ppc_ldarx (code
, ppc_r0
, 0, location
);
4501 ppc_add (code
, ppc_r0
, ppc_r0
, addend
);
4503 if (ins
->opcode
== OP_ATOMIC_ADD_I4
)
4504 ppc_stwcxd (code
, ppc_r0
, 0, location
);
4505 #ifdef __mono_ppc64__
4507 ppc_stdcxd (code
, ppc_r0
, 0, location
);
4511 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4512 ppc_patch (branch
, loop
);
4515 ppc_mr (code
, ins
->dreg
, ppc_r0
);
4518 case OP_ATOMIC_CAS_I4
:
4519 CASE_PPC64 (OP_ATOMIC_CAS_I8
) {
4520 int location
= ins
->sreg1
;
4521 int value
= ins
->sreg2
;
4522 int comparand
= ins
->sreg3
;
4523 guint8
*start
, *not_equal
, *lost_reservation
;
4527 if (ins
->opcode
== OP_ATOMIC_CAS_I4
)
4528 ppc_lwarx (code
, ppc_r0
, 0, location
);
4529 #ifdef __mono_ppc64__
4531 ppc_ldarx (code
, ppc_r0
, 0, location
);
4534 ppc_cmp (code
, 0, ins
->opcode
== OP_ATOMIC_CAS_I4
? 0 : 1, ppc_r0
, comparand
);
4536 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4538 if (ins
->opcode
== OP_ATOMIC_CAS_I4
)
4539 ppc_stwcxd (code
, value
, 0, location
);
4540 #ifdef __mono_ppc64__
4542 ppc_stdcxd (code
, value
, 0, location
);
4545 lost_reservation
= code
;
4546 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4547 ppc_patch (lost_reservation
, start
);
4548 ppc_patch (not_equal
, code
);
4551 ppc_mr (code
, ins
->dreg
, ppc_r0
);
4554 case OP_LIVERANGE_START
: {
4555 if (cfg
->verbose_level
> 1)
4556 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4557 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
4560 case OP_LIVERANGE_END
: {
4561 if (cfg
->verbose_level
> 1)
4562 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4563 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
4566 case OP_GC_SAFE_POINT
:
4570 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
4571 g_assert_not_reached ();
4574 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
4575 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4576 mono_inst_name (ins
->opcode
), max_len
, (glong
)(code
- cfg
->native_code
- offset
));
4577 g_assert_not_reached ();
4585 set_code_cursor (cfg
, code
);
4587 #endif /* !DISABLE_JIT */
4590 mono_arch_register_lowlevel_calls (void)
4592 /* The signature doesn't matter */
4593 mono_register_jit_icall (mono_ppc_throw_exception
, mono_icall_sig_void
, TRUE
);
4596 #ifdef __mono_ppc64__
4597 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4598 #define patch_load_sequence(ip,val) do {\
4599 guint16 *__load = (guint16*)(ip); \
4600 g_assert (sizeof (val) == sizeof (gsize)); \
4601 __load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
4602 __load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
4603 __load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
4604 __load [8] = ((guint64)(gsize)(val)) & 0xffff; \
4606 #elif G_BYTE_ORDER == G_BIG_ENDIAN
4607 #define patch_load_sequence(ip,val) do {\
4608 guint16 *__load = (guint16*)(ip); \
4609 g_assert (sizeof (val) == sizeof (gsize)); \
4610 __load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
4611 __load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
4612 __load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
4613 __load [9] = ((guint64)(gsize)(val)) & 0xffff; \
4616 #error huh? No endianess defined by compiler
4619 #define patch_load_sequence(ip,val) do {\
4620 guint16 *__lis_ori = (guint16*)(ip); \
4621 __lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \
4622 __lis_ori [3] = ((gulong)(val)) & 0xffff; \
4628 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
4630 unsigned char *ip
= ji
->ip
.i
+ code
;
4631 gboolean is_fd
= FALSE
;
4634 case MONO_PATCH_INFO_IP
:
4635 patch_load_sequence (ip
, ip
);
4637 case MONO_PATCH_INFO_SWITCH
: {
4638 gpointer
*table
= (gpointer
*)ji
->data
.table
->table
;
4641 patch_load_sequence (ip
, table
);
4643 for (i
= 0; i
< ji
->data
.table
->table_size
; i
++) {
4644 table
[i
] = (glong
)ji
->data
.table
->table
[i
] + code
;
4646 /* we put into the table the absolute address, no need for ppc_patch in this case */
4649 case MONO_PATCH_INFO_METHODCONST
:
4650 case MONO_PATCH_INFO_CLASS
:
4651 case MONO_PATCH_INFO_IMAGE
:
4652 case MONO_PATCH_INFO_FIELD
:
4653 case MONO_PATCH_INFO_VTABLE
:
4654 case MONO_PATCH_INFO_IID
:
4655 case MONO_PATCH_INFO_SFLDA
:
4656 case MONO_PATCH_INFO_LDSTR
:
4657 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
4658 case MONO_PATCH_INFO_LDTOKEN
:
4659 /* from OP_AOTCONST : lis + ori */
4660 patch_load_sequence (ip
, target
);
4662 case MONO_PATCH_INFO_R4
:
4663 case MONO_PATCH_INFO_R8
:
4664 g_assert_not_reached ();
4665 *((gconstpointer
*)(ip
+ 2)) = ji
->data
.target
;
4667 case MONO_PATCH_INFO_EXC_NAME
:
4668 g_assert_not_reached ();
4669 *((gconstpointer
*)(ip
+ 1)) = ji
->data
.name
;
4671 case MONO_PATCH_INFO_NONE
:
4672 case MONO_PATCH_INFO_BB_OVF
:
4673 case MONO_PATCH_INFO_EXC_OVF
:
4674 /* everything is dealt with at epilog output time */
4676 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
4677 case MONO_PATCH_INFO_JIT_ICALL_ID
:
4678 case MONO_PATCH_INFO_ABS
:
4679 case MONO_PATCH_INFO_RGCTX_FETCH
:
4680 case MONO_PATCH_INFO_JIT_ICALL_ADDR
:
4681 case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR
:
4686 ppc_patch_full (cfg
, domain
, ip
, target
, is_fd
);
4692 * Emit code to save the registers in used_int_regs or the registers in the MonoLMF
4693 * structure at positive offset pos from register base_reg. pos is guaranteed to fit into
4694 * the instruction offset immediate for all the registers.
4697 save_registers (MonoCompile
*cfg
, guint8
* code
, int pos
, int base_reg
, gboolean save_lmf
, guint32 used_int_regs
, int cfa_offset
)
4701 for (i
= 13; i
<= 31; i
++) {
4702 if (used_int_regs
& (1 << i
)) {
4703 ppc_str (code
, i
, pos
, base_reg
);
4704 mono_emit_unwind_op_offset (cfg
, code
, i
, pos
- cfa_offset
);
4705 pos
+= sizeof (target_mgreg_t
);
4709 /* pos is the start of the MonoLMF structure */
4710 int offset
= pos
+ G_STRUCT_OFFSET (MonoLMF
, iregs
);
4711 for (i
= 13; i
<= 31; i
++) {
4712 ppc_str (code
, i
, offset
, base_reg
);
4713 mono_emit_unwind_op_offset (cfg
, code
, i
, offset
- cfa_offset
);
4714 offset
+= sizeof (target_mgreg_t
);
4716 offset
= pos
+ G_STRUCT_OFFSET (MonoLMF
, fregs
);
4717 for (i
= 14; i
< 32; i
++) {
4718 ppc_stfd (code
, i
, offset
, base_reg
);
4719 offset
+= sizeof (gdouble
);
4726 * Stack frame layout:
4728 * ------------------- sp
4729 * MonoLMF structure or saved registers
4730 * -------------------
4732 * -------------------
4734 * -------------------
4735 * param area size is cfg->param_area
4736 * -------------------
4737 * linkage area size is PPC_STACK_PARAM_OFFSET
4738 * ------------------- sp
4742 mono_arch_emit_prolog (MonoCompile
*cfg
)
4744 MonoMethod
*method
= cfg
->method
;
4746 MonoMethodSignature
*sig
;
4748 long alloc_size
, pos
, max_offset
, cfa_offset
;
4753 int tailcall_struct_index
;
4755 sig
= mono_method_signature_internal (method
);
4756 cfg
->code_size
= 512 + sig
->param_count
* 32;
4757 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
4761 /* We currently emit unwind info for aot, but don't use it */
4762 mono_emit_unwind_op_def_cfa (cfg
, code
, ppc_r1
, 0);
4764 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
4765 ppc_mflr (code
, ppc_r0
);
4766 ppc_str (code
, ppc_r0
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
4767 mono_emit_unwind_op_offset (cfg
, code
, ppc_lr
, PPC_RET_ADDR_OFFSET
);
4770 alloc_size
= cfg
->stack_offset
;
4773 if (!method
->save_lmf
) {
4774 for (i
= 31; i
>= 13; --i
) {
4775 if (cfg
->used_int_regs
& (1 << i
)) {
4776 pos
+= sizeof (target_mgreg_t
);
4780 pos
+= sizeof (MonoLMF
);
4784 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4785 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
4786 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
4787 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
4790 cfg
->stack_usage
= alloc_size
;
4791 g_assert ((alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
-1)) == 0);
4793 if (ppc_is_imm16 (-alloc_size
)) {
4794 ppc_str_update (code
, ppc_sp
, -alloc_size
, ppc_sp
);
4795 cfa_offset
= alloc_size
;
4796 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, alloc_size
);
4797 code
= save_registers (cfg
, code
, alloc_size
- pos
, ppc_sp
, method
->save_lmf
, cfg
->used_int_regs
, cfa_offset
);
4800 ppc_addi (code
, ppc_r12
, ppc_sp
, -pos
);
4801 ppc_load (code
, ppc_r0
, -alloc_size
);
4802 ppc_str_update_indexed (code
, ppc_sp
, ppc_sp
, ppc_r0
);
4803 cfa_offset
= alloc_size
;
4804 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, alloc_size
);
4805 code
= save_registers (cfg
, code
, 0, ppc_r12
, method
->save_lmf
, cfg
->used_int_regs
, cfa_offset
);
4808 if (cfg
->frame_reg
!= ppc_sp
) {
4809 ppc_mr (code
, cfg
->frame_reg
, ppc_sp
);
4810 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
4813 /* store runtime generic context */
4814 if (cfg
->rgctx_var
) {
4815 g_assert (cfg
->rgctx_var
->opcode
== OP_REGOFFSET
&&
4816 (cfg
->rgctx_var
->inst_basereg
== ppc_r1
|| cfg
->rgctx_var
->inst_basereg
== ppc_r31
));
4818 ppc_stptr (code
, MONO_ARCH_RGCTX_REG
, cfg
->rgctx_var
->inst_offset
, cfg
->rgctx_var
->inst_basereg
);
4821 /* compute max_offset in order to use short forward jumps
4822 * we always do it on ppc because the immediate displacement
4823 * for jumps is too small
4826 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
4828 bb
->max_offset
= max_offset
;
4830 MONO_BB_FOR_EACH_INS (bb
, ins
)
4831 max_offset
+= ins_get_size (ins
->opcode
);
4834 /* load arguments allocated to register from the stack */
4837 cinfo
= get_call_info (sig
);
4839 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
4840 ArgInfo
*ainfo
= &cinfo
->ret
;
4842 inst
= cfg
->vret_addr
;
4845 if (ppc_is_imm16 (inst
->inst_offset
)) {
4846 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4848 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4849 ppc_stptr_indexed (code
, ainfo
->reg
, ppc_r12
, inst
->inst_basereg
);
4853 tailcall_struct_index
= 0;
4854 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4855 ArgInfo
*ainfo
= cinfo
->args
+ i
;
4856 inst
= cfg
->args
[pos
];
4858 if (cfg
->verbose_level
> 2)
4859 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->regtype
);
4860 if (inst
->opcode
== OP_REGVAR
) {
4861 if (ainfo
->regtype
== RegTypeGeneral
)
4862 ppc_mr (code
, inst
->dreg
, ainfo
->reg
);
4863 else if (ainfo
->regtype
== RegTypeFP
)
4864 ppc_fmr (code
, inst
->dreg
, ainfo
->reg
);
4865 else if (ainfo
->regtype
== RegTypeBase
) {
4866 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
4867 ppc_ldptr (code
, inst
->dreg
, ainfo
->offset
, ppc_r12
);
4869 g_assert_not_reached ();
4871 if (cfg
->verbose_level
> 2)
4872 g_print ("Argument %ld assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
4874 /* the argument should be put on the stack: FIXME handle size != word */
4875 if (ainfo
->regtype
== RegTypeGeneral
) {
4876 switch (ainfo
->size
) {
4878 if (ppc_is_imm16 (inst
->inst_offset
)) {
4879 ppc_stb (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4881 if (ppc_is_imm32 (inst
->inst_offset
)) {
4882 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4883 ppc_stb (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4885 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4886 ppc_stbx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4891 if (ppc_is_imm16 (inst
->inst_offset
)) {
4892 ppc_sth (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4894 if (ppc_is_imm32 (inst
->inst_offset
)) {
4895 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4896 ppc_sth (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4898 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4899 ppc_sthx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4903 #ifdef __mono_ppc64__
4905 if (ppc_is_imm16 (inst
->inst_offset
)) {
4906 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4908 if (ppc_is_imm32 (inst
->inst_offset
)) {
4909 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4910 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4912 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4913 ppc_stwx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4918 if (ppc_is_imm16 (inst
->inst_offset
)) {
4919 ppc_str (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4921 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4922 ppc_str_indexed (code
, ainfo
->reg
, ppc_r12
, inst
->inst_basereg
);
4927 if (ppc_is_imm16 (inst
->inst_offset
+ 4)) {
4928 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4929 ppc_stw (code
, ainfo
->reg
+ 1, inst
->inst_offset
+ 4, inst
->inst_basereg
);
4931 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4932 ppc_addi (code
, ppc_r12
, ppc_r12
, inst
->inst_offset
);
4933 ppc_stw (code
, ainfo
->reg
, 0, ppc_r12
);
4934 ppc_stw (code
, ainfo
->reg
+ 1, 4, ppc_r12
);
4939 if (ppc_is_imm16 (inst
->inst_offset
)) {
4940 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4942 if (ppc_is_imm32 (inst
->inst_offset
)) {
4943 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4944 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4946 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4947 ppc_stptr_indexed (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4952 } else if (ainfo
->regtype
== RegTypeBase
) {
4953 g_assert (ppc_is_imm16 (ainfo
->offset
));
4954 /* load the previous stack pointer in r12 */
4955 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
4956 ppc_ldptr (code
, ppc_r0
, ainfo
->offset
, ppc_r12
);
4957 switch (ainfo
->size
) {
4959 if (ppc_is_imm16 (inst
->inst_offset
)) {
4960 ppc_stb (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4962 if (ppc_is_imm32 (inst
->inst_offset
)) {
4963 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4964 ppc_stb (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4966 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4967 ppc_stbx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
4972 if (ppc_is_imm16 (inst
->inst_offset
)) {
4973 ppc_sth (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4975 if (ppc_is_imm32 (inst
->inst_offset
)) {
4976 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4977 ppc_sth (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4979 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4980 ppc_sthx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
4984 #ifdef __mono_ppc64__
4986 if (ppc_is_imm16 (inst
->inst_offset
)) {
4987 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4989 if (ppc_is_imm32 (inst
->inst_offset
)) {
4990 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4991 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4993 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4994 ppc_stwx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
4999 if (ppc_is_imm16 (inst
->inst_offset
)) {
5000 ppc_str (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
5002 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
5003 ppc_str_indexed (code
, ppc_r0
, ppc_r12
, inst
->inst_basereg
);
5008 g_assert (ppc_is_imm16 (ainfo
->offset
+ 4));
5009 if (ppc_is_imm16 (inst
->inst_offset
+ 4)) {
5010 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
5011 ppc_lwz (code
, ppc_r0
, ainfo
->offset
+ 4, ppc_r12
);
5012 ppc_stw (code
, ppc_r0
, inst
->inst_offset
+ 4, inst
->inst_basereg
);
5014 /* use r11 to load the 2nd half of the long before we clobber r12. */
5015 ppc_lwz (code
, ppc_r11
, ainfo
->offset
+ 4, ppc_r12
);
5016 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
5017 ppc_addi (code
, ppc_r12
, ppc_r12
, inst
->inst_offset
);
5018 ppc_stw (code
, ppc_r0
, 0, ppc_r12
);
5019 ppc_stw (code
, ppc_r11
, 4, ppc_r12
);
5024 if (ppc_is_imm16 (inst
->inst_offset
)) {
5025 ppc_stptr (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
5027 if (ppc_is_imm32 (inst
->inst_offset
)) {
5028 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
5029 ppc_stptr (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
5031 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
5032 ppc_stptr_indexed (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
5037 } else if (ainfo
->regtype
== RegTypeFP
) {
5038 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5039 if (ainfo
->size
== 8)
5040 ppc_stfd (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
5041 else if (ainfo
->size
== 4)
5042 ppc_stfs (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
5044 g_assert_not_reached ();
5045 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
5046 int doffset
= inst
->inst_offset
;
5050 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5051 g_assert (ppc_is_imm16 (inst
->inst_offset
+ ainfo
->vtregs
* sizeof (target_mgreg_t
)));
5052 /* FIXME: what if there is no class? */
5053 if (sig
->pinvoke
&& mono_class_from_mono_type_internal (inst
->inst_vtype
))
5054 size
= mono_class_native_size (mono_class_from_mono_type_internal (inst
->inst_vtype
), NULL
);
5055 for (cur_reg
= 0; cur_reg
< ainfo
->vtregs
; ++cur_reg
) {
5056 if (ainfo
->size
== 4) {
5057 ppc_stfs (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5059 ppc_stfd (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5061 soffset
+= ainfo
->size
;
5062 doffset
+= ainfo
->size
;
5064 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
5065 int doffset
= inst
->inst_offset
;
5069 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5070 g_assert (ppc_is_imm16 (inst
->inst_offset
+ ainfo
->vtregs
* sizeof (target_mgreg_t
)));
5071 /* FIXME: what if there is no class? */
5072 if (sig
->pinvoke
&& mono_class_from_mono_type_internal (inst
->inst_vtype
))
5073 size
= mono_class_native_size (mono_class_from_mono_type_internal (inst
->inst_vtype
), NULL
);
5074 for (cur_reg
= 0; cur_reg
< ainfo
->vtregs
; ++cur_reg
) {
5077 * Darwin handles 1 and 2 byte
5078 * structs specially by
5079 * loading h/b into the arg
5080 * register. Only done for
5084 ppc_sth (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5086 ppc_stb (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5090 #ifdef __mono_ppc64__
5092 g_assert (cur_reg
== 0);
5093 #if G_BYTE_ORDER == G_BIG_ENDIAN
5094 ppc_sldi (code
, ppc_r0
, ainfo
->reg
,
5095 (sizeof (target_mgreg_t
) - ainfo
->bytes
) * 8);
5096 ppc_stptr (code
, ppc_r0
, doffset
, inst
->inst_basereg
);
5098 if (mono_class_native_size (inst
->klass
, NULL
) == 1) {
5099 ppc_stb (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5100 } else if (mono_class_native_size (inst
->klass
, NULL
) == 2) {
5101 ppc_sth (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5102 } else if (mono_class_native_size (inst
->klass
, NULL
) == 4) { // WDS -- maybe <=4?
5103 ppc_stw (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5105 ppc_stptr (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
); // WDS -- Better way?
5111 ppc_stptr (code
, ainfo
->reg
+ cur_reg
, doffset
,
5112 inst
->inst_basereg
);
5115 soffset
+= sizeof (target_mgreg_t
);
5116 doffset
+= sizeof (target_mgreg_t
);
5118 if (ainfo
->vtsize
) {
5119 /* FIXME: we need to do the shifting here, too */
5122 /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */
5123 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
5124 if ((size
& MONO_PPC_32_64_CASE (3, 7)) != 0) {
5125 code
= emit_memcpy (code
, size
- soffset
,
5126 inst
->inst_basereg
, doffset
,
5127 ppc_r12
, ainfo
->offset
+ soffset
);
5129 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (target_mgreg_t
),
5130 inst
->inst_basereg
, doffset
,
5131 ppc_r12
, ainfo
->offset
+ soffset
);
5134 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
5135 /* if it was originally a RegTypeBase */
5136 if (ainfo
->offset
) {
5137 /* load the previous stack pointer in r12 */
5138 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
5139 ppc_ldptr (code
, ppc_r12
, ainfo
->offset
, ppc_r12
);
5141 ppc_mr (code
, ppc_r12
, ainfo
->reg
);
5144 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5145 code
= emit_memcpy (code
, ainfo
->vtsize
, inst
->inst_basereg
, inst
->inst_offset
, ppc_r12
, 0);
5146 /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
5148 g_assert_not_reached ();
5153 if (method
->save_lmf
) {
5154 if (cfg
->compile_aot
) {
5155 /* Compute the got address which is needed by the PLT entry */
5156 code
= mono_arch_emit_load_got_addr (cfg
->native_code
, code
, cfg
, NULL
);
5158 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5159 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern
));
5160 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
5161 ppc_load_func (code
, PPC_CALL_REG
, 0);
5162 ppc_mtlr (code
, PPC_CALL_REG
);
5167 /* we build the MonoLMF structure on the stack - see mini-ppc.h */
5168 /* lmf_offset is the offset from the previous stack pointer,
5169 * alloc_size is the total stack space allocated, so the offset
5170 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
5171 * The pointer to the struct is put in ppc_r12 (new_lmf).
5172 * The callee-saved registers are already in the MonoLMF structure
5174 ppc_addi (code
, ppc_r12
, ppc_sp
, alloc_size
- lmf_offset
);
5175 /* ppc_r3 is the result from mono_get_lmf_addr () */
5176 ppc_stptr (code
, ppc_r3
, G_STRUCT_OFFSET(MonoLMF
, lmf_addr
), ppc_r12
);
5177 /* new_lmf->previous_lmf = *lmf_addr */
5178 ppc_ldptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r3
);
5179 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r12
);
5180 /* *(lmf_addr) = r12 */
5181 ppc_stptr (code
, ppc_r12
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r3
);
5182 /* save method info */
5183 if (cfg
->compile_aot
)
5185 ppc_load (code
, ppc_r0
, 0);
5187 ppc_load_ptr (code
, ppc_r0
, method
);
5188 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, method
), ppc_r12
);
5189 ppc_stptr (code
, ppc_sp
, G_STRUCT_OFFSET(MonoLMF
, ebp
), ppc_r12
);
5190 /* save the current IP */
5191 if (cfg
->compile_aot
) {
5193 ppc_mflr (code
, ppc_r0
);
5195 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_IP
, NULL
);
5196 #ifdef __mono_ppc64__
5197 ppc_load_sequence (code
, ppc_r0
, (guint64
)0x0101010101010101LL
);
5199 ppc_load_sequence (code
, ppc_r0
, (gulong
)0x01010101L
);
5202 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, eip
), ppc_r12
);
5205 set_code_cursor (cfg
, code
);
5212 mono_arch_emit_epilog (MonoCompile
*cfg
)
5214 MonoMethod
*method
= cfg
->method
;
5216 int max_epilog_size
= 16 + 20*4;
5219 if (cfg
->method
->save_lmf
)
5220 max_epilog_size
+= 128;
5222 code
= realloc_code (cfg
, max_epilog_size
);
5226 if (method
->save_lmf
) {
5228 pos
+= sizeof (MonoLMF
);
5230 /* save the frame reg in r8 */
5231 ppc_mr (code
, ppc_r8
, cfg
->frame_reg
);
5232 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
);
5233 /* r5 = previous_lmf */
5234 ppc_ldptr (code
, ppc_r5
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r12
);
5236 ppc_ldptr (code
, ppc_r6
, G_STRUCT_OFFSET(MonoLMF
, lmf_addr
), ppc_r12
);
5237 /* *(lmf_addr) = previous_lmf */
5238 ppc_stptr (code
, ppc_r5
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r6
);
5239 /* FIXME: speedup: there is no actual need to restore the registers if
5240 * we didn't actually change them (idea from Zoltan).
5243 ppc_ldr_multiple (code
, ppc_r13
, G_STRUCT_OFFSET(MonoLMF
, iregs
), ppc_r12
);
5245 /*for (i = 14; i < 32; i++) {
5246 ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12);
5248 g_assert (ppc_is_imm16 (cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
));
5249 /* use the saved copy of the frame reg in r8 */
5250 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
5251 ppc_ldr (code
, ppc_r0
, cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
, ppc_r8
);
5252 ppc_mtlr (code
, ppc_r0
);
5254 ppc_addic (code
, ppc_sp
, ppc_r8
, cfg
->stack_usage
);
5256 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
5257 long return_offset
= cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
;
5258 if (ppc_is_imm16 (return_offset
)) {
5259 ppc_ldr (code
, ppc_r0
, return_offset
, cfg
->frame_reg
);
5261 ppc_load (code
, ppc_r12
, return_offset
);
5262 ppc_ldr_indexed (code
, ppc_r0
, cfg
->frame_reg
, ppc_r12
);
5264 ppc_mtlr (code
, ppc_r0
);
5266 if (ppc_is_imm16 (cfg
->stack_usage
)) {
5267 int offset
= cfg
->stack_usage
;
5268 for (i
= 13; i
<= 31; i
++) {
5269 if (cfg
->used_int_regs
& (1 << i
))
5270 offset
-= sizeof (target_mgreg_t
);
5272 if (cfg
->frame_reg
!= ppc_sp
)
5273 ppc_mr (code
, ppc_r12
, cfg
->frame_reg
);
5274 /* note r31 (possibly the frame register) is restored last */
5275 for (i
= 13; i
<= 31; i
++) {
5276 if (cfg
->used_int_regs
& (1 << i
)) {
5277 ppc_ldr (code
, i
, offset
, cfg
->frame_reg
);
5278 offset
+= sizeof (target_mgreg_t
);
5281 if (cfg
->frame_reg
!= ppc_sp
)
5282 ppc_addi (code
, ppc_sp
, ppc_r12
, cfg
->stack_usage
);
5284 ppc_addi (code
, ppc_sp
, ppc_sp
, cfg
->stack_usage
);
5286 ppc_load32 (code
, ppc_r12
, cfg
->stack_usage
);
5287 if (cfg
->used_int_regs
) {
5288 ppc_add (code
, ppc_r12
, cfg
->frame_reg
, ppc_r12
);
5289 for (i
= 31; i
>= 13; --i
) {
5290 if (cfg
->used_int_regs
& (1 << i
)) {
5291 pos
+= sizeof (target_mgreg_t
);
5292 ppc_ldr (code
, i
, -pos
, ppc_r12
);
5295 ppc_mr (code
, ppc_sp
, ppc_r12
);
5297 ppc_add (code
, ppc_sp
, cfg
->frame_reg
, ppc_r12
);
5303 set_code_cursor (cfg
, code
);
5306 #endif /* ifndef DISABLE_JIT */
5308 /* remove once throw_exception_by_name is eliminated */
5310 exception_id_by_name (const char *name
)
5312 if (strcmp (name
, "IndexOutOfRangeException") == 0)
5313 return MONO_EXC_INDEX_OUT_OF_RANGE
;
5314 if (strcmp (name
, "OverflowException") == 0)
5315 return MONO_EXC_OVERFLOW
;
5316 if (strcmp (name
, "ArithmeticException") == 0)
5317 return MONO_EXC_ARITHMETIC
;
5318 if (strcmp (name
, "DivideByZeroException") == 0)
5319 return MONO_EXC_DIVIDE_BY_ZERO
;
5320 if (strcmp (name
, "InvalidCastException") == 0)
5321 return MONO_EXC_INVALID_CAST
;
5322 if (strcmp (name
, "NullReferenceException") == 0)
5323 return MONO_EXC_NULL_REF
;
5324 if (strcmp (name
, "ArrayTypeMismatchException") == 0)
5325 return MONO_EXC_ARRAY_TYPE_MISMATCH
;
5326 if (strcmp (name
, "ArgumentException") == 0)
5327 return MONO_EXC_ARGUMENT
;
5328 g_error ("Unknown intrinsic exception %s\n", name
);
5334 mono_arch_emit_exceptions (MonoCompile
*cfg
)
5336 MonoJumpInfo
*patch_info
;
5339 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
5340 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
5341 int max_epilog_size
= 50;
5343 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
5344 exc_throw_pos
[i
] = NULL
;
5345 exc_throw_found
[i
] = 0;
5348 /* count the number of exception infos */
5351 * make sure we have enough space for exceptions
5353 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5354 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
5355 i
= exception_id_by_name (patch_info
->data
.target
);
5356 if (!exc_throw_found
[i
]) {
5357 max_epilog_size
+= (2 * PPC_LOAD_SEQUENCE_LENGTH
) + 5 * 4;
5358 exc_throw_found
[i
] = TRUE
;
5360 } else if (patch_info
->type
== MONO_PATCH_INFO_BB_OVF
)
5361 max_epilog_size
+= 12;
5362 else if (patch_info
->type
== MONO_PATCH_INFO_EXC_OVF
) {
5363 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5364 i
= exception_id_by_name (ovfj
->data
.exception
);
5365 if (!exc_throw_found
[i
]) {
5366 max_epilog_size
+= (2 * PPC_LOAD_SEQUENCE_LENGTH
) + 5 * 4;
5367 exc_throw_found
[i
] = TRUE
;
5369 max_epilog_size
+= 8;
5373 code
= realloc_code (cfg
, max_epilog_size
);
5375 /* add code to raise exceptions */
5376 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5377 switch (patch_info
->type
) {
5378 case MONO_PATCH_INFO_BB_OVF
: {
5379 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5380 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5381 /* patch the initial jump */
5382 ppc_patch (ip
, code
);
5383 ppc_bc (code
, ovfj
->b0_cond
, ovfj
->b1_cond
, 2);
5385 ppc_patch (code
- 4, ip
+ 4); /* jump back after the initiali branch */
5386 /* jump back to the true target */
5388 ip
= ovfj
->data
.bb
->native_offset
+ cfg
->native_code
;
5389 ppc_patch (code
- 4, ip
);
5390 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5393 case MONO_PATCH_INFO_EXC_OVF
: {
5394 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5395 MonoJumpInfo
*newji
;
5396 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5397 unsigned char *bcl
= code
;
5398 /* patch the initial jump: we arrived here with a call */
5399 ppc_patch (ip
, code
);
5400 ppc_bc (code
, ovfj
->b0_cond
, ovfj
->b1_cond
, 0);
5402 ppc_patch (code
- 4, ip
+ 4); /* jump back after the initiali branch */
5403 /* patch the conditional jump to the right handler */
5404 /* make it processed next */
5405 newji
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfo
));
5406 newji
->type
= MONO_PATCH_INFO_EXC
;
5407 newji
->ip
.i
= bcl
- cfg
->native_code
;
5408 newji
->data
.target
= ovfj
->data
.exception
;
5409 newji
->next
= patch_info
->next
;
5410 patch_info
->next
= newji
;
5411 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5414 case MONO_PATCH_INFO_EXC
: {
5415 MonoClass
*exc_class
;
5417 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5418 i
= exception_id_by_name (patch_info
->data
.target
);
5419 if (exc_throw_pos
[i
] && !(ip
> exc_throw_pos
[i
] && ip
- exc_throw_pos
[i
] > 50000)) {
5420 ppc_patch (ip
, exc_throw_pos
[i
]);
5421 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5424 exc_throw_pos
[i
] = code
;
5427 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
5429 ppc_patch (ip
, code
);
5430 /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
5431 ppc_load (code
, ppc_r3
, m_class_get_type_token (exc_class
));
5432 /* we got here from a conditional call, so the calling ip is set in lr */
5433 ppc_mflr (code
, ppc_r4
);
5434 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
5435 patch_info
->data
.jit_icall_id
= MONO_JIT_ICALL_mono_arch_throw_corlib_exception
;
5436 patch_info
->ip
.i
= code
- cfg
->native_code
;
5437 if (FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) {
5438 ppc_load_func (code
, PPC_CALL_REG
, 0);
5439 ppc_mtctr (code
, PPC_CALL_REG
);
5440 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5452 set_code_cursor (cfg
, code
);
5458 try_offset_access (void *value
, guint32 idx
)
5460 register void* me
__asm__ ("r2");
5461 void ***p
= (void***)((char*)me
+ 284);
5462 int idx1
= idx
/ 32;
5463 int idx2
= idx
% 32;
5466 if (value
!= p
[idx1
][idx2
])
5473 mono_arch_finish_init (void)
/* per-item code size estimates (bytes) used when sizing IMT trampolines */
#define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
#define BR_SIZE 4
#define LOADSTORE_SIZE 4
#define JUMP_IMM_SIZE 12
#define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8)
#define ENABLE_WRONG_METHOD_CHECK 0
5485 * LOCKING: called with the domain lock held
5488 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
5489 gpointer fail_tramp
)
5493 guint8
*code
, *start
;
5495 for (i
= 0; i
< count
; ++i
) {
5496 MonoIMTCheckItem
*item
= imt_entries
[i
];
5497 if (item
->is_equals
) {
5498 if (item
->check_target_idx
) {
5499 if (!item
->compare_done
)
5500 item
->chunk_size
+= CMP_SIZE
;
5501 if (item
->has_target_code
)
5502 item
->chunk_size
+= BR_SIZE
+ JUMP_IMM32_SIZE
;
5504 item
->chunk_size
+= LOADSTORE_SIZE
+ BR_SIZE
+ JUMP_IMM_SIZE
;
5507 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
+ JUMP_IMM32_SIZE
* 2;
5508 if (!item
->has_target_code
)
5509 item
->chunk_size
+= LOADSTORE_SIZE
;
5511 item
->chunk_size
+= LOADSTORE_SIZE
+ JUMP_IMM_SIZE
;
5512 #if ENABLE_WRONG_METHOD_CHECK
5513 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
+ 4;
5518 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
;
5519 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
5521 size
+= item
->chunk_size
;
5523 /* the initial load of the vtable address */
5524 size
+= PPC_LOAD_SEQUENCE_LENGTH
+ LOADSTORE_SIZE
;
5526 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
);
5528 code
= mono_domain_code_reserve (domain
, size
);
5533 * We need to save and restore r12 because it might be
5534 * used by the caller as the vtable register, so
5535 * clobbering it will trip up the magic trampoline.
5537 * FIXME: Get rid of this by making sure that r12 is
5538 * not used as the vtable register in interface calls.
5540 ppc_stptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5541 ppc_load (code
, ppc_r12
, (gsize
)(& (vtable
->vtable
[0])));
5543 for (i
= 0; i
< count
; ++i
) {
5544 MonoIMTCheckItem
*item
= imt_entries
[i
];
5545 item
->code_target
= code
;
5546 if (item
->is_equals
) {
5547 if (item
->check_target_idx
) {
5548 if (!item
->compare_done
) {
5549 ppc_load (code
, ppc_r0
, (gsize
)item
->key
);
5550 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5552 item
->jmp_code
= code
;
5553 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5554 if (item
->has_target_code
) {
5555 ppc_load_ptr (code
, ppc_r0
, item
->value
.target_code
);
5557 ppc_ldptr (code
, ppc_r0
, (sizeof (target_mgreg_t
) * item
->value
.vtable_slot
), ppc_r12
);
5558 ppc_ldptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5560 ppc_mtctr (code
, ppc_r0
);
5561 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5564 ppc_load (code
, ppc_r0
, (gulong
)item
->key
);
5565 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5566 item
->jmp_code
= code
;
5567 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5568 if (item
->has_target_code
) {
5569 ppc_load_ptr (code
, ppc_r0
, item
->value
.target_code
);
5572 ppc_load_ptr (code
, ppc_r0
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
5573 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
5575 ppc_mtctr (code
, ppc_r0
);
5576 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5577 ppc_patch (item
->jmp_code
, code
);
5578 ppc_load_ptr (code
, ppc_r0
, fail_tramp
);
5579 ppc_mtctr (code
, ppc_r0
);
5580 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5581 item
->jmp_code
= NULL
;
5583 /* enable the commented code to assert on wrong method */
5584 #if ENABLE_WRONG_METHOD_CHECK
5585 ppc_load (code
, ppc_r0
, (guint32
)item
->key
);
5586 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5587 item
->jmp_code
= code
;
5588 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5590 ppc_ldptr (code
, ppc_r0
, (sizeof (target_mgreg_t
) * item
->value
.vtable_slot
), ppc_r12
);
5591 ppc_ldptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5592 ppc_mtctr (code
, ppc_r0
);
5593 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5594 #if ENABLE_WRONG_METHOD_CHECK
5595 ppc_patch (item
->jmp_code
, code
);
5597 item
->jmp_code
= NULL
;
5602 ppc_load (code
, ppc_r0
, (gulong
)item
->key
);
5603 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5604 item
->jmp_code
= code
;
5605 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_LT
, 0);
5608 /* patch the branches to get to the target items */
5609 for (i
= 0; i
< count
; ++i
) {
5610 MonoIMTCheckItem
*item
= imt_entries
[i
];
5611 if (item
->jmp_code
) {
5612 if (item
->check_target_idx
) {
5613 ppc_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
5619 UnlockedAdd (&mono_stats
.imt_trampolines_size
, code
- start
);
5620 g_assert (code
- start
<= size
);
5621 mono_arch_flush_icache (start
, size
);
5622 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
5624 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, NULL
), domain
);
5630 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
5632 host_mgreg_t
*r
= (host_mgreg_t
*)regs
;
5634 return (MonoMethod
*)(gsize
) r
[MONO_ARCH_IMT_REG
];
5638 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
5640 return (MonoVTable
*)(gsize
) regs
[MONO_ARCH_RGCTX_REG
];
5644 mono_arch_get_cie_program (void)
5648 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ppc_r1
, 0);
5654 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
5656 MonoInst
*ins
= NULL
;
5659 if (cmethod
->klass
== mono_class_try_get_math_class ()) {
5660 if (strcmp (cmethod
->name
, "Sqrt") == 0) {
5662 } else if (strcmp (cmethod
->name
, "Abs") == 0 && fsig
->params
[0]->type
== MONO_TYPE_R8
) {
5666 if (opcode
&& fsig
->param_count
== 1) {
5667 MONO_INST_NEW (cfg
, ins
, opcode
);
5668 ins
->type
= STACK_R8
;
5669 ins
->dreg
= mono_alloc_freg (cfg
);
5670 ins
->sreg1
= args
[0]->dreg
;
5671 MONO_ADD_INS (cfg
->cbb
, ins
);
5674 /* Check for Min/Max for (u)int(32|64) */
5676 if (cpu_hw_caps
& PPC_ISA_2_03
) {
5677 if (strcmp (cmethod
->name
, "Min") == 0) {
5678 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
5680 if (fsig
->params
[0]->type
== MONO_TYPE_U4
)
5681 opcode
= OP_IMIN_UN
;
5682 #ifdef __mono_ppc64__
5683 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
5685 else if (fsig
->params
[0]->type
== MONO_TYPE_U8
)
5686 opcode
= OP_LMIN_UN
;
5688 } else if (strcmp (cmethod
->name
, "Max") == 0) {
5689 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
5691 if (fsig
->params
[0]->type
== MONO_TYPE_U4
)
5692 opcode
= OP_IMAX_UN
;
5693 #ifdef __mono_ppc64__
5694 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
5696 else if (fsig
->params
[0]->type
== MONO_TYPE_U8
)
5697 opcode
= OP_LMAX_UN
;
5701 * TODO: Floating point version with fsel, but fsel has
5702 * some peculiarities (need a scratch reg unless
5703 * comparing with 0, NaN/Inf behaviour (then MathF too)
5707 if (opcode
&& fsig
->param_count
== 2) {
5708 MONO_INST_NEW (cfg
, ins
, opcode
);
5709 ins
->type
= fsig
->params
[0]->type
== MONO_TYPE_I4
? STACK_I4
: STACK_I8
;
5710 ins
->dreg
= mono_alloc_ireg (cfg
);
5711 ins
->sreg1
= args
[0]->dreg
;
5712 ins
->sreg2
= args
[1]->dreg
;
5713 MONO_ADD_INS (cfg
->cbb
, ins
);
5716 /* Rounding instructions */
5718 if ((cpu_hw_caps
& PPC_ISA_2X
) && (fsig
->param_count
== 1) && (fsig
->params
[0]->type
== MONO_TYPE_R8
)) {
5720 * XXX: sysmath.c and the POWER ISA documentation for
5721 * frin[.] imply rounding is a little more complicated
5722 * than expected; the semantics are slightly different,
5723 * so just "frin." isn't a drop-in replacement. Floor,
5724 * Truncate, and Ceiling seem to work normally though.
5725 * (also, no float versions of these ops, but frsp
5726 * could be preprended?)
5728 //if (!strcmp (cmethod->name, "Round"))
5729 // opcode = OP_ROUND;
5730 if (!strcmp (cmethod
->name
, "Floor"))
5731 opcode
= OP_PPC_FLOOR
;
5732 else if (!strcmp (cmethod
->name
, "Ceiling"))
5733 opcode
= OP_PPC_CEIL
;
5734 else if (!strcmp (cmethod
->name
, "Truncate"))
5735 opcode
= OP_PPC_TRUNC
;
5737 MONO_INST_NEW (cfg
, ins
, opcode
);
5738 ins
->type
= STACK_R8
;
5739 ins
->dreg
= mono_alloc_freg (cfg
);
5740 ins
->sreg1
= args
[0]->dreg
;
5741 MONO_ADD_INS (cfg
->cbb
, ins
);
5745 if (cmethod
->klass
== mono_class_try_get_mathf_class ()) {
5746 if (strcmp (cmethod
->name
, "Sqrt") == 0) {
5748 } /* XXX: POWER has no single-precision normal FPU abs? */
5750 if (opcode
&& fsig
->param_count
== 1) {
5751 MONO_INST_NEW (cfg
, ins
, opcode
);
5752 ins
->type
= STACK_R4
;
5753 ins
->dreg
= mono_alloc_freg (cfg
);
5754 ins
->sreg1
= args
[0]->dreg
;
5755 MONO_ADD_INS (cfg
->cbb
, ins
);
5762 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
5765 return (host_mgreg_t
)(gsize
)MONO_CONTEXT_GET_SP (ctx
);
5767 return ctx
->regs
[reg
];
5771 mono_arch_get_patch_offset (guint8
*code
)
5777 * mono_aot_emit_load_got_addr:
5779 * Emit code to load the got address.
5780 * On PPC, the result is placed into r30.
5783 mono_arch_emit_load_got_addr (guint8
*start
, guint8
*code
, MonoCompile
*cfg
, MonoJumpInfo
**ji
)
5786 ppc_mflr (code
, ppc_r30
);
5788 mono_add_patch_info (cfg
, code
- start
, MONO_PATCH_INFO_GOT_OFFSET
, NULL
);
5790 *ji
= mono_patch_info_list_prepend (*ji
, code
- start
, MONO_PATCH_INFO_GOT_OFFSET
, NULL
);
5791 /* arch_emit_got_address () patches this */
5792 #if defined(TARGET_POWERPC64)
5798 ppc_load32 (code
, ppc_r0
, 0);
5799 ppc_add (code
, ppc_r30
, ppc_r30
, ppc_r0
);
5802 set_code_cursor (cfg
, code
);
5807 * mono_ppc_emit_load_aotconst:
5809 * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
5810 * TARGET from the mscorlib GOT in full-aot code.
5811 * On PPC, the GOT address is assumed to be in r30, and the result is placed into
5815 mono_arch_emit_load_aotconst (guint8
*start
, guint8
*code
, MonoJumpInfo
**ji
, MonoJumpInfoType tramp_type
, gconstpointer target
)
5817 /* Load the mscorlib got address */
5818 ppc_ldptr (code
, ppc_r12
, sizeof (target_mgreg_t
), ppc_r30
);
5819 *ji
= mono_patch_info_list_prepend (*ji
, code
- start
, tramp_type
, target
);
5820 /* arch_emit_got_access () patches this */
5821 ppc_load32 (code
, ppc_r0
, 0);
5822 ppc_ldptr_indexed (code
, ppc_r12
, ppc_r12
, ppc_r0
);
5827 /* Soft Debug support */
5828 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
5835 * mono_arch_set_breakpoint:
5837 * See mini-amd64.c for docs.
5840 mono_arch_set_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5843 guint8
*orig_code
= code
;
5845 ppc_load_sequence (code
, ppc_r12
, (gsize
)bp_trigger_page
);
5846 ppc_ldptr (code
, ppc_r12
, 0, ppc_r12
);
5848 g_assert (code
- orig_code
== BREAKPOINT_SIZE
);
5850 mono_arch_flush_icache (orig_code
, code
- orig_code
);
5854 * mono_arch_clear_breakpoint:
5856 * See mini-amd64.c for docs.
5859 mono_arch_clear_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5864 for (i
= 0; i
< BREAKPOINT_SIZE
/ 4; ++i
)
5867 mono_arch_flush_icache (ip
, code
- ip
);
5871 * mono_arch_is_breakpoint_event:
5873 * See mini-amd64.c for docs.
5876 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
5878 siginfo_t
* sinfo
= (siginfo_t
*) info
;
5879 /* Sometimes the address is off by 4 */
5880 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
5887 * mono_arch_skip_breakpoint:
5889 * See mini-amd64.c for docs.
5892 mono_arch_skip_breakpoint (MonoContext
*ctx
, MonoJitInfo
*ji
)
5894 /* skip the ldptr */
5895 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5903 * mono_arch_start_single_stepping:
5905 * See mini-amd64.c for docs.
5908 mono_arch_start_single_stepping (void)
5910 mono_mprotect (ss_trigger_page
, mono_pagesize (), 0);
5914 * mono_arch_stop_single_stepping:
5916 * See mini-amd64.c for docs.
5919 mono_arch_stop_single_stepping (void)
5921 mono_mprotect (ss_trigger_page
, mono_pagesize (), MONO_MMAP_READ
);
5925 * mono_arch_is_single_step_event:
5927 * See mini-amd64.c for docs.
5930 mono_arch_is_single_step_event (void *info
, void *sigctx
)
5932 siginfo_t
* sinfo
= (siginfo_t
*) info
;
5933 /* Sometimes the address is off by 4 */
5934 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
5941 * mono_arch_skip_single_step:
5943 * See mini-amd64.c for docs.
5946 mono_arch_skip_single_step (MonoContext
*ctx
)
5948 /* skip the ldptr */
5949 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5953 * mono_arch_create_seq_point_info:
5955 * See mini-amd64.c for docs.
5958 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
5967 mono_arch_opcode_supported (int opcode
)
5970 case OP_ATOMIC_ADD_I4
:
5971 case OP_ATOMIC_CAS_I4
:
5972 #ifdef TARGET_POWERPC64
5973 case OP_ATOMIC_ADD_I8
:
5974 case OP_ATOMIC_CAS_I8
:
5983 mono_arch_load_function (MonoJitICallId jit_icall_id
)
5985 gpointer target
= NULL
;
5986 switch (jit_icall_id
) {
5987 #undef MONO_AOT_ICALL
5988 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
5989 MONO_AOT_ICALL (mono_ppc_throw_exception
)