3 * PowerPC backend for the Mono code generator
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
8 * Andreas Faerber <andreas.faerber@web.de>
10 * (C) 2003 Ximian, Inc.
11 * (C) 2007-2008 Andreas Faerber
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/utils/mono-proclib.h>
20 #include <mono/utils/mono-mmap.h>
21 #include <mono/utils/mono-hwcap.h>
22 #include <mono/utils/unlocked.h>
23 #include "mono/utils/mono-tls-inline.h"
26 #ifdef TARGET_POWERPC64
27 #include "cpu-ppc64.h"
32 #include "aot-runtime.h"
33 #include "mini-runtime.h"
35 #include <sys/sysctl.h>
41 #include <sys/systemcfg.h>
44 static GENERATE_TRY_GET_CLASS_WITH_CACHE (math
, "System", "Math")
45 static GENERATE_TRY_GET_CLASS_WITH_CACHE (mathf
, "System", "MathF")
47 #define FORCE_INDIR_CALL 1
58 /* cpu_hw_caps contains the flags defined below */
59 static int cpu_hw_caps
= 0;
60 static int cachelinesize
= 0;
61 static int cachelineinc
= 0;
63 PPC_ICACHE_SNOOP
= 1 << 0,
64 PPC_MULTIPLE_LS_UNITS
= 1 << 1,
65 PPC_SMP_CAPABLE
= 1 << 2,
68 PPC_MOVE_FPR_GPR
= 1 << 5,
69 PPC_ISA_2_03
= 1 << 6,
73 #define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
75 /* This mutex protects architecture specific caches */
76 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
77 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
78 static mono_mutex_t mini_arch_mutex
;
81 * The code generated for sequence points reads from this location, which is
82 * made read-only when single stepping is enabled.
84 static gpointer ss_trigger_page
;
86 /* Enabled breakpoints read from this trigger page */
87 static gpointer bp_trigger_page
;
89 #define MONO_EMIT_NEW_LOAD_R8(cfg,dr,addr) do { \
91 MONO_INST_NEW ((cfg), (inst), OP_R8CONST); \
92 inst->type = STACK_R8; \
94 inst->inst_p0 = (void*)(addr); \
95 mono_bblock_add_inst (cfg->cbb, inst); \
99 mono_arch_regname (int reg
) {
100 static const char rnames
[][4] = {
101 "r0", "sp", "r2", "r3", "r4",
102 "r5", "r6", "r7", "r8", "r9",
103 "r10", "r11", "r12", "r13", "r14",
104 "r15", "r16", "r17", "r18", "r19",
105 "r20", "r21", "r22", "r23", "r24",
106 "r25", "r26", "r27", "r28", "r29",
109 if (reg
>= 0 && reg
< 32)
115 mono_arch_fregname (int reg
) {
116 static const char rnames
[][4] = {
117 "f0", "f1", "f2", "f3", "f4",
118 "f5", "f6", "f7", "f8", "f9",
119 "f10", "f11", "f12", "f13", "f14",
120 "f15", "f16", "f17", "f18", "f19",
121 "f20", "f21", "f22", "f23", "f24",
122 "f25", "f26", "f27", "f28", "f29",
125 if (reg
>= 0 && reg
< 32)
130 /* this function overwrites r0, r11, r12 */
132 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
134 /* unrolled, use the counter in big */
135 if (size
> sizeof (target_mgreg_t
) * 5) {
136 long shifted
= size
/ TARGET_SIZEOF_VOID_P
;
137 guint8
*copy_loop_start
, *copy_loop_jump
;
139 ppc_load (code
, ppc_r0
, shifted
);
140 ppc_mtctr (code
, ppc_r0
);
141 //g_assert (sreg == ppc_r12);
142 ppc_addi (code
, ppc_r11
, dreg
, (doffset
- sizeof (target_mgreg_t
)));
143 ppc_addi (code
, ppc_r12
, sreg
, (soffset
- sizeof (target_mgreg_t
)));
144 copy_loop_start
= code
;
145 ppc_ldptr_update (code
, ppc_r0
, (unsigned int)sizeof (target_mgreg_t
), ppc_r12
);
146 ppc_stptr_update (code
, ppc_r0
, (unsigned int)sizeof (target_mgreg_t
), ppc_r11
);
147 copy_loop_jump
= code
;
148 ppc_bc (code
, PPC_BR_DEC_CTR_NONZERO
, 0, 0);
149 ppc_patch (copy_loop_jump
, copy_loop_start
);
150 size
-= shifted
* sizeof (target_mgreg_t
);
151 doffset
= soffset
= 0;
154 #ifdef __mono_ppc64__
155 /* the hardware has multiple load/store units and the move is long
156 enough to use more then one register, then use load/load/store/store
157 to execute 2 instructions per cycle. */
158 if ((cpu_hw_caps
& PPC_MULTIPLE_LS_UNITS
) && (dreg
!= ppc_r11
) && (sreg
!= ppc_r11
)) {
160 ppc_ldptr (code
, ppc_r0
, soffset
, sreg
);
161 ppc_ldptr (code
, ppc_r11
, soffset
+8, sreg
);
162 ppc_stptr (code
, ppc_r0
, doffset
, dreg
);
163 ppc_stptr (code
, ppc_r11
, doffset
+8, dreg
);
170 ppc_ldr (code
, ppc_r0
, soffset
, sreg
);
171 ppc_str (code
, ppc_r0
, doffset
, dreg
);
177 if ((cpu_hw_caps
& PPC_MULTIPLE_LS_UNITS
) && (dreg
!= ppc_r11
) && (sreg
!= ppc_r11
)) {
179 ppc_lwz (code
, ppc_r0
, soffset
, sreg
);
180 ppc_lwz (code
, ppc_r11
, soffset
+4, sreg
);
181 ppc_stw (code
, ppc_r0
, doffset
, dreg
);
182 ppc_stw (code
, ppc_r11
, doffset
+4, dreg
);
190 ppc_lwz (code
, ppc_r0
, soffset
, sreg
);
191 ppc_stw (code
, ppc_r0
, doffset
, dreg
);
197 ppc_lhz (code
, ppc_r0
, soffset
, sreg
);
198 ppc_sth (code
, ppc_r0
, doffset
, dreg
);
204 ppc_lbz (code
, ppc_r0
, soffset
, sreg
);
205 ppc_stb (code
, ppc_r0
, doffset
, dreg
);
214 * mono_arch_get_argument_info:
215 * @csig: a method signature
216 * @param_count: the number of parameters to consider
217 * @arg_info: an array to store the result infos
219 * Gathers information on parameters such as size, alignment and
220 * padding. arg_info should be large enough to hold param_count + 1 entries.
222 * Returns the size of the activation frame.
225 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
227 #ifdef __mono_ppc64__
231 int k
, frame_size
= 0;
232 int size
, align
, pad
;
235 if (MONO_TYPE_ISSTRUCT (csig
->ret
)) {
236 frame_size
+= sizeof (target_mgreg_t
);
240 arg_info
[0].offset
= offset
;
243 frame_size
+= sizeof (target_mgreg_t
);
247 arg_info
[0].size
= frame_size
;
249 for (k
= 0; k
< param_count
; k
++) {
252 size
= mono_type_native_stack_size (csig
->params
[k
], (guint32
*)&align
);
254 size
= mini_type_stack_size (csig
->params
[k
], &align
);
256 /* ignore alignment for now */
259 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
260 arg_info
[k
].pad
= pad
;
262 arg_info
[k
+ 1].pad
= 0;
263 arg_info
[k
+ 1].size
= size
;
265 arg_info
[k
+ 1].offset
= offset
;
269 align
= MONO_ARCH_FRAME_ALIGNMENT
;
270 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
271 arg_info
[k
].pad
= pad
;
277 #ifdef __mono_ppc64__
279 is_load_sequence (guint32
*seq
)
281 return ppc_opcode (seq
[0]) == 15 && /* lis */
282 ppc_opcode (seq
[1]) == 24 && /* ori */
283 ppc_opcode (seq
[2]) == 30 && /* sldi */
284 ppc_opcode (seq
[3]) == 25 && /* oris */
285 ppc_opcode (seq
[4]) == 24; /* ori */
288 #define ppc_load_get_dest(l) (((l)>>21) & 0x1f)
289 #define ppc_load_get_off(l) ((gint16)((l) & 0xffff))
293 #define ppc_is_load_op(opcode) (ppc_opcode ((opcode)) == 58 || ppc_opcode ((opcode)) == 32)
295 /* code must point to the blrl */
297 mono_ppc_is_direct_call_sequence (guint32
*code
)
299 #ifdef __mono_ppc64__
300 g_assert(*code
== 0x4e800021 || *code
== 0x4e800020 || *code
== 0x4e800420);
302 /* the thunk-less direct call sequence: lis/ori/sldi/oris/ori/mtlr/blrl */
303 if (ppc_opcode (code
[-1]) == 31) { /* mtlr */
304 if (ppc_is_load_op (code
[-2]) && ppc_is_load_op (code
[-3])) { /* ld/ld */
305 if (!is_load_sequence (&code
[-8]))
307 /* one of the loads must be "ld r2,8(rX)" or "ld r2,4(rX) for ilp32 */
308 return (ppc_load_get_dest (code
[-2]) == ppc_r2
&& ppc_load_get_off (code
[-2]) == sizeof (target_mgreg_t
)) ||
309 (ppc_load_get_dest (code
[-3]) == ppc_r2
&& ppc_load_get_off (code
[-3]) == sizeof (target_mgreg_t
));
311 if (ppc_opcode (code
[-2]) == 24 && ppc_opcode (code
[-3]) == 31) /* mr/nop */
312 return is_load_sequence (&code
[-8]);
314 return is_load_sequence (&code
[-6]);
318 g_assert(*code
== 0x4e800021);
320 /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */
321 return ppc_opcode (code
[-1]) == 31 &&
322 ppc_opcode (code
[-2]) == 24 &&
323 ppc_opcode (code
[-3]) == 15;
327 #define MAX_ARCH_DELEGATE_PARAMS 7
330 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, guint32 param_count
, gboolean aot
)
332 guint8
*code
, *start
;
335 int size
= MONO_PPC_32_64_CASE (32, 32) + PPC_FTNPTR_SIZE
;
337 start
= code
= mono_global_codeman_reserve (size
);
339 code
= mono_ppc_create_pre_code_ftnptr (code
);
341 /* Replace the this argument with the target */
342 ppc_ldptr (code
, ppc_r0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
), ppc_r3
);
343 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
344 /* it's a function descriptor */
345 /* Can't use ldptr as it doesn't work with r0 */
346 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
348 ppc_mtctr (code
, ppc_r0
);
349 ppc_ldptr (code
, ppc_r3
, MONO_STRUCT_OFFSET (MonoDelegate
, target
), ppc_r3
);
350 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
352 g_assert ((code
- start
) <= size
);
354 mono_arch_flush_icache (start
, size
);
355 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
359 size
= MONO_PPC_32_64_CASE (32, 32) + param_count
* 4 + PPC_FTNPTR_SIZE
;
360 start
= code
= mono_global_codeman_reserve (size
);
362 code
= mono_ppc_create_pre_code_ftnptr (code
);
364 ppc_ldptr (code
, ppc_r0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
), ppc_r3
);
365 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
366 /* it's a function descriptor */
367 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
369 ppc_mtctr (code
, ppc_r0
);
370 /* slide down the arguments */
371 for (i
= 0; i
< param_count
; ++i
) {
372 ppc_mr (code
, (ppc_r3
+ i
), (ppc_r3
+ i
+ 1));
374 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
376 g_assert ((code
- start
) <= size
);
378 mono_arch_flush_icache (start
, size
);
379 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
383 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, NULL
);
385 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
386 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, NULL
);
394 mono_arch_get_delegate_invoke_impls (void)
400 get_delegate_invoke_impl (&info
, TRUE
, 0, TRUE
);
401 res
= g_slist_prepend (res
, info
);
403 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
404 get_delegate_invoke_impl (&info
, FALSE
, i
, TRUE
);
405 res
= g_slist_prepend (res
, info
);
412 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
414 guint8
*code
, *start
;
416 /* FIXME: Support more cases */
417 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
421 static guint8
* cached
= NULL
;
426 if (mono_ee_features
.use_aot_trampolines
) {
427 start
= (guint8
*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
430 start
= get_delegate_invoke_impl (&info
, TRUE
, 0, FALSE
);
431 mono_tramp_info_register (info
, NULL
);
433 mono_memory_barrier ();
437 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
440 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
442 for (i
= 0; i
< sig
->param_count
; ++i
)
443 if (!mono_is_regsize_var (sig
->params
[i
]))
447 code
= cache
[sig
->param_count
];
451 if (mono_ee_features
.use_aot_trampolines
) {
452 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
453 start
= (guint8
*)mono_aot_get_trampoline (name
);
457 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
, FALSE
);
458 mono_tramp_info_register (info
, NULL
);
461 mono_memory_barrier ();
463 cache
[sig
->param_count
] = start
;
469 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
475 mono_arch_get_this_arg_from_call (host_mgreg_t
*r
, guint8
*code
)
477 return (gpointer
)(gsize
)r
[ppc_r3
];
485 #define MAX_AUX_ENTRIES 128
487 * PPC_FEATURE_POWER4, PPC_FEATURE_POWER5, PPC_FEATURE_POWER5_PLUS, PPC_FEATURE_CELL,
488 * PPC_FEATURE_PA6T, PPC_FEATURE_ARCH_2_05 are considered supporting 2X ISA features
490 #define ISA_2X (0x00080000 | 0x00040000 | 0x00020000 | 0x00010000 | 0x00000800 | 0x00001000)
492 /* define PPC_FEATURE_64 HWCAP for 64-bit category. */
493 #define ISA_64 0x40000000
495 /* define PPC_FEATURE_POWER6_EXT HWCAP for power6x mffgpr/mftgpr instructions. */
496 #define ISA_MOVE_FPR_GPR 0x00000200
498 * Initialize the cpu to execute managed code.
501 mono_arch_cpu_init (void)
506 * Initialize architecture specific code.
509 mono_arch_init (void)
511 #if defined(MONO_CROSS_COMPILE)
512 #elif defined(__APPLE__)
514 size_t len
= sizeof (cachelinesize
);
517 mib
[1] = HW_CACHELINE
;
519 if (sysctl (mib
, 2, &cachelinesize
, &len
, NULL
, 0) == -1) {
523 cachelineinc
= cachelinesize
;
525 #elif defined(__linux__)
526 AuxVec vec
[MAX_AUX_ENTRIES
];
527 int i
, vec_entries
= 0;
528 /* sadly this will work only with 2.6 kernels... */
529 FILE* f
= fopen ("/proc/self/auxv", "rb");
532 vec_entries
= fread (&vec
, sizeof (AuxVec
), MAX_AUX_ENTRIES
, f
);
536 for (i
= 0; i
< vec_entries
; i
++) {
537 int type
= vec
[i
].type
;
539 if (type
== 19) { /* AT_DCACHEBSIZE */
540 cachelinesize
= vec
[i
].value
;
544 #elif defined(G_COMPILER_CODEWARRIOR)
548 /* FIXME: use block instead? */
549 cachelinesize
= _system_configuration
.icache_line
;
550 cachelineinc
= _system_configuration
.icache_line
;
552 //#error Need a way to get cache line size
555 if (mono_hwcap_ppc_has_icache_snoop
)
556 cpu_hw_caps
|= PPC_ICACHE_SNOOP
;
558 if (mono_hwcap_ppc_is_isa_2x
)
559 cpu_hw_caps
|= PPC_ISA_2X
;
561 if (mono_hwcap_ppc_is_isa_2_03
)
562 cpu_hw_caps
|= PPC_ISA_2_03
;
564 if (mono_hwcap_ppc_is_isa_64
)
565 cpu_hw_caps
|= PPC_ISA_64
;
567 if (mono_hwcap_ppc_has_move_fpr_gpr
)
568 cpu_hw_caps
|= PPC_MOVE_FPR_GPR
;
570 if (mono_hwcap_ppc_has_multiple_ls_units
)
571 cpu_hw_caps
|= PPC_MULTIPLE_LS_UNITS
;
577 cachelineinc
= cachelinesize
;
579 if (mono_cpu_count () > 1)
580 cpu_hw_caps
|= PPC_SMP_CAPABLE
;
582 mono_os_mutex_init_recursive (&mini_arch_mutex
);
584 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
585 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
586 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
588 // FIXME: Fix partial sharing for power and remove this
589 mono_set_partial_sharing_supported (FALSE
);
593 * Cleanup architecture specific code.
596 mono_arch_cleanup (void)
598 mono_os_mutex_destroy (&mini_arch_mutex
);
602 mono_arch_have_fast_tls (void)
608 * This function returns the optimizations supported on this cpu.
611 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
615 /* no ppc-specific optimizations yet */
620 #ifdef __mono_ppc64__
621 #define CASE_PPC32(c)
622 #define CASE_PPC64(c) case c:
624 #define CASE_PPC32(c) case c:
625 #define CASE_PPC64(c)
629 is_regsize_var (MonoType
*t
) {
632 t
= mini_get_underlying_type (t
);
636 CASE_PPC64 (MONO_TYPE_I8
)
637 CASE_PPC64 (MONO_TYPE_U8
)
641 case MONO_TYPE_FNPTR
:
643 case MONO_TYPE_OBJECT
:
644 case MONO_TYPE_STRING
:
645 case MONO_TYPE_CLASS
:
646 case MONO_TYPE_SZARRAY
:
647 case MONO_TYPE_ARRAY
:
649 case MONO_TYPE_GENERICINST
:
650 if (!mono_type_generic_inst_is_valuetype (t
))
653 case MONO_TYPE_VALUETYPE
:
661 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
666 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
667 MonoInst
*ins
= cfg
->varinfo
[i
];
668 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
671 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
674 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
677 /* we can only allocate 32 bit values */
678 if (is_regsize_var (ins
->inst_vtype
)) {
679 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
680 g_assert (i
== vmv
->idx
);
681 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
687 #endif /* ifndef DISABLE_JIT */
690 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
694 if (cfg
->frame_reg
!= ppc_sp
)
696 /* ppc_r13 is used by the system on PPC EABI */
697 for (i
= 14; i
< top
; ++i
) {
699 * Reserve r29 for holding the vtable address for virtual calls in AOT mode,
700 * since the trampolines can clobber r12.
702 if (!(cfg
->compile_aot
&& i
== 29))
703 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (i
));
710 * mono_arch_regalloc_cost:
712 * Return the cost, in number of memory references, of the action of
713 * allocating the variable VMV into a register during global register
717 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
724 mono_arch_flush_icache (guint8
*code
, gint size
)
726 #ifdef MONO_CROSS_COMPILE
730 guint8
*endp
, *start
;
734 start
= (guint8
*)((gsize
)start
& ~(cachelinesize
- 1));
735 /* use dcbf for smp support, later optimize for UP, see pem._64bit.d20030611.pdf page 211 */
736 #if defined(G_COMPILER_CODEWARRIOR)
737 if (cpu_hw_caps
& PPC_SMP_CAPABLE
) {
738 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
742 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
748 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
759 /* For POWER5/6 with ICACHE_SNOOPing only one icbi in the range is required.
760 * The sync is required to insure that the store queue is completely empty.
761 * While the icbi performs no cache operations, icbi/isync is required to
762 * kill local prefetch.
764 if (cpu_hw_caps
& PPC_ICACHE_SNOOP
) {
766 asm ("icbi 0,%0;" : : "r"(code
) : "memory");
770 /* use dcbf for smp support, see pem._64bit.d20030611.pdf page 211 */
771 if (cpu_hw_caps
& PPC_SMP_CAPABLE
) {
772 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
773 asm ("dcbf 0,%0;" : : "r"(p
) : "memory");
776 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
777 asm ("dcbst 0,%0;" : : "r"(p
) : "memory");
782 for (p
= start
; p
< endp
; p
+= cachelineinc
) {
783 /* for ISA2.0+ implementations we should not need any extra sync between the
784 * icbi instructions. Both the 2.0 PEM and the PowerISA-2.05 say this.
785 * So I am not sure which chip had this problem but its not an issue on
786 * of the ISA V2 chips.
788 if (cpu_hw_caps
& PPC_ISA_2X
)
789 asm ("icbi 0,%0;" : : "r"(p
) : "memory");
791 asm ("icbi 0,%0; sync;" : : "r"(p
) : "memory");
793 if (!(cpu_hw_caps
& PPC_ISA_2X
))
801 mono_arch_flush_register_windows (void)
806 #define ALWAYS_ON_STACK(s) s
807 #define FP_ALSO_IN_REG(s) s
809 #ifdef __mono_ppc64__
810 #define ALWAYS_ON_STACK(s) s
811 #define FP_ALSO_IN_REG(s) s
813 #define ALWAYS_ON_STACK(s)
814 #define FP_ALSO_IN_REG(s)
816 #define ALIGN_DOUBLES
825 RegTypeFPStructByVal
, // For the v2 ABI, floats should be passed in FRs instead of GRs. Only valid for ABI v2!
830 guint32 vtsize
; /* in param area */
832 guint8 vtregs
; /* number of registers used to pass a RegTypeStructByVal/RegTypeFPStructByVal */
833 guint8 regtype
: 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
834 guint8 size
: 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal/RegTypeFPStructByVal */
835 guint8 bytes
: 4; /* size in bytes - only valid for
836 RegTypeStructByVal/RegTypeFPStructByVal if the struct fits
837 in one word, otherwise it's 0*/
846 gboolean vtype_retaddr
;
854 #if PPC_RETURN_SMALL_FLOAT_STRUCTS_IN_FR_REGS
856 // Test if a structure is completely composed of either float XOR double fields and has fewer than
857 // PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS members.
858 // If this is true the structure can be returned directly via float registers instead of by a hidden parameter
859 // pointing to where the return value should be stored.
860 // This is as per the ELF ABI v2.
863 is_float_struct_returnable_via_regs (MonoType
*type
, int* member_cnt
, int* member_size
)
865 int local_member_cnt
, local_member_size
;
867 member_cnt
= &local_member_cnt
;
870 member_size
= &local_member_size
;
873 gboolean is_all_floats
= mini_type_is_hfa(type
, member_cnt
, member_size
);
874 return is_all_floats
&& (*member_cnt
<= PPC_MOST_FLOAT_STRUCT_MEMBERS_TO_RETURN_VIA_REGISTERS
);
878 #define is_float_struct_returnable_via_regs(a,b,c) (FALSE)
882 #if PPC_RETURN_SMALL_STRUCTS_IN_REGS
884 // Test if a structure is smaller in size than 2 doublewords (PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS) and is
885 // completely composed of fields all of basic types.
886 // If this is true the structure can be returned directly via registers r3/r4 instead of by a hidden parameter
887 // pointing to where the return value should be stored.
888 // This is as per the ELF ABI v2.
891 is_struct_returnable_via_regs (MonoClass
*klass
, gboolean is_pinvoke
)
893 gboolean has_a_field
= FALSE
;
896 gpointer iter
= NULL
;
899 size
= mono_type_native_stack_size (m_class_get_byval_arg (klass
), 0);
901 size
= mini_type_stack_size (m_class_get_byval_arg (klass
), 0);
904 if (size
> PPC_LARGEST_STRUCT_SIZE_TO_RETURN_VIA_REGISTERS
)
906 while ((f
= mono_class_get_fields_internal (klass
, &iter
))) {
907 if (!(f
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)) {
908 // TBD: Is there a better way to check for the basic types?
909 if (f
->type
->byref
) {
911 } else if ((f
->type
->type
>= MONO_TYPE_BOOLEAN
) && (f
->type
->type
<= MONO_TYPE_R8
)) {
913 } else if (MONO_TYPE_ISSTRUCT (f
->type
)) {
914 MonoClass
*klass
= mono_class_from_mono_type_internal (f
->type
);
915 if (is_struct_returnable_via_regs(klass
, is_pinvoke
)) {
930 #define is_struct_returnable_via_regs(a,b) (FALSE)
935 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
937 #ifdef __mono_ppc64__
942 if (*gr
>= 3 + PPC_NUM_REG_ARGS
) {
943 ainfo
->offset
= PPC_STACK_PARAM_OFFSET
+ *stack_size
;
944 ainfo
->reg
= ppc_sp
; /* in the caller */
945 ainfo
->regtype
= RegTypeBase
;
946 *stack_size
+= sizeof (target_mgreg_t
);
948 ALWAYS_ON_STACK (*stack_size
+= sizeof (target_mgreg_t
));
952 if (*gr
>= 3 + PPC_NUM_REG_ARGS
- 1) {
954 //*stack_size += (*stack_size % 8);
956 ainfo
->offset
= PPC_STACK_PARAM_OFFSET
+ *stack_size
;
957 ainfo
->reg
= ppc_sp
; /* in the caller */
958 ainfo
->regtype
= RegTypeBase
;
965 ALWAYS_ON_STACK (*stack_size
+= 8);
973 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
975 has_only_a_r48_field (MonoClass
*klass
)
979 gboolean have_field
= FALSE
;
981 while ((f
= mono_class_get_fields_internal (klass
, &iter
))) {
982 if (!(f
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)) {
985 if (!f
->type
->byref
&& (f
->type
->type
== MONO_TYPE_R4
|| f
->type
->type
== MONO_TYPE_R8
))
996 get_call_info (MonoMethodSignature
*sig
)
998 guint i
, fr
, gr
, pstart
;
999 int n
= sig
->hasthis
+ sig
->param_count
;
1000 MonoType
*simpletype
;
1001 guint32 stack_size
= 0;
1002 CallInfo
*cinfo
= g_malloc0 (sizeof (CallInfo
) + sizeof (ArgInfo
) * n
);
1003 gboolean is_pinvoke
= sig
->pinvoke
;
1005 fr
= PPC_FIRST_FPARG_REG
;
1006 gr
= PPC_FIRST_ARG_REG
;
1008 if (mini_type_is_vtype (sig
->ret
)) {
1009 cinfo
->vtype_retaddr
= TRUE
;
1015 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1016 * the first argument, allowing 'this' to be always passed in the first arg reg.
1017 * Also do this if the first argument is a reference type, since virtual calls
1018 * are sometimes made using calli without sig->hasthis set, like in the delegate
1021 if (cinfo
->vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1023 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1026 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1030 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
1031 cinfo
->struct_ret
= cinfo
->ret
.reg
;
1032 cinfo
->vret_arg_index
= 1;
1036 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1040 if (cinfo
->vtype_retaddr
) {
1041 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
1042 cinfo
->struct_ret
= cinfo
->ret
.reg
;
1046 DEBUG(printf("params: %d\n", sig
->param_count
));
1047 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1048 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1049 /* Prevent implicit arguments and sig_cookie from
1050 being passed in registers */
1051 gr
= PPC_LAST_ARG_REG
+ 1;
1052 /* FIXME: don't we have to set fr, too? */
1053 /* Emit the signature cookie just before the implicit arguments */
1054 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1056 DEBUG(printf("param %d: ", i
));
1057 if (sig
->params
[i
]->byref
) {
1058 DEBUG(printf("byref\n"));
1059 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1063 simpletype
= mini_get_underlying_type (sig
->params
[i
]);
1064 switch (simpletype
->type
) {
1065 case MONO_TYPE_BOOLEAN
:
1068 cinfo
->args
[n
].size
= 1;
1069 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1072 case MONO_TYPE_CHAR
:
1075 cinfo
->args
[n
].size
= 2;
1076 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1081 cinfo
->args
[n
].size
= 4;
1082 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1088 case MONO_TYPE_FNPTR
:
1089 case MONO_TYPE_CLASS
:
1090 case MONO_TYPE_OBJECT
:
1091 case MONO_TYPE_STRING
:
1092 case MONO_TYPE_SZARRAY
:
1093 case MONO_TYPE_ARRAY
:
1094 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1095 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1098 case MONO_TYPE_GENERICINST
:
1099 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
1100 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1101 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1106 case MONO_TYPE_VALUETYPE
:
1107 case MONO_TYPE_TYPEDBYREF
: {
1109 MonoClass
*klass
= mono_class_from_mono_type_internal (sig
->params
[i
]);
1110 if (simpletype
->type
== MONO_TYPE_TYPEDBYREF
)
1111 size
= MONO_ABI_SIZEOF (MonoTypedRef
);
1112 else if (is_pinvoke
)
1113 size
= mono_class_native_size (klass
, NULL
);
1115 size
= mono_class_value_size (klass
, NULL
);
1117 #if defined(__APPLE__) || (defined(__mono_ppc64__) && !PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS)
1118 if ((size
== 4 || size
== 8) && has_only_a_r48_field (klass
)) {
1119 cinfo
->args
[n
].size
= size
;
1121 /* It was 7, now it is 8 in LinuxPPC */
1122 if (fr
<= PPC_LAST_FPARG_REG
) {
1123 cinfo
->args
[n
].regtype
= RegTypeFP
;
1124 cinfo
->args
[n
].reg
= fr
;
1126 FP_ALSO_IN_REG (gr
++);
1127 #if !defined(__mono_ppc64__)
1129 FP_ALSO_IN_REG (gr
++);
1131 ALWAYS_ON_STACK (stack_size
+= size
);
1133 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1134 cinfo
->args
[n
].regtype
= RegTypeBase
;
1135 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1142 DEBUG(printf ("load %d bytes struct\n",
1143 mono_class_native_size (sig
->params
[i
]->data
.klass
, NULL
)));
1145 #if PPC_PASS_STRUCTS_BY_VALUE
1147 int align_size
= size
;
1149 int rest
= PPC_LAST_ARG_REG
- gr
+ 1;
1152 #if PPC_PASS_SMALL_FLOAT_STRUCTS_IN_FR_REGS
1155 gboolean is_all_floats
= is_float_struct_returnable_via_regs (sig
->params
[i
], &mbr_cnt
, &mbr_size
);
1157 if (is_all_floats
) {
1158 rest
= PPC_LAST_FPARG_REG
- fr
+ 1;
1160 // Pass small (<= 8 member) structures entirely made up of either float or double members
1161 // in FR registers. There have to be at least mbr_cnt registers left.
1162 if (is_all_floats
&&
1163 (rest
>= mbr_cnt
)) {
1165 n_in_regs
= MIN (rest
, nregs
);
1166 cinfo
->args
[n
].regtype
= RegTypeFPStructByVal
;
1167 cinfo
->args
[n
].vtregs
= n_in_regs
;
1168 cinfo
->args
[n
].size
= mbr_size
;
1169 cinfo
->args
[n
].vtsize
= nregs
- n_in_regs
;
1170 cinfo
->args
[n
].reg
= fr
;
1172 if (mbr_size
== 4) {
1174 FP_ALSO_IN_REG (gr
+= (n_in_regs
+1)/2);
1177 FP_ALSO_IN_REG (gr
+= (n_in_regs
));
1182 align_size
+= (sizeof (target_mgreg_t
) - 1);
1183 align_size
&= ~(sizeof (target_mgreg_t
) - 1);
1184 nregs
= (align_size
+ sizeof (target_mgreg_t
) -1 ) / sizeof (target_mgreg_t
);
1185 n_in_regs
= MIN (rest
, nregs
);
1189 /* FIXME: check this */
1190 if (size
>= 3 && size
% 4 != 0)
1193 cinfo
->args
[n
].regtype
= RegTypeStructByVal
;
1194 cinfo
->args
[n
].vtregs
= n_in_regs
;
1195 cinfo
->args
[n
].size
= n_in_regs
;
1196 cinfo
->args
[n
].vtsize
= nregs
- n_in_regs
;
1197 cinfo
->args
[n
].reg
= gr
;
1201 #ifdef __mono_ppc64__
1202 if (nregs
== 1 && is_pinvoke
)
1203 cinfo
->args
[n
].bytes
= size
;
1206 cinfo
->args
[n
].bytes
= 0;
1207 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1208 /*g_print ("offset for arg %d at %d\n", n, PPC_STACK_PARAM_OFFSET + stack_size);*/
1209 stack_size
+= nregs
* sizeof (target_mgreg_t
);
1212 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
1213 cinfo
->args
[n
].regtype
= RegTypeStructByAddr
;
1214 cinfo
->args
[n
].vtsize
= size
;
1221 cinfo
->args
[n
].size
= 8;
1222 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, SIZEOF_REGISTER
== 8);
1226 cinfo
->args
[n
].size
= 4;
1228 /* It was 7, now it is 8 in LinuxPPC */
1229 if (fr
<= PPC_LAST_FPARG_REG
1230 // For non-native vararg calls the parms must go in storage
1231 && !(!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1233 cinfo
->args
[n
].regtype
= RegTypeFP
;
1234 cinfo
->args
[n
].reg
= fr
;
1236 FP_ALSO_IN_REG (gr
++);
1237 ALWAYS_ON_STACK (stack_size
+= SIZEOF_REGISTER
);
1239 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
+ MONO_PPC_32_64_CASE (0, 4);
1240 cinfo
->args
[n
].regtype
= RegTypeBase
;
1241 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1242 stack_size
+= SIZEOF_REGISTER
;
1247 cinfo
->args
[n
].size
= 8;
1248 /* It was 7, now it is 8 in LinuxPPC */
1249 if (fr
<= PPC_LAST_FPARG_REG
1250 // For non-native vararg calls the parms must go in storage
1251 && !(!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1253 cinfo
->args
[n
].regtype
= RegTypeFP
;
1254 cinfo
->args
[n
].reg
= fr
;
1256 FP_ALSO_IN_REG (gr
+= sizeof (double) / SIZEOF_REGISTER
);
1257 ALWAYS_ON_STACK (stack_size
+= 8);
1259 cinfo
->args
[n
].offset
= PPC_STACK_PARAM_OFFSET
+ stack_size
;
1260 cinfo
->args
[n
].regtype
= RegTypeBase
;
1261 cinfo
->args
[n
].reg
= ppc_sp
; /* in the caller*/
1267 g_error ("Can't trampoline 0x%x", sig
->params
[i
]->type
);
1272 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1273 /* Prevent implicit arguments and sig_cookie from
1274 being passed in registers */
1275 gr
= PPC_LAST_ARG_REG
+ 1;
1276 /* Emit the signature cookie just before the implicit arguments */
1277 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1281 simpletype
= mini_get_underlying_type (sig
->ret
);
1282 switch (simpletype
->type
) {
1283 case MONO_TYPE_BOOLEAN
:
1288 case MONO_TYPE_CHAR
:
1294 case MONO_TYPE_FNPTR
:
1295 case MONO_TYPE_CLASS
:
1296 case MONO_TYPE_OBJECT
:
1297 case MONO_TYPE_SZARRAY
:
1298 case MONO_TYPE_ARRAY
:
1299 case MONO_TYPE_STRING
:
1300 cinfo
->ret
.reg
= ppc_r3
;
1304 cinfo
->ret
.reg
= ppc_r3
;
1308 cinfo
->ret
.reg
= ppc_f1
;
1309 cinfo
->ret
.regtype
= RegTypeFP
;
1311 case MONO_TYPE_GENERICINST
:
1312 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
1313 cinfo
->ret
.reg
= ppc_r3
;
1317 case MONO_TYPE_VALUETYPE
:
1319 case MONO_TYPE_TYPEDBYREF
:
1320 case MONO_TYPE_VOID
:
1323 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1327 /* align stack size to 16 */
1328 DEBUG (printf (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1329 stack_size
= (stack_size
+ 15) & ~15;
1331 cinfo
->stack_usage
= stack_size
;
1338 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
1340 CallInfo
*caller_info
= get_call_info (caller_sig
);
1341 CallInfo
*callee_info
= get_call_info (callee_sig
);
1343 gboolean res
= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
<= caller_info
->stack_usage
)
1344 && IS_SUPPORTED_TAILCALL (memcmp (&callee_info
->ret
, &caller_info
->ret
, sizeof (caller_info
->ret
)) == 0);
1346 // FIXME ABIs vary as to if this local is in the parameter area or not,
1347 // so this check might not be needed.
1348 for (int i
= 0; res
&& i
< callee_info
->nargs
; ++i
) {
1349 res
= IS_SUPPORTED_TAILCALL (callee_info
->args
[i
].regtype
!= RegTypeStructByAddr
);
1350 /* An address on the callee's stack is passed as the argument */
1353 g_free (caller_info
);
1354 g_free (callee_info
);
1362 * Set var information according to the calling convention. ppc version.
1363 * The locals var stuff should most likely be split in another method.
1366 mono_arch_allocate_vars (MonoCompile
*m
)
1368 MonoMethodSignature
*sig
;
1369 MonoMethodHeader
*header
;
1371 int i
, offset
, size
, align
, curinst
;
1372 int frame_reg
= ppc_sp
;
1374 guint32 locals_stack_size
, locals_stack_align
;
1376 m
->flags
|= MONO_CFG_HAS_SPILLUP
;
1378 /* this is bug #60332: remove when #59509 is fixed, so no weird vararg
1379 * call convs needs to be handled this way.
1381 if (m
->flags
& MONO_CFG_HAS_VARARGS
)
1382 m
->param_area
= MAX (m
->param_area
, sizeof (target_mgreg_t
)*8);
1383 /* gtk-sharp and other broken code will dllimport vararg functions even with
1384 * non-varargs signatures. Since there is little hope people will get this right
1385 * we assume they won't.
1387 if (m
->method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
)
1388 m
->param_area
= MAX (m
->param_area
, sizeof (target_mgreg_t
)*8);
1393 * We use the frame register also for any method that has
1394 * exception clauses. This way, when the handlers are called,
1395 * the code will reference local variables using the frame reg instead of
1396 * the stack pointer: if we had to restore the stack pointer, we'd
1397 * corrupt the method frames that are already on the stack (since
1398 * filters get called before stack unwinding happens) when the filter
1399 * code would call any method (this also applies to finally etc.).
1401 if ((m
->flags
& MONO_CFG_HAS_ALLOCA
) || header
->num_clauses
)
1402 frame_reg
= ppc_r31
;
1403 m
->frame_reg
= frame_reg
;
1404 if (frame_reg
!= ppc_sp
) {
1405 m
->used_int_regs
|= 1 << frame_reg
;
1408 sig
= mono_method_signature_internal (m
->method
);
1412 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1413 m
->ret
->opcode
= OP_REGVAR
;
1414 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_r3
;
1416 /* FIXME: handle long values? */
1417 switch (mini_get_underlying_type (sig
->ret
)->type
) {
1418 case MONO_TYPE_VOID
:
1422 m
->ret
->opcode
= OP_REGVAR
;
1423 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_f1
;
1426 m
->ret
->opcode
= OP_REGVAR
;
1427 m
->ret
->inst_c0
= m
->ret
->dreg
= ppc_r3
;
1431 /* local vars are at a positive offset from the stack pointer */
1433 * also note that if the function uses alloca, we use ppc_r31
1434 * to point at the local variables.
1436 offset
= PPC_MINIMAL_STACK_SIZE
; /* linkage area */
1437 /* align the offset to 16 bytes: not sure this is needed here */
1439 //offset &= ~(16 - 1);
1441 /* add parameter area size for called functions */
1442 offset
+= m
->param_area
;
1444 offset
&= ~(16 - 1);
1446 /* the MonoLMF structure is stored just below the stack pointer */
1447 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1448 offset
+= sizeof(gpointer
) - 1;
1449 offset
&= ~(sizeof(gpointer
) - 1);
1451 m
->vret_addr
->opcode
= OP_REGOFFSET
;
1452 m
->vret_addr
->inst_basereg
= frame_reg
;
1453 m
->vret_addr
->inst_offset
= offset
;
1455 if (G_UNLIKELY (m
->verbose_level
> 1)) {
1456 printf ("vret_addr =");
1457 mono_print_ins (m
->vret_addr
);
1460 offset
+= sizeof(gpointer
);
1463 offsets
= mono_allocate_stack_slots (m
, FALSE
, &locals_stack_size
, &locals_stack_align
);
1464 if (locals_stack_align
) {
1465 offset
+= (locals_stack_align
- 1);
1466 offset
&= ~(locals_stack_align
- 1);
1468 for (i
= m
->locals_start
; i
< m
->num_varinfo
; i
++) {
1469 if (offsets
[i
] != -1) {
1470 MonoInst
*inst
= m
->varinfo
[i
];
1471 inst
->opcode
= OP_REGOFFSET
;
1472 inst
->inst_basereg
= frame_reg
;
1473 inst
->inst_offset
= offset
+ offsets
[i
];
1475 g_print ("allocating local %d (%s) to %d\n",
1476 i, mono_type_get_name (inst->inst_vtype), inst->inst_offset);
1480 offset
+= locals_stack_size
;
1484 inst
= m
->args
[curinst
];
1485 if (inst
->opcode
!= OP_REGVAR
) {
1486 inst
->opcode
= OP_REGOFFSET
;
1487 inst
->inst_basereg
= frame_reg
;
1488 offset
+= sizeof (target_mgreg_t
) - 1;
1489 offset
&= ~(sizeof (target_mgreg_t
) - 1);
1490 inst
->inst_offset
= offset
;
1491 offset
+= sizeof (target_mgreg_t
);
1496 for (i
= 0; i
< sig
->param_count
; ++i
) {
1497 inst
= m
->args
[curinst
];
1498 if (inst
->opcode
!= OP_REGVAR
) {
1499 inst
->opcode
= OP_REGOFFSET
;
1500 inst
->inst_basereg
= frame_reg
;
1502 size
= mono_type_native_stack_size (sig
->params
[i
], (guint32
*)&align
);
1503 inst
->backend
.is_pinvoke
= 1;
1505 size
= mono_type_size (sig
->params
[i
], &align
);
1507 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]) && size
< sizeof (target_mgreg_t
))
1508 size
= align
= sizeof (target_mgreg_t
);
1510 * Use at least 4/8 byte alignment, since these might be passed in registers, and
1511 * they are saved using std in the prolog.
1513 align
= sizeof (target_mgreg_t
);
1514 offset
+= align
- 1;
1515 offset
&= ~(align
- 1);
1516 inst
->inst_offset
= offset
;
1522 /* some storage for fp conversions */
1525 m
->arch
.fp_conv_var_offset
= offset
;
1528 /* align the offset to 16 bytes */
1530 offset
&= ~(16 - 1);
1533 m
->stack_offset
= offset
;
1535 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1536 CallInfo
*cinfo
= get_call_info (m
->method
->signature
);
1538 m
->sig_cookie
= cinfo
->sig_cookie
.offset
;
1545 mono_arch_create_vars (MonoCompile
*cfg
)
1547 MonoMethodSignature
*sig
= mono_method_signature_internal (cfg
->method
);
1549 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1550 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_ARG
);
1554 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
1555 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
1559 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1561 int sig_reg
= mono_alloc_ireg (cfg
);
1563 /* FIXME: Add support for signature tokens to AOT */
1564 cfg
->disable_aot
= TRUE
;
1566 MONO_EMIT_NEW_ICONST (cfg
, sig_reg
, (gulong
)call
->signature
);
1567 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
,
1568 ppc_r1
, cinfo
->sig_cookie
.offset
, sig_reg
);
1572 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1575 MonoMethodSignature
*sig
;
1579 sig
= call
->signature
;
1580 n
= sig
->param_count
+ sig
->hasthis
;
1582 cinfo
= get_call_info (sig
);
1584 for (i
= 0; i
< n
; ++i
) {
1585 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1588 if (i
>= sig
->hasthis
)
1589 t
= sig
->params
[i
- sig
->hasthis
];
1591 t
= mono_get_int_type ();
1592 t
= mini_get_underlying_type (t
);
1594 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
))
1595 emit_sig_cookie (cfg
, call
, cinfo
);
1597 in
= call
->args
[i
];
1599 if (ainfo
->regtype
== RegTypeGeneral
) {
1600 #ifndef __mono_ppc64__
1601 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1602 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1603 ins
->dreg
= mono_alloc_ireg (cfg
);
1604 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
1605 MONO_ADD_INS (cfg
->cbb
, ins
);
1606 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1608 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1609 ins
->dreg
= mono_alloc_ireg (cfg
);
1610 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
1611 MONO_ADD_INS (cfg
->cbb
, ins
);
1612 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1616 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1617 ins
->dreg
= mono_alloc_ireg (cfg
);
1618 ins
->sreg1
= in
->dreg
;
1619 MONO_ADD_INS (cfg
->cbb
, ins
);
1621 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1623 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
1624 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1625 ins
->opcode
= OP_OUTARG_VT
;
1626 ins
->sreg1
= in
->dreg
;
1627 ins
->klass
= in
->klass
;
1628 ins
->inst_p0
= call
;
1629 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1630 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1631 MONO_ADD_INS (cfg
->cbb
, ins
);
1632 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
1633 /* this is further handled in mono_arch_emit_outarg_vt () */
1634 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1635 ins
->opcode
= OP_OUTARG_VT
;
1636 ins
->sreg1
= in
->dreg
;
1637 ins
->klass
= in
->klass
;
1638 ins
->inst_p0
= call
;
1639 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1640 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1641 MONO_ADD_INS (cfg
->cbb
, ins
);
1642 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
1643 /* this is further handled in mono_arch_emit_outarg_vt () */
1644 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1645 ins
->opcode
= OP_OUTARG_VT
;
1646 ins
->sreg1
= in
->dreg
;
1647 ins
->klass
= in
->klass
;
1648 ins
->inst_p0
= call
;
1649 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1650 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1651 MONO_ADD_INS (cfg
->cbb
, ins
);
1652 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1653 } else if (ainfo
->regtype
== RegTypeBase
) {
1654 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1655 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1656 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
1657 if (t
->type
== MONO_TYPE_R8
)
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1660 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1662 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, in
->dreg
);
1664 } else if (ainfo
->regtype
== RegTypeFP
) {
1665 if (t
->type
== MONO_TYPE_VALUETYPE
) {
1666 /* this is further handled in mono_arch_emit_outarg_vt () */
1667 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1668 ins
->opcode
= OP_OUTARG_VT
;
1669 ins
->sreg1
= in
->dreg
;
1670 ins
->klass
= in
->klass
;
1671 ins
->inst_p0
= call
;
1672 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1673 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1674 MONO_ADD_INS (cfg
->cbb
, ins
);
1676 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1678 int dreg
= mono_alloc_freg (cfg
);
1680 if (ainfo
->size
== 4) {
1681 MONO_EMIT_NEW_UNALU (cfg
, OP_FCONV_TO_R4
, dreg
, in
->dreg
);
1683 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
1685 ins
->sreg1
= in
->dreg
;
1686 MONO_ADD_INS (cfg
->cbb
, ins
);
1689 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, TRUE
);
1690 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1693 g_assert_not_reached ();
1697 /* Emit the signature cookie in the case that there is no
1698 additional argument */
1699 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
1700 emit_sig_cookie (cfg
, call
, cinfo
);
1702 if (cinfo
->struct_ret
) {
1705 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1706 vtarg
->sreg1
= call
->vret_var
->dreg
;
1707 vtarg
->dreg
= mono_alloc_preg (cfg
);
1708 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1710 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->struct_ret
, FALSE
);
1713 call
->stack_usage
= cinfo
->stack_usage
;
1714 cfg
->param_area
= MAX (PPC_MINIMAL_PARAM_AREA_SIZE
, MAX (cfg
->param_area
, cinfo
->stack_usage
));
1715 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
1723 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1725 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1726 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
1727 int ovf_size
= ainfo
->vtsize
;
1728 int doffset
= ainfo
->offset
;
1729 int i
, soffset
, dreg
;
1731 if (ainfo
->regtype
== RegTypeStructByVal
) {
1738 * Darwin pinvokes needs some special handling for 1
1739 * and 2 byte arguments
1741 g_assert (ins
->klass
);
1742 if (call
->signature
->pinvoke
)
1743 size
= mono_class_native_size (ins
->klass
, NULL
);
1744 if (size
== 2 || size
== 1) {
1745 int tmpr
= mono_alloc_ireg (cfg
);
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1749 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1750 dreg
= mono_alloc_ireg (cfg
);
1751 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, dreg
, tmpr
);
1752 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, FALSE
);
1755 for (i
= 0; i
< ainfo
->vtregs
; ++i
) {
1756 dreg
= mono_alloc_ireg (cfg
);
1757 #if G_BYTE_ORDER == G_BIG_ENDIAN
1758 int antipadding
= 0;
1761 antipadding
= sizeof (target_mgreg_t
) - ainfo
->bytes
;
1763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1765 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, dreg
, dreg
, antipadding
* 8);
1767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1769 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
1770 soffset
+= sizeof (target_mgreg_t
);
1773 mini_emit_memcpy (cfg
, ppc_r1
, doffset
+ soffset
, src
->dreg
, soffset
, ovf_size
* sizeof (target_mgreg_t
), TARGET_SIZEOF_VOID_P
);
1774 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
1776 for (i
= 0; i
< ainfo
->vtregs
; ++i
) {
1777 int tmpr
= mono_alloc_freg (cfg
);
1778 if (ainfo
->size
== 4)
1779 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR4_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmpr
, src
->dreg
, soffset
);
1782 dreg
= mono_alloc_freg (cfg
);
1783 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, dreg
, tmpr
);
1784 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+i
, TRUE
);
1785 soffset
+= ainfo
->size
;
1788 mini_emit_memcpy (cfg
, ppc_r1
, doffset
+ soffset
, src
->dreg
, soffset
, ovf_size
* sizeof (target_mgreg_t
), TARGET_SIZEOF_VOID_P
);
1789 } else if (ainfo
->regtype
== RegTypeFP
) {
1790 int tmpr
= mono_alloc_freg (cfg
);
1791 if (ainfo
->size
== 4)
1792 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR4_MEMBASE
, tmpr
, src
->dreg
, 0);
1794 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmpr
, src
->dreg
, 0);
1795 dreg
= mono_alloc_freg (cfg
);
1796 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, dreg
, tmpr
);
1797 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, TRUE
);
1799 MonoInst
*vtcopy
= mono_compile_create_var (cfg
, m_class_get_byval_arg (src
->klass
), OP_LOCAL
);
1803 /* FIXME: alignment? */
1804 if (call
->signature
->pinvoke
) {
1805 size
= mono_type_native_stack_size (m_class_get_byval_arg (src
->klass
), NULL
);
1806 vtcopy
->backend
.is_pinvoke
= 1;
1808 size
= mini_type_stack_size (m_class_get_byval_arg (src
->klass
), NULL
);
1811 g_assert (ovf_size
> 0);
1813 EMIT_NEW_VARLOADA (cfg
, load
, vtcopy
, vtcopy
->inst_vtype
);
1814 mini_emit_memcpy (cfg
, load
->dreg
, 0, src
->dreg
, 0, size
, TARGET_SIZEOF_VOID_P
);
1817 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ppc_r1
, ainfo
->offset
, load
->dreg
);
1819 mono_call_inst_add_outarg_reg (cfg
, call
, load
->dreg
, ainfo
->reg
, FALSE
);
1824 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1826 MonoType
*ret
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
1828 #ifndef __mono_ppc64__
1829 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1832 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
1833 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
1834 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
1835 MONO_ADD_INS (cfg
->cbb
, ins
);
1839 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
1840 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1844 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1848 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
1853 #endif /* DISABLE_JIT */
1856 * Conditional branches have a small offset, so if it is likely overflowed,
1857 * we do a branch to the end of the method (uncond branches have much larger
1858 * offsets) where we perform the conditional and jump back unconditionally.
1859 * It's slightly slower, since we add two uncond branches, but it's very simple
1860 * with the current patch implementation and such large methods are likely not
1861 * going to be perf critical anyway.
1866 const char *exception
;
1873 #define EMIT_COND_BRANCH_FLAGS(ins,b0,b1) \
1874 if (0 && ins->inst_true_bb->native_offset) { \
1875 ppc_bc (code, (b0), (b1), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffff); \
1877 int br_disp = ins->inst_true_bb->max_offset - offset; \
1878 if (!ppc_is_imm16 (br_disp + 8 * 1024) || !ppc_is_imm16 (br_disp - 8 * 1024)) { \
1879 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1880 ovfj->data.bb = ins->inst_true_bb; \
1881 ovfj->ip_offset = 0; \
1882 ovfj->b0_cond = (b0); \
1883 ovfj->b1_cond = (b1); \
1884 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB_OVF, ovfj); \
1887 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1888 ppc_bc (code, (b0), (b1), 0); \
1892 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_b0_table [(cond)], branch_b1_table [(cond)])
1894 /* emit an exception if condition is fail
1896 * We assign the extra code used to throw the implicit exceptions
1897 * to cfg->bb_exit as far as the big branch handling is concerned
1899 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(b0,b1,exc_name) \
1901 int br_disp = cfg->bb_exit->max_offset - offset; \
1902 if (!ppc_is_imm16 (br_disp + 1024) || ! ppc_is_imm16 (ppc_is_imm16 (br_disp - 1024))) { \
1903 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1904 ovfj->data.exception = (exc_name); \
1905 ovfj->ip_offset = code - cfg->native_code; \
1906 ovfj->b0_cond = (b0); \
1907 ovfj->b1_cond = (b1); \
1908 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
1910 cfg->bb_exit->max_offset += 24; \
1912 mono_add_patch_info (cfg, code - cfg->native_code, \
1913 MONO_PATCH_INFO_EXC, exc_name); \
1914 ppc_bcl (code, (b0), (b1), 0); \
1918 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
1921 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1926 normalize_opcode (int opcode
)
1929 #ifndef MONO_ARCH_ILP32
1930 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMBASE
, OP_LOADI8_MEMBASE
):
1931 return OP_LOAD_MEMBASE
;
1932 case MONO_PPC_32_64_CASE (OP_LOADI4_MEMINDEX
, OP_LOADI8_MEMINDEX
):
1933 return OP_LOAD_MEMINDEX
;
1934 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_REG
, OP_STOREI8_MEMBASE_REG
):
1935 return OP_STORE_MEMBASE_REG
;
1936 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMBASE_IMM
, OP_STOREI8_MEMBASE_IMM
):
1937 return OP_STORE_MEMBASE_IMM
;
1938 case MONO_PPC_32_64_CASE (OP_STOREI4_MEMINDEX
, OP_STOREI8_MEMINDEX
):
1939 return OP_STORE_MEMINDEX
;
1941 case MONO_PPC_32_64_CASE (OP_ISHR_IMM
, OP_LSHR_IMM
):
1943 case MONO_PPC_32_64_CASE (OP_ISHR_UN_IMM
, OP_LSHR_UN_IMM
):
1944 return OP_SHR_UN_IMM
;
1951 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1953 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1955 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1956 switch (normalize_opcode (ins
->opcode
)) {
1958 /* remove unnecessary multiplication with 1 */
1959 if (ins
->inst_imm
== 1) {
1960 if (ins
->dreg
!= ins
->sreg1
) {
1961 ins
->opcode
= OP_MOVE
;
1963 MONO_DELETE_INS (bb
, ins
);
1967 int power2
= mono_is_power_of_two (ins
->inst_imm
);
1969 ins
->opcode
= OP_SHL_IMM
;
1970 ins
->inst_imm
= power2
;
1974 case OP_LOAD_MEMBASE
:
1976 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1977 * OP_LOAD_MEMBASE offset(basereg), reg
1979 if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_STORE_MEMBASE_REG
&&
1980 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1981 ins
->inst_offset
== last_ins
->inst_offset
) {
1982 if (ins
->dreg
== last_ins
->sreg1
) {
1983 MONO_DELETE_INS (bb
, ins
);
1986 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1987 ins
->opcode
= OP_MOVE
;
1988 ins
->sreg1
= last_ins
->sreg1
;
1992 * Note: reg1 must be different from the basereg in the second load
1993 * OP_LOAD_MEMBASE offset(basereg), reg1
1994 * OP_LOAD_MEMBASE offset(basereg), reg2
1996 * OP_LOAD_MEMBASE offset(basereg), reg1
1997 * OP_MOVE reg1, reg2
1999 } else if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_LOAD_MEMBASE
&&
2000 ins
->inst_basereg
!= last_ins
->dreg
&&
2001 ins
->inst_basereg
== last_ins
->inst_basereg
&&
2002 ins
->inst_offset
== last_ins
->inst_offset
) {
2004 if (ins
->dreg
== last_ins
->dreg
) {
2005 MONO_DELETE_INS (bb
, ins
);
2008 ins
->opcode
= OP_MOVE
;
2009 ins
->sreg1
= last_ins
->dreg
;
2012 //g_assert_not_reached ();
2016 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2017 * OP_LOAD_MEMBASE offset(basereg), reg
2019 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2020 * OP_ICONST reg, imm
2022 } else if (last_ins
&& normalize_opcode (last_ins
->opcode
) == OP_STORE_MEMBASE_IMM
&&
2023 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2024 ins
->inst_offset
== last_ins
->inst_offset
) {
2025 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2026 ins
->opcode
= OP_ICONST
;
2027 ins
->inst_c0
= last_ins
->inst_imm
;
2028 g_assert_not_reached (); // check this rule
2032 case OP_LOADU1_MEMBASE
:
2033 case OP_LOADI1_MEMBASE
:
2034 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
2035 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2036 ins
->inst_offset
== last_ins
->inst_offset
) {
2037 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
2038 ins
->sreg1
= last_ins
->sreg1
;
2041 case OP_LOADU2_MEMBASE
:
2042 case OP_LOADI2_MEMBASE
:
2043 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
2044 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2045 ins
->inst_offset
== last_ins
->inst_offset
) {
2046 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
2047 ins
->sreg1
= last_ins
->sreg1
;
2050 #ifdef __mono_ppc64__
2051 case OP_LOADU4_MEMBASE
:
2052 case OP_LOADI4_MEMBASE
:
2053 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
) &&
2054 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2055 ins
->inst_offset
== last_ins
->inst_offset
) {
2056 ins
->opcode
= (ins
->opcode
== OP_LOADI4_MEMBASE
) ? OP_ICONV_TO_I4
: OP_ICONV_TO_U4
;
2057 ins
->sreg1
= last_ins
->sreg1
;
2062 ins
->opcode
= OP_MOVE
;
2066 if (ins
->dreg
== ins
->sreg1
) {
2067 MONO_DELETE_INS (bb
, ins
);
2071 * OP_MOVE sreg, dreg
2072 * OP_MOVE dreg, sreg
2074 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
2075 ins
->sreg1
== last_ins
->dreg
&&
2076 ins
->dreg
== last_ins
->sreg1
) {
2077 MONO_DELETE_INS (bb
, ins
);
2085 bb
->last_ins
= last_ins
;
2089 mono_arch_decompose_opts (MonoCompile
*cfg
, MonoInst
*ins
)
2091 switch (ins
->opcode
) {
2092 case OP_ICONV_TO_R_UN
: {
2093 // This value is OK as-is for both big and little endian because of how it is stored
2094 static const guint64 adjust_val
= 0x4330000000000000ULL
;
2095 int msw_reg
= mono_alloc_ireg (cfg
);
2096 int adj_reg
= mono_alloc_freg (cfg
);
2097 int tmp_reg
= mono_alloc_freg (cfg
);
2098 int basereg
= ppc_sp
;
2100 MONO_EMIT_NEW_ICONST (cfg
, msw_reg
, 0x43300000);
2101 if (!ppc_is_imm16 (offset
+ 4)) {
2102 basereg
= mono_alloc_ireg (cfg
);
2103 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2105 #if G_BYTE_ORDER == G_BIG_ENDIAN
2106 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, msw_reg
);
2107 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, ins
->sreg1
);
2109 // For little endian the words are reversed
2110 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, msw_reg
);
2111 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, ins
->sreg1
);
2113 MONO_EMIT_NEW_LOAD_R8 (cfg
, adj_reg
, &adjust_val
);
2114 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmp_reg
, basereg
, offset
);
2115 MONO_EMIT_NEW_BIALU (cfg
, OP_FSUB
, ins
->dreg
, tmp_reg
, adj_reg
);
2116 ins
->opcode
= OP_NOP
;
2119 #ifndef __mono_ppc64__
2120 case OP_ICONV_TO_R4
:
2121 case OP_ICONV_TO_R8
: {
2122 /* If we have a PPC_FEATURE_64 machine we can avoid
2123 this and use the fcfid instruction. Otherwise
2124 on an old 32-bit chip and we have to do this the
2126 if (!(cpu_hw_caps
& PPC_ISA_64
)) {
2127 /* FIXME: change precision for CEE_CONV_R4 */
2128 static const guint64 adjust_val
= 0x4330000080000000ULL
;
2129 int msw_reg
= mono_alloc_ireg (cfg
);
2130 int xored
= mono_alloc_ireg (cfg
);
2131 int adj_reg
= mono_alloc_freg (cfg
);
2132 int tmp_reg
= mono_alloc_freg (cfg
);
2133 int basereg
= ppc_sp
;
2135 if (!ppc_is_imm16 (offset
+ 4)) {
2136 basereg
= mono_alloc_ireg (cfg
);
2137 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2139 MONO_EMIT_NEW_ICONST (cfg
, msw_reg
, 0x43300000);
2140 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
, msw_reg
);
2141 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_XOR_IMM
, xored
, ins
->sreg1
, 0x80000000);
2142 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, basereg
, offset
+ 4, xored
);
2143 MONO_EMIT_NEW_LOAD_R8 (cfg
, adj_reg
, (gpointer
)&adjust_val
);
2144 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADR8_MEMBASE
, tmp_reg
, basereg
, offset
);
2145 MONO_EMIT_NEW_BIALU (cfg
, OP_FSUB
, ins
->dreg
, tmp_reg
, adj_reg
);
2146 if (ins
->opcode
== OP_ICONV_TO_R4
)
2147 MONO_EMIT_NEW_UNALU (cfg
, OP_FCONV_TO_R4
, ins
->dreg
, ins
->dreg
);
2148 ins
->opcode
= OP_NOP
;
2154 int msw_reg
= mono_alloc_ireg (cfg
);
2155 int basereg
= ppc_sp
;
2157 if (!ppc_is_imm16 (offset
+ 4)) {
2158 basereg
= mono_alloc_ireg (cfg
);
2159 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IADD_IMM
, basereg
, cfg
->frame_reg
, offset
);
2161 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, basereg
, offset
, ins
->sreg1
);
2162 #if G_BYTE_ORDER == G_BIG_ENDIAN
2163 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, msw_reg
, basereg
, offset
);
2165 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, msw_reg
, basereg
, offset
+4);
2167 MONO_EMIT_NEW_UNALU (cfg
, OP_PPC_CHECK_FINITE
, -1, msw_reg
);
2168 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, ins
->dreg
, ins
->sreg1
);
2169 ins
->opcode
= OP_NOP
;
2172 #ifdef __mono_ppc64__
2174 case OP_IADD_OVF_UN
:
2176 int shifted1_reg
= mono_alloc_ireg (cfg
);
2177 int shifted2_reg
= mono_alloc_ireg (cfg
);
2178 int result_shifted_reg
= mono_alloc_ireg (cfg
);
2180 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, shifted1_reg
, ins
->sreg1
, 32);
2181 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, shifted2_reg
, ins
->sreg2
, 32);
2182 MONO_EMIT_NEW_BIALU (cfg
, ins
->opcode
, result_shifted_reg
, shifted1_reg
, shifted2_reg
);
2183 if (ins
->opcode
== OP_IADD_OVF_UN
)
2184 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, ins
->dreg
, result_shifted_reg
, 32);
2186 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_IMM
, ins
->dreg
, result_shifted_reg
, 32);
2187 ins
->opcode
= OP_NOP
;
2197 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*ins
)
2199 switch (ins
->opcode
) {
2201 /* ADC sets the condition code */
2202 MONO_EMIT_NEW_BIALU (cfg
, OP_ADDCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2203 MONO_EMIT_NEW_BIALU (cfg
, OP_ADD_OVF_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2206 case OP_LADD_OVF_UN
:
2207 /* ADC sets the condition code */
2208 MONO_EMIT_NEW_BIALU (cfg
, OP_ADDCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2209 MONO_EMIT_NEW_BIALU (cfg
, OP_ADD_OVF_UN_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2213 /* SBB sets the condition code */
2214 MONO_EMIT_NEW_BIALU (cfg
, OP_SUBCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2215 MONO_EMIT_NEW_BIALU (cfg
, OP_SUB_OVF_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2218 case OP_LSUB_OVF_UN
:
2219 /* SBB sets the condition code */
2220 MONO_EMIT_NEW_BIALU (cfg
, OP_SUBCC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), MONO_LVREG_LS (ins
->sreg2
));
2221 MONO_EMIT_NEW_BIALU (cfg
, OP_SUB_OVF_UN_CARRY
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), MONO_LVREG_MS (ins
->sreg2
));
2225 /* From gcc generated code */
2226 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PPC_SUBFIC
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
2227 MONO_EMIT_NEW_UNALU (cfg
, OP_PPC_SUBFZE
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
));
2236 * the branch_b0_table should maintain the order of these
2250 branch_b0_table
[] = {
2265 branch_b1_table
[] = {
2279 #define NEW_INS(cfg,dest,op) do { \
2280 MONO_INST_NEW((cfg), (dest), (op)); \
2281 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
2285 map_to_reg_reg_op (int op
)
2294 case OP_COMPARE_IMM
:
2296 case OP_ICOMPARE_IMM
:
2298 case OP_LCOMPARE_IMM
:
2316 case OP_LOAD_MEMBASE
:
2317 return OP_LOAD_MEMINDEX
;
2318 case OP_LOADI4_MEMBASE
:
2319 return OP_LOADI4_MEMINDEX
;
2320 case OP_LOADU4_MEMBASE
:
2321 return OP_LOADU4_MEMINDEX
;
2322 case OP_LOADI8_MEMBASE
:
2323 return OP_LOADI8_MEMINDEX
;
2324 case OP_LOADU1_MEMBASE
:
2325 return OP_LOADU1_MEMINDEX
;
2326 case OP_LOADI2_MEMBASE
:
2327 return OP_LOADI2_MEMINDEX
;
2328 case OP_LOADU2_MEMBASE
:
2329 return OP_LOADU2_MEMINDEX
;
2330 case OP_LOADI1_MEMBASE
:
2331 return OP_LOADI1_MEMINDEX
;
2332 case OP_LOADR4_MEMBASE
:
2333 return OP_LOADR4_MEMINDEX
;
2334 case OP_LOADR8_MEMBASE
:
2335 return OP_LOADR8_MEMINDEX
;
2336 case OP_STOREI1_MEMBASE_REG
:
2337 return OP_STOREI1_MEMINDEX
;
2338 case OP_STOREI2_MEMBASE_REG
:
2339 return OP_STOREI2_MEMINDEX
;
2340 case OP_STOREI4_MEMBASE_REG
:
2341 return OP_STOREI4_MEMINDEX
;
2342 case OP_STOREI8_MEMBASE_REG
:
2343 return OP_STOREI8_MEMINDEX
;
2344 case OP_STORE_MEMBASE_REG
:
2345 return OP_STORE_MEMINDEX
;
2346 case OP_STORER4_MEMBASE_REG
:
2347 return OP_STORER4_MEMINDEX
;
2348 case OP_STORER8_MEMBASE_REG
:
2349 return OP_STORER8_MEMINDEX
;
2350 case OP_STORE_MEMBASE_IMM
:
2351 return OP_STORE_MEMBASE_REG
;
2352 case OP_STOREI1_MEMBASE_IMM
:
2353 return OP_STOREI1_MEMBASE_REG
;
2354 case OP_STOREI2_MEMBASE_IMM
:
2355 return OP_STOREI2_MEMBASE_REG
;
2356 case OP_STOREI4_MEMBASE_IMM
:
2357 return OP_STOREI4_MEMBASE_REG
;
2358 case OP_STOREI8_MEMBASE_IMM
:
2359 return OP_STOREI8_MEMBASE_REG
;
2361 if (mono_op_imm_to_op (op
) == -1)
2362 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (op
));
2363 return mono_op_imm_to_op (op
);
2366 //#define map_to_reg_reg_op(op) (cfg->new_ir? mono_op_imm_to_op (op): map_to_reg_reg_op (op))
2368 #define compare_opcode_is_unsigned(opcode) \
2369 (((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \
2370 ((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \
2371 ((opcode) >= OP_LBNE_UN && (opcode) <= OP_LBLT_UN) || \
2372 ((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \
2373 ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \
2374 ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || \
2375 (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN || \
2376 (opcode) == OP_LCLT_UN || (opcode) == OP_LCGT_UN))
/*
 * mono_arch_lowering_pass:
 * Remove from the instruction list the instructions that can't be
 * represented with very simple instructions with no register
 * requirements: immediates that don't fit PPC's 16-bit instruction
 * fields are materialized into fresh OP_ICONST vregs and the *_IMM
 * opcode is rewritten to its reg/reg form; rem is decomposed into
 * div/mul/sub; fp constants become loads in the AOT case.
 *
 * NOTE(review): this excerpt elides several source lines (braces,
 * break statements, some case labels, #else/#endif).  Code tokens
 * below are exactly those visible; gaps are marked.
 */
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
/* [elided in excerpt: opening brace] */
	MonoInst *ins, *next, *temp, *last_ins = NULL;
	/* [elided in excerpt] */
	MONO_BB_FOR_EACH_INS (bb, ins) {
		/* [elided in excerpt: loop_start label, leading case labels] */
		switch (ins->opcode) {
		case OP_IDIV_UN_IMM:
		/* [elided in excerpt: sibling *_IMM case labels] */
		case OP_IREM_UN_IMM:
		CASE_PPC64 (OP_LREM_IMM) {
			/* Materialize the immediate operand into a new vreg... */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			/* ...then switch the *_IMM opcode to its reg/reg form. */
			if (ins->opcode == OP_IDIV_IMM)
				ins->opcode = OP_IDIV;
			else if (ins->opcode == OP_IREM_IMM)
				ins->opcode = OP_IREM;
			else if (ins->opcode == OP_IDIV_UN_IMM)
				ins->opcode = OP_IDIV_UN;
			else if (ins->opcode == OP_IREM_UN_IMM)
				ins->opcode = OP_IREM_UN;
			else if (ins->opcode == OP_LREM_IMM)
				ins->opcode = OP_LREM;
		/* handle rem separately */
		/* [elided in excerpt: OP_IREM/OP_IREM_UN case labels] */
		CASE_PPC64 (OP_LREM)
		CASE_PPC64 (OP_LREM_UN) {
			/* we change a rem dest, src1, src2 to
			 * div temp1, src1, src2
			 * mul temp2, temp1, src2
			 * sub dest, src1, temp2
			 */
			if (ins->opcode == OP_IREM || ins->opcode == OP_IREM_UN) {
				NEW_INS (cfg, mul, OP_IMUL);
				NEW_INS (cfg, temp, ins->opcode == OP_IREM ? OP_IDIV : OP_IDIV_UN);
				ins->opcode = OP_ISUB;
			/* [elided in excerpt: else arm for the 64-bit variant] */
				NEW_INS (cfg, mul, OP_LMUL);
				NEW_INS (cfg, temp, ins->opcode == OP_LREM ? OP_LDIV : OP_LDIV_UN);
				ins->opcode = OP_LSUB;
			/* [elided in excerpt] */
			/* Wire up: temp = s1 div s2; mul = temp * s2; ins = s1 - mul. */
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = ins->sreg2;
			temp->dreg = mono_alloc_ireg (cfg);
			mul->sreg1 = temp->dreg;
			mul->sreg2 = ins->sreg2;
			mul->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = mul->dreg;
		/* [elided in excerpt: break, OP_IADD_IMM-style case labels] */
		CASE_PPC64 (OP_LADD_IMM)
			/* addi only encodes a signed 16-bit immediate. */
			if (!ppc_is_imm16 (ins->inst_imm)) {
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = map_to_reg_reg_op (ins->opcode);
		/* [elided in excerpt: break, OP_ISUB_IMM-style case labels] */
		CASE_PPC64 (OP_LSUB_IMM)
			/* sub-imm is emitted as addi of the negated value, so test -imm. */
			if (!ppc_is_imm16 (-ins->inst_imm)) {
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = map_to_reg_reg_op (ins->opcode);
		/* [elided in excerpt: AND/OR/XOR _IMM case labels] */
			/* Logical-imm ops can take either a low or a high 16-bit half,
			 * but not both set at once (andi./andis. etc.). */
			gboolean is_imm = ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff));
#ifdef __mono_ppc64__
			if (ins->inst_imm & 0xffffffff00000000ULL)
			/* [elided in excerpt: presumably is_imm = TRUE; #endif; if (is_imm) { -- confirm against full file] */
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = map_to_reg_reg_op (ins->opcode);
		/* [elided in excerpt: break and further case labels] */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		CASE_PPC64 (OP_LCOMPARE_IMM)
			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				ins->opcode = OP_NOP;
			/* [elided in excerpt] */
			/* Unsigned consumers need cmpli's unsigned 16-bit range,
			 * signed ones need cmpi's signed range. */
			if (compare_opcode_is_unsigned (next->opcode)) {
				if (!ppc_is_uimm16 (ins->inst_imm)) {
					NEW_INS (cfg, temp, OP_ICONST);
					temp->inst_c0 = ins->inst_imm;
					temp->dreg = mono_alloc_ireg (cfg);
					ins->sreg2 = temp->dreg;
					ins->opcode = map_to_reg_reg_op (ins->opcode);
				if (!ppc_is_imm16 (ins->inst_imm)) {
					NEW_INS (cfg, temp, OP_ICONST);
					temp->inst_c0 = ins->inst_imm;
					temp->dreg = mono_alloc_ireg (cfg);
					ins->sreg2 = temp->dreg;
					ins->opcode = map_to_reg_reg_op (ins->opcode);
		/* [elided in excerpt: break, OP_IMUL_IMM-style case labels] */
		CASE_PPC64 (OP_LMUL_IMM)
			/* Strength-reduce: *1 -> move, *0 -> const 0, *2^k -> shift. */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
			/* [elided in excerpt] */
			if (ins->inst_imm == 0) {
				ins->opcode = OP_ICONST;
			/* [elided in excerpt: `imm` is declared in an elided line -- confirm against full file] */
			imm = mono_is_power_of_two (ins->inst_imm);
			/* [elided in excerpt: if (imm > 0) { ... ] */
				ins->opcode = OP_SHL_IMM;
				ins->inst_imm = imm;
			if (!ppc_is_imm16 (ins->inst_imm)) {
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = map_to_reg_reg_op (ins->opcode);
		case OP_LOCALLOC_IMM:
			/* localloc only has a reg form: load the size into a vreg. */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = OP_LOCALLOC;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
		CASE_PPC64 (OP_LOADI8_MEMBASE)
		case OP_LOADU4_MEMBASE:
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI1_MEMBASE:
		case OP_LOADU1_MEMBASE:
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
		case OP_STORE_MEMBASE_REG:
		CASE_PPC64 (OP_STOREI8_MEMBASE_REG)
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI2_MEMBASE_REG:
		case OP_STOREI1_MEMBASE_REG:
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
			/* we can do two things: load the immed in a register
			 * and use an indexed load, or see if the immed can be
			 * represented as an ad_imm + a load with a smaller offset
			 * that fits. We just do the first for now, optimize later.
			 */
			if (ppc_is_imm16 (ins->inst_offset))
			/* [elided in excerpt: break] */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
		CASE_PPC64 (OP_STOREI8_MEMBASE_IMM)
			/* Materialize the stored immediate, then re-enter the switch
			 * so the store goes through the *_MEMBASE_REG lowering too. */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			goto loop_start; /* make it handle the possibly big ins->inst_offset */
		/* [elided in excerpt: OP_R4CONST/OP_R8CONST case labels] */
			if (cfg->compile_aot) {
				/* Keep these in the aot case */
			/* [elided in excerpt] */
			/* Turn the fp constant into a load from its address. */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = (gulong)ins->inst_p0;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->inst_basereg = temp->dreg;
			ins->inst_offset = 0;
			ins->opcode = ins->opcode == OP_R4CONST ? OP_LOADR4_MEMBASE : OP_LOADR8_MEMBASE;
			/* make it handle the possibly big ins->inst_offset
			 * later optimize to use lis + load_membase
			 */
	/* [elided in excerpt: default, last_ins tracking, loop close] */
	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
/*
 * emit_float_to_int:
 * Emit code converting the float in `sreg` to an integer of `size`
 * bytes (1/2/4/[8]) in `dreg`, sign- or zero-extended per `is_signed`.
 * Uses fctidz/fctiwz into scratch ppc_f0, bounces the result through
 * the stack slot at cfg->arch.fp_conv_var_offset, then masks/extends.
 *
 * NOTE(review): braces, #else/#endif lines and the declaration of
 * `sub_offset` are elided in this excerpt; tokens are as visible.
 */
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
	long offset = cfg->arch.fp_conv_var_offset;
	/* [elided in excerpt: `sub_offset` declaration -- confirm against full file] */
	/* sreg is a float, dreg is an integer reg. ppc_f0 is used a scratch */
#ifdef __mono_ppc64__
	/* Truncate toward zero to a 64-bit integer in ppc_f0. */
	ppc_fctidz (code, ppc_f0, sreg);
	/* [elided in excerpt: #else] 32-bit truncation. */
	ppc_fctiwz (code, ppc_f0, sreg);
	/* Spill ppc_f0 to the conversion slot, reload as an integer. */
	if (ppc_is_imm16 (offset + sub_offset)) {
		ppc_stfd (code, ppc_f0, offset, cfg->frame_reg);
		ppc_ldr (code, dreg, offset + sub_offset, cfg->frame_reg);
		ppc_lwz (code, dreg, offset + sub_offset, cfg->frame_reg);
	/* [elided in excerpt: else arm] offset doesn't fit: compute the address. */
		ppc_load (code, dreg, offset);
		ppc_add (code, dreg, dreg, cfg->frame_reg);
		ppc_stfd (code, ppc_f0, 0, dreg);
		ppc_ldr (code, dreg, sub_offset, dreg);
		ppc_lwz (code, dreg, sub_offset, dreg);
	/* Narrow / extend to the requested size. */
	ppc_andid (code, dreg, dreg, 0xff);
	ppc_andid (code, dreg, dreg, 0xffff);
#ifdef __mono_ppc64__
	/* Zero-extend a 32-bit unsigned result on 64-bit. */
	ppc_clrldi (code, dreg, dreg, 32);
	/* Signed narrowing variants. */
	ppc_extsb (code, dreg, dreg);
	ppc_extsh (code, dreg, dreg);
#ifdef __mono_ppc64__
	ppc_extsw (code, dreg, dreg);
/*
 * emit_thunk:
 * Write a branch thunk at `code`: load `target` into r0, move it to
 * CTR and branch unconditionally through CTR, then flush the icache
 * over the emitted bytes.
 *
 * NOTE(review): braces and the declaration of `p` (saved start of the
 * thunk, used for the icache flush) are elided in this excerpt.
 */
emit_thunk (guint8 *code, gconstpointer target)
	/* 2 bytes on 32bit, 5 bytes on 64bit */
	ppc_load_sequence (code, ppc_r0, target);
	ppc_mtctr (code, ppc_r0);
	ppc_bcctr (code, PPC_BR_ALWAYS, 0);
	mono_arch_flush_icache (p, code - p);
/*
 * handle_thunk:
 * Patch the call at `code` to reach `target` through a branch thunk
 * when a direct branch cannot span the distance.  During JITting the
 * thunk is taken from cfg->arch's bump-allocated thunk area; after
 * JITting (cfg == NULL path) the thunk area is located via the jit
 * info table, an existing thunk for `target` is reused if present,
 * otherwise a free slot (first word == 0) is claimed under the
 * arch lock.  Aborts (g_assert_not_reached) when the area is full.
 *
 * NOTE(review): braces, some declarations (`thunks`, `thunks_size`,
 * `p`) and several control-flow lines are elided in this excerpt.
 */
handle_thunk (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target)
	MonoJitInfo *ji = NULL;
	MonoThunkJitInfo *info;
	guint8 *orig_target;
	guint8 *target_thunk;
	/* [elided in excerpt: declarations of thunks/thunks_size and domain check] */
	domain = mono_domain_get ();
	/*
	 * This can be called multiple times during JITting,
	 * save the current position in cfg->arch to avoid
	 * doing a O(n^2) search.
	 */
	if (!cfg->arch.thunks) {
		cfg->arch.thunks = cfg->thunks;
		cfg->arch.thunks_size = cfg->thunk_area;
	thunks = cfg->arch.thunks;
	thunks_size = cfg->arch.thunks_size;
	/* [elided in excerpt: the exhausted-area check guarding this abort] */
	g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, mono_method_full_name (cfg->method, TRUE));
	g_assert_not_reached ();
	/* Claim the next free slot (must still be zeroed). */
	g_assert (*(guint32*)thunks == 0);
	emit_thunk (thunks, target);
	ppc_patch (code, thunks);
	cfg->arch.thunks += THUNK_SIZE;
	cfg->arch.thunks_size -= THUNK_SIZE;
	/* [elided in excerpt: else arm -- no cfg, locate the thunk area via jit info] */
	ji = mini_jit_info_table_find (domain, (char *) code, NULL);
	info = mono_jit_info_get_thunk_info (ji);
	thunks = (guint8 *) ji->code_start + info->thunks_offset;
	thunks_size = info->thunks_size;
	orig_target = mono_arch_get_call_target (code + 4);
	mono_mini_arch_lock ();
	target_thunk = NULL;
	if (orig_target >= thunks && orig_target < thunks + thunks_size) {
		/* The call already points to a thunk, because of trampolines etc. */
		target_thunk = orig_target;
	/* [elided in excerpt: else arm -- scan the area for a reusable or free slot] */
	for (p = thunks; p < thunks + thunks_size; p += THUNK_SIZE) {
		if (((guint32 *) p) [0] == 0) {
			/* [elided in excerpt: free slot found] */
		/* ppc64 requires 5 instructions, 32bit two instructions */
#ifdef __mono_ppc64__
		const int const_load_size = 5;
		const int const_load_size = 2;
		/* Render the load sequence for `target` and compare against the slot. */
		guint32 load [const_load_size];
		guchar *templ = (guchar *) load;
		ppc_load_sequence (templ, ppc_r0, target);
		if (!memcmp (p, load, const_load_size)) {
			/* Thunk already points to target */
	// g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
	if (!target_thunk) {
		mono_mini_arch_unlock ();
		g_print ("thunk failed %p->%p, thunk space=%d method %s", code, target, thunks_size, cfg ? mono_method_full_name (cfg->method, TRUE) : mono_method_full_name (jinfo_get_method (ji), TRUE));
		g_assert_not_reached ();
	emit_thunk (target_thunk, target);
	ppc_patch (code, target_thunk);
	mono_mini_arch_unlock ();
/*
 * patch_ins:
 * Store the 4-byte instruction word `ins` at `code` and flush the
 * icache for that word so the CPU sees the new instruction.
 * (Braces are elided in this excerpt.)
 */
patch_ins (guint8 *code, guint32 ins)
	*(guint32*)code = ins;
	mono_arch_flush_icache (code, 4);
/*
 * ppc_patch_full:
 * Patch the branch/call instruction at `code` to transfer to `target`.
 * For b/bl (I-form, primary opcode 18) it prefers a relative branch
 * when the displacement fits the 26-bit LI field (+/- 32 MB,
 * 33554431 = 2^25 - 1), falls back to an absolute branch (AA bit, | 2)
 * when the target address itself fits, and otherwise routes the call
 * through a thunk (handle_thunk).  Conditional branches get their
 * 16-bit displacement rewritten in place.  lis/ori... load sequences
 * followed by mtlr/blrl or bcctr are rewritten wholesale with a new
 * load of `target` (function-descriptor aware on ppc64).
 *
 * NOTE(review): braces, several if/else/#endif lines and some
 * declarations (`ovf`, `seq` on the 32-bit path) are elided in this
 * excerpt; code tokens are reproduced as visible.
 */
ppc_patch_full (MonoCompile *cfg, MonoDomain *domain, guchar *code, const guchar *target, gboolean is_fd)
	guint32 ins = *(guint32*)code;
	guint32 prim = ins >> 26;
	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
	/* [elided in excerpt: prim == 18 (b/bl) branch of the dispatch] */
	// prefer relative branches, they are more position independent (e.g. for AOT compilation).
	gint diff = target - code;
	/* [elided in excerpt: diff >= 0 guard] */
	if (diff <= 33554431){
		/* Rebuild as I-form: opcode 18, LI = diff, keep the LK (link) bit. */
		ins = (18 << 26) | (diff) | (ins & 1);
		patch_ins (code, ins);
	/* diff between 0 and -33554432 */
	if (diff >= -33554432){
		ins = (18 << 26) | (diff & ~0xfc000000) | (ins & 1);
		patch_ins (code, ins);
	/* Relative distance too large: try an absolute branch (set AA bit, | 2). */
	if ((glong)target >= 0){
		if ((glong)target <= 33554431){
			ins = (18 << 26) | ((gulong) target) | (ins & 1) | 2;
			patch_ins (code, ins);
	if ((glong)target >= -33554432){
		ins = (18 << 26) | (((gulong)target) & ~0xfc000000) | (ins & 1) | 2;
		patch_ins (code, ins);
	/* Neither relative nor absolute reach: go through a thunk. */
	handle_thunk (cfg, domain, code, target);
	g_assert_not_reached ();
	/* [elided in excerpt: prim == 16 (bc) branch -- absolute 16-bit displacement] */
	guint32 li = (gulong)target;
	ins = (ins & 0xffff0000) | (ins & 3);
	ovf = li & 0xffff0000;
	if (ovf != 0 && ovf != 0xffff0000)
		g_assert_not_reached ();
	// FIXME: assert the top bits of li are 0
	/* [elided in excerpt: relative bc arm] */
	gint diff = target - code;
	ins = (ins & 0xffff0000) | (ins & 3);
	ovf = diff & 0xffff0000;
	if (ovf != 0 && ovf != 0xffff0000)
		g_assert_not_reached ();
	patch_ins (code, ins);
	/* lis (prim 15) load-sequence, or blrl/blr/bcctr terminators. */
	if (prim == 15 || ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
#ifdef __mono_ppc64__
		guint32 *seq = (guint32*)code;
		guint32 *branch_ins;
		/* the trampoline code will try to patch the blrl, blr, bcctr */
		if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
			/* [elided in excerpt: back up `code` to the start of the sequence] */
			if (ppc_is_load_op (seq [-3]) || ppc_opcode (seq [-3]) == 31) /* ld || lwz || mr */
			/* [elided in excerpt: else arm -- find the branch at the end] */
			if (ppc_is_load_op (seq [5])
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
				/* With function descs we need to do more careful
				 */
				|| ppc_opcode (seq [5]) == 31 /* ld || lwz || mr */
				branch_ins = seq + 8;
				branch_ins = seq + 6;
		seq = (guint32*)code;
		/* this is the lis/ori/sldi/oris/ori/(ld/ld|mr/nop)/mtlr/blrl sequence */
		g_assert (mono_ppc_is_direct_call_sequence (branch_ins));
		if (ppc_is_load_op (seq [5])) {
			g_assert (ppc_is_load_op (seq [6]));
			/* [elided in excerpt: non-descriptor target -- replace the loads with mr */
			guint8 *buf = (guint8*)&seq [5];
			ppc_mr (buf, PPC_CALL_REG, ppc_r12);
		/* Resolve a function descriptor to its entry address. */
		target = (const guchar*)mono_get_addr_from_ftnptr ((gpointer)target);
		/* FIXME: make this thread safe */
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
		/* FIXME: we're assuming we're using r12 here */
		ppc_load_ptr_sequence (code, ppc_r12, target);
		ppc_load_ptr_sequence (code, PPC_CALL_REG, target);
		mono_arch_flush_icache ((guint8*)seq, 28);
		/* [elided in excerpt: #else -- 32-bit variant] */
		/* the trampoline code will try to patch the blrl, blr, bcctr */
		if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
			/* [elided in excerpt: back up `code`] */
		/* this is the lis/ori/mtlr/blrl sequence */
		seq = (guint32*)code;
		g_assert ((seq [0] >> 26) == 15);
		g_assert ((seq [1] >> 26) == 24);
		g_assert ((seq [2] >> 26) == 31);
		g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420);
		/* FIXME: make this thread safe */
		ppc_lis (code, PPC_CALL_REG, (guint32)(target) >> 16);
		ppc_ori (code, PPC_CALL_REG, PPC_CALL_REG, (guint32)(target) & 0xffff);
		mono_arch_flush_icache (code - 8, 8);
	g_assert_not_reached ();
	// g_print ("patched with 0x%08x\n", ins);
/*
 * ppc_patch:
 * Convenience wrapper over ppc_patch_full with no compile/domain
 * context and is_fd == FALSE (target is not a function descriptor).
 * (Braces are elided in this excerpt.)
 */
ppc_patch (guchar *code, const guchar *target)
	ppc_patch_full (NULL, NULL, code, target, FALSE);
/*
 * mono_ppc_patch:
 * Exported entry point that forwards to the static ppc_patch helper.
 * (Braces are elided in this excerpt.)
 */
mono_ppc_patch (guchar *code, const guchar *target)
	ppc_patch (code, target);
/*
 * emit_move_return_value:
 * After a call instruction, move the callee's return value from the
 * ABI return register into ins->dreg.  Only the float-call arm is
 * visible in this excerpt (f1 is the PPC float return register);
 * the other call opcodes' arms are elided.
 */
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
	switch (ins->opcode) {
	/* [elided in excerpt: OP_FCALL/OP_FCALL_REG case labels] */
	case OP_FCALL_MEMBASE:
		/* Skip the move when the dreg already is the return register. */
		if (ins->dreg != ppc_f1)
			ppc_fmr (code, ins->dreg, ppc_f1);
/*
 * emit_reserve_param_area:
 * Grow the stack by cfg->param_area (rounded up to
 * MONO_ARCH_FRAME_ALIGNMENT) while keeping the ABI back-chain intact:
 * the saved sp at 0(sp) is reloaded into r0 and re-stored with a
 * store-with-update at the new, lower sp.
 * (Braces and the size==0 early-out are elided in this excerpt.)
 */
emit_reserve_param_area (MonoCompile *cfg, guint8 *code)
	long size = cfg->param_area;
	/* Round up to the frame alignment. */
	size += MONO_ARCH_FRAME_ALIGNMENT - 1;
	size &= -MONO_ARCH_FRAME_ALIGNMENT;
	/* [elided in excerpt: early return when size is 0] */
	/* Reload the back-chain word, then push it down by `size`. */
	ppc_ldptr (code, ppc_r0, 0, ppc_sp);
	if (ppc_is_imm16 (-size)) {
		ppc_stptr_update (code, ppc_r0, -size, ppc_sp);
	/* [elided in excerpt: else arm] -size doesn't fit 16 bits: use indexed update. */
		ppc_load (code, ppc_r12, -size);
		ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/*
 * emit_unreserve_param_area:
 * Inverse of emit_reserve_param_area: pop the aligned cfg->param_area
 * bytes off the stack, re-storing the back-chain word with a
 * store-with-update at the new, higher sp.
 * (Braces and the size==0 early-out are elided in this excerpt.)
 */
emit_unreserve_param_area (MonoCompile *cfg, guint8 *code)
	long size = cfg->param_area;
	/* Round up exactly as the reserve side did. */
	size += MONO_ARCH_FRAME_ALIGNMENT - 1;
	size &= -MONO_ARCH_FRAME_ALIGNMENT;
	/* [elided in excerpt: early return when size is 0] */
	ppc_ldptr (code, ppc_r0, 0, ppc_sp);
	if (ppc_is_imm16 (size)) {
		ppc_stptr_update (code, ppc_r0, size, ppc_sp);
	/* [elided in excerpt: else arm] size doesn't fit 16 bits: use indexed update. */
		ppc_load (code, ppc_r12, size);
		ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/* Clamp a shift immediate to the register width: 0-31 on 32-bit, 0-63 on 64-bit. */
#define MASK_SHIFT_IMM(i)	((i) & MONO_PPC_32_64_CASE (0x1f, 0x3f))
3015 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3017 MonoInst
*ins
, *next
;
3019 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
3020 MonoInst
*last_ins
= NULL
;
3024 /* we don't align basic blocks of loops on ppc */
3026 if (cfg
->verbose_level
> 2)
3027 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
3029 cpos
= bb
->max_offset
;
3031 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3032 const guint offset
= code
- cfg
->native_code
;
3033 set_code_cursor (cfg
, code
);
3034 max_len
= ins_get_size (ins
->opcode
);
3035 code
= realloc_code (cfg
, max_len
);
3036 // if (ins->cil_code)
3037 // g_print ("cil code\n");
3038 mono_debug_record_line_number (cfg
, ins
, offset
);
3040 switch (normalize_opcode (ins
->opcode
)) {
3041 case OP_RELAXED_NOP
:
3044 case OP_DUMMY_ICONST
:
3045 case OP_DUMMY_I8CONST
:
3046 case OP_DUMMY_R8CONST
:
3047 case OP_DUMMY_R4CONST
:
3048 case OP_NOT_REACHED
:
3051 case OP_IL_SEQ_POINT
:
3052 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3054 case OP_SEQ_POINT
: {
3057 if (cfg
->compile_aot
)
3061 * Read from the single stepping trigger page. This will cause a
3062 * SIGSEGV when single stepping is enabled.
3063 * We do this _before_ the breakpoint, so single stepping after
3064 * a breakpoint is hit will step to the next IL offset.
3066 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
3067 ppc_load (code
, ppc_r12
, (gsize
)ss_trigger_page
);
3068 ppc_ldptr (code
, ppc_r12
, 0, ppc_r12
);
3071 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3074 * A placeholder for a possible breakpoint inserted by
3075 * mono_arch_set_breakpoint ().
3077 for (i
= 0; i
< BREAKPOINT_SIZE
/ 4; ++i
)
3082 ppc_mullw (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3083 ppc_mulhw (code
, ppc_r3
, ins
->sreg1
, ins
->sreg2
);
3084 ppc_mr (code
, ppc_r4
, ppc_r0
);
3087 ppc_mullw (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3088 ppc_mulhwu (code
, ppc_r3
, ins
->sreg1
, ins
->sreg2
);
3089 ppc_mr (code
, ppc_r4
, ppc_r0
);
3091 case OP_MEMORY_BARRIER
:
3094 case OP_STOREI1_MEMBASE_REG
:
3095 if (ppc_is_imm16 (ins
->inst_offset
)) {
3096 ppc_stb (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3098 if (ppc_is_imm32 (ins
->inst_offset
)) {
3099 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3100 ppc_stb (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3102 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3103 ppc_stbx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3107 case OP_STOREI2_MEMBASE_REG
:
3108 if (ppc_is_imm16 (ins
->inst_offset
)) {
3109 ppc_sth (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3111 if (ppc_is_imm32 (ins
->inst_offset
)) {
3112 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3113 ppc_sth (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3115 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3116 ppc_sthx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3120 case OP_STORE_MEMBASE_REG
:
3121 if (ppc_is_imm16 (ins
->inst_offset
)) {
3122 ppc_stptr (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3124 if (ppc_is_imm32 (ins
->inst_offset
)) {
3125 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
3126 ppc_stptr (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
3128 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3129 ppc_stptr_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3133 #ifdef MONO_ARCH_ILP32
3134 case OP_STOREI8_MEMBASE_REG
:
3135 if (ppc_is_imm16 (ins
->inst_offset
)) {
3136 ppc_str (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
3138 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3139 ppc_str_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
3143 case OP_STOREI1_MEMINDEX
:
3144 ppc_stbx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3146 case OP_STOREI2_MEMINDEX
:
3147 ppc_sthx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3149 case OP_STORE_MEMINDEX
:
3150 ppc_stptr_indexed (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3153 g_assert_not_reached ();
3155 case OP_LOAD_MEMBASE
:
3156 if (ppc_is_imm16 (ins
->inst_offset
)) {
3157 ppc_ldptr (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3159 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3160 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3161 ppc_ldptr (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3163 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3164 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3168 case OP_LOADI4_MEMBASE
:
3169 #ifdef __mono_ppc64__
3170 if (ppc_is_imm16 (ins
->inst_offset
)) {
3171 ppc_lwa (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3173 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3174 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3175 ppc_lwa (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3177 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3178 ppc_lwax (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3183 case OP_LOADU4_MEMBASE
:
3184 if (ppc_is_imm16 (ins
->inst_offset
)) {
3185 ppc_lwz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3187 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3188 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3189 ppc_lwz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3191 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3192 ppc_lwzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3196 case OP_LOADI1_MEMBASE
:
3197 case OP_LOADU1_MEMBASE
:
3198 if (ppc_is_imm16 (ins
->inst_offset
)) {
3199 ppc_lbz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3201 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3202 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3203 ppc_lbz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3205 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3206 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3209 if (ins
->opcode
== OP_LOADI1_MEMBASE
)
3210 ppc_extsb (code
, ins
->dreg
, ins
->dreg
);
3212 case OP_LOADU2_MEMBASE
:
3213 if (ppc_is_imm16 (ins
->inst_offset
)) {
3214 ppc_lhz (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3216 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3217 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3218 ppc_lhz (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3220 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3221 ppc_lhzx (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3225 case OP_LOADI2_MEMBASE
:
3226 if (ppc_is_imm16 (ins
->inst_offset
)) {
3227 ppc_lha (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3229 if (ppc_is_imm32 (ins
->inst_offset
) && (ins
->dreg
> 0)) {
3230 ppc_addis (code
, ins
->dreg
, ins
->inst_basereg
, ppc_ha(ins
->inst_offset
));
3231 ppc_lha (code
, ins
->dreg
, ins
->inst_offset
, ins
->dreg
);
3233 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3234 ppc_lhax (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3238 #ifdef MONO_ARCH_ILP32
3239 case OP_LOADI8_MEMBASE
:
3240 if (ppc_is_imm16 (ins
->inst_offset
)) {
3241 ppc_ldr (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
3243 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
3244 ppc_ldr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3248 case OP_LOAD_MEMINDEX
:
3249 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3251 case OP_LOADI4_MEMINDEX
:
3252 #ifdef __mono_ppc64__
3253 ppc_lwax (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3256 case OP_LOADU4_MEMINDEX
:
3257 ppc_lwzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3259 case OP_LOADU2_MEMINDEX
:
3260 ppc_lhzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3262 case OP_LOADI2_MEMINDEX
:
3263 ppc_lhax (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3265 case OP_LOADU1_MEMINDEX
:
3266 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3268 case OP_LOADI1_MEMINDEX
:
3269 ppc_lbzx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3270 ppc_extsb (code
, ins
->dreg
, ins
->dreg
);
3272 case OP_ICONV_TO_I1
:
3273 CASE_PPC64 (OP_LCONV_TO_I1
)
3274 ppc_extsb (code
, ins
->dreg
, ins
->sreg1
);
3276 case OP_ICONV_TO_I2
:
3277 CASE_PPC64 (OP_LCONV_TO_I2
)
3278 ppc_extsh (code
, ins
->dreg
, ins
->sreg1
);
3280 case OP_ICONV_TO_U1
:
3281 CASE_PPC64 (OP_LCONV_TO_U1
)
3282 ppc_clrlwi (code
, ins
->dreg
, ins
->sreg1
, 24);
3284 case OP_ICONV_TO_U2
:
3285 CASE_PPC64 (OP_LCONV_TO_U2
)
3286 ppc_clrlwi (code
, ins
->dreg
, ins
->sreg1
, 16);
3290 CASE_PPC64 (OP_LCOMPARE
)
3291 L
= (sizeof (target_mgreg_t
) == 4 || ins
->opcode
== OP_ICOMPARE
) ? 0 : 1;
3293 if (next
&& compare_opcode_is_unsigned (next
->opcode
))
3294 ppc_cmpl (code
, 0, L
, ins
->sreg1
, ins
->sreg2
);
3296 ppc_cmp (code
, 0, L
, ins
->sreg1
, ins
->sreg2
);
3298 case OP_COMPARE_IMM
:
3299 case OP_ICOMPARE_IMM
:
3300 CASE_PPC64 (OP_LCOMPARE_IMM
)
3301 L
= (sizeof (target_mgreg_t
) == 4 || ins
->opcode
== OP_ICOMPARE_IMM
) ? 0 : 1;
3303 if (next
&& compare_opcode_is_unsigned (next
->opcode
)) {
3304 if (ppc_is_uimm16 (ins
->inst_imm
)) {
3305 ppc_cmpli (code
, 0, L
, ins
->sreg1
, (ins
->inst_imm
& 0xffff));
3307 g_assert_not_reached ();
3310 if (ppc_is_imm16 (ins
->inst_imm
)) {
3311 ppc_cmpi (code
, 0, L
, ins
->sreg1
, (ins
->inst_imm
& 0xffff));
3313 g_assert_not_reached ();
3319 * gdb does not like encountering a trap in the debugged code. So
3320 * instead of emitting a trap, we emit a call a C function and place a
3324 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3325 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
3326 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3327 ppc_load_func (code
, PPC_CALL_REG
, 0);
3328 ppc_mtlr (code
, PPC_CALL_REG
);
3336 ppc_addco (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3339 CASE_PPC64 (OP_LADD
)
3340 ppc_add (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3344 ppc_adde (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3347 if (ppc_is_imm16 (ins
->inst_imm
)) {
3348 ppc_addic (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3350 g_assert_not_reached ();
3355 CASE_PPC64 (OP_LADD_IMM
)
3356 if (ppc_is_imm16 (ins
->inst_imm
)) {
3357 ppc_addi (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3359 g_assert_not_reached ();
3363 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3365 ppc_addo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3366 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3367 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3368 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3370 case OP_IADD_OVF_UN
:
3371 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3373 ppc_addco (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3374 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3375 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3376 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3379 CASE_PPC64 (OP_LSUB_OVF
)
3380 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3382 ppc_subfo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3383 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3384 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3385 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3387 case OP_ISUB_OVF_UN
:
3388 CASE_PPC64 (OP_LSUB_OVF_UN
)
3389 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3391 ppc_subfc (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3392 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3393 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3394 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3396 case OP_ADD_OVF_CARRY
:
3397 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3399 ppc_addeo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3400 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3401 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3402 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3404 case OP_ADD_OVF_UN_CARRY
:
3405 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3407 ppc_addeo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3408 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3409 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3410 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3412 case OP_SUB_OVF_CARRY
:
3413 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3415 ppc_subfeo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3416 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3417 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3418 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3420 case OP_SUB_OVF_UN_CARRY
:
3421 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
3423 ppc_subfeo (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3424 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3425 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<13));
3426 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3430 ppc_subfco (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3433 CASE_PPC64 (OP_LSUB
)
3434 ppc_subf (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3438 ppc_subfe (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3442 CASE_PPC64 (OP_LSUB_IMM
)
3443 // we add the negated value
3444 if (ppc_is_imm16 (-ins
->inst_imm
))
3445 ppc_addi (code
, ins
->dreg
, ins
->sreg1
, -ins
->inst_imm
);
3447 g_assert_not_reached ();
3451 g_assert (ppc_is_imm16 (ins
->inst_imm
));
3452 ppc_subfic (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3455 ppc_subfze (code
, ins
->dreg
, ins
->sreg1
);
3458 CASE_PPC64 (OP_LAND
)
3459 /* FIXME: the ppc macros as inconsistent here: put dest as the first arg! */
3460 ppc_and (code
, ins
->sreg1
, ins
->dreg
, ins
->sreg2
);
3464 CASE_PPC64 (OP_LAND_IMM
)
3465 if (!(ins
->inst_imm
& 0xffff0000)) {
3466 ppc_andid (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3467 } else if (!(ins
->inst_imm
& 0xffff)) {
3468 ppc_andisd (code
, ins
->sreg1
, ins
->dreg
, ((guint32
)ins
->inst_imm
>> 16));
3470 g_assert_not_reached ();
3474 CASE_PPC64 (OP_LDIV
) {
3475 guint8
*divisor_is_m1
;
3476 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3478 ppc_compare_reg_imm (code
, 0, ins
->sreg2
, -1);
3479 divisor_is_m1
= code
;
3480 ppc_bc (code
, PPC_BR_FALSE
| PPC_BR_LIKELY
, PPC_BR_EQ
, 0);
3481 ppc_lis (code
, ppc_r0
, 0x8000);
3482 #ifdef __mono_ppc64__
3483 if (ins
->opcode
== OP_LDIV
)
3484 ppc_sldi (code
, ppc_r0
, ppc_r0
, 32);
3486 ppc_compare (code
, 0, ins
->sreg1
, ppc_r0
);
3487 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE
, PPC_BR_EQ
, "OverflowException");
3488 ppc_patch (divisor_is_m1
, code
);
3489 /* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3491 if (ins
->opcode
== OP_IDIV
)
3492 ppc_divwod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3493 #ifdef __mono_ppc64__
3495 ppc_divdod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3497 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3498 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3499 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "DivideByZeroException");
3503 CASE_PPC64 (OP_LDIV_UN
)
3504 if (ins
->opcode
== OP_IDIV_UN
)
3505 ppc_divwuod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3506 #ifdef __mono_ppc64__
3508 ppc_divduod (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3510 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3511 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3512 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "DivideByZeroException");
3518 g_assert_not_reached ();
3521 ppc_or (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3525 CASE_PPC64 (OP_LOR_IMM
)
3526 if (!(ins
->inst_imm
& 0xffff0000)) {
3527 ppc_ori (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3528 } else if (!(ins
->inst_imm
& 0xffff)) {
3529 ppc_oris (code
, ins
->dreg
, ins
->sreg1
, ((guint32
)(ins
->inst_imm
) >> 16));
3531 g_assert_not_reached ();
3535 CASE_PPC64 (OP_LXOR
)
3536 ppc_xor (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3540 CASE_PPC64 (OP_LXOR_IMM
)
3541 if (!(ins
->inst_imm
& 0xffff0000)) {
3542 ppc_xori (code
, ins
->sreg1
, ins
->dreg
, ins
->inst_imm
);
3543 } else if (!(ins
->inst_imm
& 0xffff)) {
3544 ppc_xoris (code
, ins
->sreg1
, ins
->dreg
, ((guint32
)(ins
->inst_imm
) >> 16));
3546 g_assert_not_reached ();
3550 CASE_PPC64 (OP_LSHL
)
3551 ppc_shift_left (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3555 CASE_PPC64 (OP_LSHL_IMM
)
3556 ppc_shift_left_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3559 ppc_sraw (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3562 ppc_shift_right_arith_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3565 if (MASK_SHIFT_IMM (ins
->inst_imm
))
3566 ppc_shift_right_imm (code
, ins
->dreg
, ins
->sreg1
, MASK_SHIFT_IMM (ins
->inst_imm
));
3568 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
3571 ppc_srw (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3574 CASE_PPC64 (OP_LNOT
)
3575 ppc_not (code
, ins
->dreg
, ins
->sreg1
);
3578 CASE_PPC64 (OP_LNEG
)
3579 ppc_neg (code
, ins
->dreg
, ins
->sreg1
);
3582 CASE_PPC64 (OP_LMUL
)
3583 ppc_multiply (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3587 CASE_PPC64 (OP_LMUL_IMM
)
3588 if (ppc_is_imm16 (ins
->inst_imm
)) {
3589 ppc_mulli (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3591 g_assert_not_reached ();
3595 CASE_PPC64 (OP_LMUL_OVF
)
3596 /* we annot use mcrxr, since it's not implemented on some processors
3597 * XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
3599 if (ins
->opcode
== OP_IMUL_OVF
)
3600 ppc_mullwo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3601 #ifdef __mono_ppc64__
3603 ppc_mulldo (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3605 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
3606 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1<<14));
3607 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, "OverflowException");
3609 case OP_IMUL_OVF_UN
:
3610 CASE_PPC64 (OP_LMUL_OVF_UN
)
3611 /* we first multiply to get the high word and compare to 0
3612 * to set the flags, then the result is discarded and then
3613 * we multiply to get the lower * bits result
3615 if (ins
->opcode
== OP_IMUL_OVF_UN
)
3616 ppc_mulhwu (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3617 #ifdef __mono_ppc64__
3619 ppc_mulhdu (code
, ppc_r0
, ins
->sreg1
, ins
->sreg2
);
3621 ppc_cmpi (code
, 0, 0, ppc_r0
, 0);
3622 EMIT_COND_SYSTEM_EXCEPTION (CEE_BNE_UN
- CEE_BEQ
, "OverflowException");
3623 ppc_multiply (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3626 ppc_load (code
, ins
->dreg
, ins
->inst_c0
);
3629 ppc_load (code
, ins
->dreg
, ins
->inst_l
);
3632 case OP_LOAD_GOTADDR
:
3633 /* The PLT implementation depends on this */
3634 g_assert (ins
->dreg
== ppc_r30
);
3636 code
= mono_arch_emit_load_got_addr (cfg
->native_code
, code
, cfg
, NULL
);
3639 // FIXME: Fix max instruction length
3640 /* XXX: This is hairy; we're casting a pointer from a union to an enum... */
3641 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)(intptr_t)ins
->inst_right
->inst_i1
, ins
->inst_right
->inst_p0
);
3642 /* arch_emit_got_access () patches this */
3643 ppc_load32 (code
, ppc_r0
, 0);
3644 ppc_ldptr_indexed (code
, ins
->dreg
, ins
->inst_basereg
, ppc_r0
);
3647 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)(intptr_t)ins
->inst_i1
, ins
->inst_p0
);
3648 ppc_load_sequence (code
, ins
->dreg
, 0);
3650 CASE_PPC32 (OP_ICONV_TO_I4
)
3651 CASE_PPC32 (OP_ICONV_TO_U4
)
3653 if (ins
->dreg
!= ins
->sreg1
)
3654 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
3657 int saved
= ins
->sreg1
;
3658 if (ins
->sreg1
== ppc_r3
) {
3659 ppc_mr (code
, ppc_r0
, ins
->sreg1
);
3662 if (ins
->sreg2
!= ppc_r3
)
3663 ppc_mr (code
, ppc_r3
, ins
->sreg2
);
3664 if (saved
!= ppc_r4
)
3665 ppc_mr (code
, ppc_r4
, saved
);
3669 if (ins
->dreg
!= ins
->sreg1
)
3670 ppc_fmr (code
, ins
->dreg
, ins
->sreg1
);
3672 case OP_MOVE_F_TO_I4
:
3673 ppc_stfs (code
, ins
->sreg1
, -4, ppc_r1
);
3674 ppc_ldptr (code
, ins
->dreg
, -4, ppc_r1
);
3676 case OP_MOVE_I4_TO_F
:
3677 ppc_stw (code
, ins
->sreg1
, -4, ppc_r1
);
3678 ppc_lfs (code
, ins
->dreg
, -4, ppc_r1
);
3680 #ifdef __mono_ppc64__
3681 case OP_MOVE_F_TO_I8
:
3682 ppc_stfd (code
, ins
->sreg1
, -8, ppc_r1
);
3683 ppc_ldptr (code
, ins
->dreg
, -8, ppc_r1
);
3685 case OP_MOVE_I8_TO_F
:
3686 ppc_stptr (code
, ins
->sreg1
, -8, ppc_r1
);
3687 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
3690 case OP_FCONV_TO_R4
:
3691 ppc_frsp (code
, ins
->dreg
, ins
->sreg1
);
3694 case OP_TAILCALL_PARAMETER
:
3695 // This opcode helps compute sizes, i.e.
3696 // of the subsequent OP_TAILCALL, but contributes no code.
3697 g_assert (ins
->next
);
3702 MonoCallInst
*call
= (MonoCallInst
*)ins
;
3705 * Keep in sync with mono_arch_emit_epilog
3707 g_assert (!cfg
->method
->save_lmf
);
3709 * Note: we can use ppc_r12 here because it is dead anyway:
3710 * we're leaving the method.
3712 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
3713 long ret_offset
= cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
;
3714 if (ppc_is_imm16 (ret_offset
)) {
3715 ppc_ldptr (code
, ppc_r0
, ret_offset
, cfg
->frame_reg
);
3717 ppc_load (code
, ppc_r12
, ret_offset
);
3718 ppc_ldptr_indexed (code
, ppc_r0
, cfg
->frame_reg
, ppc_r12
);
3720 ppc_mtlr (code
, ppc_r0
);
3723 if (ppc_is_imm16 (cfg
->stack_usage
)) {
3724 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
);
3726 /* cfg->stack_usage is an int, so we can use
3727 * an addis/addi sequence here even in 64-bit. */
3728 ppc_addis (code
, ppc_r12
, cfg
->frame_reg
, ppc_ha(cfg
->stack_usage
));
3729 ppc_addi (code
, ppc_r12
, ppc_r12
, cfg
->stack_usage
);
3731 if (!cfg
->method
->save_lmf
) {
3733 for (i
= 31; i
>= 13; --i
) {
3734 if (cfg
->used_int_regs
& (1 << i
)) {
3735 pos
+= sizeof (target_mgreg_t
);
3736 ppc_ldptr (code
, i
, -pos
, ppc_r12
);
3740 /* FIXME restore from MonoLMF: though this can't happen yet */
3743 /* Copy arguments on the stack to our argument area */
3744 if (call
->stack_usage
) {
3745 code
= emit_memcpy (code
, call
->stack_usage
, ppc_r12
, PPC_STACK_PARAM_OFFSET
, ppc_sp
, PPC_STACK_PARAM_OFFSET
);
3746 /* r12 was clobbered */
3747 g_assert (cfg
->frame_reg
== ppc_sp
);
3748 if (ppc_is_imm16 (cfg
->stack_usage
)) {
3749 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
);
3751 /* cfg->stack_usage is an int, so we can use
3752 * an addis/addi sequence here even in 64-bit. */
3753 ppc_addis (code
, ppc_r12
, cfg
->frame_reg
, ppc_ha(cfg
->stack_usage
));
3754 ppc_addi (code
, ppc_r12
, ppc_r12
, cfg
->stack_usage
);
3758 ppc_mr (code
, ppc_sp
, ppc_r12
);
3759 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
3760 cfg
->thunk_area
+= THUNK_SIZE
;
3761 if (cfg
->compile_aot
) {
3762 /* arch_emit_got_access () patches this */
3763 ppc_load32 (code
, ppc_r0
, 0);
3764 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3765 ppc_ldptr_indexed (code
, ppc_r12
, ppc_r30
, ppc_r0
);
3766 ppc_ldptr (code
, ppc_r0
, 0, ppc_r12
);
3768 ppc_ldptr_indexed (code
, ppc_r0
, ppc_r30
, ppc_r0
);
3770 ppc_mtctr (code
, ppc_r0
);
3771 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
3778 /* ensure ins->sreg1 is not NULL */
3779 ppc_ldptr (code
, ppc_r0
, 0, ins
->sreg1
);
3782 long cookie_offset
= cfg
->sig_cookie
+ cfg
->stack_usage
;
3783 if (ppc_is_imm16 (cookie_offset
)) {
3784 ppc_addi (code
, ppc_r0
, cfg
->frame_reg
, cookie_offset
);
3786 ppc_load (code
, ppc_r0
, cookie_offset
);
3787 ppc_add (code
, ppc_r0
, cfg
->frame_reg
, ppc_r0
);
3789 ppc_stptr (code
, ppc_r0
, 0, ins
->sreg1
);
3798 call
= (MonoCallInst
*)ins
;
3799 mono_call_add_patch_info (cfg
, call
, offset
);
3800 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3801 ppc_load_func (code
, PPC_CALL_REG
, 0);
3802 ppc_mtlr (code
, PPC_CALL_REG
);
3807 /* FIXME: this should be handled somewhere else in the new jit */
3808 code
= emit_move_return_value (cfg
, ins
, code
);
3814 case OP_VOIDCALL_REG
:
3816 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
3817 ppc_ldptr (code
, ppc_r0
, 0, ins
->sreg1
);
3818 /* FIXME: if we know that this is a method, we
3819 can omit this load */
3820 ppc_ldptr (code
, ppc_r2
, 8, ins
->sreg1
);
3821 ppc_mtlr (code
, ppc_r0
);
3823 #if (_CALL_ELF == 2)
3824 if (ins
->flags
& MONO_INST_HAS_METHOD
) {
3825 // Not a global entry point
3827 // Need to set up r12 with function entry address for global entry point
3828 if (ppc_r12
!= ins
->sreg1
) {
3829 ppc_mr(code
,ppc_r12
,ins
->sreg1
);
3833 ppc_mtlr (code
, ins
->sreg1
);
3836 /* FIXME: this should be handled somewhere else in the new jit */
3837 code
= emit_move_return_value (cfg
, ins
, code
);
3839 case OP_FCALL_MEMBASE
:
3840 case OP_LCALL_MEMBASE
:
3841 case OP_VCALL_MEMBASE
:
3842 case OP_VCALL2_MEMBASE
:
3843 case OP_VOIDCALL_MEMBASE
:
3844 case OP_CALL_MEMBASE
:
3845 if (cfg
->compile_aot
&& ins
->sreg1
== ppc_r12
) {
3846 /* The trampolines clobber this */
3847 ppc_mr (code
, ppc_r29
, ins
->sreg1
);
3848 ppc_ldptr (code
, ppc_r0
, ins
->inst_offset
, ppc_r29
);
3850 ppc_ldptr (code
, ppc_r0
, ins
->inst_offset
, ins
->sreg1
);
3852 ppc_mtlr (code
, ppc_r0
);
3854 /* FIXME: this should be handled somewhere else in the new jit */
3855 code
= emit_move_return_value (cfg
, ins
, code
);
3858 guint8
* zero_loop_jump
, * zero_loop_start
;
3859 /* keep alignment */
3860 int alloca_waste
= PPC_STACK_PARAM_OFFSET
+ cfg
->param_area
+ 31;
3861 int area_offset
= alloca_waste
;
3863 ppc_addi (code
, ppc_r12
, ins
->sreg1
, alloca_waste
+ 31);
3864 /* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
3865 ppc_clear_right_imm (code
, ppc_r12
, ppc_r12
, 4);
3866 /* use ctr to store the number of words to 0 if needed */
3867 if (ins
->flags
& MONO_INST_INIT
) {
3868 /* we zero 4 bytes at a time:
3869 * we add 7 instead of 3 so that we set the counter to
3870 * at least 1, otherwise the bdnz instruction will make
3871 * it negative and iterate billions of times.
3873 ppc_addi (code
, ppc_r0
, ins
->sreg1
, 7);
3874 ppc_shift_right_arith_imm (code
, ppc_r0
, ppc_r0
, 2);
3875 ppc_mtctr (code
, ppc_r0
);
3877 ppc_ldptr (code
, ppc_r0
, 0, ppc_sp
);
3878 ppc_neg (code
, ppc_r12
, ppc_r12
);
3879 ppc_stptr_update_indexed (code
, ppc_r0
, ppc_sp
, ppc_r12
);
3881 /* FIXME: make this loop work in 8 byte
3882 increments on PPC64 */
3883 if (ins
->flags
& MONO_INST_INIT
) {
3884 /* adjust the dest reg by -4 so we can use stwu */
3885 /* we actually adjust -8 because we let the loop
3888 ppc_addi (code
, ins
->dreg
, ppc_sp
, (area_offset
- 8));
3889 ppc_li (code
, ppc_r12
, 0);
3890 zero_loop_start
= code
;
3891 ppc_stwu (code
, ppc_r12
, 4, ins
->dreg
);
3892 zero_loop_jump
= code
;
3893 ppc_bc (code
, PPC_BR_DEC_CTR_NONZERO
, 0, 0);
3894 ppc_patch (zero_loop_jump
, zero_loop_start
);
3896 ppc_addi (code
, ins
->dreg
, ppc_sp
, area_offset
);
3901 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3902 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
3903 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3904 ppc_load_func (code
, PPC_CALL_REG
, 0);
3905 ppc_mtlr (code
, PPC_CALL_REG
);
3914 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3915 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
3916 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
3917 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
3918 ppc_load_func (code
, PPC_CALL_REG
, 0);
3919 ppc_mtlr (code
, PPC_CALL_REG
);
3926 case OP_START_HANDLER
: {
3927 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3928 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3929 code
= emit_reserve_param_area (cfg
, code
);
3930 ppc_mflr (code
, ppc_r0
);
3931 if (ppc_is_imm16 (spvar
->inst_offset
)) {
3932 ppc_stptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3934 ppc_load (code
, ppc_r12
, spvar
->inst_offset
);
3935 ppc_stptr_indexed (code
, ppc_r0
, ppc_r12
, spvar
->inst_basereg
);
3939 case OP_ENDFILTER
: {
3940 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3941 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3942 code
= emit_unreserve_param_area (cfg
, code
);
3943 if (ins
->sreg1
!= ppc_r3
)
3944 ppc_mr (code
, ppc_r3
, ins
->sreg1
);
3945 if (ppc_is_imm16 (spvar
->inst_offset
)) {
3946 ppc_ldptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3948 ppc_load (code
, ppc_r12
, spvar
->inst_offset
);
3949 ppc_ldptr_indexed (code
, ppc_r0
, spvar
->inst_basereg
, ppc_r12
);
3951 ppc_mtlr (code
, ppc_r0
);
3955 case OP_ENDFINALLY
: {
3956 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3957 g_assert (spvar
->inst_basereg
!= ppc_sp
);
3958 code
= emit_unreserve_param_area (cfg
, code
);
3959 ppc_ldptr (code
, ppc_r0
, spvar
->inst_offset
, spvar
->inst_basereg
);
3960 ppc_mtlr (code
, ppc_r0
);
3964 case OP_CALL_HANDLER
:
3965 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3967 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
3968 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
3971 ins
->inst_c0
= code
- cfg
->native_code
;
3974 /*if (ins->inst_target_bb->native_offset) {
3976 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3978 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3983 ppc_mtctr (code
, ins
->sreg1
);
3984 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
3987 ppc_li (code
, ins
->dreg
, 0);
3988 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_EQ
, 2);
3989 ppc_li (code
, ins
->dreg
, 1);
3993 CASE_PPC64 (OP_LCEQ
)
3994 ppc_li (code
, ins
->dreg
, 0);
3995 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 2);
3996 ppc_li (code
, ins
->dreg
, 1);
4002 CASE_PPC64 (OP_LCLT
)
4003 CASE_PPC64 (OP_LCLT_UN
)
4004 ppc_li (code
, ins
->dreg
, 1);
4005 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 2);
4006 ppc_li (code
, ins
->dreg
, 0);
4010 ppc_li (code
, ins
->dreg
, 1);
4011 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_LT
, 2);
4012 ppc_li (code
, ins
->dreg
, 0);
4018 CASE_PPC64 (OP_LCGT
)
4019 CASE_PPC64 (OP_LCGT_UN
)
4020 ppc_li (code
, ins
->dreg
, 1);
4021 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_GT
, 2);
4022 ppc_li (code
, ins
->dreg
, 0);
4026 ppc_li (code
, ins
->dreg
, 1);
4027 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_GT
, 2);
4028 ppc_li (code
, ins
->dreg
, 0);
4030 case OP_COND_EXC_EQ
:
4031 case OP_COND_EXC_NE_UN
:
4032 case OP_COND_EXC_LT
:
4033 case OP_COND_EXC_LT_UN
:
4034 case OP_COND_EXC_GT
:
4035 case OP_COND_EXC_GT_UN
:
4036 case OP_COND_EXC_GE
:
4037 case OP_COND_EXC_GE_UN
:
4038 case OP_COND_EXC_LE
:
4039 case OP_COND_EXC_LE_UN
:
4040 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, (const char*)ins
->inst_p1
);
4042 case OP_COND_EXC_IEQ
:
4043 case OP_COND_EXC_INE_UN
:
4044 case OP_COND_EXC_ILT
:
4045 case OP_COND_EXC_ILT_UN
:
4046 case OP_COND_EXC_IGT
:
4047 case OP_COND_EXC_IGT_UN
:
4048 case OP_COND_EXC_IGE
:
4049 case OP_COND_EXC_IGE_UN
:
4050 case OP_COND_EXC_ILE
:
4051 case OP_COND_EXC_ILE_UN
:
4052 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, (const char*)ins
->inst_p1
);
4064 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
4067 /* floating point opcodes */
4069 g_assert (cfg
->compile_aot
);
4071 /* FIXME: Optimize this */
4073 ppc_mflr (code
, ppc_r12
);
4075 *(double*)code
= *(double*)ins
->inst_p0
;
4077 ppc_lfd (code
, ins
->dreg
, 8, ppc_r12
);
4080 g_assert_not_reached ();
4082 case OP_STORER8_MEMBASE_REG
:
4083 if (ppc_is_imm16 (ins
->inst_offset
)) {
4084 ppc_stfd (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4086 if (ppc_is_imm32 (ins
->inst_offset
)) {
4087 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4088 ppc_stfd (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
4090 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4091 ppc_stfdx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4095 case OP_LOADR8_MEMBASE
:
4096 if (ppc_is_imm16 (ins
->inst_offset
)) {
4097 ppc_lfd (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
4099 if (ppc_is_imm32 (ins
->inst_offset
)) {
4100 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4101 ppc_lfd (code
, ins
->dreg
, ins
->inst_offset
, ppc_r11
);
4103 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4104 ppc_lfdx (code
, ins
->dreg
, ins
->inst_destbasereg
, ppc_r0
);
4108 case OP_STORER4_MEMBASE_REG
:
4109 ppc_frsp (code
, ins
->sreg1
, ins
->sreg1
);
4110 if (ppc_is_imm16 (ins
->inst_offset
)) {
4111 ppc_stfs (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4113 if (ppc_is_imm32 (ins
->inst_offset
)) {
4114 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4115 ppc_stfs (code
, ins
->sreg1
, ins
->inst_offset
, ppc_r11
);
4117 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4118 ppc_stfsx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4122 case OP_LOADR4_MEMBASE
:
4123 if (ppc_is_imm16 (ins
->inst_offset
)) {
4124 ppc_lfs (code
, ins
->dreg
, ins
->inst_offset
, ins
->inst_basereg
);
4126 if (ppc_is_imm32 (ins
->inst_offset
)) {
4127 ppc_addis (code
, ppc_r11
, ins
->inst_destbasereg
, ppc_ha(ins
->inst_offset
));
4128 ppc_lfs (code
, ins
->dreg
, ins
->inst_offset
, ppc_r11
);
4130 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4131 ppc_lfsx (code
, ins
->dreg
, ins
->inst_destbasereg
, ppc_r0
);
4135 case OP_LOADR4_MEMINDEX
:
4136 ppc_lfsx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4138 case OP_LOADR8_MEMINDEX
:
4139 ppc_lfdx (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4141 case OP_STORER4_MEMINDEX
:
4142 ppc_frsp (code
, ins
->sreg1
, ins
->sreg1
);
4143 ppc_stfsx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4145 case OP_STORER8_MEMINDEX
:
4146 ppc_stfdx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4149 case CEE_CONV_R4
: /* FIXME: change precision */
4151 g_assert_not_reached ();
4152 case OP_FCONV_TO_I1
:
4153 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
4155 case OP_FCONV_TO_U1
:
4156 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
4158 case OP_FCONV_TO_I2
:
4159 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
4161 case OP_FCONV_TO_U2
:
4162 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
4164 case OP_FCONV_TO_I4
:
4166 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
4168 case OP_FCONV_TO_U4
:
4170 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
4172 case OP_LCONV_TO_R_UN
:
4173 g_assert_not_reached ();
4174 /* Implemented as helper calls */
4176 case OP_LCONV_TO_OVF_I4_2
:
4177 case OP_LCONV_TO_OVF_I
: {
4178 #ifdef __mono_ppc64__
4181 guint8
*negative_branch
, *msword_positive_branch
, *msword_negative_branch
, *ovf_ex_target
;
4182 // Check if its negative
4183 ppc_cmpi (code
, 0, 0, ins
->sreg1
, 0);
4184 negative_branch
= code
;
4185 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 0);
4186 // Its positive msword == 0
4187 ppc_cmpi (code
, 0, 0, ins
->sreg2
, 0);
4188 msword_positive_branch
= code
;
4189 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_EQ
, 0);
4191 ovf_ex_target
= code
;
4192 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS
, 0, "OverflowException");
4194 ppc_patch (negative_branch
, code
);
4195 ppc_cmpi (code
, 0, 0, ins
->sreg2
, -1);
4196 msword_negative_branch
= code
;
4197 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4198 ppc_patch (msword_negative_branch
, ovf_ex_target
);
4200 ppc_patch (msword_positive_branch
, code
);
4201 if (ins
->dreg
!= ins
->sreg1
)
4202 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
4207 ppc_frind (code
, ins
->dreg
, ins
->sreg1
);
4210 ppc_frizd (code
, ins
->dreg
, ins
->sreg1
);
4213 ppc_fripd (code
, ins
->dreg
, ins
->sreg1
);
4216 ppc_frimd (code
, ins
->dreg
, ins
->sreg1
);
4219 ppc_fabsd (code
, ins
->dreg
, ins
->sreg1
);
4222 ppc_fsqrtsd (code
, ins
->dreg
, ins
->sreg1
);
4225 ppc_fsqrtd (code
, ins
->dreg
, ins
->sreg1
);
4228 ppc_fadd (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4231 ppc_fsub (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4234 ppc_fmul (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4237 ppc_fdiv (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4240 ppc_fneg (code
, ins
->dreg
, ins
->sreg1
);
4244 g_assert_not_reached ();
4246 /* These min/max require POWER5 */
4248 ppc_cmp (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4249 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4252 ppc_cmpl (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4253 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4256 ppc_cmp (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4257 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4260 ppc_cmpl (code
, 0, 0, ins
->sreg1
, ins
->sreg2
);
4261 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4263 CASE_PPC64 (OP_LMIN
)
4264 ppc_cmp (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4265 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4267 CASE_PPC64 (OP_LMIN_UN
)
4268 ppc_cmpl (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4269 ppc_isellt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4271 CASE_PPC64 (OP_LMAX
)
4272 ppc_cmp (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4273 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4275 CASE_PPC64 (OP_LMAX_UN
)
4276 ppc_cmpl (code
, 0, 1, ins
->sreg1
, ins
->sreg2
);
4277 ppc_iselgt (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4280 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4284 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4285 ppc_li (code
, ins
->dreg
, 1);
4286 ppc_bc (code
, ins
->opcode
== OP_FCEQ
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_EQ
, 2);
4287 ppc_li (code
, ins
->dreg
, 0);
4291 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4292 ppc_li (code
, ins
->dreg
, 1);
4293 ppc_bc (code
, ins
->opcode
== OP_FCLT
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_LT
, 2);
4294 ppc_li (code
, ins
->dreg
, 0);
4297 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4298 ppc_li (code
, ins
->dreg
, 1);
4299 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 3);
4300 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_LT
, 2);
4301 ppc_li (code
, ins
->dreg
, 0);
4305 ppc_fcmpo (code
, 0, ins
->sreg1
, ins
->sreg2
);
4306 ppc_li (code
, ins
->dreg
, 1);
4307 ppc_bc (code
, ins
->opcode
== OP_FCGT
? PPC_BR_TRUE
: PPC_BR_FALSE
, PPC_BR_GT
, 2);
4308 ppc_li (code
, ins
->dreg
, 0);
4311 ppc_fcmpu (code
, 0, ins
->sreg1
, ins
->sreg2
);
4312 ppc_li (code
, ins
->dreg
, 1);
4313 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 3);
4314 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_GT
, 2);
4315 ppc_li (code
, ins
->dreg
, 0);
4318 EMIT_COND_BRANCH (ins
, CEE_BEQ
- CEE_BEQ
);
4321 EMIT_COND_BRANCH (ins
, CEE_BNE_UN
- CEE_BEQ
);
4324 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4325 EMIT_COND_BRANCH (ins
, CEE_BLT
- CEE_BEQ
);
4328 EMIT_COND_BRANCH_FLAGS (ins
, PPC_BR_TRUE
, PPC_BR_SO
);
4329 EMIT_COND_BRANCH (ins
, CEE_BLT_UN
- CEE_BEQ
);
4332 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4333 EMIT_COND_BRANCH (ins
, CEE_BGT
- CEE_BEQ
);
4336 EMIT_COND_BRANCH_FLAGS (ins
, PPC_BR_TRUE
, PPC_BR_SO
);
4337 EMIT_COND_BRANCH (ins
, CEE_BGT_UN
- CEE_BEQ
);
4340 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4341 EMIT_COND_BRANCH (ins
, CEE_BGE
- CEE_BEQ
);
4344 EMIT_COND_BRANCH (ins
, CEE_BGE_UN
- CEE_BEQ
);
4347 ppc_bc (code
, PPC_BR_TRUE
, PPC_BR_SO
, 2);
4348 EMIT_COND_BRANCH (ins
, CEE_BLE
- CEE_BEQ
);
4351 EMIT_COND_BRANCH (ins
, CEE_BLE_UN
- CEE_BEQ
);
4354 g_assert_not_reached ();
4355 case OP_PPC_CHECK_FINITE
: {
4356 ppc_rlwinm (code
, ins
->sreg1
, ins
->sreg1
, 0, 1, 31);
4357 ppc_addis (code
, ins
->sreg1
, ins
->sreg1
, -32752);
4358 ppc_rlwinmd (code
, ins
->sreg1
, ins
->sreg1
, 1, 31, 31);
4359 EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ
- CEE_BEQ
, "ArithmeticException");
4362 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_c1
, ins
->inst_p0
);
4363 #ifdef __mono_ppc64__
4364 ppc_load_sequence (code
, ins
->dreg
, (guint64
)0x0f0f0f0f0f0f0f0fLL
);
4366 ppc_load_sequence (code
, ins
->dreg
, (gulong
)0x0f0f0f0fL
);
4371 #ifdef __mono_ppc64__
4372 case OP_ICONV_TO_I4
:
4374 ppc_extsw (code
, ins
->dreg
, ins
->sreg1
);
4376 case OP_ICONV_TO_U4
:
4378 ppc_clrldi (code
, ins
->dreg
, ins
->sreg1
, 32);
4380 case OP_ICONV_TO_R4
:
4381 case OP_ICONV_TO_R8
:
4382 case OP_LCONV_TO_R4
:
4383 case OP_LCONV_TO_R8
: {
4385 if (ins
->opcode
== OP_ICONV_TO_R4
|| ins
->opcode
== OP_ICONV_TO_R8
) {
4386 ppc_extsw (code
, ppc_r0
, ins
->sreg1
);
4391 if (cpu_hw_caps
& PPC_MOVE_FPR_GPR
) {
4392 ppc_mffgpr (code
, ins
->dreg
, tmp
);
4394 ppc_str (code
, tmp
, -8, ppc_r1
);
4395 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
4397 ppc_fcfid (code
, ins
->dreg
, ins
->dreg
);
4398 if (ins
->opcode
== OP_ICONV_TO_R4
|| ins
->opcode
== OP_LCONV_TO_R4
)
4399 ppc_frsp (code
, ins
->dreg
, ins
->dreg
);
4403 ppc_srad (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4406 ppc_srd (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4409 /* check XER [0-3] (SO, OV, CA): we can't use mcrxr
4411 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
4412 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1 << 13)); /* CA */
4413 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, (const char*)ins
->inst_p1
);
4415 case OP_COND_EXC_OV
:
4416 ppc_mfspr (code
, ppc_r0
, ppc_xer
);
4417 ppc_andisd (code
, ppc_r0
, ppc_r0
, (1 << 14)); /* OV */
4418 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE
, PPC_BR_EQ
, (const char*)ins
->inst_p1
);
4430 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_LBEQ
);
4432 case OP_FCONV_TO_I8
:
4433 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 8, TRUE
);
4435 case OP_FCONV_TO_U8
:
4436 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 8, FALSE
);
4438 case OP_STOREI4_MEMBASE_REG
:
4439 if (ppc_is_imm16 (ins
->inst_offset
)) {
4440 ppc_stw (code
, ins
->sreg1
, ins
->inst_offset
, ins
->inst_destbasereg
);
4442 ppc_load (code
, ppc_r0
, ins
->inst_offset
);
4443 ppc_stwx (code
, ins
->sreg1
, ins
->inst_destbasereg
, ppc_r0
);
4446 case OP_STOREI4_MEMINDEX
:
4447 ppc_stwx (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_destbasereg
);
4450 ppc_srawi (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4452 case OP_ISHR_UN_IMM
:
4453 if (ins
->inst_imm
& 0x1f)
4454 ppc_srwi (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4456 ppc_mr (code
, ins
->dreg
, ins
->sreg1
);
4459 case OP_ICONV_TO_R4
:
4460 case OP_ICONV_TO_R8
: {
4461 if (cpu_hw_caps
& PPC_ISA_64
) {
4462 ppc_srawi(code
, ppc_r0
, ins
->sreg1
, 31);
4463 ppc_stw (code
, ppc_r0
, -8, ppc_r1
);
4464 ppc_stw (code
, ins
->sreg1
, -4, ppc_r1
);
4465 ppc_lfd (code
, ins
->dreg
, -8, ppc_r1
);
4466 ppc_fcfid (code
, ins
->dreg
, ins
->dreg
);
4467 if (ins
->opcode
== OP_ICONV_TO_R4
)
4468 ppc_frsp (code
, ins
->dreg
, ins
->dreg
);
4474 case OP_ATOMIC_ADD_I4
:
4475 CASE_PPC64 (OP_ATOMIC_ADD_I8
) {
4476 int location
= ins
->inst_basereg
;
4477 int addend
= ins
->sreg2
;
4478 guint8
*loop
, *branch
;
4479 g_assert (ins
->inst_offset
== 0);
4483 if (ins
->opcode
== OP_ATOMIC_ADD_I4
)
4484 ppc_lwarx (code
, ppc_r0
, 0, location
);
4485 #ifdef __mono_ppc64__
4487 ppc_ldarx (code
, ppc_r0
, 0, location
);
4490 ppc_add (code
, ppc_r0
, ppc_r0
, addend
);
4492 if (ins
->opcode
== OP_ATOMIC_ADD_I4
)
4493 ppc_stwcxd (code
, ppc_r0
, 0, location
);
4494 #ifdef __mono_ppc64__
4496 ppc_stdcxd (code
, ppc_r0
, 0, location
);
4500 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4501 ppc_patch (branch
, loop
);
4504 ppc_mr (code
, ins
->dreg
, ppc_r0
);
4507 case OP_ATOMIC_CAS_I4
:
4508 CASE_PPC64 (OP_ATOMIC_CAS_I8
) {
4509 int location
= ins
->sreg1
;
4510 int value
= ins
->sreg2
;
4511 int comparand
= ins
->sreg3
;
4512 guint8
*start
, *not_equal
, *lost_reservation
;
4516 if (ins
->opcode
== OP_ATOMIC_CAS_I4
)
4517 ppc_lwarx (code
, ppc_r0
, 0, location
);
4518 #ifdef __mono_ppc64__
4520 ppc_ldarx (code
, ppc_r0
, 0, location
);
4523 ppc_cmp (code
, 0, ins
->opcode
== OP_ATOMIC_CAS_I4
? 0 : 1, ppc_r0
, comparand
);
4525 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4527 if (ins
->opcode
== OP_ATOMIC_CAS_I4
)
4528 ppc_stwcxd (code
, value
, 0, location
);
4529 #ifdef __mono_ppc64__
4531 ppc_stdcxd (code
, value
, 0, location
);
4534 lost_reservation
= code
;
4535 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
4536 ppc_patch (lost_reservation
, start
);
4537 ppc_patch (not_equal
, code
);
4540 ppc_mr (code
, ins
->dreg
, ppc_r0
);
4543 case OP_LIVERANGE_START
: {
4544 if (cfg
->verbose_level
> 1)
4545 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4546 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
4549 case OP_LIVERANGE_END
: {
4550 if (cfg
->verbose_level
> 1)
4551 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
4552 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
4555 case OP_GC_SAFE_POINT
:
4559 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
4560 g_assert_not_reached ();
4563 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
4564 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
4565 mono_inst_name (ins
->opcode
), max_len
, (glong
)(code
- cfg
->native_code
- offset
));
4566 g_assert_not_reached ();
4574 set_code_cursor (cfg
, code
);
4576 #endif /* !DISABLE_JIT */
4579 mono_arch_register_lowlevel_calls (void)
4581 /* The signature doesn't matter */
4582 mono_register_jit_icall (mono_ppc_throw_exception
, mono_icall_sig_void
, TRUE
);
#ifdef __mono_ppc64__
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
/*
 * Rewrite the four 16-bit immediate fields of the 64-bit constant-load
 * sequence emitted by ppc_load_sequence so that it materializes `val`.
 * On little-endian the immediate sits in the first halfword of each
 * 32-bit instruction, hence the even guint16 indices.
 */
#define patch_load_sequence(ip,val) do {\
		guint16 *__load = (guint16*)(ip); \
		g_assert (sizeof (val) == sizeof (gsize)); \
		__load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
		__load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
		__load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
		__load [8] = ((guint64)(gsize)(val)) & 0xffff; \
	} while (0)
#elif G_BYTE_ORDER == G_BIG_ENDIAN
/* Same as the LE variant, but on big-endian the immediate occupies the
 * second halfword of each instruction, hence the odd indices. */
#define patch_load_sequence(ip,val) do {\
		guint16 *__load = (guint16*)(ip); \
		g_assert (sizeof (val) == sizeof (gsize)); \
		__load [1] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
		__load [3] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
		__load [7] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
		__load [9] = ((guint64)(gsize)(val)) & 0xffff; \
	} while (0)
#else
#error huh? No endianess defined by compiler
#endif
#else
/* 32-bit: patch the two immediates of the lis/ori pair. */
#define patch_load_sequence(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip); \
		__lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \
		__lis_ori [3] = ((gulong)(val)) & 0xffff; \
	} while (0)
#endif
4617 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
4619 unsigned char *ip
= ji
->ip
.i
+ code
;
4620 gboolean is_fd
= FALSE
;
4623 case MONO_PATCH_INFO_IP
:
4624 patch_load_sequence (ip
, ip
);
4626 case MONO_PATCH_INFO_SWITCH
: {
4627 gpointer
*table
= (gpointer
*)ji
->data
.table
->table
;
4630 patch_load_sequence (ip
, table
);
4632 for (i
= 0; i
< ji
->data
.table
->table_size
; i
++) {
4633 table
[i
] = (glong
)ji
->data
.table
->table
[i
] + code
;
4635 /* we put into the table the absolute address, no need for ppc_patch in this case */
4638 case MONO_PATCH_INFO_METHODCONST
:
4639 case MONO_PATCH_INFO_CLASS
:
4640 case MONO_PATCH_INFO_IMAGE
:
4641 case MONO_PATCH_INFO_FIELD
:
4642 case MONO_PATCH_INFO_VTABLE
:
4643 case MONO_PATCH_INFO_IID
:
4644 case MONO_PATCH_INFO_SFLDA
:
4645 case MONO_PATCH_INFO_LDSTR
:
4646 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
4647 case MONO_PATCH_INFO_LDTOKEN
:
4648 /* from OP_AOTCONST : lis + ori */
4649 patch_load_sequence (ip
, target
);
4651 case MONO_PATCH_INFO_R4
:
4652 case MONO_PATCH_INFO_R8
:
4653 g_assert_not_reached ();
4654 *((gconstpointer
*)(ip
+ 2)) = ji
->data
.target
;
4656 case MONO_PATCH_INFO_EXC_NAME
:
4657 g_assert_not_reached ();
4658 *((gconstpointer
*)(ip
+ 1)) = ji
->data
.name
;
4660 case MONO_PATCH_INFO_NONE
:
4661 case MONO_PATCH_INFO_BB_OVF
:
4662 case MONO_PATCH_INFO_EXC_OVF
:
4663 /* everything is dealt with at epilog output time */
4665 #ifdef PPC_USES_FUNCTION_DESCRIPTOR
4666 case MONO_PATCH_INFO_JIT_ICALL_ID
:
4667 case MONO_PATCH_INFO_ABS
:
4668 case MONO_PATCH_INFO_RGCTX_FETCH
:
4669 case MONO_PATCH_INFO_JIT_ICALL_ADDR
:
4670 case MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR
:
4675 ppc_patch_full (cfg
, domain
, ip
, (const guchar
*)target
, is_fd
);
4681 * Emit code to save the registers in used_int_regs or the registers in the MonoLMF
4682 * structure at positive offset pos from register base_reg. pos is guaranteed to fit into
4683 * the instruction offset immediate for all the registers.
4686 save_registers (MonoCompile
*cfg
, guint8
* code
, int pos
, int base_reg
, gboolean save_lmf
, guint32 used_int_regs
, int cfa_offset
)
4690 for (i
= 13; i
<= 31; i
++) {
4691 if (used_int_regs
& (1 << i
)) {
4692 ppc_str (code
, i
, pos
, base_reg
);
4693 mono_emit_unwind_op_offset (cfg
, code
, i
, pos
- cfa_offset
);
4694 pos
+= sizeof (target_mgreg_t
);
4698 /* pos is the start of the MonoLMF structure */
4699 int offset
= pos
+ G_STRUCT_OFFSET (MonoLMF
, iregs
);
4700 for (i
= 13; i
<= 31; i
++) {
4701 ppc_str (code
, i
, offset
, base_reg
);
4702 mono_emit_unwind_op_offset (cfg
, code
, i
, offset
- cfa_offset
);
4703 offset
+= sizeof (target_mgreg_t
);
4705 offset
= pos
+ G_STRUCT_OFFSET (MonoLMF
, fregs
);
4706 for (i
= 14; i
< 32; i
++) {
4707 ppc_stfd (code
, i
, offset
, base_reg
);
4708 offset
+= sizeof (gdouble
);
4715 * Stack frame layout:
4717 * ------------------- sp
4718 * MonoLMF structure or saved registers
4719 * -------------------
4721 * -------------------
4723 * -------------------
4724 * param area size is cfg->param_area
4725 * -------------------
4726 * linkage area size is PPC_STACK_PARAM_OFFSET
4727 * ------------------- sp
4731 mono_arch_emit_prolog (MonoCompile
*cfg
)
4733 MonoMethod
*method
= cfg
->method
;
4735 MonoMethodSignature
*sig
;
4737 long alloc_size
, pos
, max_offset
, cfa_offset
;
4742 int tailcall_struct_index
;
4744 sig
= mono_method_signature_internal (method
);
4745 cfg
->code_size
= 512 + sig
->param_count
* 32;
4746 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
4750 /* We currently emit unwind info for aot, but don't use it */
4751 mono_emit_unwind_op_def_cfa (cfg
, code
, ppc_r1
, 0);
4753 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
4754 ppc_mflr (code
, ppc_r0
);
4755 ppc_str (code
, ppc_r0
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
4756 mono_emit_unwind_op_offset (cfg
, code
, ppc_lr
, PPC_RET_ADDR_OFFSET
);
4759 alloc_size
= cfg
->stack_offset
;
4762 if (!method
->save_lmf
) {
4763 for (i
= 31; i
>= 13; --i
) {
4764 if (cfg
->used_int_regs
& (1 << i
)) {
4765 pos
+= sizeof (target_mgreg_t
);
4769 pos
+= sizeof (MonoLMF
);
4773 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4774 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
4775 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
4776 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
4779 cfg
->stack_usage
= alloc_size
;
4780 g_assert ((alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
-1)) == 0);
4782 if (ppc_is_imm16 (-alloc_size
)) {
4783 ppc_str_update (code
, ppc_sp
, -alloc_size
, ppc_sp
);
4784 cfa_offset
= alloc_size
;
4785 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, alloc_size
);
4786 code
= save_registers (cfg
, code
, alloc_size
- pos
, ppc_sp
, method
->save_lmf
, cfg
->used_int_regs
, cfa_offset
);
4789 ppc_addi (code
, ppc_r12
, ppc_sp
, -pos
);
4790 ppc_load (code
, ppc_r0
, -alloc_size
);
4791 ppc_str_update_indexed (code
, ppc_sp
, ppc_sp
, ppc_r0
);
4792 cfa_offset
= alloc_size
;
4793 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, alloc_size
);
4794 code
= save_registers (cfg
, code
, 0, ppc_r12
, method
->save_lmf
, cfg
->used_int_regs
, cfa_offset
);
4797 if (cfg
->frame_reg
!= ppc_sp
) {
4798 ppc_mr (code
, cfg
->frame_reg
, ppc_sp
);
4799 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
4802 /* store runtime generic context */
4803 if (cfg
->rgctx_var
) {
4804 g_assert (cfg
->rgctx_var
->opcode
== OP_REGOFFSET
&&
4805 (cfg
->rgctx_var
->inst_basereg
== ppc_r1
|| cfg
->rgctx_var
->inst_basereg
== ppc_r31
));
4807 ppc_stptr (code
, MONO_ARCH_RGCTX_REG
, cfg
->rgctx_var
->inst_offset
, cfg
->rgctx_var
->inst_basereg
);
4810 /* compute max_offset in order to use short forward jumps
4811 * we always do it on ppc because the immediate displacement
4812 * for jumps is too small
4815 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
4817 bb
->max_offset
= max_offset
;
4819 MONO_BB_FOR_EACH_INS (bb
, ins
)
4820 max_offset
+= ins_get_size (ins
->opcode
);
4823 /* load arguments allocated to register from the stack */
4826 cinfo
= get_call_info (sig
);
4828 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
4829 ArgInfo
*ainfo
= &cinfo
->ret
;
4831 inst
= cfg
->vret_addr
;
4834 if (ppc_is_imm16 (inst
->inst_offset
)) {
4835 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4837 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4838 ppc_stptr_indexed (code
, ainfo
->reg
, ppc_r12
, inst
->inst_basereg
);
4842 tailcall_struct_index
= 0;
4843 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4844 ArgInfo
*ainfo
= cinfo
->args
+ i
;
4845 inst
= cfg
->args
[pos
];
4847 if (cfg
->verbose_level
> 2)
4848 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->regtype
);
4849 if (inst
->opcode
== OP_REGVAR
) {
4850 if (ainfo
->regtype
== RegTypeGeneral
)
4851 ppc_mr (code
, inst
->dreg
, ainfo
->reg
);
4852 else if (ainfo
->regtype
== RegTypeFP
)
4853 ppc_fmr (code
, inst
->dreg
, ainfo
->reg
);
4854 else if (ainfo
->regtype
== RegTypeBase
) {
4855 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
4856 ppc_ldptr (code
, inst
->dreg
, ainfo
->offset
, ppc_r12
);
4858 g_assert_not_reached ();
4860 if (cfg
->verbose_level
> 2)
4861 g_print ("Argument %ld assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
4863 /* the argument should be put on the stack: FIXME handle size != word */
4864 if (ainfo
->regtype
== RegTypeGeneral
) {
4865 switch (ainfo
->size
) {
4867 if (ppc_is_imm16 (inst
->inst_offset
)) {
4868 ppc_stb (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4870 if (ppc_is_imm32 (inst
->inst_offset
)) {
4871 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4872 ppc_stb (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4874 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4875 ppc_stbx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4880 if (ppc_is_imm16 (inst
->inst_offset
)) {
4881 ppc_sth (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4883 if (ppc_is_imm32 (inst
->inst_offset
)) {
4884 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4885 ppc_sth (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4887 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4888 ppc_sthx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4892 #ifdef __mono_ppc64__
4894 if (ppc_is_imm16 (inst
->inst_offset
)) {
4895 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4897 if (ppc_is_imm32 (inst
->inst_offset
)) {
4898 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4899 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4901 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4902 ppc_stwx (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4907 if (ppc_is_imm16 (inst
->inst_offset
)) {
4908 ppc_str (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4910 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4911 ppc_str_indexed (code
, ainfo
->reg
, ppc_r12
, inst
->inst_basereg
);
4916 if (ppc_is_imm16 (inst
->inst_offset
+ 4)) {
4917 ppc_stw (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4918 ppc_stw (code
, ainfo
->reg
+ 1, inst
->inst_offset
+ 4, inst
->inst_basereg
);
4920 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4921 ppc_addi (code
, ppc_r12
, ppc_r12
, inst
->inst_offset
);
4922 ppc_stw (code
, ainfo
->reg
, 0, ppc_r12
);
4923 ppc_stw (code
, ainfo
->reg
+ 1, 4, ppc_r12
);
4928 if (ppc_is_imm16 (inst
->inst_offset
)) {
4929 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
4931 if (ppc_is_imm32 (inst
->inst_offset
)) {
4932 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4933 ppc_stptr (code
, ainfo
->reg
, inst
->inst_offset
, ppc_r12
);
4935 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4936 ppc_stptr_indexed (code
, ainfo
->reg
, inst
->inst_basereg
, ppc_r12
);
4941 } else if (ainfo
->regtype
== RegTypeBase
) {
4942 g_assert (ppc_is_imm16 (ainfo
->offset
));
4943 /* load the previous stack pointer in r12 */
4944 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
4945 ppc_ldptr (code
, ppc_r0
, ainfo
->offset
, ppc_r12
);
4946 switch (ainfo
->size
) {
4948 if (ppc_is_imm16 (inst
->inst_offset
)) {
4949 ppc_stb (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4951 if (ppc_is_imm32 (inst
->inst_offset
)) {
4952 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4953 ppc_stb (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4955 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4956 ppc_stbx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
4961 if (ppc_is_imm16 (inst
->inst_offset
)) {
4962 ppc_sth (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4964 if (ppc_is_imm32 (inst
->inst_offset
)) {
4965 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4966 ppc_sth (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4968 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4969 ppc_sthx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
4973 #ifdef __mono_ppc64__
4975 if (ppc_is_imm16 (inst
->inst_offset
)) {
4976 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4978 if (ppc_is_imm32 (inst
->inst_offset
)) {
4979 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
4980 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
4982 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4983 ppc_stwx (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
4988 if (ppc_is_imm16 (inst
->inst_offset
)) {
4989 ppc_str (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
4991 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
4992 ppc_str_indexed (code
, ppc_r0
, ppc_r12
, inst
->inst_basereg
);
4997 g_assert (ppc_is_imm16 (ainfo
->offset
+ 4));
4998 if (ppc_is_imm16 (inst
->inst_offset
+ 4)) {
4999 ppc_stw (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
5000 ppc_lwz (code
, ppc_r0
, ainfo
->offset
+ 4, ppc_r12
);
5001 ppc_stw (code
, ppc_r0
, inst
->inst_offset
+ 4, inst
->inst_basereg
);
5003 /* use r11 to load the 2nd half of the long before we clobber r12. */
5004 ppc_lwz (code
, ppc_r11
, ainfo
->offset
+ 4, ppc_r12
);
5005 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
5006 ppc_addi (code
, ppc_r12
, ppc_r12
, inst
->inst_offset
);
5007 ppc_stw (code
, ppc_r0
, 0, ppc_r12
);
5008 ppc_stw (code
, ppc_r11
, 4, ppc_r12
);
5013 if (ppc_is_imm16 (inst
->inst_offset
)) {
5014 ppc_stptr (code
, ppc_r0
, inst
->inst_offset
, inst
->inst_basereg
);
5016 if (ppc_is_imm32 (inst
->inst_offset
)) {
5017 ppc_addis (code
, ppc_r12
, inst
->inst_basereg
, ppc_ha(inst
->inst_offset
));
5018 ppc_stptr (code
, ppc_r0
, inst
->inst_offset
, ppc_r12
);
5020 ppc_load (code
, ppc_r12
, inst
->inst_offset
);
5021 ppc_stptr_indexed (code
, ppc_r0
, inst
->inst_basereg
, ppc_r12
);
5026 } else if (ainfo
->regtype
== RegTypeFP
) {
5027 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5028 if (ainfo
->size
== 8)
5029 ppc_stfd (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
5030 else if (ainfo
->size
== 4)
5031 ppc_stfs (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
5033 g_assert_not_reached ();
5034 } else if (ainfo
->regtype
== RegTypeFPStructByVal
) {
5035 int doffset
= inst
->inst_offset
;
5039 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5040 g_assert (ppc_is_imm16 (inst
->inst_offset
+ ainfo
->vtregs
* sizeof (target_mgreg_t
)));
5041 /* FIXME: what if there is no class? */
5042 if (sig
->pinvoke
&& mono_class_from_mono_type_internal (inst
->inst_vtype
))
5043 size
= mono_class_native_size (mono_class_from_mono_type_internal (inst
->inst_vtype
), NULL
);
5044 for (cur_reg
= 0; cur_reg
< ainfo
->vtregs
; ++cur_reg
) {
5045 if (ainfo
->size
== 4) {
5046 ppc_stfs (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5048 ppc_stfd (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5050 soffset
+= ainfo
->size
;
5051 doffset
+= ainfo
->size
;
5053 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
5054 int doffset
= inst
->inst_offset
;
5058 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5059 g_assert (ppc_is_imm16 (inst
->inst_offset
+ ainfo
->vtregs
* sizeof (target_mgreg_t
)));
5060 /* FIXME: what if there is no class? */
5061 if (sig
->pinvoke
&& mono_class_from_mono_type_internal (inst
->inst_vtype
))
5062 size
= mono_class_native_size (mono_class_from_mono_type_internal (inst
->inst_vtype
), NULL
);
5063 for (cur_reg
= 0; cur_reg
< ainfo
->vtregs
; ++cur_reg
) {
5066 * Darwin handles 1 and 2 byte
5067 * structs specially by
5068 * loading h/b into the arg
5069 * register. Only done for
5073 ppc_sth (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5075 ppc_stb (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5079 #ifdef __mono_ppc64__
5081 g_assert (cur_reg
== 0);
5082 #if G_BYTE_ORDER == G_BIG_ENDIAN
5083 ppc_sldi (code
, ppc_r0
, ainfo
->reg
,
5084 (sizeof (target_mgreg_t
) - ainfo
->bytes
) * 8);
5085 ppc_stptr (code
, ppc_r0
, doffset
, inst
->inst_basereg
);
5087 if (mono_class_native_size (inst
->klass
, NULL
) == 1) {
5088 ppc_stb (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5089 } else if (mono_class_native_size (inst
->klass
, NULL
) == 2) {
5090 ppc_sth (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5091 } else if (mono_class_native_size (inst
->klass
, NULL
) == 4) { // WDS -- maybe <=4?
5092 ppc_stw (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
);
5094 ppc_stptr (code
, ainfo
->reg
+ cur_reg
, doffset
, inst
->inst_basereg
); // WDS -- Better way?
5100 ppc_stptr (code
, ainfo
->reg
+ cur_reg
, doffset
,
5101 inst
->inst_basereg
);
5104 soffset
+= sizeof (target_mgreg_t
);
5105 doffset
+= sizeof (target_mgreg_t
);
5107 if (ainfo
->vtsize
) {
5108 /* FIXME: we need to do the shifting here, too */
5111 /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */
5112 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
5113 if ((size
& MONO_PPC_32_64_CASE (3, 7)) != 0) {
5114 code
= emit_memcpy (code
, size
- soffset
,
5115 inst
->inst_basereg
, doffset
,
5116 ppc_r12
, ainfo
->offset
+ soffset
);
5118 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (target_mgreg_t
),
5119 inst
->inst_basereg
, doffset
,
5120 ppc_r12
, ainfo
->offset
+ soffset
);
5123 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
5124 /* if it was originally a RegTypeBase */
5125 if (ainfo
->offset
) {
5126 /* load the previous stack pointer in r12 */
5127 ppc_ldr (code
, ppc_r12
, 0, ppc_sp
);
5128 ppc_ldptr (code
, ppc_r12
, ainfo
->offset
, ppc_r12
);
5130 ppc_mr (code
, ppc_r12
, ainfo
->reg
);
5133 g_assert (ppc_is_imm16 (inst
->inst_offset
));
5134 code
= emit_memcpy (code
, ainfo
->vtsize
, inst
->inst_basereg
, inst
->inst_offset
, ppc_r12
, 0);
5135 /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
5137 g_assert_not_reached ();
5142 if (method
->save_lmf
) {
5143 if (cfg
->compile_aot
) {
5144 /* Compute the got address which is needed by the PLT entry */
5145 code
= mono_arch_emit_load_got_addr (cfg
->native_code
, code
, cfg
, NULL
);
5147 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5148 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern
));
5149 if ((FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) && !cfg
->compile_aot
) {
5150 ppc_load_func (code
, PPC_CALL_REG
, 0);
5151 ppc_mtlr (code
, PPC_CALL_REG
);
5156 /* we build the MonoLMF structure on the stack - see mini-ppc.h */
5157 /* lmf_offset is the offset from the previous stack pointer,
5158 * alloc_size is the total stack space allocated, so the offset
5159 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
5160 * The pointer to the struct is put in ppc_r12 (new_lmf).
5161 * The callee-saved registers are already in the MonoLMF structure
5163 ppc_addi (code
, ppc_r12
, ppc_sp
, alloc_size
- lmf_offset
);
5164 /* ppc_r3 is the result from mono_get_lmf_addr () */
5165 ppc_stptr (code
, ppc_r3
, G_STRUCT_OFFSET(MonoLMF
, lmf_addr
), ppc_r12
);
5166 /* new_lmf->previous_lmf = *lmf_addr */
5167 ppc_ldptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r3
);
5168 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r12
);
5169 /* *(lmf_addr) = r12 */
5170 ppc_stptr (code
, ppc_r12
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r3
);
5171 /* save method info */
5172 if (cfg
->compile_aot
)
5174 ppc_load (code
, ppc_r0
, 0);
5176 ppc_load_ptr (code
, ppc_r0
, method
);
5177 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, method
), ppc_r12
);
5178 ppc_stptr (code
, ppc_sp
, G_STRUCT_OFFSET(MonoLMF
, ebp
), ppc_r12
);
5179 /* save the current IP */
5180 if (cfg
->compile_aot
) {
5182 ppc_mflr (code
, ppc_r0
);
5184 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_IP
, NULL
);
5185 #ifdef __mono_ppc64__
5186 ppc_load_sequence (code
, ppc_r0
, (guint64
)0x0101010101010101LL
);
5188 ppc_load_sequence (code
, ppc_r0
, (gulong
)0x01010101L
);
5191 ppc_stptr (code
, ppc_r0
, G_STRUCT_OFFSET(MonoLMF
, eip
), ppc_r12
);
5194 set_code_cursor (cfg
, code
);
5201 mono_arch_emit_epilog (MonoCompile
*cfg
)
5203 MonoMethod
*method
= cfg
->method
;
5205 int max_epilog_size
= 16 + 20*4;
5208 if (cfg
->method
->save_lmf
)
5209 max_epilog_size
+= 128;
5211 code
= realloc_code (cfg
, max_epilog_size
);
5215 if (method
->save_lmf
) {
5217 pos
+= sizeof (MonoLMF
);
5219 /* save the frame reg in r8 */
5220 ppc_mr (code
, ppc_r8
, cfg
->frame_reg
);
5221 ppc_addi (code
, ppc_r12
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
);
5222 /* r5 = previous_lmf */
5223 ppc_ldptr (code
, ppc_r5
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r12
);
5225 ppc_ldptr (code
, ppc_r6
, G_STRUCT_OFFSET(MonoLMF
, lmf_addr
), ppc_r12
);
5226 /* *(lmf_addr) = previous_lmf */
5227 ppc_stptr (code
, ppc_r5
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), ppc_r6
);
5228 /* FIXME: speedup: there is no actual need to restore the registers if
5229 * we didn't actually change them (idea from Zoltan).
5232 ppc_ldr_multiple (code
, ppc_r13
, G_STRUCT_OFFSET(MonoLMF
, iregs
), ppc_r12
);
5234 /*for (i = 14; i < 32; i++) {
5235 ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12);
5237 g_assert (ppc_is_imm16 (cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
));
5238 /* use the saved copy of the frame reg in r8 */
5239 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
5240 ppc_ldr (code
, ppc_r0
, cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
, ppc_r8
);
5241 ppc_mtlr (code
, ppc_r0
);
5243 ppc_addic (code
, ppc_sp
, ppc_r8
, cfg
->stack_usage
);
5245 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
5246 long return_offset
= cfg
->stack_usage
+ PPC_RET_ADDR_OFFSET
;
5247 if (ppc_is_imm16 (return_offset
)) {
5248 ppc_ldr (code
, ppc_r0
, return_offset
, cfg
->frame_reg
);
5250 ppc_load (code
, ppc_r12
, return_offset
);
5251 ppc_ldr_indexed (code
, ppc_r0
, cfg
->frame_reg
, ppc_r12
);
5253 ppc_mtlr (code
, ppc_r0
);
5255 if (ppc_is_imm16 (cfg
->stack_usage
)) {
5256 int offset
= cfg
->stack_usage
;
5257 for (i
= 13; i
<= 31; i
++) {
5258 if (cfg
->used_int_regs
& (1 << i
))
5259 offset
-= sizeof (target_mgreg_t
);
5261 if (cfg
->frame_reg
!= ppc_sp
)
5262 ppc_mr (code
, ppc_r12
, cfg
->frame_reg
);
5263 /* note r31 (possibly the frame register) is restored last */
5264 for (i
= 13; i
<= 31; i
++) {
5265 if (cfg
->used_int_regs
& (1 << i
)) {
5266 ppc_ldr (code
, i
, offset
, cfg
->frame_reg
);
5267 offset
+= sizeof (target_mgreg_t
);
5270 if (cfg
->frame_reg
!= ppc_sp
)
5271 ppc_addi (code
, ppc_sp
, ppc_r12
, cfg
->stack_usage
);
5273 ppc_addi (code
, ppc_sp
, ppc_sp
, cfg
->stack_usage
);
5275 ppc_load32 (code
, ppc_r12
, cfg
->stack_usage
);
5276 if (cfg
->used_int_regs
) {
5277 ppc_add (code
, ppc_r12
, cfg
->frame_reg
, ppc_r12
);
5278 for (i
= 31; i
>= 13; --i
) {
5279 if (cfg
->used_int_regs
& (1 << i
)) {
5280 pos
+= sizeof (target_mgreg_t
);
5281 ppc_ldr (code
, i
, -pos
, ppc_r12
);
5284 ppc_mr (code
, ppc_sp
, ppc_r12
);
5286 ppc_add (code
, ppc_sp
, cfg
->frame_reg
, ppc_r12
);
5292 set_code_cursor (cfg
, code
);
5295 #endif /* ifndef DISABLE_JIT */
5297 /* remove once throw_exception_by_name is eliminated */
5299 exception_id_by_name (const char *name
)
5301 if (strcmp (name
, "IndexOutOfRangeException") == 0)
5302 return MONO_EXC_INDEX_OUT_OF_RANGE
;
5303 if (strcmp (name
, "OverflowException") == 0)
5304 return MONO_EXC_OVERFLOW
;
5305 if (strcmp (name
, "ArithmeticException") == 0)
5306 return MONO_EXC_ARITHMETIC
;
5307 if (strcmp (name
, "DivideByZeroException") == 0)
5308 return MONO_EXC_DIVIDE_BY_ZERO
;
5309 if (strcmp (name
, "InvalidCastException") == 0)
5310 return MONO_EXC_INVALID_CAST
;
5311 if (strcmp (name
, "NullReferenceException") == 0)
5312 return MONO_EXC_NULL_REF
;
5313 if (strcmp (name
, "ArrayTypeMismatchException") == 0)
5314 return MONO_EXC_ARRAY_TYPE_MISMATCH
;
5315 if (strcmp (name
, "ArgumentException") == 0)
5316 return MONO_EXC_ARGUMENT
;
5317 g_error ("Unknown intrinsic exception %s\n", name
);
5323 mono_arch_emit_exceptions (MonoCompile
*cfg
)
5325 MonoJumpInfo
*patch_info
;
5328 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
5329 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
5330 int max_epilog_size
= 50;
5332 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
5333 exc_throw_pos
[i
] = NULL
;
5334 exc_throw_found
[i
] = 0;
5337 /* count the number of exception infos */
5340 * make sure we have enough space for exceptions
5342 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5343 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
5344 i
= exception_id_by_name ((const char*)patch_info
->data
.target
);
5345 if (!exc_throw_found
[i
]) {
5346 max_epilog_size
+= (2 * PPC_LOAD_SEQUENCE_LENGTH
) + 5 * 4;
5347 exc_throw_found
[i
] = TRUE
;
5349 } else if (patch_info
->type
== MONO_PATCH_INFO_BB_OVF
)
5350 max_epilog_size
+= 12;
5351 else if (patch_info
->type
== MONO_PATCH_INFO_EXC_OVF
) {
5352 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5353 i
= exception_id_by_name (ovfj
->data
.exception
);
5354 if (!exc_throw_found
[i
]) {
5355 max_epilog_size
+= (2 * PPC_LOAD_SEQUENCE_LENGTH
) + 5 * 4;
5356 exc_throw_found
[i
] = TRUE
;
5358 max_epilog_size
+= 8;
5362 code
= realloc_code (cfg
, max_epilog_size
);
5364 /* add code to raise exceptions */
5365 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5366 switch (patch_info
->type
) {
5367 case MONO_PATCH_INFO_BB_OVF
: {
5368 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5369 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5370 /* patch the initial jump */
5371 ppc_patch (ip
, code
);
5372 ppc_bc (code
, ovfj
->b0_cond
, ovfj
->b1_cond
, 2);
5374 ppc_patch (code
- 4, ip
+ 4); /* jump back after the initial branch */
5375 /* jump back to the true target */
5377 ip
= ovfj
->data
.bb
->native_offset
+ cfg
->native_code
;
5378 ppc_patch (code
- 4, ip
);
5379 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5382 case MONO_PATCH_INFO_EXC_OVF
: {
5383 MonoOvfJump
*ovfj
= (MonoOvfJump
*)patch_info
->data
.target
;
5384 MonoJumpInfo
*newji
;
5385 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5386 unsigned char *bcl
= code
;
5387 /* patch the initial jump: we arrived here with a call */
5388 ppc_patch (ip
, code
);
5389 ppc_bc (code
, ovfj
->b0_cond
, ovfj
->b1_cond
, 0);
5391 ppc_patch (code
- 4, ip
+ 4); /* jump back after the initial branch */
5392 /* patch the conditional jump to the right handler */
5393 /* make it processed next */
5394 newji
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfo
));
5395 newji
->type
= MONO_PATCH_INFO_EXC
;
5396 newji
->ip
.i
= bcl
- cfg
->native_code
;
5397 newji
->data
.target
= ovfj
->data
.exception
;
5398 newji
->next
= patch_info
->next
;
5399 patch_info
->next
= newji
;
5400 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5403 case MONO_PATCH_INFO_EXC
: {
5404 MonoClass
*exc_class
;
5406 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5407 i
= exception_id_by_name ((const char*)patch_info
->data
.target
);
5408 if (exc_throw_pos
[i
] && !(ip
> exc_throw_pos
[i
] && ip
- exc_throw_pos
[i
] > 50000)) {
5409 ppc_patch (ip
, exc_throw_pos
[i
]);
5410 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5413 exc_throw_pos
[i
] = code
;
5416 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
5418 ppc_patch (ip
, code
);
5419 /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
5420 ppc_load (code
, ppc_r3
, m_class_get_type_token (exc_class
));
5421 /* we got here from a conditional call, so the calling ip is set in lr */
5422 ppc_mflr (code
, ppc_r4
);
5423 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
5424 patch_info
->data
.jit_icall_id
= MONO_JIT_ICALL_mono_arch_throw_corlib_exception
;
5425 patch_info
->ip
.i
= code
- cfg
->native_code
;
5426 if (FORCE_INDIR_CALL
|| cfg
->method
->dynamic
) {
5427 ppc_load_func (code
, PPC_CALL_REG
, 0);
5428 ppc_mtctr (code
, PPC_CALL_REG
);
5429 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5441 set_code_cursor (cfg
, code
);
5447 try_offset_access (void *value
, guint32 idx
)
5449 register void* me
__asm__ ("r2");
5450 void ***p
= (void***)((char*)me
+ 284);
5451 int idx1
= idx
/ 32;
5452 int idx2
= idx
% 32;
5455 if (value
!= p
[idx1
][idx2
])
5462 mono_arch_finish_init (void)
5466 #define CMP_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
5468 #define LOADSTORE_SIZE 4
5469 #define JUMP_IMM_SIZE 12
5470 #define JUMP_IMM32_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 8)
5471 #define ENABLE_WRONG_METHOD_CHECK 0
5474 * LOCKING: called with the domain lock held
5477 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
5478 gpointer fail_tramp
)
5482 guint8
*code
, *start
;
5484 for (i
= 0; i
< count
; ++i
) {
5485 MonoIMTCheckItem
*item
= imt_entries
[i
];
5486 if (item
->is_equals
) {
5487 if (item
->check_target_idx
) {
5488 if (!item
->compare_done
)
5489 item
->chunk_size
+= CMP_SIZE
;
5490 if (item
->has_target_code
)
5491 item
->chunk_size
+= BR_SIZE
+ JUMP_IMM32_SIZE
;
5493 item
->chunk_size
+= LOADSTORE_SIZE
+ BR_SIZE
+ JUMP_IMM_SIZE
;
5496 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
+ JUMP_IMM32_SIZE
* 2;
5497 if (!item
->has_target_code
)
5498 item
->chunk_size
+= LOADSTORE_SIZE
;
5500 item
->chunk_size
+= LOADSTORE_SIZE
+ JUMP_IMM_SIZE
;
5501 #if ENABLE_WRONG_METHOD_CHECK
5502 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
+ 4;
5507 item
->chunk_size
+= CMP_SIZE
+ BR_SIZE
;
5508 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
5510 size
+= item
->chunk_size
;
5512 /* the initial load of the vtable address */
5513 size
+= PPC_LOAD_SEQUENCE_LENGTH
+ LOADSTORE_SIZE
;
5515 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
);
5517 code
= mono_domain_code_reserve (domain
, size
);
5522 * We need to save and restore r12 because it might be
5523 * used by the caller as the vtable register, so
5524 * clobbering it will trip up the magic trampoline.
5526 * FIXME: Get rid of this by making sure that r12 is
5527 * not used as the vtable register in interface calls.
5529 ppc_stptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5530 ppc_load (code
, ppc_r12
, (gsize
)(& (vtable
->vtable
[0])));
5532 for (i
= 0; i
< count
; ++i
) {
5533 MonoIMTCheckItem
*item
= imt_entries
[i
];
5534 item
->code_target
= code
;
5535 if (item
->is_equals
) {
5536 if (item
->check_target_idx
) {
5537 if (!item
->compare_done
) {
5538 ppc_load (code
, ppc_r0
, (gsize
)item
->key
);
5539 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5541 item
->jmp_code
= code
;
5542 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5543 if (item
->has_target_code
) {
5544 ppc_load_ptr (code
, ppc_r0
, item
->value
.target_code
);
5546 ppc_ldptr (code
, ppc_r0
, (sizeof (target_mgreg_t
) * item
->value
.vtable_slot
), ppc_r12
);
5547 ppc_ldptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5549 ppc_mtctr (code
, ppc_r0
);
5550 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5553 ppc_load (code
, ppc_r0
, (gulong
)item
->key
);
5554 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5555 item
->jmp_code
= code
;
5556 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5557 if (item
->has_target_code
) {
5558 ppc_load_ptr (code
, ppc_r0
, item
->value
.target_code
);
5561 ppc_load_ptr (code
, ppc_r0
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
5562 ppc_ldptr_indexed (code
, ppc_r0
, 0, ppc_r0
);
5564 ppc_mtctr (code
, ppc_r0
);
5565 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5566 ppc_patch (item
->jmp_code
, code
);
5567 ppc_load_ptr (code
, ppc_r0
, fail_tramp
);
5568 ppc_mtctr (code
, ppc_r0
);
5569 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5570 item
->jmp_code
= NULL
;
5572 /* enable the commented code to assert on wrong method */
5573 #if ENABLE_WRONG_METHOD_CHECK
5574 ppc_load (code
, ppc_r0
, (guint32
)item
->key
);
5575 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5576 item
->jmp_code
= code
;
5577 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_EQ
, 0);
5579 ppc_ldptr (code
, ppc_r0
, (sizeof (target_mgreg_t
) * item
->value
.vtable_slot
), ppc_r12
);
5580 ppc_ldptr (code
, ppc_r12
, PPC_RET_ADDR_OFFSET
, ppc_sp
);
5581 ppc_mtctr (code
, ppc_r0
);
5582 ppc_bcctr (code
, PPC_BR_ALWAYS
, 0);
5583 #if ENABLE_WRONG_METHOD_CHECK
5584 ppc_patch (item
->jmp_code
, code
);
5586 item
->jmp_code
= NULL
;
5591 ppc_load (code
, ppc_r0
, (gulong
)item
->key
);
5592 ppc_compare_log (code
, 0, MONO_ARCH_IMT_REG
, ppc_r0
);
5593 item
->jmp_code
= code
;
5594 ppc_bc (code
, PPC_BR_FALSE
, PPC_BR_LT
, 0);
5597 /* patch the branches to get to the target items */
5598 for (i
= 0; i
< count
; ++i
) {
5599 MonoIMTCheckItem
*item
= imt_entries
[i
];
5600 if (item
->jmp_code
) {
5601 if (item
->check_target_idx
) {
5602 ppc_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
5608 UnlockedAdd (&mono_stats
.imt_trampolines_size
, code
- start
);
5609 g_assert (code
- start
<= size
);
5610 mono_arch_flush_icache (start
, size
);
5611 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
5613 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, NULL
), domain
);
5619 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
5621 host_mgreg_t
*r
= (host_mgreg_t
*)regs
;
5623 return (MonoMethod
*)(gsize
) r
[MONO_ARCH_IMT_REG
];
5627 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
5629 return (MonoVTable
*)(gsize
) regs
[MONO_ARCH_RGCTX_REG
];
5633 mono_arch_get_cie_program (void)
5637 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ppc_r1
, 0);
5643 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
5645 MonoInst
*ins
= NULL
;
5648 if (cmethod
->klass
== mono_class_try_get_math_class ()) {
5649 if (strcmp (cmethod
->name
, "Sqrt") == 0) {
5651 } else if (strcmp (cmethod
->name
, "Abs") == 0 && fsig
->params
[0]->type
== MONO_TYPE_R8
) {
5655 if (opcode
&& fsig
->param_count
== 1) {
5656 MONO_INST_NEW (cfg
, ins
, opcode
);
5657 ins
->type
= STACK_R8
;
5658 ins
->dreg
= mono_alloc_freg (cfg
);
5659 ins
->sreg1
= args
[0]->dreg
;
5660 MONO_ADD_INS (cfg
->cbb
, ins
);
5663 /* Check for Min/Max for (u)int(32|64) */
5665 if (cpu_hw_caps
& PPC_ISA_2_03
) {
5666 if (strcmp (cmethod
->name
, "Min") == 0) {
5667 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
5669 if (fsig
->params
[0]->type
== MONO_TYPE_U4
)
5670 opcode
= OP_IMIN_UN
;
5671 #ifdef __mono_ppc64__
5672 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
5674 else if (fsig
->params
[0]->type
== MONO_TYPE_U8
)
5675 opcode
= OP_LMIN_UN
;
5677 } else if (strcmp (cmethod
->name
, "Max") == 0) {
5678 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
5680 if (fsig
->params
[0]->type
== MONO_TYPE_U4
)
5681 opcode
= OP_IMAX_UN
;
5682 #ifdef __mono_ppc64__
5683 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
5685 else if (fsig
->params
[0]->type
== MONO_TYPE_U8
)
5686 opcode
= OP_LMAX_UN
;
5690 * TODO: Floating point version with fsel, but fsel has
5691 * some peculiarities (need a scratch reg unless
5692 * comparing with 0, NaN/Inf behaviour (then MathF too)
5696 if (opcode
&& fsig
->param_count
== 2) {
5697 MONO_INST_NEW (cfg
, ins
, opcode
);
5698 ins
->type
= fsig
->params
[0]->type
== MONO_TYPE_I4
? STACK_I4
: STACK_I8
;
5699 ins
->dreg
= mono_alloc_ireg (cfg
);
5700 ins
->sreg1
= args
[0]->dreg
;
5701 ins
->sreg2
= args
[1]->dreg
;
5702 MONO_ADD_INS (cfg
->cbb
, ins
);
5705 /* Rounding instructions */
5707 if ((cpu_hw_caps
& PPC_ISA_2X
) && (fsig
->param_count
== 1) && (fsig
->params
[0]->type
== MONO_TYPE_R8
)) {
5709 * XXX: sysmath.c and the POWER ISA documentation for
5710 * frin[.] imply rounding is a little more complicated
5711 * than expected; the semantics are slightly different,
5712 * so just "frin." isn't a drop-in replacement. Floor,
5713 * Truncate, and Ceiling seem to work normally though.
5714 * (also, no float versions of these ops, but frsp
5715 * could be preprended?)
5717 //if (!strcmp (cmethod->name, "Round"))
5718 // opcode = OP_ROUND;
5719 if (!strcmp (cmethod
->name
, "Floor"))
5720 opcode
= OP_PPC_FLOOR
;
5721 else if (!strcmp (cmethod
->name
, "Ceiling"))
5722 opcode
= OP_PPC_CEIL
;
5723 else if (!strcmp (cmethod
->name
, "Truncate"))
5724 opcode
= OP_PPC_TRUNC
;
5726 MONO_INST_NEW (cfg
, ins
, opcode
);
5727 ins
->type
= STACK_R8
;
5728 ins
->dreg
= mono_alloc_freg (cfg
);
5729 ins
->sreg1
= args
[0]->dreg
;
5730 MONO_ADD_INS (cfg
->cbb
, ins
);
5734 if (cmethod
->klass
== mono_class_try_get_mathf_class ()) {
5735 if (strcmp (cmethod
->name
, "Sqrt") == 0) {
5737 } /* XXX: POWER has no single-precision normal FPU abs? */
5739 if (opcode
&& fsig
->param_count
== 1) {
5740 MONO_INST_NEW (cfg
, ins
, opcode
);
5741 ins
->type
= STACK_R4
;
5742 ins
->dreg
= mono_alloc_freg (cfg
);
5743 ins
->sreg1
= args
[0]->dreg
;
5744 MONO_ADD_INS (cfg
->cbb
, ins
);
5751 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
5754 return (host_mgreg_t
)(gsize
)MONO_CONTEXT_GET_SP (ctx
);
5756 return ctx
->regs
[reg
];
5760 mono_arch_get_patch_offset (guint8
*code
)
5766 * mono_aot_emit_load_got_addr:
5768 * Emit code to load the got address.
5769 * On PPC, the result is placed into r30.
5772 mono_arch_emit_load_got_addr (guint8
*start
, guint8
*code
, MonoCompile
*cfg
, MonoJumpInfo
**ji
)
5775 ppc_mflr (code
, ppc_r30
);
5777 mono_add_patch_info (cfg
, code
- start
, MONO_PATCH_INFO_GOT_OFFSET
, NULL
);
5779 *ji
= mono_patch_info_list_prepend (*ji
, code
- start
, MONO_PATCH_INFO_GOT_OFFSET
, NULL
);
5780 /* arch_emit_got_address () patches this */
5781 #if defined(TARGET_POWERPC64)
5787 ppc_load32 (code
, ppc_r0
, 0);
5788 ppc_add (code
, ppc_r30
, ppc_r30
, ppc_r0
);
5791 set_code_cursor (cfg
, code
);
5796 * mono_ppc_emit_load_aotconst:
5798 * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
5799 * TARGET from the mscorlib GOT in full-aot code.
5800 * On PPC, the GOT address is assumed to be in r30, and the result is placed into
5804 mono_arch_emit_load_aotconst (guint8
*start
, guint8
*code
, MonoJumpInfo
**ji
, MonoJumpInfoType tramp_type
, gconstpointer target
)
5806 /* Load the mscorlib got address */
5807 ppc_ldptr (code
, ppc_r12
, sizeof (target_mgreg_t
), ppc_r30
);
5808 *ji
= mono_patch_info_list_prepend (*ji
, code
- start
, tramp_type
, target
);
5809 /* arch_emit_got_access () patches this */
5810 ppc_load32 (code
, ppc_r0
, 0);
5811 ppc_ldptr_indexed (code
, ppc_r12
, ppc_r12
, ppc_r0
);
5816 /* Soft Debug support */
5817 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
5824 * mono_arch_set_breakpoint:
5826 * See mini-amd64.c for docs.
5829 mono_arch_set_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5832 guint8
*orig_code
= code
;
5834 ppc_load_sequence (code
, ppc_r12
, (gsize
)bp_trigger_page
);
5835 ppc_ldptr (code
, ppc_r12
, 0, ppc_r12
);
5837 g_assert (code
- orig_code
== BREAKPOINT_SIZE
);
5839 mono_arch_flush_icache (orig_code
, code
- orig_code
);
5843 * mono_arch_clear_breakpoint:
5845 * See mini-amd64.c for docs.
5848 mono_arch_clear_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5853 for (i
= 0; i
< BREAKPOINT_SIZE
/ 4; ++i
)
5856 mono_arch_flush_icache (ip
, code
- ip
);
5860 * mono_arch_is_breakpoint_event:
5862 * See mini-amd64.c for docs.
5865 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
5867 siginfo_t
* sinfo
= (siginfo_t
*) info
;
5868 /* Sometimes the address is off by 4 */
5869 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
5876 * mono_arch_skip_breakpoint:
5878 * See mini-amd64.c for docs.
5881 mono_arch_skip_breakpoint (MonoContext
*ctx
, MonoJitInfo
*ji
)
5883 /* skip the ldptr */
5884 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5892 * mono_arch_start_single_stepping:
5894 * See mini-amd64.c for docs.
5897 mono_arch_start_single_stepping (void)
5899 mono_mprotect (ss_trigger_page
, mono_pagesize (), 0);
5903 * mono_arch_stop_single_stepping:
5905 * See mini-amd64.c for docs.
5908 mono_arch_stop_single_stepping (void)
5910 mono_mprotect (ss_trigger_page
, mono_pagesize (), MONO_MMAP_READ
);
5914 * mono_arch_is_single_step_event:
5916 * See mini-amd64.c for docs.
5919 mono_arch_is_single_step_event (void *info
, void *sigctx
)
5921 siginfo_t
* sinfo
= (siginfo_t
*) info
;
5922 /* Sometimes the address is off by 4 */
5923 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
5930 * mono_arch_skip_single_step:
5932 * See mini-amd64.c for docs.
5935 mono_arch_skip_single_step (MonoContext
*ctx
)
5937 /* skip the ldptr */
5938 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5942 * mono_arch_create_seq_point_info:
5944 * See mini-amd64.c for docs.
5947 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
5956 mono_arch_opcode_supported (int opcode
)
5959 case OP_ATOMIC_ADD_I4
:
5960 case OP_ATOMIC_CAS_I4
:
5961 #ifdef TARGET_POWERPC64
5962 case OP_ATOMIC_ADD_I8
:
5963 case OP_ATOMIC_CAS_I8
:
5972 mono_arch_load_function (MonoJitICallId jit_icall_id
)
5974 gpointer target
= NULL
;
5975 switch (jit_icall_id
) {
5976 #undef MONO_AOT_ICALL
5977 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
5978 MONO_AOT_ICALL (mono_ppc_throw_exception
)