/*
 * mini-ia64.c: IA64 backend for the Mono code generator
 *
 * Authors:
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2003 Ximian, Inc.
 */
15 #ifdef __INTEL_COMPILER
16 #include <ia64intrin.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/profiler-private.h>
23 #include <mono/utils/mono-math.h>
26 #include "mini-ia64.h"
28 #include "jit-icalls.h"
31 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
33 #define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
36 * IA64 register usage:
37 * - local registers are used for global register allocation
38 * - r8..r11, r14..r30 is used for local register allocation
39 * - r31 is a scratch register used within opcode implementations
40 * - FIXME: Use out registers as well
41 * - the first three locals are used for saving ar.pfst, b0, and sp
42 * - compare instructions allways set p6 and p7
46 * There are a lot of places where generated code is disassembled/patched.
47 * The automatic bundling of instructions done by the code generation macros
48 * could complicate things, so it is best to call
49 * ia64_codegen_set_one_ins_per_bundle () at those places.
52 #define ARGS_OFFSET 16
54 #define GP_SCRATCH_REG 31
55 #define GP_SCRATCH_REG2 30
56 #define FP_SCRATCH_REG 32
57 #define FP_SCRATCH_REG2 33
59 #define LOOP_ALIGNMENT 8
60 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
/*
 * Symbolic names of the 128 ia64 general purpose registers, indexed by
 * register number (presumably consumed by mono_arch_regname () below —
 * its body is not visible in this chunk).
 */
static const char* gregs [] = {
	"r0",   "r1",   "r2",   "r3",   "r4",   "r5",   "r6",   "r7",
	"r8",   "r9",   "r10",  "r11",  "r12",  "r13",  "r14",  "r15",
	"r16",  "r17",  "r18",  "r19",  "r20",  "r21",  "r22",  "r23",
	"r24",  "r25",  "r26",  "r27",  "r28",  "r29",  "r30",  "r31",
	"r32",  "r33",  "r34",  "r35",  "r36",  "r37",  "r38",  "r39",
	"r40",  "r41",  "r42",  "r43",  "r44",  "r45",  "r46",  "r47",
	"r48",  "r49",  "r50",  "r51",  "r52",  "r53",  "r54",  "r55",
	"r56",  "r57",  "r58",  "r59",  "r60",  "r61",  "r62",  "r63",
	"r64",  "r65",  "r66",  "r67",  "r68",  "r69",  "r70",  "r71",
	"r72",  "r73",  "r74",  "r75",  "r76",  "r77",  "r78",  "r79",
	"r80",  "r81",  "r82",  "r83",  "r84",  "r85",  "r86",  "r87",
	"r88",  "r89",  "r90",  "r91",  "r92",  "r93",  "r94",  "r95",
	"r96",  "r97",  "r98",  "r99",  "r100", "r101", "r102", "r103",
	"r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
	"r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
	"r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127"
};
79 mono_arch_regname (int reg
)
/*
 * Symbolic names of the 128 ia64 floating point registers, indexed by
 * register number (presumably consumed by mono_arch_fregname () below —
 * its body is not visible in this chunk).
 */
static const char* fregs [] = {
	"f0",   "f1",   "f2",   "f3",   "f4",   "f5",   "f6",   "f7",
	"f8",   "f9",   "f10",  "f11",  "f12",  "f13",  "f14",  "f15",
	"f16",  "f17",  "f18",  "f19",  "f20",  "f21",  "f22",  "f23",
	"f24",  "f25",  "f26",  "f27",  "f28",  "f29",  "f30",  "f31",
	"f32",  "f33",  "f34",  "f35",  "f36",  "f37",  "f38",  "f39",
	"f40",  "f41",  "f42",  "f43",  "f44",  "f45",  "f46",  "f47",
	"f48",  "f49",  "f50",  "f51",  "f52",  "f53",  "f54",  "f55",
	"f56",  "f57",  "f58",  "f59",  "f60",  "f61",  "f62",  "f63",
	"f64",  "f65",  "f66",  "f67",  "f68",  "f69",  "f70",  "f71",
	"f72",  "f73",  "f74",  "f75",  "f76",  "f77",  "f78",  "f79",
	"f80",  "f81",  "f82",  "f83",  "f84",  "f85",  "f86",  "f87",
	"f88",  "f89",  "f90",  "f91",  "f92",  "f93",  "f94",  "f95",
	"f96",  "f97",  "f98",  "f99",  "f100", "f101", "f102", "f103",
	"f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
	"f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
	"f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127"
};
104 mono_arch_fregname (int reg
)
112 G_GNUC_UNUSED
static void
117 G_GNUC_UNUSED
static gboolean
120 static int count
= 0;
123 if (count
== atoi (getenv ("COUNT"))) {
127 if (count
> atoi (getenv ("COUNT"))) {
135 debug_ins_sched (void)
138 return debug_count ();
148 return debug_count ();
155 ia64_patch (unsigned char* code
, gpointer target
);
162 ArgValuetypeAddrInIReg
,
180 /* Only if storage == ArgAggregate */
190 gboolean need_stack_align
;
191 gboolean vtype_retaddr
;
192 /* The index of the vret arg in the argument list */
199 #define DEBUG(a) if (cfg->verbose_level > 1) a
204 add_general (guint32
*gr
, guint32
*stack_size
, ArgInfo
*ainfo
)
206 ainfo
->offset
= *stack_size
;
208 if (*gr
>= PARAM_REGS
) {
209 ainfo
->storage
= ArgOnStack
;
210 (*stack_size
) += sizeof (gpointer
);
213 ainfo
->storage
= ArgInIReg
;
219 #define FLOAT_PARAM_REGS 8
222 add_float (guint32
*gr
, guint32
*fr
, guint32
*stack_size
, ArgInfo
*ainfo
, gboolean is_double
)
224 ainfo
->offset
= *stack_size
;
226 if (*gr
>= PARAM_REGS
) {
227 ainfo
->storage
= ArgOnStack
;
228 (*stack_size
) += sizeof (gpointer
);
231 ainfo
->storage
= is_double
? ArgInFloatReg
: ArgInFloatRegR4
;
232 ainfo
->reg
= 8 + *fr
;
239 add_valuetype (MonoGenericSharingContext
*gsctx
, MonoMethodSignature
*sig
, ArgInfo
*ainfo
, MonoType
*type
,
241 guint32
*gr
, guint32
*fr
, guint32
*stack_size
)
245 MonoMarshalType
*info
;
246 gboolean is_hfa
= TRUE
;
247 guint32 hfa_type
= 0;
249 klass
= mono_class_from_mono_type (type
);
250 if (type
->type
== MONO_TYPE_TYPEDBYREF
)
251 size
= 3 * sizeof (gpointer
);
252 else if (sig
->pinvoke
)
253 size
= mono_type_native_stack_size (&klass
->byval_arg
, NULL
);
255 size
= mini_type_stack_size (gsctx
, &klass
->byval_arg
, NULL
);
257 if (!sig
->pinvoke
|| (size
== 0)) {
258 /* Allways pass in memory */
259 ainfo
->offset
= *stack_size
;
260 *stack_size
+= ALIGN_TO (size
, 8);
261 ainfo
->storage
= ArgOnStack
;
266 /* Determine whenever it is a HFA (Homogeneous Floating Point Aggregate) */
267 info
= mono_marshal_load_type_info (klass
);
269 for (i
= 0; i
< info
->num_fields
; ++i
) {
270 guint32 ftype
= info
->fields
[i
].field
->type
->type
;
271 if (!(info
->fields
[i
].field
->type
->byref
) &&
272 ((ftype
== MONO_TYPE_R4
) || (ftype
== MONO_TYPE_R8
))) {
275 else if (hfa_type
!= ftype
)
284 ainfo
->storage
= ArgAggregate
;
285 ainfo
->atype
= AggregateNormal
;
288 ainfo
->atype
= hfa_type
== MONO_TYPE_R4
? AggregateSingleHFA
: AggregateDoubleHFA
;
290 if (info
->num_fields
<= 8) {
292 ainfo
->nregs
= info
->num_fields
;
293 ainfo
->nslots
= ainfo
->nregs
;
299 if ((*fr
) + info
->num_fields
> 8)
302 ainfo
->reg
= 8 + (*fr
);
303 ainfo
->nregs
= info
->num_fields
;
304 ainfo
->nslots
= ainfo
->nregs
;
305 (*fr
) += info
->num_fields
;
306 if (ainfo
->atype
== AggregateSingleHFA
) {
308 * FIXME: Have to keep track of the parameter slot number, which is
309 * not the same as *gr.
311 (*gr
) += ALIGN_TO (info
->num_fields
, 2) / 2;
313 (*gr
) += info
->num_fields
;
319 /* This also handles returning of TypedByRef used by some icalls */
322 ainfo
->reg
= IA64_R8
;
323 ainfo
->nregs
= (size
+ 7) / 8;
324 ainfo
->nslots
= ainfo
->nregs
;
331 ainfo
->offset
= *stack_size
;
332 ainfo
->nslots
= (size
+ 7) / 8;
334 if (((*gr
) + ainfo
->nslots
) <= 8) {
335 /* Fits entirely in registers */
336 ainfo
->nregs
= ainfo
->nslots
;
337 (*gr
) += ainfo
->nregs
;
341 ainfo
->nregs
= 8 - (*gr
);
343 (*stack_size
) += (ainfo
->nslots
- ainfo
->nregs
) * 8;
349 * Obtain information about a call according to the calling convention.
350 * For IA64, see the "Itanium Software Conventions and Runtime Architecture
351 * Gude" document for more information.
354 get_call_info (MonoCompile
*cfg
, MonoMemPool
*mp
, MonoMethodSignature
*sig
, gboolean is_pinvoke
)
356 guint32 i
, gr
, fr
, pstart
;
358 int n
= sig
->hasthis
+ sig
->param_count
;
359 guint32 stack_size
= 0;
361 MonoGenericSharingContext
*gsctx
= cfg
? cfg
->generic_sharing_context
: NULL
;
364 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
366 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
373 ret_type
= mono_type_get_underlying_type (sig
->ret
);
374 ret_type
= mini_get_basic_type_from_generic (gsctx
, ret_type
);
375 switch (ret_type
->type
) {
376 case MONO_TYPE_BOOLEAN
:
387 case MONO_TYPE_FNPTR
:
388 case MONO_TYPE_CLASS
:
389 case MONO_TYPE_OBJECT
:
390 case MONO_TYPE_SZARRAY
:
391 case MONO_TYPE_ARRAY
:
392 case MONO_TYPE_STRING
:
393 cinfo
->ret
.storage
= ArgInIReg
;
394 cinfo
->ret
.reg
= IA64_R8
;
398 cinfo
->ret
.storage
= ArgInIReg
;
399 cinfo
->ret
.reg
= IA64_R8
;
403 cinfo
->ret
.storage
= ArgInFloatReg
;
406 case MONO_TYPE_GENERICINST
:
407 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
408 cinfo
->ret
.storage
= ArgInIReg
;
409 cinfo
->ret
.reg
= IA64_R8
;
413 case MONO_TYPE_VALUETYPE
:
414 case MONO_TYPE_TYPEDBYREF
: {
415 guint32 tmp_gr
= 0, tmp_fr
= 0, tmp_stacksize
= 0;
417 if (sig
->ret
->byref
) {
418 /* This seems to happen with ldfld wrappers */
419 cinfo
->ret
.storage
= ArgInIReg
;
421 add_valuetype (gsctx
, sig
, &cinfo
->ret
, sig
->ret
, TRUE
, &tmp_gr
, &tmp_fr
, &tmp_stacksize
);
422 if (cinfo
->ret
.storage
== ArgOnStack
) {
423 /* The caller passes the address where the value is stored */
424 cinfo
->vtype_retaddr
= TRUE
;
430 cinfo
->ret
.storage
= ArgNone
;
433 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
439 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
440 * the first argument, allowing 'this' to be always passed in the first arg reg.
441 * Also do this if the first argument is a reference type, since virtual calls
442 * are sometimes made using calli without sig->hasthis set, like in the delegate
445 if (cinfo
->vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx
, sig
->params
[0]))))) {
447 add_general (&gr
, &stack_size
, cinfo
->args
+ 0);
449 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0]);
452 add_general (&gr
, &stack_size
, &cinfo
->ret
);
453 if (cinfo
->ret
.storage
== ArgInIReg
)
454 cinfo
->ret
.storage
= ArgValuetypeAddrInIReg
;
455 cinfo
->vret_arg_index
= 1;
459 add_general (&gr
, &stack_size
, cinfo
->args
+ 0);
461 if (cinfo
->vtype_retaddr
) {
462 add_general (&gr
, &stack_size
, &cinfo
->ret
);
463 if (cinfo
->ret
.storage
== ArgInIReg
)
464 cinfo
->ret
.storage
= ArgValuetypeAddrInIReg
;
468 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== 0)) {
470 fr
= FLOAT_PARAM_REGS
;
472 /* Emit the signature cookie just before the implicit arguments */
473 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
);
476 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
477 ArgInfo
*ainfo
= &cinfo
->args
[sig
->hasthis
+ i
];
480 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
481 /* We allways pass the sig cookie on the stack for simplicity */
483 * Prevent implicit arguments + the sig cookie from being passed
487 fr
= FLOAT_PARAM_REGS
;
489 /* Emit the signature cookie just before the implicit arguments */
490 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
);
493 if (sig
->params
[i
]->byref
) {
494 add_general (&gr
, &stack_size
, ainfo
);
497 ptype
= mono_type_get_underlying_type (sig
->params
[i
]);
498 ptype
= mini_get_basic_type_from_generic (gsctx
, ptype
);
499 switch (ptype
->type
) {
500 case MONO_TYPE_BOOLEAN
:
503 add_general (&gr
, &stack_size
, ainfo
);
508 add_general (&gr
, &stack_size
, ainfo
);
512 add_general (&gr
, &stack_size
, ainfo
);
517 case MONO_TYPE_FNPTR
:
518 case MONO_TYPE_CLASS
:
519 case MONO_TYPE_OBJECT
:
520 case MONO_TYPE_STRING
:
521 case MONO_TYPE_SZARRAY
:
522 case MONO_TYPE_ARRAY
:
523 add_general (&gr
, &stack_size
, ainfo
);
525 case MONO_TYPE_GENERICINST
:
526 if (!mono_type_generic_inst_is_valuetype (ptype
)) {
527 add_general (&gr
, &stack_size
, ainfo
);
531 case MONO_TYPE_VALUETYPE
:
532 case MONO_TYPE_TYPEDBYREF
:
534 /* We allways pass valuetypes on the stack */
535 add_valuetype (gsctx
, sig
, ainfo
, sig
->params
[i
], FALSE
, &gr
, &fr
, &stack_size
);
539 add_general (&gr
, &stack_size
, ainfo
);
542 add_float (&gr
, &fr
, &stack_size
, ainfo
, FALSE
);
545 add_float (&gr
, &fr
, &stack_size
, ainfo
, TRUE
);
548 g_assert_not_reached ();
552 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
> 0) && (sig
->sentinelpos
== sig
->param_count
)) {
554 fr
= FLOAT_PARAM_REGS
;
556 /* Emit the signature cookie just before the implicit arguments */
557 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
);
560 cinfo
->stack_usage
= stack_size
;
561 cinfo
->reg_usage
= gr
;
562 cinfo
->freg_usage
= fr
;
567 * mono_arch_get_argument_info:
568 * @csig: a method signature
569 * @param_count: the number of parameters to consider
570 * @arg_info: an array to store the result infos
572 * Gathers information on parameters such as size, alignment and
573 * padding. arg_info should be large enought to hold param_count + 1 entries.
575 * Returns the size of the argument area on the stack.
578 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
581 CallInfo
*cinfo
= get_call_info (NULL
, NULL
, csig
, FALSE
);
582 guint32 args_size
= cinfo
->stack_usage
;
584 /* The arguments are saved to a stack area in mono_arch_instrument_prolog */
586 arg_info
[0].offset
= 0;
589 for (k
= 0; k
< param_count
; k
++) {
590 arg_info
[k
+ 1].offset
= ((k
+ csig
->hasthis
) * 8);
592 arg_info
[k
+ 1].size
= 0;
601 * Initialize the cpu to execute managed code.
604 mono_arch_cpu_init (void)
609 * Initialize architecture specific code.
612 mono_arch_init (void)
617 * Cleanup architecture specific code.
620 mono_arch_cleanup (void)
625 * This function returns the optimizations supported on this cpu.
628 mono_arch_cpu_optimizazions (guint32
*exclude_mask
)
636 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
640 MonoMethodSignature
*sig
;
641 MonoMethodHeader
*header
;
644 header
= cfg
->header
;
646 sig
= mono_method_signature (cfg
->method
);
648 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
650 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
651 MonoInst
*ins
= cfg
->args
[i
];
653 ArgInfo
*ainfo
= &cinfo
->args
[i
];
655 if (ins
->flags
& (MONO_INST_IS_DEAD
|MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))
658 if (ainfo
->storage
== ArgInIReg
) {
659 /* The input registers are non-volatile */
660 ins
->opcode
= OP_REGVAR
;
661 ins
->dreg
= 32 + ainfo
->reg
;
665 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
666 MonoInst
*ins
= cfg
->varinfo
[i
];
667 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
670 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
673 if ((ins
->flags
& (MONO_INST_IS_DEAD
|MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) ||
674 (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
677 if (mono_is_regsize_var (ins
->inst_vtype
)) {
678 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
679 g_assert (i
== vmv
->idx
);
680 vars
= g_list_prepend (vars
, vmv
);
684 vars
= mono_varlist_sort (cfg
, vars
, 0);
690 mono_ia64_alloc_stacked_registers (MonoCompile
*cfg
)
693 guint32 reserved_regs
;
694 MonoMethodHeader
*header
;
696 if (cfg
->arch
.reg_local0
> 0)
700 cinfo
= get_call_info (cfg
, cfg
->mempool
, mono_method_signature (cfg
->method
), FALSE
);
702 header
= cfg
->header
;
704 /* Some registers are reserved for use by the prolog/epilog */
705 reserved_regs
= header
->num_clauses
? 4 : 3;
707 if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)) ||
708 (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)) {
709 /* One registers is needed by instrument_epilog to save the return value */
711 if (cinfo
->reg_usage
< 2)
712 /* Number of arguments passed to function call in instrument_prolog */
713 cinfo
->reg_usage
= 2;
716 cfg
->arch
.reg_in0
= 32;
717 cfg
->arch
.reg_local0
= cfg
->arch
.reg_in0
+ cinfo
->reg_usage
+ reserved_regs
;
718 cfg
->arch
.reg_out0
= cfg
->arch
.reg_local0
+ 16;
720 cfg
->arch
.reg_saved_ar_pfs
= cfg
->arch
.reg_local0
- 1;
721 cfg
->arch
.reg_saved_b0
= cfg
->arch
.reg_local0
- 2;
722 cfg
->arch
.reg_fp
= cfg
->arch
.reg_local0
- 3;
725 * Frames without handlers save sp to fp, frames with handlers save it into
726 * a dedicated register.
728 if (header
->num_clauses
)
729 cfg
->arch
.reg_saved_sp
= cfg
->arch
.reg_local0
- 4;
731 cfg
->arch
.reg_saved_sp
= cfg
->arch
.reg_fp
;
733 if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)) ||
734 (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)) {
735 cfg
->arch
.reg_saved_return_val
= cfg
->arch
.reg_local0
- reserved_regs
;
739 * Need to allocate at least 2 out register for use by OP_THROW / the system
740 * exception throwing code.
742 cfg
->arch
.n_out_regs
= MAX (cfg
->arch
.n_out_regs
, 2);
746 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
751 mono_ia64_alloc_stacked_registers (cfg
);
753 for (i
= cfg
->arch
.reg_local0
; i
< cfg
->arch
.reg_out0
; ++i
) {
756 regs
= g_list_prepend (regs
, (gpointer
)(gssize
)(i
));
763 * mono_arch_regalloc_cost:
765 * Return the cost, in number of memory references, of the action of
766 * allocating the variable VMV into a register during global register
770 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
772 /* FIXME: Increase costs linearly to avoid using all local registers */
778 mono_arch_allocate_vars (MonoCompile
*cfg
)
780 MonoMethodSignature
*sig
;
781 MonoMethodHeader
*header
;
784 guint32 locals_stack_size
, locals_stack_align
;
788 header
= cfg
->header
;
790 sig
= mono_method_signature (cfg
->method
);
792 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
795 * Determine whenever the frame pointer can be eliminated.
796 * FIXME: Remove some of the restrictions.
798 cfg
->arch
.omit_fp
= TRUE
;
800 if (!debug_omit_fp ())
801 cfg
->arch
.omit_fp
= FALSE
;
803 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
804 cfg
->arch
.omit_fp
= FALSE
;
805 if (header
->num_clauses
)
806 cfg
->arch
.omit_fp
= FALSE
;
808 cfg
->arch
.omit_fp
= FALSE
;
809 if ((sig
->ret
->type
!= MONO_TYPE_VOID
) && (cinfo
->ret
.storage
== ArgAggregate
))
810 cfg
->arch
.omit_fp
= FALSE
;
811 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
812 cfg
->arch
.omit_fp
= FALSE
;
813 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
814 ArgInfo
*ainfo
= &cinfo
->args
[i
];
816 if (ainfo
->storage
== ArgOnStack
) {
818 * The stack offset can only be determined when the frame
821 cfg
->arch
.omit_fp
= FALSE
;
825 mono_ia64_alloc_stacked_registers (cfg
);
828 * We use the ABI calling conventions for managed code as well.
829 * Exception: valuetypes are never passed or returned in registers.
832 if (cfg
->arch
.omit_fp
) {
833 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
834 cfg
->frame_reg
= IA64_SP
;
835 offset
= ARGS_OFFSET
;
838 /* Locals are allocated backwards from %fp */
839 cfg
->frame_reg
= cfg
->arch
.reg_fp
;
843 if (cfg
->method
->save_lmf
) {
847 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
848 switch (cinfo
->ret
.storage
) {
850 cfg
->ret
->opcode
= OP_REGVAR
;
851 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
854 cfg
->ret
->opcode
= OP_REGVAR
;
855 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
857 case ArgValuetypeAddrInIReg
:
858 cfg
->vret_addr
->opcode
= OP_REGVAR
;
859 cfg
->vret_addr
->dreg
= cfg
->arch
.reg_in0
+ cinfo
->ret
.reg
;
862 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
863 if (cfg
->arch
.omit_fp
)
864 g_assert_not_reached ();
865 offset
= ALIGN_TO (offset
, 8);
866 offset
+= cinfo
->ret
.nslots
* 8;
867 cfg
->ret
->opcode
= OP_REGOFFSET
;
868 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
869 cfg
->ret
->inst_offset
= - offset
;
872 g_assert_not_reached ();
874 cfg
->ret
->dreg
= cfg
->ret
->inst_c0
;
877 /* Allocate locals */
878 offsets
= mono_allocate_stack_slots_full (cfg
, cfg
->arch
.omit_fp
? FALSE
: TRUE
, &locals_stack_size
, &locals_stack_align
);
879 if (locals_stack_align
) {
880 offset
= ALIGN_TO (offset
, locals_stack_align
);
882 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
883 if (offsets
[i
] != -1) {
884 MonoInst
*inst
= cfg
->varinfo
[i
];
885 inst
->opcode
= OP_REGOFFSET
;
886 inst
->inst_basereg
= cfg
->frame_reg
;
887 if (cfg
->arch
.omit_fp
)
888 inst
->inst_offset
= (offset
+ offsets
[i
]);
890 inst
->inst_offset
= - (offset
+ offsets
[i
]);
891 // printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
894 offset
+= locals_stack_size
;
896 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
)) {
897 if (cfg
->arch
.omit_fp
)
898 g_assert_not_reached ();
899 g_assert (cinfo
->sig_cookie
.storage
== ArgOnStack
);
900 cfg
->sig_cookie
= cinfo
->sig_cookie
.offset
+ ARGS_OFFSET
;
903 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
904 inst
= cfg
->args
[i
];
905 if (inst
->opcode
!= OP_REGVAR
) {
906 ArgInfo
*ainfo
= &cinfo
->args
[i
];
907 gboolean inreg
= TRUE
;
910 if (sig
->hasthis
&& (i
== 0))
911 arg_type
= &mono_defaults
.object_class
->byval_arg
;
913 arg_type
= sig
->params
[i
- sig
->hasthis
];
915 /* FIXME: VOLATILE is only set if the liveness pass runs */
916 if (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))
919 inst
->opcode
= OP_REGOFFSET
;
921 switch (ainfo
->storage
) {
923 inst
->opcode
= OP_REGVAR
;
924 inst
->dreg
= cfg
->arch
.reg_in0
+ ainfo
->reg
;
927 case ArgInFloatRegR4
:
929 * Since float regs are volatile, we save the arguments to
930 * the stack in the prolog.
935 if (cfg
->arch
.omit_fp
)
936 g_assert_not_reached ();
937 inst
->opcode
= OP_REGOFFSET
;
938 inst
->inst_basereg
= cfg
->frame_reg
;
939 inst
->inst_offset
= ARGS_OFFSET
+ ainfo
->offset
;
948 if (!inreg
&& (ainfo
->storage
!= ArgOnStack
)) {
951 inst
->opcode
= OP_REGOFFSET
;
952 inst
->inst_basereg
= cfg
->frame_reg
;
953 /* These arguments are saved to the stack in the prolog */
954 switch (ainfo
->storage
) {
956 if (ainfo
->atype
== AggregateSingleHFA
)
957 size
= ainfo
->nslots
* 4;
959 size
= ainfo
->nslots
* 8;
962 size
= sizeof (gpointer
);
966 offset
= ALIGN_TO (offset
, sizeof (gpointer
));
968 if (cfg
->arch
.omit_fp
) {
969 inst
->inst_offset
= offset
;
973 inst
->inst_offset
= - offset
;
980 * FIXME: This doesn't work because some variables are allocated during local
984 if (cfg->arch.omit_fp && offset == 16)
988 cfg
->stack_offset
= offset
;
992 mono_arch_create_vars (MonoCompile
*cfg
)
994 MonoMethodSignature
*sig
;
997 sig
= mono_method_signature (cfg
->method
);
999 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
1001 if (cinfo
->ret
.storage
== ArgAggregate
)
1002 cfg
->ret_var_is_local
= TRUE
;
1003 if (cinfo
->ret
.storage
== ArgValuetypeAddrInIReg
) {
1004 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1005 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1006 printf ("vret_addr = ");
1007 mono_print_ins (cfg
->vret_addr
);
1013 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*tree
)
1017 MONO_INST_NEW (cfg
, arg
, OP_NOP
);
1018 arg
->sreg1
= tree
->dreg
;
1022 arg
->opcode
= OP_MOVE
;
1023 arg
->dreg
= mono_alloc_ireg (cfg
);
1025 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, FALSE
);
1028 arg
->opcode
= OP_FMOVE
;
1029 arg
->dreg
= mono_alloc_freg (cfg
);
1031 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, TRUE
);
1033 case ArgInFloatRegR4
:
1034 arg
->opcode
= OP_FCONV_TO_R4
;
1035 arg
->dreg
= mono_alloc_freg (cfg
);
1037 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, TRUE
);
1040 g_assert_not_reached ();
1043 MONO_ADD_INS (cfg
->cbb
, arg
);
1047 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1049 MonoMethodSignature
*tmp_sig
;
1051 /* Emit the signature cookie just before the implicit arguments */
1053 /* FIXME: Add support for signature tokens to AOT */
1054 cfg
->disable_aot
= TRUE
;
1056 g_assert (cinfo
->sig_cookie
.storage
== ArgOnStack
);
1059 * mono_ArgIterator_Setup assumes the signature cookie is
1060 * passed first and all the arguments which were before it are
1061 * passed on the stack after the signature. So compensate by
1062 * passing a different signature.
1064 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
1065 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
1066 tmp_sig
->sentinelpos
= 0;
1067 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
1069 MONO_INST_NEW (cfg
, sig_arg
, OP_ICONST
);
1070 sig_arg
->dreg
= mono_alloc_ireg (cfg
);
1071 sig_arg
->inst_p0
= tmp_sig
;
1072 MONO_ADD_INS (cfg
->cbb
, sig_arg
);
1074 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, IA64_SP
, 16 + cinfo
->sig_cookie
.offset
, sig_arg
->dreg
);
1078 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1081 MonoMethodSignature
*sig
;
1082 int i
, n
, stack_size
;
1088 mono_ia64_alloc_stacked_registers (cfg
);
1090 sig
= call
->signature
;
1091 n
= sig
->param_count
+ sig
->hasthis
;
1093 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, sig
->pinvoke
);
1095 if (cinfo
->ret
.storage
== ArgAggregate
) {
1100 * The valuetype is in registers after the call, need to be copied
1101 * to the stack. Save the address to a local here, so the call
1102 * instruction can access it.
1104 local
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1105 local
->flags
|= MONO_INST_VOLATILE
;
1106 cfg
->arch
.ret_var_addr_local
= local
;
1108 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1109 vtarg
->sreg1
= call
->vret_var
->dreg
;
1110 vtarg
->dreg
= local
->dreg
;
1111 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1114 if (cinfo
->ret
.storage
== ArgValuetypeAddrInIReg
) {
1115 add_outarg_reg (cfg
, call
, ArgInIReg
, cfg
->arch
.reg_out0
+ cinfo
->ret
.reg
, call
->vret_var
);
1118 for (i
= 0; i
< n
; ++i
) {
1121 ainfo
= cinfo
->args
+ i
;
1123 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1124 /* Emit the signature cookie just before the implicit arguments */
1125 emit_sig_cookie (cfg
, call
, cinfo
);
1128 in
= call
->args
[i
];
1130 if (sig
->hasthis
&& (i
== 0))
1131 arg_type
= &mono_defaults
.object_class
->byval_arg
;
1133 arg_type
= sig
->params
[i
- sig
->hasthis
];
1135 if ((i
>= sig
->hasthis
) && (MONO_TYPE_ISSTRUCT(arg_type
))) {
1139 if (arg_type
->type
== MONO_TYPE_TYPEDBYREF
) {
1140 size
= sizeof (MonoTypedRef
);
1141 align
= sizeof (gpointer
);
1143 else if (sig
->pinvoke
)
1144 size
= mono_type_native_stack_size (&in
->klass
->byval_arg
, &align
);
1147 * Other backends use mono_type_stack_size (), but that
1148 * aligns the size to 8, which is larger than the size of
1149 * the source, leading to reads of invalid memory if the
1150 * source is at the end of address space.
1152 size
= mono_class_value_size (in
->klass
, &align
);
1158 MONO_INST_NEW (cfg
, arg
, OP_OUTARG_VT
);
1159 arg
->sreg1
= in
->dreg
;
1160 arg
->klass
= in
->klass
;
1161 arg
->backend
.size
= size
;
1162 arg
->inst_p0
= call
;
1163 arg
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1164 memcpy (arg
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1166 MONO_ADD_INS (cfg
->cbb
, arg
);
1170 switch (ainfo
->storage
) {
1172 add_outarg_reg (cfg
, call
, ainfo
->storage
, cfg
->arch
.reg_out0
+ ainfo
->reg
, in
);
1175 case ArgInFloatRegR4
:
1176 add_outarg_reg (cfg
, call
, ainfo
->storage
, ainfo
->reg
, in
);
1179 if (arg_type
->type
== MONO_TYPE_R4
&& !arg_type
->byref
)
1180 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, IA64_SP
, 16 + ainfo
->offset
, in
->dreg
);
1181 else if (arg_type
->type
== MONO_TYPE_R8
&& !arg_type
->byref
)
1182 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, IA64_SP
, 16 + ainfo
->offset
, in
->dreg
);
1184 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, IA64_SP
, 16 + ainfo
->offset
, in
->dreg
);
1187 g_assert_not_reached ();
1192 /* Handle the case where there are no implicit arguments */
1193 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
)) {
1194 emit_sig_cookie (cfg
, call
, cinfo
);
1197 call
->stack_usage
= cinfo
->stack_usage
;
1198 cfg
->arch
.n_out_regs
= MAX (cfg
->arch
.n_out_regs
, cinfo
->reg_usage
);
1202 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1204 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1205 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
1206 int size
= ins
->backend
.size
;
1208 if (ainfo
->storage
== ArgAggregate
) {
1209 MonoInst
*load
, *store
;
1213 * Part of the structure is passed in registers.
1215 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
1216 slot
= ainfo
->reg
+ i
;
1218 if (ainfo
->atype
== AggregateSingleHFA
) {
1219 MONO_INST_NEW (cfg
, load
, OP_LOADR4_MEMBASE
);
1220 load
->inst_basereg
= src
->dreg
;
1221 load
->inst_offset
= i
* 4;
1222 load
->dreg
= mono_alloc_freg (cfg
);
1224 mono_call_inst_add_outarg_reg (cfg
, call
, load
->dreg
, ainfo
->reg
+ i
, TRUE
);
1225 } else if (ainfo
->atype
== AggregateDoubleHFA
) {
1226 MONO_INST_NEW (cfg
, load
, OP_LOADR8_MEMBASE
);
1227 load
->inst_basereg
= src
->dreg
;
1228 load
->inst_offset
= i
* 8;
1229 load
->dreg
= mono_alloc_freg (cfg
);
1231 mono_call_inst_add_outarg_reg (cfg
, call
, load
->dreg
, ainfo
->reg
+ i
, TRUE
);
1233 MONO_INST_NEW (cfg
, load
, OP_LOADI8_MEMBASE
);
1234 load
->inst_basereg
= src
->dreg
;
1235 load
->inst_offset
= i
* 8;
1236 load
->dreg
= mono_alloc_ireg (cfg
);
1238 mono_call_inst_add_outarg_reg (cfg
, call
, load
->dreg
, cfg
->arch
.reg_out0
+ ainfo
->reg
+ i
, FALSE
);
1240 MONO_ADD_INS (cfg
->cbb
, load
);
1244 * Part of the structure is passed on the stack.
1246 for (i
= ainfo
->nregs
; i
< ainfo
->nslots
; ++i
) {
1247 slot
= ainfo
->reg
+ i
;
1249 MONO_INST_NEW (cfg
, load
, OP_LOADI8_MEMBASE
);
1250 load
->inst_basereg
= src
->dreg
;
1251 load
->inst_offset
= i
* sizeof (gpointer
);
1252 load
->dreg
= mono_alloc_preg (cfg
);
1253 MONO_ADD_INS (cfg
->cbb
, load
);
1255 MONO_INST_NEW (cfg
, store
, OP_STOREI8_MEMBASE_REG
);
1256 store
->sreg1
= load
->dreg
;
1257 store
->inst_destbasereg
= IA64_SP
;
1258 store
->inst_offset
= 16 + ainfo
->offset
+ (slot
- 8) * 8;
1259 MONO_ADD_INS (cfg
->cbb
, store
);
1262 mini_emit_memcpy (cfg
, IA64_SP
, 16 + ainfo
->offset
, src
->dreg
, 0, size
, 4);
1267 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1269 CallInfo
*cinfo
= get_call_info (cfg
, cfg
->mempool
, mono_method_signature (method
), FALSE
);
1271 switch (cinfo
->ret
.storage
) {
1273 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1276 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1279 g_assert_not_reached ();
1284 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1289 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1291 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1294 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1295 switch (ins
->opcode
) {
1303 if (ins
->dreg
== ins
->sreg1
) {
1304 MONO_DELETE_INS (bb
, ins
);
1310 * OP_MOVE sreg, dreg
1311 * OP_MOVE dreg, sreg
1313 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
1314 ins
->sreg1
== last_ins
->dreg
&&
1315 ins
->dreg
== last_ins
->sreg1
) {
1316 MONO_DELETE_INS (bb
, ins
);
1322 /* remove unnecessary multiplication with 1 */
1323 if (ins
->inst_imm
== 1) {
1324 if (ins
->dreg
!= ins
->sreg1
) {
1325 ins
->opcode
= OP_MOVE
;
1327 MONO_DELETE_INS (bb
, ins
);
1337 bb
->last_ins
= last_ins
;
1340 int cond_to_ia64_cmp
[][3] = {
1341 {OP_IA64_CMP_EQ
, OP_IA64_CMP4_EQ
, OP_IA64_FCMP_EQ
},
1342 {OP_IA64_CMP_NE
, OP_IA64_CMP4_NE
, OP_IA64_FCMP_NE
},
1343 {OP_IA64_CMP_LE
, OP_IA64_CMP4_LE
, OP_IA64_FCMP_LE
},
1344 {OP_IA64_CMP_GE
, OP_IA64_CMP4_GE
, OP_IA64_FCMP_GE
},
1345 {OP_IA64_CMP_LT
, OP_IA64_CMP4_LT
, OP_IA64_FCMP_LT
},
1346 {OP_IA64_CMP_GT
, OP_IA64_CMP4_GT
, OP_IA64_FCMP_GT
},
1347 {OP_IA64_CMP_LE_UN
, OP_IA64_CMP4_LE_UN
, OP_IA64_FCMP_LE_UN
},
1348 {OP_IA64_CMP_GE_UN
, OP_IA64_CMP4_GE_UN
, OP_IA64_FCMP_GE_UN
},
1349 {OP_IA64_CMP_LT_UN
, OP_IA64_CMP4_LT_UN
, OP_IA64_FCMP_LT_UN
},
1350 {OP_IA64_CMP_GT_UN
, OP_IA64_CMP4_GT_UN
, OP_IA64_FCMP_GT_UN
}
1354 opcode_to_ia64_cmp (int opcode
, int cmp_opcode
)
1356 return cond_to_ia64_cmp
[mono_opcode_to_cond (opcode
)][mono_opcode_to_type (opcode
, cmp_opcode
)];
1359 int cond_to_ia64_cmp_imm
[][3] = {
1360 {OP_IA64_CMP_EQ_IMM
, OP_IA64_CMP4_EQ_IMM
, 0},
1361 {OP_IA64_CMP_NE_IMM
, OP_IA64_CMP4_NE_IMM
, 0},
1362 {OP_IA64_CMP_GE_IMM
, OP_IA64_CMP4_GE_IMM
, 0},
1363 {OP_IA64_CMP_LE_IMM
, OP_IA64_CMP4_LE_IMM
, 0},
1364 {OP_IA64_CMP_GT_IMM
, OP_IA64_CMP4_GT_IMM
, 0},
1365 {OP_IA64_CMP_LT_IMM
, OP_IA64_CMP4_LT_IMM
, 0},
1366 {OP_IA64_CMP_GE_UN_IMM
, OP_IA64_CMP4_GE_UN_IMM
, 0},
1367 {OP_IA64_CMP_LE_UN_IMM
, OP_IA64_CMP4_LE_UN_IMM
, 0},
1368 {OP_IA64_CMP_GT_UN_IMM
, OP_IA64_CMP4_GT_UN_IMM
, 0},
1369 {OP_IA64_CMP_LT_UN_IMM
, OP_IA64_CMP4_LT_UN_IMM
, 0},
1373 opcode_to_ia64_cmp_imm (int opcode
, int cmp_opcode
)
1375 /* The condition needs to be reversed */
1376 return cond_to_ia64_cmp_imm
[mono_opcode_to_cond (opcode
)][mono_opcode_to_type (opcode
, cmp_opcode
)];
1379 #define NEW_INS(cfg,dest,op) do { \
1380 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
1381 (dest)->opcode = (op); \
1382 mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
1383 last_ins = (dest); \
1387 * mono_arch_lowering_pass:
1389 * Converts complex opcodes into simpler ones so that each IR instruction
1390 * corresponds to one machine instruction.
1393 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1395 MonoInst
*ins
, *n
, *next
, *temp
, *temp2
, *temp3
, *last_ins
= NULL
;
1398 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1399 switch (ins
->opcode
) {
1400 case OP_STOREI1_MEMBASE_IMM
:
1401 case OP_STOREI2_MEMBASE_IMM
:
1402 case OP_STOREI4_MEMBASE_IMM
:
1403 case OP_STOREI8_MEMBASE_IMM
:
1404 case OP_STORE_MEMBASE_IMM
:
1405 /* There are no store_membase instructions on ia64 */
1406 if (ins
->inst_offset
== 0) {
1408 } else if (ia64_is_imm14 (ins
->inst_offset
)) {
1409 NEW_INS (cfg
, temp2
, OP_ADD_IMM
);
1410 temp2
->sreg1
= ins
->inst_destbasereg
;
1411 temp2
->inst_imm
= ins
->inst_offset
;
1412 temp2
->dreg
= mono_alloc_ireg (cfg
);
1415 NEW_INS (cfg
, temp
, OP_I8CONST
);
1416 temp
->inst_c0
= ins
->inst_offset
;
1417 temp
->dreg
= mono_alloc_ireg (cfg
);
1419 NEW_INS (cfg
, temp2
, OP_LADD
);
1420 temp2
->sreg1
= ins
->inst_destbasereg
;
1421 temp2
->sreg2
= temp
->dreg
;
1422 temp2
->dreg
= mono_alloc_ireg (cfg
);
1425 switch (ins
->opcode
) {
1426 case OP_STOREI1_MEMBASE_IMM
:
1427 ins
->opcode
= OP_STOREI1_MEMBASE_REG
;
1429 case OP_STOREI2_MEMBASE_IMM
:
1430 ins
->opcode
= OP_STOREI2_MEMBASE_REG
;
1432 case OP_STOREI4_MEMBASE_IMM
:
1433 ins
->opcode
= OP_STOREI4_MEMBASE_REG
;
1435 case OP_STOREI8_MEMBASE_IMM
:
1436 case OP_STORE_MEMBASE_IMM
:
1437 ins
->opcode
= OP_STOREI8_MEMBASE_REG
;
1440 g_assert_not_reached ();
1443 if (ins
->inst_imm
== 0)
1444 ins
->sreg1
= IA64_R0
;
1446 NEW_INS (cfg
, temp3
, OP_I8CONST
);
1447 temp3
->inst_c0
= ins
->inst_imm
;
1448 temp3
->dreg
= mono_alloc_ireg (cfg
);
1449 ins
->sreg1
= temp3
->dreg
;
1452 ins
->inst_offset
= 0;
1454 ins
->inst_destbasereg
= temp2
->dreg
;
1456 case OP_STOREI1_MEMBASE_REG
:
1457 case OP_STOREI2_MEMBASE_REG
:
1458 case OP_STOREI4_MEMBASE_REG
:
1459 case OP_STOREI8_MEMBASE_REG
:
1460 case OP_STORER4_MEMBASE_REG
:
1461 case OP_STORER8_MEMBASE_REG
:
1462 case OP_STORE_MEMBASE_REG
:
1463 /* There are no store_membase instructions on ia64 */
1464 if (ins
->inst_offset
== 0) {
1467 else if (ia64_is_imm14 (ins
->inst_offset
)) {
1468 NEW_INS (cfg
, temp2
, OP_ADD_IMM
);
1469 temp2
->sreg1
= ins
->inst_destbasereg
;
1470 temp2
->inst_imm
= ins
->inst_offset
;
1471 temp2
->dreg
= mono_alloc_ireg (cfg
);
1474 NEW_INS (cfg
, temp
, OP_I8CONST
);
1475 temp
->inst_c0
= ins
->inst_offset
;
1476 temp
->dreg
= mono_alloc_ireg (cfg
);
1477 NEW_INS (cfg
, temp2
, OP_LADD
);
1478 temp2
->sreg1
= ins
->inst_destbasereg
;
1479 temp2
->sreg2
= temp
->dreg
;
1480 temp2
->dreg
= mono_alloc_ireg (cfg
);
1483 ins
->inst_offset
= 0;
1484 ins
->inst_destbasereg
= temp2
->dreg
;
1486 case OP_LOADI1_MEMBASE
:
1487 case OP_LOADU1_MEMBASE
:
1488 case OP_LOADI2_MEMBASE
:
1489 case OP_LOADU2_MEMBASE
:
1490 case OP_LOADI4_MEMBASE
:
1491 case OP_LOADU4_MEMBASE
:
1492 case OP_LOADI8_MEMBASE
:
1493 case OP_LOAD_MEMBASE
:
1494 case OP_LOADR4_MEMBASE
:
1495 case OP_LOADR8_MEMBASE
:
1496 case OP_ATOMIC_EXCHANGE_I4
:
1497 case OP_ATOMIC_EXCHANGE_I8
:
1498 case OP_ATOMIC_ADD_NEW_I4
:
1499 case OP_ATOMIC_ADD_NEW_I8
:
1500 case OP_ATOMIC_ADD_IMM_NEW_I4
:
1501 case OP_ATOMIC_ADD_IMM_NEW_I8
:
1502 /* There are no membase instructions on ia64 */
1503 if (ins
->inst_offset
== 0) {
1506 else if (ia64_is_imm14 (ins
->inst_offset
)) {
1507 NEW_INS (cfg
, temp2
, OP_ADD_IMM
);
1508 temp2
->sreg1
= ins
->inst_basereg
;
1509 temp2
->inst_imm
= ins
->inst_offset
;
1510 temp2
->dreg
= mono_alloc_ireg (cfg
);
1513 NEW_INS (cfg
, temp
, OP_I8CONST
);
1514 temp
->inst_c0
= ins
->inst_offset
;
1515 temp
->dreg
= mono_alloc_ireg (cfg
);
1516 NEW_INS (cfg
, temp2
, OP_LADD
);
1517 temp2
->sreg1
= ins
->inst_basereg
;
1518 temp2
->sreg2
= temp
->dreg
;
1519 temp2
->dreg
= mono_alloc_ireg (cfg
);
1522 ins
->inst_offset
= 0;
1523 ins
->inst_basereg
= temp2
->dreg
;
1543 case OP_ISHR_UN_IMM
:
1544 case OP_LSHR_UN_IMM
: {
1545 gboolean is_imm
= FALSE
;
1546 gboolean switched
= FALSE
;
1548 if (ins
->opcode
== OP_AND_IMM
&& ins
->inst_imm
== 255) {
1549 ins
->opcode
= OP_ZEXT_I1
;
1553 switch (ins
->opcode
) {
1557 is_imm
= ia64_is_imm14 (ins
->inst_imm
);
1562 is_imm
= ia64_is_imm14 (- (ins
->inst_imm
));
1564 /* A = B - IMM -> A = B + (-IMM) */
1565 ins
->inst_imm
= - ins
->inst_imm
;
1566 ins
->opcode
= OP_IADD_IMM
;
1577 is_imm
= ia64_is_imm8 (ins
->inst_imm
);
1586 case OP_ISHR_UN_IMM
:
1587 case OP_LSHR_UN_IMM
:
1588 is_imm
= (ins
->inst_imm
>= 0) && (ins
->inst_imm
< 64);
1596 ins
->sreg2
= ins
->sreg1
;
1600 ins
->opcode
= mono_op_imm_to_op (ins
->opcode
);
1602 if (ins
->inst_imm
== 0)
1603 ins
->sreg2
= IA64_R0
;
1605 NEW_INS (cfg
, temp
, OP_I8CONST
);
1606 temp
->inst_c0
= ins
->inst_imm
;
1607 temp
->dreg
= mono_alloc_ireg (cfg
);
1608 ins
->sreg2
= temp
->dreg
;
1612 case OP_COMPARE_IMM
:
1613 case OP_ICOMPARE_IMM
:
1614 case OP_LCOMPARE_IMM
: {
1615 /* Instead of compare+b<cond>, ia64 has compare<cond>+br */
1621 /* Branch opts can eliminate the branch */
1622 if (!next
|| (!(MONO_IS_COND_BRANCH_OP (next
) || MONO_IS_COND_EXC (next
) || MONO_IS_SETCC (next
)))) {
1628 * The compare_imm instructions have switched up arguments, and
1629 * some of them take an imm between -127 and 128.
1632 cond
= mono_opcode_to_cond (next
->opcode
);
1633 if ((cond
== CMP_LT
) || (cond
== CMP_GE
))
1634 imm
= ia64_is_imm8 (ins
->inst_imm
- 1);
1635 else if ((cond
== CMP_LT_UN
) || (cond
== CMP_GE_UN
))
1636 imm
= ia64_is_imm8 (ins
->inst_imm
- 1) && (ins
->inst_imm
> 0);
1638 imm
= ia64_is_imm8 (ins
->inst_imm
);
1641 ins
->opcode
= opcode_to_ia64_cmp_imm (next
->opcode
, ins
->opcode
);
1642 ins
->sreg2
= ins
->sreg1
;
1645 ins
->opcode
= opcode_to_ia64_cmp (next
->opcode
, ins
->opcode
);
1647 if (ins
->inst_imm
== 0)
1648 ins
->sreg2
= IA64_R0
;
1650 NEW_INS (cfg
, temp
, OP_I8CONST
);
1651 temp
->inst_c0
= ins
->inst_imm
;
1652 temp
->dreg
= mono_alloc_ireg (cfg
);
1653 ins
->sreg2
= temp
->dreg
;
1657 if (MONO_IS_COND_BRANCH_OP (next
)) {
1658 next
->opcode
= OP_IA64_BR_COND
;
1659 next
->inst_target_bb
= next
->inst_true_bb
;
1660 } else if (MONO_IS_COND_EXC (next
)) {
1661 next
->opcode
= OP_IA64_COND_EXC
;
1662 } else if (MONO_IS_SETCC (next
)) {
1663 next
->opcode
= OP_IA64_CSET
;
1665 printf ("%s\n", mono_inst_name (next
->opcode
));
1675 /* Instead of compare+b<cond>, ia64 has compare<cond>+br */
1679 /* Branch opts can eliminate the branch */
1680 if (!next
|| (!(MONO_IS_COND_BRANCH_OP (next
) || MONO_IS_COND_EXC (next
) || MONO_IS_SETCC (next
)))) {
1685 ins
->opcode
= opcode_to_ia64_cmp (next
->opcode
, ins
->opcode
);
1687 if (MONO_IS_COND_BRANCH_OP (next
)) {
1688 next
->opcode
= OP_IA64_BR_COND
;
1689 next
->inst_target_bb
= next
->inst_true_bb
;
1690 } else if (MONO_IS_COND_EXC (next
)) {
1691 next
->opcode
= OP_IA64_COND_EXC
;
1692 } else if (MONO_IS_SETCC (next
)) {
1693 next
->opcode
= OP_IA64_CSET
;
1695 printf ("%s\n", mono_inst_name (next
->opcode
));
1706 /* The front end removes the fcompare, so introduce it again */
1707 NEW_INS (cfg
, temp
, opcode_to_ia64_cmp (ins
->opcode
, OP_FCOMPARE
));
1708 temp
->sreg1
= ins
->sreg1
;
1709 temp
->sreg2
= ins
->sreg2
;
1711 ins
->opcode
= OP_IA64_CSET
;
1712 MONO_INST_NULLIFY_SREGS (ins
);
1718 gboolean found
= FALSE
;
1719 int shl_op
= ins
->opcode
== OP_IMUL_IMM
? OP_ISHL_IMM
: OP_SHL_IMM
;
1721 /* First the easy cases */
1722 if (ins
->inst_imm
== 1) {
1723 ins
->opcode
= OP_MOVE
;
1726 for (i
= 1; i
< 64; ++i
)
1727 if (ins
->inst_imm
== (((gint64
)1) << i
)) {
1728 ins
->opcode
= shl_op
;
1734 /* This could be optimized */
1737 for (i
= 0; i
< 64; ++i
) {
1738 if (ins
->inst_imm
& (((gint64
)1) << i
)) {
1739 NEW_INS (cfg
, temp
, shl_op
);
1740 temp
->dreg
= mono_alloc_ireg (cfg
);
1741 temp
->sreg1
= ins
->sreg1
;
1745 sum_reg
= temp
->dreg
;
1747 NEW_INS (cfg
, temp2
, OP_LADD
);
1748 temp2
->dreg
= mono_alloc_ireg (cfg
);
1749 temp2
->sreg1
= sum_reg
;
1750 temp2
->sreg2
= temp
->dreg
;
1751 sum_reg
= temp2
->dreg
;
1755 ins
->opcode
= OP_MOVE
;
1756 ins
->sreg1
= sum_reg
;
1760 case OP_LCONV_TO_OVF_U4
:
1761 NEW_INS (cfg
, temp
, OP_IA64_CMP4_LT
);
1762 temp
->sreg1
= ins
->sreg1
;
1763 temp
->sreg2
= IA64_R0
;
1765 NEW_INS (cfg
, temp
, OP_IA64_COND_EXC
);
1766 temp
->inst_p1
= (char*)"OverflowException";
1768 ins
->opcode
= OP_MOVE
;
1770 case OP_LCONV_TO_OVF_I4_UN
:
1771 NEW_INS (cfg
, temp
, OP_ICONST
);
1772 temp
->inst_c0
= 0x7fffffff;
1773 temp
->dreg
= mono_alloc_ireg (cfg
);
1775 NEW_INS (cfg
, temp2
, OP_IA64_CMP4_GT_UN
);
1776 temp2
->sreg1
= ins
->sreg1
;
1777 temp2
->sreg2
= temp
->dreg
;
1779 NEW_INS (cfg
, temp
, OP_IA64_COND_EXC
);
1780 temp
->inst_p1
= (char*)"OverflowException";
1782 ins
->opcode
= OP_MOVE
;
1784 case OP_FCONV_TO_I4
:
1785 case OP_FCONV_TO_I2
:
1786 case OP_FCONV_TO_U2
:
1787 case OP_FCONV_TO_I1
:
1788 case OP_FCONV_TO_U1
:
1789 NEW_INS (cfg
, temp
, OP_FCONV_TO_I8
);
1790 temp
->sreg1
= ins
->sreg1
;
1791 temp
->dreg
= ins
->dreg
;
1793 switch (ins
->opcode
) {
1794 case OP_FCONV_TO_I4
:
1795 ins
->opcode
= OP_SEXT_I4
;
1797 case OP_FCONV_TO_I2
:
1798 ins
->opcode
= OP_SEXT_I2
;
1800 case OP_FCONV_TO_U2
:
1801 ins
->opcode
= OP_ZEXT_I4
;
1803 case OP_FCONV_TO_I1
:
1804 ins
->opcode
= OP_SEXT_I1
;
1806 case OP_FCONV_TO_U1
:
1807 ins
->opcode
= OP_ZEXT_I1
;
1810 g_assert_not_reached ();
1812 ins
->sreg1
= ins
->dreg
;
1820 bb
->last_ins
= last_ins
;
1822 bb
->max_vreg
= cfg
->next_vreg
;
1826 * emit_load_volatile_arguments:
1828 * Load volatile arguments from the stack to the original input registers.
1829 * Required before a tail call.
1831 static Ia64CodegenState
1832 emit_load_volatile_arguments (MonoCompile
*cfg
, Ia64CodegenState code
)
1834 MonoMethod
*method
= cfg
->method
;
1835 MonoMethodSignature
*sig
;
1840 /* FIXME: Generate intermediate code instead */
1842 sig
= mono_method_signature (method
);
1844 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
1846 /* This is the opposite of the code in emit_prolog */
1847 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1848 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1849 gint32 stack_offset
;
1852 ins
= cfg
->args
[i
];
1854 if (sig
->hasthis
&& (i
== 0))
1855 arg_type
= &mono_defaults
.object_class
->byval_arg
;
1857 arg_type
= sig
->params
[i
- sig
->hasthis
];
1859 arg_type
= mono_type_get_underlying_type (arg_type
);
1861 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
1863 /* Save volatile arguments to the stack */
1864 if (ins
->opcode
!= OP_REGVAR
) {
1865 switch (ainfo
->storage
) {
1868 /* FIXME: big offsets */
1869 g_assert (ins
->opcode
== OP_REGOFFSET
);
1870 ia64_adds_imm (code
, GP_SCRATCH_REG
, ins
->inst_offset
, ins
->inst_basereg
);
1871 if (arg_type
->byref
)
1872 ia64_ld8 (code
, cfg
->arch
.reg_in0
+ ainfo
->reg
, GP_SCRATCH_REG
);
1874 switch (arg_type
->type
) {
1876 ia64_ldfs (code
, ainfo
->reg
, GP_SCRATCH_REG
);
1879 ia64_ldfd (code
, ainfo
->reg
, GP_SCRATCH_REG
);
1882 ia64_ld8 (code
, cfg
->arch
.reg_in0
+ ainfo
->reg
, GP_SCRATCH_REG
);
1894 if (ins
->opcode
== OP_REGVAR
) {
1895 /* Argument allocated to (non-volatile) register */
1896 switch (ainfo
->storage
) {
1898 if (ins
->dreg
!= cfg
->arch
.reg_in0
+ ainfo
->reg
)
1899 ia64_mov (code
, cfg
->arch
.reg_in0
+ ainfo
->reg
, ins
->dreg
);
1902 ia64_adds_imm (code
, GP_SCRATCH_REG
, 16 + ainfo
->offset
, cfg
->frame_reg
);
1903 ia64_st8 (code
, GP_SCRATCH_REG
, ins
->dreg
);
1914 static Ia64CodegenState
1915 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, Ia64CodegenState code
)
1920 /* Move return value to the target register */
1921 switch (ins
->opcode
) {
1923 case OP_VOIDCALL_REG
:
1924 case OP_VOIDCALL_MEMBASE
:
1928 case OP_CALL_MEMBASE
:
1931 case OP_LCALL_MEMBASE
:
1932 g_assert (ins
->dreg
== IA64_R8
);
1936 case OP_FCALL_MEMBASE
:
1937 g_assert (ins
->dreg
== 8);
1938 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
)
1939 ia64_fnorm_d_sf (code
, ins
->dreg
, ins
->dreg
, 0);
1943 case OP_VCALL_MEMBASE
:
1946 case OP_VCALL2_MEMBASE
: {
1949 cinfo
= get_call_info (cfg
, cfg
->mempool
, ((MonoCallInst
*)ins
)->signature
, FALSE
);
1950 storage
= cinfo
->ret
.storage
;
1952 if (storage
== ArgAggregate
) {
1953 MonoInst
*local
= (MonoInst
*)cfg
->arch
.ret_var_addr_local
;
1955 /* Load address of stack space allocated for the return value */
1956 ia64_movl (code
, GP_SCRATCH_REG
, local
->inst_offset
);
1957 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, local
->inst_basereg
);
1958 ia64_ld8 (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
);
1960 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
1961 switch (cinfo
->ret
.atype
) {
1962 case AggregateNormal
:
1963 ia64_st8_inc_imm_hint (code
, GP_SCRATCH_REG
, cinfo
->ret
.reg
+ i
, 8, 0);
1965 case AggregateSingleHFA
:
1966 ia64_stfs_inc_imm_hint (code
, GP_SCRATCH_REG
, cinfo
->ret
.reg
+ i
, 4, 0);
1968 case AggregateDoubleHFA
:
1969 ia64_stfd_inc_imm_hint (code
, GP_SCRATCH_REG
, cinfo
->ret
.reg
+ i
, 8, 0);
1972 g_assert_not_reached ();
1979 g_assert_not_reached ();
1985 #define add_patch_info(cfg,code,patch_type,data) do { \
1986 mono_add_patch_info (cfg, code.buf + code.nins - cfg->native_code, patch_type, data); \
1989 #define emit_cond_system_exception(cfg,code,exc_name,predicate) do { \
1990 MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
1992 add_patch_info (cfg, code, MONO_PATCH_INFO_EXC, exc_name); \
1994 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, tins->inst_true_bb); \
1995 ia64_br_cond_pred (code, (predicate), 0); \
1998 static Ia64CodegenState
1999 emit_call (MonoCompile
*cfg
, Ia64CodegenState code
, guint32 patch_type
, gconstpointer data
)
2001 add_patch_info (cfg
, code
, patch_type
, data
);
2003 if ((patch_type
== MONO_PATCH_INFO_ABS
) || (patch_type
== MONO_PATCH_INFO_INTERNAL_METHOD
)) {
2005 /* mono_arch_patch_callsite will patch this */
2006 /* mono_arch_nullify_class_init_trampoline will patch this */
2007 ia64_movl (code
, GP_SCRATCH_REG
, 0);
2008 ia64_ld8_inc_imm (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, 8);
2009 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG2
);
2010 ia64_ld8 (code
, IA64_GP
, GP_SCRATCH_REG
);
2011 ia64_br_call_reg (code
, IA64_B0
, IA64_B6
);
2014 /* Can't use a direct call since the displacement might be too small */
2015 /* mono_arch_patch_callsite will patch this */
2016 ia64_movl (code
, GP_SCRATCH_REG
, 0);
2017 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
2018 ia64_br_call_reg (code
, IA64_B0
, IA64_B6
);
2024 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
2027 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2032 Ia64CodegenState code
;
2033 guint8
*code_start
= cfg
->native_code
+ cfg
->code_len
;
2034 MonoInst
*last_ins
= NULL
;
2035 guint last_offset
= 0;
2038 if (cfg
->opt
& MONO_OPT_LOOP
) {
2042 if (cfg
->verbose_level
> 2)
2043 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
2045 cpos
= bb
->max_offset
;
2047 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
2051 offset
= code_start
- cfg
->native_code
;
2053 ia64_codegen_init (code
, code_start
);
2056 if (strstr (cfg
->method
->name
, "conv_ovf_i1") && (bb
->block_num
== 2))
2060 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2061 offset
= code
.buf
- cfg
->native_code
;
2063 max_len
= ((int)(((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
])) + 128;
2065 while (offset
+ max_len
+ 16 > cfg
->code_size
) {
2066 ia64_codegen_close (code
);
2068 offset
= code
.buf
- cfg
->native_code
;
2070 cfg
->code_size
*= 2;
2071 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
2072 code_start
= cfg
->native_code
+ offset
;
2073 mono_jit_stats
.code_reallocs
++;
2075 ia64_codegen_init (code
, code_start
);
2078 mono_debug_record_line_number (cfg
, ins
, offset
);
2080 switch (ins
->opcode
) {
2083 if (ia64_is_imm14 (ins
->inst_c0
))
2084 ia64_adds_imm (code
, ins
->dreg
, ins
->inst_c0
, IA64_R0
);
2086 ia64_movl (code
, ins
->dreg
, ins
->inst_c0
);
2089 add_patch_info (cfg
, code
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
2090 ia64_movl (code
, ins
->dreg
, 0);
2093 ia64_mov (code
, ins
->dreg
, ins
->sreg1
);
2096 case OP_IA64_BR_COND
: {
2098 if (ins
->opcode
== OP_IA64_BR_COND
)
2100 if (ins
->inst_target_bb
->native_offset
) {
2101 guint8
*pos
= code
.buf
+ code
.nins
;
2103 ia64_br_cond_pred (code
, pred
, 0);
2104 ia64_begin_bundle (code
);
2105 ia64_patch (pos
, cfg
->native_code
+ ins
->inst_target_bb
->native_offset
);
2107 add_patch_info (cfg
, code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
2108 ia64_br_cond_pred (code
, pred
, 0);
2113 ia64_begin_bundle (code
);
2114 ins
->inst_c0
= code
.buf
- cfg
->native_code
;
2117 case OP_RELAXED_NOP
:
2119 case OP_DUMMY_STORE
:
2120 case OP_NOT_REACHED
:
2124 ia64_mov_to_br (code
, IA64_B6
, ins
->sreg1
);
2125 ia64_br_cond_reg (code
, IA64_B6
);
2129 ia64_add (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2133 ia64_sub (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2137 ia64_and (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2141 ia64_or (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2145 ia64_xor (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2149 ia64_sub (code
, ins
->dreg
, IA64_R0
, ins
->sreg1
);
2153 ia64_andcm_imm (code
, ins
->dreg
, -1, ins
->sreg1
);
2157 ia64_shl (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2160 ia64_sxt4 (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2161 ia64_shr (code
, ins
->dreg
, GP_SCRATCH_REG
, ins
->sreg2
);
2164 ia64_shr (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2167 ia64_zxt4 (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2168 ia64_shr_u (code
, ins
->dreg
, GP_SCRATCH_REG
, ins
->sreg2
);
2171 ia64_shr_u (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
2174 /* p6 and p7 is set if there is signed/unsigned overflow */
2176 /* Set p8-p9 == (sreg2 > 0) */
2177 ia64_cmp4_lt (code
, 8, 9, IA64_R0
, ins
->sreg2
);
2179 ia64_add (code
, GP_SCRATCH_REG
, ins
->sreg1
, ins
->sreg2
);
2181 /* (sreg2 > 0) && (res < ins->sreg1) => signed overflow */
2182 ia64_cmp4_lt_pred (code
, 8, 6, 10, GP_SCRATCH_REG
, ins
->sreg1
);
2183 /* (sreg2 <= 0) && (res > ins->sreg1) => signed overflow */
2184 ia64_cmp4_lt_pred (code
, 9, 6, 10, ins
->sreg1
, GP_SCRATCH_REG
);
2186 /* res <u sreg1 => unsigned overflow */
2187 ia64_cmp4_ltu (code
, 7, 10, GP_SCRATCH_REG
, ins
->sreg1
);
2189 /* FIXME: Predicate this since this is a side effect */
2190 ia64_mov (code
, ins
->dreg
, GP_SCRATCH_REG
);
2193 /* p6 and p7 is set if there is signed/unsigned overflow */
2195 /* Set p8-p9 == (sreg2 > 0) */
2196 ia64_cmp4_lt (code
, 8, 9, IA64_R0
, ins
->sreg2
);
2198 ia64_sub (code
, GP_SCRATCH_REG
, ins
->sreg1
, ins
->sreg2
);
2200 /* (sreg2 > 0) && (res > ins->sreg1) => signed overflow */
2201 ia64_cmp4_gt_pred (code
, 8, 6, 10, GP_SCRATCH_REG
, ins
->sreg1
);
2202 /* (sreg2 <= 0) && (res < ins->sreg1) => signed overflow */
2203 ia64_cmp4_lt_pred (code
, 9, 6, 10, GP_SCRATCH_REG
, ins
->sreg1
);
2205 /* sreg1 <u sreg2 => unsigned overflow */
2206 ia64_cmp4_ltu (code
, 7, 10, ins
->sreg1
, ins
->sreg2
);
2208 /* FIXME: Predicate this since this is a side effect */
2209 ia64_mov (code
, ins
->dreg
, GP_SCRATCH_REG
);
2212 /* Same as OP_IADDCC */
2213 ia64_cmp_lt (code
, 8, 9, IA64_R0
, ins
->sreg2
);
2215 ia64_add (code
, GP_SCRATCH_REG
, ins
->sreg1
, ins
->sreg2
);
2217 ia64_cmp_lt_pred (code
, 8, 6, 10, GP_SCRATCH_REG
, ins
->sreg1
);
2218 ia64_cmp_lt_pred (code
, 9, 6, 10, ins
->sreg1
, GP_SCRATCH_REG
);
2220 ia64_cmp_ltu (code
, 7, 10, GP_SCRATCH_REG
, ins
->sreg1
);
2222 ia64_mov (code
, ins
->dreg
, GP_SCRATCH_REG
);
2225 /* Same as OP_ISUBCC */
2227 ia64_cmp_lt (code
, 8, 9, IA64_R0
, ins
->sreg2
);
2229 ia64_sub (code
, GP_SCRATCH_REG
, ins
->sreg1
, ins
->sreg2
);
2231 ia64_cmp_gt_pred (code
, 8, 6, 10, GP_SCRATCH_REG
, ins
->sreg1
);
2232 ia64_cmp_lt_pred (code
, 9, 6, 10, GP_SCRATCH_REG
, ins
->sreg1
);
2234 ia64_cmp_ltu (code
, 7, 10, ins
->sreg1
, ins
->sreg2
);
2236 ia64_mov (code
, ins
->dreg
, GP_SCRATCH_REG
);
2241 ia64_adds_imm (code
, ins
->dreg
, ins
->inst_imm
, ins
->sreg1
);
2246 ia64_and_imm (code
, ins
->dreg
, ins
->inst_imm
, ins
->sreg1
);
2250 ia64_or_imm (code
, ins
->dreg
, ins
->inst_imm
, ins
->sreg1
);
2254 ia64_xor_imm (code
, ins
->dreg
, ins
->inst_imm
, ins
->sreg1
);
2259 ia64_shl_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
2263 ia64_shr_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
2266 g_assert (ins
->inst_imm
<= 64);
2267 ia64_extr (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
, 32 - ins
->inst_imm
);
2269 case OP_ISHR_UN_IMM
:
2270 ia64_zxt4 (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2271 ia64_shr_u_imm (code
, ins
->dreg
, GP_SCRATCH_REG
, ins
->inst_imm
);
2273 case OP_LSHR_UN_IMM
:
2274 ia64_shr_u_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
2277 /* Based on gcc code */
2278 ia64_setf_sig (code
, FP_SCRATCH_REG
, ins
->sreg1
);
2279 ia64_setf_sig (code
, FP_SCRATCH_REG2
, ins
->sreg2
);
2280 ia64_xmpy_l (code
, FP_SCRATCH_REG
, FP_SCRATCH_REG
, FP_SCRATCH_REG2
);
2281 ia64_getf_sig (code
, ins
->dreg
, FP_SCRATCH_REG
);
2284 case OP_STOREI1_MEMBASE_REG
:
2285 ia64_st1_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 0);
2287 case OP_STOREI2_MEMBASE_REG
:
2288 ia64_st2_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 0);
2290 case OP_STOREI4_MEMBASE_REG
:
2291 ia64_st4_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 0);
2293 case OP_STOREI8_MEMBASE_REG
:
2294 case OP_STORE_MEMBASE_REG
:
2295 if (ins
->inst_offset
!= 0) {
2296 /* This is generated by local regalloc */
2297 if (ia64_is_imm14 (ins
->inst_offset
)) {
2298 ia64_adds_imm (code
, GP_SCRATCH_REG
, ins
->inst_offset
, ins
->inst_destbasereg
);
2300 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
2301 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, ins
->inst_destbasereg
);
2303 ins
->inst_destbasereg
= GP_SCRATCH_REG
;
2305 ia64_st8_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 0);
2308 case OP_IA64_STOREI1_MEMBASE_INC_REG
:
2309 ia64_st1_inc_imm_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 1, 0);
2311 case OP_IA64_STOREI2_MEMBASE_INC_REG
:
2312 ia64_st2_inc_imm_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 2, 0);
2314 case OP_IA64_STOREI4_MEMBASE_INC_REG
:
2315 ia64_st4_inc_imm_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 4, 0);
2317 case OP_IA64_STOREI8_MEMBASE_INC_REG
:
2318 ia64_st8_inc_imm_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 8, 0);
2321 case OP_LOADU1_MEMBASE
:
2322 ia64_ld1 (code
, ins
->dreg
, ins
->inst_basereg
);
2324 case OP_LOADU2_MEMBASE
:
2325 ia64_ld2 (code
, ins
->dreg
, ins
->inst_basereg
);
2327 case OP_LOADU4_MEMBASE
:
2328 ia64_ld4 (code
, ins
->dreg
, ins
->inst_basereg
);
2330 case OP_LOADI1_MEMBASE
:
2331 ia64_ld1 (code
, ins
->dreg
, ins
->inst_basereg
);
2332 ia64_sxt1 (code
, ins
->dreg
, ins
->dreg
);
2334 case OP_LOADI2_MEMBASE
:
2335 ia64_ld2 (code
, ins
->dreg
, ins
->inst_basereg
);
2336 ia64_sxt2 (code
, ins
->dreg
, ins
->dreg
);
2338 case OP_LOADI4_MEMBASE
:
2339 ia64_ld4 (code
, ins
->dreg
, ins
->inst_basereg
);
2340 ia64_sxt4 (code
, ins
->dreg
, ins
->dreg
);
2342 case OP_LOAD_MEMBASE
:
2343 case OP_LOADI8_MEMBASE
:
2344 if (ins
->inst_offset
!= 0) {
2345 /* This is generated by local regalloc */
2346 if (ia64_is_imm14 (ins
->inst_offset
)) {
2347 ia64_adds_imm (code
, GP_SCRATCH_REG
, ins
->inst_offset
, ins
->inst_basereg
);
2349 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
2350 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, ins
->inst_basereg
);
2352 ins
->inst_basereg
= GP_SCRATCH_REG
;
2354 ia64_ld8 (code
, ins
->dreg
, ins
->inst_basereg
);
2357 case OP_IA64_LOADU1_MEMBASE_INC
:
2358 ia64_ld1_inc_imm_hint (code
, ins
->dreg
, ins
->inst_basereg
, 1, 0);
2360 case OP_IA64_LOADU2_MEMBASE_INC
:
2361 ia64_ld2_inc_imm_hint (code
, ins
->dreg
, ins
->inst_basereg
, 2, 0);
2363 case OP_IA64_LOADU4_MEMBASE_INC
:
2364 ia64_ld4_inc_imm_hint (code
, ins
->dreg
, ins
->inst_basereg
, 4, 0);
2366 case OP_IA64_LOADI8_MEMBASE_INC
:
2367 ia64_ld8_inc_imm_hint (code
, ins
->dreg
, ins
->inst_basereg
, 8, 0);
2371 ia64_sxt1 (code
, ins
->dreg
, ins
->sreg1
);
2374 ia64_sxt2 (code
, ins
->dreg
, ins
->sreg1
);
2377 ia64_sxt4 (code
, ins
->dreg
, ins
->sreg1
);
2380 ia64_zxt1 (code
, ins
->dreg
, ins
->sreg1
);
2383 ia64_zxt2 (code
, ins
->dreg
, ins
->sreg1
);
2386 ia64_zxt4 (code
, ins
->dreg
, ins
->sreg1
);
2389 /* Compare opcodes */
2390 case OP_IA64_CMP4_EQ
:
2391 ia64_cmp4_eq (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2393 case OP_IA64_CMP4_NE
:
2394 ia64_cmp4_ne (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2396 case OP_IA64_CMP4_LE
:
2397 ia64_cmp4_le (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2399 case OP_IA64_CMP4_LT
:
2400 ia64_cmp4_lt (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2402 case OP_IA64_CMP4_GE
:
2403 ia64_cmp4_ge (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2405 case OP_IA64_CMP4_GT
:
2406 ia64_cmp4_gt (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2408 case OP_IA64_CMP4_LT_UN
:
2409 ia64_cmp4_ltu (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2411 case OP_IA64_CMP4_LE_UN
:
2412 ia64_cmp4_leu (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2414 case OP_IA64_CMP4_GT_UN
:
2415 ia64_cmp4_gtu (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2417 case OP_IA64_CMP4_GE_UN
:
2418 ia64_cmp4_geu (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2420 case OP_IA64_CMP_EQ
:
2421 ia64_cmp_eq (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2423 case OP_IA64_CMP_NE
:
2424 ia64_cmp_ne (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2426 case OP_IA64_CMP_LE
:
2427 ia64_cmp_le (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2429 case OP_IA64_CMP_LT
:
2430 ia64_cmp_lt (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2432 case OP_IA64_CMP_GE
:
2433 ia64_cmp_ge (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2435 case OP_IA64_CMP_GT
:
2436 ia64_cmp_gt (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2438 case OP_IA64_CMP_GT_UN
:
2439 ia64_cmp_gtu (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2441 case OP_IA64_CMP_LT_UN
:
2442 ia64_cmp_ltu (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2444 case OP_IA64_CMP_GE_UN
:
2445 ia64_cmp_geu (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2447 case OP_IA64_CMP_LE_UN
:
2448 ia64_cmp_leu (code
, 6, 7, ins
->sreg1
, ins
->sreg2
);
2450 case OP_IA64_CMP4_EQ_IMM
:
2451 ia64_cmp4_eq_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2453 case OP_IA64_CMP4_NE_IMM
:
2454 ia64_cmp4_ne_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2456 case OP_IA64_CMP4_LE_IMM
:
2457 ia64_cmp4_le_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2459 case OP_IA64_CMP4_LT_IMM
:
2460 ia64_cmp4_lt_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2462 case OP_IA64_CMP4_GE_IMM
:
2463 ia64_cmp4_ge_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2465 case OP_IA64_CMP4_GT_IMM
:
2466 ia64_cmp4_gt_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2468 case OP_IA64_CMP4_LT_UN_IMM
:
2469 ia64_cmp4_ltu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2471 case OP_IA64_CMP4_LE_UN_IMM
:
2472 ia64_cmp4_leu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2474 case OP_IA64_CMP4_GT_UN_IMM
:
2475 ia64_cmp4_gtu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2477 case OP_IA64_CMP4_GE_UN_IMM
:
2478 ia64_cmp4_geu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2480 case OP_IA64_CMP_EQ_IMM
:
2481 ia64_cmp_eq_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2483 case OP_IA64_CMP_NE_IMM
:
2484 ia64_cmp_ne_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2486 case OP_IA64_CMP_LE_IMM
:
2487 ia64_cmp_le_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2489 case OP_IA64_CMP_LT_IMM
:
2490 ia64_cmp_lt_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2492 case OP_IA64_CMP_GE_IMM
:
2493 ia64_cmp_ge_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2495 case OP_IA64_CMP_GT_IMM
:
2496 ia64_cmp_gt_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2498 case OP_IA64_CMP_GT_UN_IMM
:
2499 ia64_cmp_gtu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2501 case OP_IA64_CMP_LT_UN_IMM
:
2502 ia64_cmp_ltu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2504 case OP_IA64_CMP_GE_UN_IMM
:
2505 ia64_cmp_geu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2507 case OP_IA64_CMP_LE_UN_IMM
:
2508 ia64_cmp_leu_imm (code
, 6, 7, ins
->inst_imm
, ins
->sreg2
);
2510 case OP_IA64_FCMP_EQ
:
2511 ia64_fcmp_eq_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2513 case OP_IA64_FCMP_NE
:
2514 ia64_fcmp_ne_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2516 case OP_IA64_FCMP_LT
:
2517 ia64_fcmp_lt_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2519 case OP_IA64_FCMP_GT
:
2520 ia64_fcmp_gt_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2522 case OP_IA64_FCMP_LE
:
2523 ia64_fcmp_le_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2525 case OP_IA64_FCMP_GE
:
2526 ia64_fcmp_ge_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2528 case OP_IA64_FCMP_GT_UN
:
2529 ia64_fcmp_gt_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2530 ia64_fcmp_unord_sf_pred (code
, 7, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2532 case OP_IA64_FCMP_LT_UN
:
2533 ia64_fcmp_lt_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2534 ia64_fcmp_unord_sf_pred (code
, 7, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2536 case OP_IA64_FCMP_GE_UN
:
2537 ia64_fcmp_ge_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2538 ia64_fcmp_unord_sf_pred (code
, 7, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2540 case OP_IA64_FCMP_LE_UN
:
2541 ia64_fcmp_le_sf (code
, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2542 ia64_fcmp_unord_sf_pred (code
, 7, 6, 7, ins
->sreg1
, ins
->sreg2
, 0);
2545 case OP_COND_EXC_IOV
:
2546 case OP_COND_EXC_OV
:
2547 emit_cond_system_exception (cfg
, code
, "OverflowException", 6);
2549 case OP_COND_EXC_IC
:
2551 emit_cond_system_exception (cfg
, code
, "OverflowException", 7);
2553 case OP_IA64_COND_EXC
:
2554 emit_cond_system_exception (cfg
, code
, ins
->inst_p1
, 6);
2557 ia64_mov_pred (code
, 7, ins
->dreg
, IA64_R0
);
2558 ia64_no_stop (code
);
2559 ia64_add1_pred (code
, 6, ins
->dreg
, IA64_R0
, IA64_R0
);
2561 case OP_ICONV_TO_I1
:
2562 case OP_LCONV_TO_I1
:
2563 /* FIXME: Is this needed ? */
2564 ia64_sxt1 (code
, ins
->dreg
, ins
->sreg1
);
2566 case OP_ICONV_TO_I2
:
2567 case OP_LCONV_TO_I2
:
2568 /* FIXME: Is this needed ? */
2569 ia64_sxt2 (code
, ins
->dreg
, ins
->sreg1
);
2571 case OP_LCONV_TO_I4
:
2572 /* FIXME: Is this needed ? */
2573 ia64_sxt4 (code
, ins
->dreg
, ins
->sreg1
);
2575 case OP_ICONV_TO_U1
:
2576 case OP_LCONV_TO_U1
:
2577 /* FIXME: Is this needed */
2578 ia64_zxt1 (code
, ins
->dreg
, ins
->sreg1
);
2580 case OP_ICONV_TO_U2
:
2581 case OP_LCONV_TO_U2
:
2582 /* FIXME: Is this needed */
2583 ia64_zxt2 (code
, ins
->dreg
, ins
->sreg1
);
2585 case OP_LCONV_TO_U4
:
2586 /* FIXME: Is this needed */
2587 ia64_zxt4 (code
, ins
->dreg
, ins
->sreg1
);
2589 case OP_ICONV_TO_I8
:
2591 case OP_LCONV_TO_I8
:
2593 ia64_sxt4 (code
, ins
->dreg
, ins
->sreg1
);
2595 case OP_LCONV_TO_U8
:
2597 ia64_zxt4 (code
, ins
->dreg
, ins
->sreg1
);
2604 double d
= *(double *)ins
->inst_p0
;
2606 if ((d
== 0.0) && (mono_signbit (d
) == 0))
2607 ia64_fmov (code
, ins
->dreg
, 0);
2609 ia64_fmov (code
, ins
->dreg
, 1);
2611 add_patch_info (cfg
, code
, MONO_PATCH_INFO_R8
, ins
->inst_p0
);
2612 ia64_movl (code
, GP_SCRATCH_REG
, 0);
2613 ia64_ldfd (code
, ins
->dreg
, GP_SCRATCH_REG
);
2618 float f
= *(float *)ins
->inst_p0
;
2620 if ((f
== 0.0) && (mono_signbit (f
) == 0))
2621 ia64_fmov (code
, ins
->dreg
, 0);
2623 ia64_fmov (code
, ins
->dreg
, 1);
2625 add_patch_info (cfg
, code
, MONO_PATCH_INFO_R4
, ins
->inst_p0
);
2626 ia64_movl (code
, GP_SCRATCH_REG
, 0);
2627 ia64_ldfs (code
, ins
->dreg
, GP_SCRATCH_REG
);
2632 ia64_fmov (code
, ins
->dreg
, ins
->sreg1
);
2634 case OP_STORER8_MEMBASE_REG
:
2635 if (ins
->inst_offset
!= 0) {
2636 /* This is generated by local regalloc */
2637 if (ia64_is_imm14 (ins
->inst_offset
)) {
2638 ia64_adds_imm (code
, GP_SCRATCH_REG
, ins
->inst_offset
, ins
->inst_destbasereg
);
2640 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
2641 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, ins
->inst_destbasereg
);
2643 ins
->inst_destbasereg
= GP_SCRATCH_REG
;
2645 ia64_stfd_hint (code
, ins
->inst_destbasereg
, ins
->sreg1
, 0);
2647 case OP_STORER4_MEMBASE_REG
:
2648 ia64_fnorm_s_sf (code
, FP_SCRATCH_REG
, ins
->sreg1
, 0);
2649 ia64_stfs_hint (code
, ins
->inst_destbasereg
, FP_SCRATCH_REG
, 0);
2651 case OP_LOADR8_MEMBASE
:
2652 if (ins
->inst_offset
!= 0) {
2653 /* This is generated by local regalloc */
2654 if (ia64_is_imm14 (ins
->inst_offset
)) {
2655 ia64_adds_imm (code
, GP_SCRATCH_REG
, ins
->inst_offset
, ins
->inst_basereg
);
2657 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
2658 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, ins
->inst_basereg
);
2660 ins
->inst_basereg
= GP_SCRATCH_REG
;
2662 ia64_ldfd (code
, ins
->dreg
, ins
->inst_basereg
);
2664 case OP_LOADR4_MEMBASE
:
2665 ia64_ldfs (code
, ins
->dreg
, ins
->inst_basereg
);
2666 ia64_fnorm_d_sf (code
, ins
->dreg
, ins
->dreg
, 0);
2668 case OP_ICONV_TO_R4
:
2669 case OP_LCONV_TO_R4
:
2670 ia64_setf_sig (code
, ins
->dreg
, ins
->sreg1
);
2671 ia64_fcvt_xf (code
, ins
->dreg
, ins
->dreg
);
2672 ia64_fnorm_s_sf (code
, ins
->dreg
, ins
->dreg
, 0);
2674 case OP_ICONV_TO_R8
:
2675 case OP_LCONV_TO_R8
:
2676 ia64_setf_sig (code
, ins
->dreg
, ins
->sreg1
);
2677 ia64_fcvt_xf (code
, ins
->dreg
, ins
->dreg
);
2678 ia64_fnorm_d_sf (code
, ins
->dreg
, ins
->dreg
, 0);
2680 case OP_FCONV_TO_R4
:
2681 ia64_fnorm_s_sf (code
, ins
->dreg
, ins
->sreg1
, 0);
2683 case OP_FCONV_TO_I8
:
2685 ia64_fcvt_fx_trunc_sf (code
, FP_SCRATCH_REG
, ins
->sreg1
, 0);
2686 ia64_getf_sig (code
, ins
->dreg
, FP_SCRATCH_REG
);
2689 ia64_fma_d_sf (code
, ins
->dreg
, ins
->sreg1
, 1, ins
->sreg2
, 0);
2692 ia64_fms_d_sf (code
, ins
->dreg
, ins
->sreg1
, 1, ins
->sreg2
, 0);
2695 ia64_fma_d_sf (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
, 0, 0);
2698 ia64_fmerge_ns (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg1
);
2702 ia64_fclass_m (code
, 6, 7, ins
->sreg1
, 0x080);
2703 emit_cond_system_exception (cfg
, code
, "ArithmeticException", 6);
2705 ia64_fclass_m (code
, 6, 7, ins
->sreg1
, 0x040);
2706 emit_cond_system_exception (cfg
, code
, "ArithmeticException", 6);
2707 /* Positive infinity */
2708 ia64_fclass_m (code
, 6, 7, ins
->sreg1
, 0x021);
2709 emit_cond_system_exception (cfg
, code
, "ArithmeticException", 6);
2710 /* Negative infinity */
2711 ia64_fclass_m (code
, 6, 7, ins
->sreg1
, 0x022);
2712 emit_cond_system_exception (cfg
, code
, "ArithmeticException", 6);
2717 /* ensure ins->sreg1 is not NULL */
2718 /* Can't use ld8 as this could be a vtype address */
2719 ia64_ld1 (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2722 ia64_adds_imm (code
, GP_SCRATCH_REG
, cfg
->sig_cookie
, cfg
->frame_reg
);
2723 ia64_st8 (code
, ins
->sreg1
, GP_SCRATCH_REG
);
2731 call
= (MonoCallInst
*)ins
;
2733 if (ins
->flags
& MONO_INST_HAS_METHOD
)
2734 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_METHOD
, call
->method
);
2736 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, call
->fptr
);
2738 code
= emit_move_return_value (cfg
, ins
, code
);
2746 case OP_VOIDCALL_REG
: {
2747 MonoCallInst
*call
= (MonoCallInst
*)ins
;
2752 * mono_arch_get_this_arg_from_call () needs to find the this argument in a global
2755 cinfo
= get_call_info (cfg
, cfg
->mempool
, call
->signature
, FALSE
);
2756 out_reg
= cfg
->arch
.reg_out0
;
2757 ia64_mov (code
, IA64_R10
, out_reg
);
2760 ia64_mov (code
, IA64_R8
, ins
->sreg1
);
2761 ia64_ld8_inc_imm (code
, GP_SCRATCH_REG2
, IA64_R8
, 8);
2762 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG2
);
2763 ia64_ld8 (code
, IA64_GP
, IA64_R8
);
2764 ia64_br_call_reg (code
, IA64_B0
, IA64_B6
);
2766 code
= emit_move_return_value (cfg
, ins
, code
);
2769 case OP_FCALL_MEMBASE
:
2770 case OP_LCALL_MEMBASE
:
2771 case OP_VCALL_MEMBASE
:
2772 case OP_VCALL2_MEMBASE
:
2773 case OP_VOIDCALL_MEMBASE
:
2774 case OP_CALL_MEMBASE
: {
2775 MonoCallInst
*call
= (MonoCallInst
*)ins
;
2779 ia64_mov (code
, IA64_R11
, ins
->sreg1
);
2780 if (ia64_is_imm14 (ins
->inst_offset
))
2781 ia64_adds_imm (code
, IA64_R8
, ins
->inst_offset
, ins
->sreg1
);
2783 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
2784 ia64_add (code
, IA64_R8
, GP_SCRATCH_REG
, ins
->sreg1
);
2787 if (call
->method
&& ins
->inst_offset
< 0) {
2789 * This is a possible IMT call so save the IMT method in a global
2790 * register where mono_arch_find_imt_method () and its friends can
2793 ia64_movl (code
, IA64_R9
, call
->method
);
2797 * mono_arch_find_this_arg () needs to find the this argument in a global
2800 cinfo
= get_call_info (cfg
, cfg
->mempool
, call
->signature
, FALSE
);
2801 out_reg
= cfg
->arch
.reg_out0
;
2802 ia64_mov (code
, IA64_R10
, out_reg
);
2804 ia64_ld8 (code
, GP_SCRATCH_REG
, IA64_R8
);
2806 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
2808 ia64_br_call_reg (code
, IA64_B0
, IA64_B6
);
2810 code
= emit_move_return_value (cfg
, ins
, code
);
2815 * Keep in sync with the code in emit_epilog.
2818 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
2821 g_assert (!cfg
->method
->save_lmf
);
2823 /* Load arguments into their original registers */
2824 code
= emit_load_volatile_arguments (cfg
, code
);
2826 if (cfg
->arch
.stack_alloc_size
) {
2827 if (cfg
->arch
.omit_fp
) {
2828 if (ia64_is_imm14 (cfg
->arch
.stack_alloc_size
))
2829 ia64_adds_imm (code
, IA64_SP
, (cfg
->arch
.stack_alloc_size
), IA64_SP
);
2831 ia64_movl (code
, GP_SCRATCH_REG
, cfg
->arch
.stack_alloc_size
);
2832 ia64_add (code
, IA64_SP
, GP_SCRATCH_REG
, IA64_SP
);
2836 ia64_mov (code
, IA64_SP
, cfg
->arch
.reg_saved_sp
);
2838 ia64_mov_to_ar_i (code
, IA64_PFS
, cfg
->arch
.reg_saved_ar_pfs
);
2839 ia64_mov_ret_to_br (code
, IA64_B0
, cfg
->arch
.reg_saved_b0
);
2841 add_patch_info (cfg
, code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
2842 ia64_movl (code
, GP_SCRATCH_REG
, 0);
2843 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
2844 ia64_br_cond_reg (code
, IA64_B6
);
2849 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, mono_break
);
2855 /* FIXME: Sigaltstack support */
2857 /* keep alignment */
2858 ia64_adds_imm (code
, GP_SCRATCH_REG
, MONO_ARCH_LOCALLOC_ALIGNMENT
- 1, ins
->sreg1
);
2859 ia64_movl (code
, GP_SCRATCH_REG2
, ~(MONO_ARCH_LOCALLOC_ALIGNMENT
- 1));
2860 ia64_and (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
2862 ia64_sub (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG
);
2864 ia64_mov (code
, ins
->dreg
, IA64_SP
);
2866 /* An area at sp is reserved by the ABI for parameter passing */
2867 abi_offset
= - ALIGN_TO (cfg
->param_area
+ 16, MONO_ARCH_LOCALLOC_ALIGNMENT
);
2868 if (ia64_is_adds_imm (abi_offset
))
2869 ia64_adds_imm (code
, IA64_SP
, abi_offset
, IA64_SP
);
2871 ia64_movl (code
, GP_SCRATCH_REG2
, abi_offset
);
2872 ia64_add (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG2
);
2875 if (ins
->flags
& MONO_INST_INIT
) {
2877 ia64_add (code
, GP_SCRATCH_REG2
, ins
->dreg
, GP_SCRATCH_REG
);
2879 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
2882 ia64_st8_inc_imm_hint (code
, ins
->dreg
, IA64_R0
, 8, 0);
2883 ia64_cmp_lt (code
, 8, 9, ins
->dreg
, GP_SCRATCH_REG2
);
2884 ia64_br_cond_pred (code
, 8, -2);
2886 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
2888 ia64_sub (code
, ins
->dreg
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
);
2893 case OP_LOCALLOC_IMM
: {
2896 /* FIXME: Sigaltstack support */
2898 gssize size
= ins
->inst_imm
;
2899 size
= (size
+ (MONO_ARCH_FRAME_ALIGNMENT
- 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT
- 1);
2901 if (ia64_is_adds_imm (size
))
2902 ia64_adds_imm (code
, GP_SCRATCH_REG
, size
, IA64_R0
);
2904 ia64_movl (code
, GP_SCRATCH_REG
, size
);
2906 ia64_sub (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG
);
2907 ia64_mov (code
, ins
->dreg
, IA64_SP
);
2909 /* An area at sp is reserved by the ABI for parameter passing */
2910 abi_offset
= - ALIGN_TO (cfg
->param_area
+ 16, MONO_ARCH_FRAME_ALIGNMENT
);
2911 if (ia64_is_adds_imm (abi_offset
))
2912 ia64_adds_imm (code
, IA64_SP
, abi_offset
, IA64_SP
);
2914 ia64_movl (code
, GP_SCRATCH_REG2
, abi_offset
);
2915 ia64_add (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG2
);
2918 if (ins
->flags
& MONO_INST_INIT
) {
2920 ia64_add (code
, GP_SCRATCH_REG2
, ins
->dreg
, GP_SCRATCH_REG
);
2922 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
2925 ia64_st8_inc_imm_hint (code
, ins
->dreg
, IA64_R0
, 8, 0);
2926 ia64_cmp_lt (code
, 8, 9, ins
->dreg
, GP_SCRATCH_REG2
);
2927 ia64_br_cond_pred (code
, 8, -2);
2929 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
2931 ia64_sub (code
, ins
->dreg
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
);
2937 ia64_adds_imm (code
, ins
->dreg
, ins
->inst_offset
, IA64_TP
);
2938 ia64_ld8 (code
, ins
->dreg
, ins
->dreg
);
2941 /* Synchronization */
2942 case OP_MEMORY_BARRIER
:
2945 case OP_ATOMIC_ADD_IMM_NEW_I4
:
2946 g_assert (ins
->inst_offset
== 0);
2947 ia64_fetchadd4_acq_hint (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_imm
, 0);
2948 ia64_adds_imm (code
, ins
->dreg
, ins
->inst_imm
, ins
->dreg
);
2950 case OP_ATOMIC_ADD_IMM_NEW_I8
:
2951 g_assert (ins
->inst_offset
== 0);
2952 ia64_fetchadd8_acq_hint (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_imm
, 0);
2953 ia64_adds_imm (code
, ins
->dreg
, ins
->inst_imm
, ins
->dreg
);
2955 case OP_ATOMIC_EXCHANGE_I4
:
2956 ia64_xchg4_hint (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
, 0);
2957 ia64_sxt4 (code
, ins
->dreg
, ins
->dreg
);
2959 case OP_ATOMIC_EXCHANGE_I8
:
2960 ia64_xchg8_hint (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
, 0);
2962 case OP_ATOMIC_ADD_NEW_I4
: {
2963 guint8
*label
, *buf
;
2965 /* From libatomic_ops */
2968 ia64_begin_bundle (code
);
2969 label
= code
.buf
+ code
.nins
;
2970 ia64_ld4_acq (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2971 ia64_add (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, ins
->sreg2
);
2972 ia64_mov_to_ar_m (code
, IA64_CCV
, GP_SCRATCH_REG
);
2973 ia64_cmpxchg4_acq_hint (code
, GP_SCRATCH_REG2
, ins
->sreg1
, GP_SCRATCH_REG2
, 0);
2974 ia64_cmp4_eq (code
, 6, 7, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
2975 buf
= code
.buf
+ code
.nins
;
2976 ia64_br_cond_pred (code
, 7, 0);
2977 ia64_begin_bundle (code
);
2978 ia64_patch (buf
, label
);
2979 ia64_add (code
, ins
->dreg
, GP_SCRATCH_REG
, ins
->sreg2
);
2982 case OP_ATOMIC_ADD_NEW_I8
: {
2983 guint8
*label
, *buf
;
2985 /* From libatomic_ops */
2988 ia64_begin_bundle (code
);
2989 label
= code
.buf
+ code
.nins
;
2990 ia64_ld8_acq (code
, GP_SCRATCH_REG
, ins
->sreg1
);
2991 ia64_add (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, ins
->sreg2
);
2992 ia64_mov_to_ar_m (code
, IA64_CCV
, GP_SCRATCH_REG
);
2993 ia64_cmpxchg8_acq_hint (code
, GP_SCRATCH_REG2
, ins
->sreg1
, GP_SCRATCH_REG2
, 0);
2994 ia64_cmp_eq (code
, 6, 7, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
2995 buf
= code
.buf
+ code
.nins
;
2996 ia64_br_cond_pred (code
, 7, 0);
2997 ia64_begin_bundle (code
);
2998 ia64_patch (buf
, label
);
2999 ia64_add (code
, ins
->dreg
, GP_SCRATCH_REG
, ins
->sreg2
);
3003 /* Exception handling */
3004 case OP_CALL_HANDLER
:
3006 * Using a call instruction would mess up the register stack, so
3007 * save the return address to a register and use a
3010 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
3011 ia64_mov (code
, IA64_R15
, IA64_R0
);
3012 ia64_mov_from_ip (code
, GP_SCRATCH_REG
);
3013 /* Add the length of OP_CALL_HANDLER */
3014 ia64_adds_imm (code
, GP_SCRATCH_REG
, 5 * 16, GP_SCRATCH_REG
);
3015 add_patch_info (cfg
, code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3016 ia64_movl (code
, GP_SCRATCH_REG2
, 0);
3017 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG2
);
3018 ia64_br_cond_reg (code
, IA64_B6
);
3020 //mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3021 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
3023 case OP_START_HANDLER
: {
3025 * We receive the return address in GP_SCRATCH_REG.
3027 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3030 * R15 determines our caller. It is used since it is writable using
3032 * R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context ()
3033 * R15 != 0 means we are called by call_filter ().
3035 ia64_codegen_set_one_ins_per_bundle (code
, TRUE
);
3036 ia64_cmp_eq (code
, 6, 7, IA64_R15
, IA64_R0
);
3038 ia64_br_cond_pred (code
, 6, 6);
3041 * Called by call_filter:
3042 * Allocate a new stack frame, and set the fp register from the
3043 * value passed in by the caller.
3044 * We allocate a similar frame as is done by the prolog, so
3045 * if an exception is thrown while executing the filter, the
3046 * unwinder can unwind through the filter frame using the unwind
3047 * info for the prolog.
3049 ia64_alloc (code
, cfg
->arch
.reg_saved_ar_pfs
, cfg
->arch
.reg_local0
- cfg
->arch
.reg_in0
, cfg
->arch
.reg_out0
- cfg
->arch
.reg_local0
, cfg
->arch
.n_out_regs
, 0);
3050 ia64_mov_from_br (code
, cfg
->arch
.reg_saved_b0
, IA64_B0
);
3051 ia64_mov (code
, cfg
->arch
.reg_saved_sp
, IA64_SP
);
3052 ia64_mov (code
, cfg
->frame_reg
, IA64_R15
);
3053 /* Signal to endfilter that we are called by call_filter */
3054 ia64_mov (code
, GP_SCRATCH_REG
, IA64_R0
);
3056 /* Branch target: */
3057 if (ia64_is_imm14 (spvar
->inst_offset
))
3058 ia64_adds_imm (code
, GP_SCRATCH_REG2
, spvar
->inst_offset
, cfg
->frame_reg
);
3060 ia64_movl (code
, GP_SCRATCH_REG2
, spvar
->inst_offset
);
3061 ia64_add (code
, GP_SCRATCH_REG2
, cfg
->frame_reg
, GP_SCRATCH_REG2
);
3064 /* Save the return address */
3065 ia64_st8_hint (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, 0);
3066 ia64_codegen_set_one_ins_per_bundle (code
, FALSE
);
3071 case OP_ENDFILTER
: {
3072 /* FIXME: Return the value in ENDFILTER */
3073 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3075 /* Load the return address */
3076 if (ia64_is_imm14 (spvar
->inst_offset
)) {
3077 ia64_adds_imm (code
, GP_SCRATCH_REG
, spvar
->inst_offset
, cfg
->frame_reg
);
3079 ia64_movl (code
, GP_SCRATCH_REG
, spvar
->inst_offset
);
3080 ia64_add (code
, GP_SCRATCH_REG
, cfg
->frame_reg
, GP_SCRATCH_REG
);
3082 ia64_ld8_hint (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, 0);
3085 ia64_cmp_eq (code
, 6, 7, GP_SCRATCH_REG
, IA64_R0
);
3086 ia64_br_cond_pred (code
, 7, 4);
3088 /* Called by call_filter */
3090 ia64_mov_to_ar_i (code
, IA64_PFS
, cfg
->arch
.reg_saved_ar_pfs
);
3091 ia64_mov_to_br (code
, IA64_B0
, cfg
->arch
.reg_saved_b0
);
3092 ia64_br_ret_reg (code
, IA64_B0
);
3094 /* Called by CALL_HANDLER */
3095 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
3096 ia64_br_cond_reg (code
, IA64_B6
);
3100 ia64_mov (code
, cfg
->arch
.reg_out0
, ins
->sreg1
);
3101 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3102 (gpointer
)"mono_arch_throw_exception");
3105 * This might be the last instruction in the method, so add a dummy
3106 * instruction so the unwinder will work.
3108 ia64_break_i (code
, 0);
3111 ia64_mov (code
, cfg
->arch
.reg_out0
, ins
->sreg1
);
3112 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3113 (gpointer
)"mono_arch_rethrow_exception");
3115 ia64_break_i (code
, 0);
3119 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
3120 g_assert_not_reached ();
3123 if ((code
.buf
- cfg
->native_code
- offset
) > max_len
) {
3124 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
3125 mono_inst_name (ins
->opcode
), max_len
, code
.buf
- cfg
->native_code
- offset
);
3126 g_assert_not_reached ();
3132 last_offset
= offset
;
3135 ia64_codegen_close (code
);
3137 cfg
->code_len
= code
.buf
- cfg
->native_code
;
/*
 * mono_arch_register_lowlevel_calls: per-architecture hook invoked at JIT
 * startup to register backend-specific low-level helper calls.
 * NOTE(review): the return-type line and the function body are not visible
 * in this fragment of the file — confirm against the full source.
 */
mono_arch_register_lowlevel_calls (void)
/*
 * For each of the 32 IA64 bundle templates, the instruction type
 * (M/I/LX/F/B) required in each of the bundle's three slots.
 * Indexed by template number as used by ia64_emit_bundle () and
 * ia64_real_emit_bundle () below.
 * NOTE(review): several rows of the 32-entry initializer (and the closing
 * brace) are missing from this fragment; the visible rows are kept
 * verbatim — restore the gaps from the full source before compiling.
 */
static Ia64InsType ins_types_in_template [32][3] = {
	{IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
	/* NOTE(review): two rows missing here in this fragment */
	{IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
	/* NOTE(review): rows missing here in this fragment */
	{IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
	{IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
	/* NOTE(review): rows missing here in this fragment */
	{IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
	{IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
/*
 * For each of the 32 IA64 bundle templates, whether a stop bit follows
 * each of the three instruction slots. Consulted by ia64_emit_bundle ()
 * when matching the required stops of buffered instructions against a
 * candidate template.
 * NOTE(review): a few rows and the closing brace are missing from this
 * fragment; visible rows kept verbatim.
 */
static gboolean stops_in_template [32][3] = {
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	{ FALSE, TRUE, FALSE },
	{ FALSE, TRUE, TRUE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, FALSE },
	/* NOTE(review): one row missing here in this fragment */
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	{ TRUE, FALSE, FALSE },
	{ TRUE, FALSE, TRUE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	/* NOTE(review): one row missing here in this fragment */
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	/* NOTE(review): one row missing here in this fragment */
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, TRUE },
	{ FALSE, FALSE, FALSE },
	{ FALSE, FALSE, FALSE }
/*
 * For each bundle template, the slot index (0-2) of the last stop bit in
 * the template, or -1 if the template encodes no stop. Used by
 * ia64_real_emit_bundle () to compute how many dependency records to
 * retire when a bundle is emitted.
 * NOTE(review): the closing brace of the initializer is not visible in
 * this fragment.
 */
static int last_stop_in_template [32] = {
	-1, 2, 1, 2, -1, 2, -1, -1,
	-1, 2, 0, 2, -1, 2, -1, 2,
	-1, 2, -1, 2, -1, -1, -1, 2,
	-1, 2, -1, -1, -1, 2, -1, -1
/*
 * Encoded nop instruction for each Ia64InsType, used to fill bundle slots
 * that carry no real instruction.
 * NOTE(review): the six initializer values are missing from this fragment.
 */
static guint64 nops_for_ins_types [6] = {
/*
 * ITYPE_MATCH(itype1, itype2): TRUE iff an instruction of type itype2 can
 * be placed into a bundle slot of type itype1. A-type instructions are
 * accepted by both I and M slots; all other types must match exactly.
 */
#define ITYPE_MATCH(itype1, itype2) (((itype1) == (itype2)) || (((itype2) == IA64_INS_TYPE_A) && (((itype1) == IA64_INS_TYPE_I) || ((itype1) == IA64_INS_TYPE_M))))
/*
 * Debug tracing for the instruction scheduler. The two definitions below
 * are the enabled and disabled variants; the conditional (#if/#else)
 * selecting between them is not visible in this fragment.
 */
#define DEBUG_INS_SCHED(a) do { a; } while (0)
#define DEBUG_INS_SCHED(a)
/*
 * ia64_analyze_deps: scan the dependency records accumulated in
 * code->dep_info for the buffered instructions and decide where stop bits
 * are needed. On return, deps_start [i] holds the offset of instruction
 * i's dependency records inside code->dep_info, and stops [i] is set to 1
 * when a stop is required after instruction i. Records are two bytes
 * each: a kind byte (IA64_READ_GR, IA64_WRITE_GR, ...) followed by a
 * register number; IA64_END_OF_INS separates instructions.
 *
 * NOTE(review): this fragment is missing several original lines — the
 * return type, braces, most `case` labels, `break`s, and the
 * need_stop/no_stop bookkeeping. Visible code kept verbatim; gaps are
 * flagged inline.
 */
ia64_analyze_deps (Ia64CodegenState *code, int *deps_start, int *stops)
	int i, pos, ins_index, current_deps_start, current_ins_start, reg;
	guint8 *deps = code->dep_info;
	gboolean need_stop, no_stop;

	/* Clear per-instruction stop flags (loop body not visible here) */
	for (i = 0; i < code->nins; ++i)

	current_deps_start = 0;
	current_ins_start = 0;
	deps_start [ins_index] = current_ins_start;
	DEBUG_INS_SCHED (printf ("BEGIN.\n"));
	/* Walk the two-byte dependency records for all buffered instructions */
	while (pos < code->dep_info_pos) {
		switch (deps [pos]) {
		case IA64_END_OF_INS:
			/* Start of the next instruction's records */
			current_ins_start = pos + 2;
			deps_start [ins_index] = current_ins_start;
			DEBUG_INS_SCHED (printf ("(%d) END INS.\n", ins_index - 1));
		/* General-register read: RAW hazard against earlier gr writes in
		 * this instruction group (case label not visible in fragment) */
			reg = deps [pos + 1];
			DEBUG_INS_SCHED (printf ("READ GR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
		/* General-register write: WAW hazard (case label not visible) */
			reg = code->dep_info [pos + 1];
			DEBUG_INS_SCHED (printf ("WRITE GR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
		/* Predicate-register read (case label not visible) */
			reg = deps [pos + 1];
			DEBUG_INS_SCHED (printf ("READ PR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
		case IA64_READ_PR_BRANCH:
			reg = deps [pos + 1];
			/* Writes to prs by non-float instructions are visible to branches */
			DEBUG_INS_SCHED (printf ("READ PR BRANCH: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_PR_FLOAT && deps [i + 1] == reg)
		/* Predicate-register write (case label not visible) */
			reg = code->dep_info [pos + 1];
			DEBUG_INS_SCHED (printf ("WRITE PR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
		case IA64_WRITE_PR_FLOAT:
			reg = code->dep_info [pos + 1];
			DEBUG_INS_SCHED (printf ("WRITE PR FP: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (((deps [i] == IA64_WRITE_GR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
		/* Branch-register read (case label not visible) */
			reg = deps [pos + 1];
			DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
		/* Branch-register write (case label not visible) */
			reg = code->dep_info [pos + 1];
			DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
		case IA64_READ_BR_BRANCH:
			reg = deps [pos + 1];

			/* Writes to brs are visible to branches */
			/* NOTE(review): "BRACH" is a typo in the debug string; kept
			 * as-is since it is runtime text */
			DEBUG_INS_SCHED (printf ("READ BR BRACH: %d\n", reg));
		/* Floating-point register read — the checks use IA64_WRITE_FR;
		 * the debug string says "BR" (apparent copy/paste slip, kept
		 * as-is). Case label not visible. */
			reg = deps [pos + 1];
			DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
		/* Floating-point register write — debug string again says "BR"
		 * (case label not visible) */
			reg = code->dep_info [pos + 1];
			DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
		/* Application-register read (case label not visible) */
			reg = deps [pos + 1];
			DEBUG_INS_SCHED (printf ("READ AR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
		/* Application-register write (case label not visible) */
			reg = code->dep_info [pos + 1];
			DEBUG_INS_SCHED (printf ("WRITE AR: %d\n", reg));
			for (i = current_deps_start; i < current_ins_start; i += 2)
				if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
			/*
			 * Explicitly indicate that a stop is not required. Useful for
			 * example when two predicated instructions with negated predicates
			 * write the same registers.
			 */
			g_assert_not_reached ();

		if (need_stop && !no_stop) {
			/* Record the stop after the previous instruction */
			g_assert (ins_index > 0);
			stops [ins_index - 1] = 1;
			DEBUG_INS_SCHED (printf ("STOP\n"));
			current_deps_start = current_ins_start;

			/* Skip remaining deps for this instruction */
			while (deps [pos] != IA64_END_OF_INS)

	if (code->nins > 0) {
		/* No dependency info for the last instruction */
		stops [code->nins - 1] = 1;

	deps_start [code->nins] = code->dep_info_pos;
/*
 * ia64_real_emit_bundle: emit one bundle using the given template and the
 * three slot encodings ins1/ins2/ins3, then retire the first N buffered
 * instructions from CODE, shifting the remaining instructions and their
 * dependency and unwind bookkeeping to the front of the buffers. `nops`
 * is a bitmask (bit i set => slot i holds a filler nop rather than a
 * buffered instruction).
 *
 * NOTE(review): this fragment is missing several original lines — the
 * return type, braces, the deps_to_shift decrements under the three
 * stop_pos checks, the ins_pos/curr_ins setup in the unwind section, and
 * the early-out when n == code->nins. Visible code kept verbatim.
 */
ia64_real_emit_bundle (Ia64CodegenState *code, int *deps_start, int *stops, int n, guint64 template, guint64 ins1, guint64 ins2, guint64 ins3, guint8 nops)
	int stop_pos, i, deps_to_shift, dep_shift;

	g_assert (n <= code->nins);

	// if (n > 1) printf ("FOUND: %ld.\n", template);
	ia64_emit_bundle_template (code, template, ins1, ins2, ins3);

	stop_pos = last_stop_in_template [template] + 1;

	/* Compute the number of 'real' instructions before the stop */
	/* (the decrement statements under these checks are not visible in
	 * this fragment) */
	deps_to_shift = stop_pos;
	if (stop_pos >= 3 && (nops & (1 << 2)))
	if (stop_pos >= 2 && (nops & (1 << 1)))
	if (stop_pos >= 1 && (nops & (1 << 0)))

	/*
	 * We have to keep some dependencies whose instructions have been shifted
	 * out of the buffer. So nullify the end_of_ins markers in the dependency
	 * array.
	 */
	for (i = deps_start [deps_to_shift]; i < deps_start [n]; i += 2)
		if (code->dep_info [i] == IA64_END_OF_INS)
			code->dep_info [i] = IA64_NONE;

	g_assert (deps_start [deps_to_shift] <= code->dep_info_pos);
	/* Shift the surviving dependency records to the front of dep_info */
	memcpy (code->dep_info, &code->dep_info [deps_start [deps_to_shift]], code->dep_info_pos - deps_start [deps_to_shift]);
	code->dep_info_pos = code->dep_info_pos - deps_start [deps_to_shift];

	/* Rebase the per-instruction dependency offsets */
	dep_shift = deps_start [deps_to_shift];
	for (i = 0; i < code->nins + 1 - n; ++i)
		deps_start [i] = deps_start [n + i] - dep_shift;

	/* Determine the exact positions of instructions with unwind ops */
	if (code->unw_op_count) {
		int curr_ins, curr_ins_pos;

		/* Slot index of the start of the bundle just emitted: three
		 * instruction slots per 16-byte bundle */
		curr_ins_pos = ((code->buf - code->region_start - 16) / 16) * 3;
		for (i = 0; i < 3; ++i) {
			if (! (nops & (1 << i))) {
				/* Real instruction in slot i */
				ins_pos [curr_ins] = curr_ins_pos + i;

		for (i = code->unw_op_pos; i < code->unw_op_count; ++i) {
			if (code->unw_ops_pos [i] < n) {
				code->unw_ops [i].when = ins_pos [code->unw_ops_pos [i]];
				//printf ("UNW-OP: %d -> %d\n", code->unw_ops_pos [i], code->unw_ops [i].when);
		if (code->unw_op_pos < code->unw_op_count)
			code->unw_op_pos += n;

	if (n == code->nins) {
	/* Shift the unemitted instructions, their types and stop flags down
	 * by n */
	memcpy (&code->instructions [0], &code->instructions [n], (code->nins - n) * sizeof (guint64));
	memcpy (&code->itypes [0], &code->itypes [n], (code->nins - n) * sizeof (int));
	memcpy (&stops [0], &stops [n], (code->nins - n) * sizeof (int));
/*
 * ia64_emit_bundle: simple bundling scheduler for the buffered
 * instructions in CODE. It first tries to place three instructions in one
 * bundle, then two (with a filler nop at the end, in the middle, or at
 * the beginning), and finally falls back to emitting one real instruction
 * per bundle. When FLUSH is false, a partially filled buffer is kept for
 * later instead of being force-emitted.
 *
 * NOTE(review): this fragment is missing a number of original lines —
 * the return type, braces, `continue`/`break` statements, the `found`
 * flag and `stops` array declarations. Visible code kept verbatim.
 */
ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
	int i, ins_type, template, nins_to_emit;
	int deps_start [16];

	/*
	 * We implement a simple scheduler which tries to put three instructions
	 * per bundle, then two, then one.
	 */
	ia64_analyze_deps (code, deps_start, stops);

	if ((code->nins >= 3) && !code->one_ins_per_bundle) {
		/* Find a suitable template */
		for (template = 0; template < 32; ++template) {
			/* Template stops must match the computed stops exactly */
			if (stops_in_template [template][0] != stops [0] ||
				stops_in_template [template][1] != stops [1] ||
				stops_in_template [template][2] != stops [2])

			for (i = 0; i < 3; ++i) {
				ins_type = ins_types_in_template [template][i];
				switch (code->itypes [i]) {
				case IA64_INS_TYPE_A:
					/* A-type instructions fit either an I or an M slot */
					found &= (ins_type == IA64_INS_TYPE_I) || (ins_type == IA64_INS_TYPE_M);
					found &= (ins_type == code->itypes [i]);

				found = debug_ins_sched ();

				ia64_real_emit_bundle (code, deps_start, stops, 3, template, code->instructions [0], code->instructions [1], code->instructions [2], 0);

	if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
		/* Wait for more instructions */

	/* If it didn't work out, try putting two instructions into one bundle */
	if ((code->nins >= 2) && !code->one_ins_per_bundle) {
		/* Try a nop at the end */
		for (template = 0; template < 32; ++template) {
			if (stops_in_template [template][0] != stops [0] ||
				((stops_in_template [template][1] != stops [1]) &&
				 (stops_in_template [template][2] != stops [1])))

			if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
				!ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [1]))

			if (!debug_ins_sched ())

			ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], code->instructions [1], nops_for_ins_types [ins_types_in_template [template][2]], 1 << 2);

	if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
		/* Wait for more instructions */

	if ((code->nins >= 2) && !code->one_ins_per_bundle) {
		/* Try a nop in the middle */
		for (template = 0; template < 32; ++template) {
			if (((stops_in_template [template][0] != stops [0]) &&
				 (stops_in_template [template][1] != stops [0])) ||
				stops_in_template [template][2] != stops [1])

			if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
				!ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))

			if (!debug_ins_sched ())

			ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], nops_for_ins_types [ins_types_in_template [template][1]], code->instructions [1], 1 << 1);

	if ((code->nins >= 2) && flush && !code->one_ins_per_bundle) {
		/* Try a nop at the beginning */
		for (template = 0; template < 32; ++template) {
			if ((stops_in_template [template][1] != stops [0]) ||
				(stops_in_template [template][2] != stops [1]))

			if (!ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [0]) ||
				!ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))

			if (!debug_ins_sched ())

			ia64_real_emit_bundle (code, deps_start, stops, 2, template, nops_for_ins_types [ins_types_in_template [template][0]], code->instructions [0], code->instructions [1], 1 << 0);

	if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
		/* Wait for more instructions */

	/* Fall back: one real instruction per bundle, nops elsewhere. For
	 * each type the two calls are the with-stop and without-stop template
	 * variants (the if/else between them is not visible here). */
	nins_to_emit = code->nins;

	while (nins_to_emit > 0) {
		if (!debug_ins_sched ())

		switch (code->itypes [0]) {
		case IA64_INS_TYPE_A:
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
		case IA64_INS_TYPE_I:
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
		case IA64_INS_TYPE_M:
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
		case IA64_INS_TYPE_B:
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIB, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
		case IA64_INS_TYPE_F:
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
			ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFI, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
		case IA64_INS_TYPE_LX:
			/* LX uses two buffered entries (n == 2) to fill the L+X slots */
			if (stops [0] || stops [1])
				ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
				ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLX, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
			g_assert_not_reached ();
3660 unw_dyn_region_info_t
*
3661 mono_ia64_create_unwind_region (Ia64CodegenState
*code
)
3663 unw_dyn_region_info_t
*r
;
3665 g_assert (code
->nins
== 0);
3666 r
= g_malloc0 (_U_dyn_region_info_size (code
->unw_op_count
));
3667 memcpy (&r
->op
, &code
->unw_ops
, sizeof (unw_dyn_op_t
) * code
->unw_op_count
);
3668 r
->op_count
= code
->unw_op_count
;
3669 r
->insn_count
= ((code
->buf
- code
->region_start
) >> 4) * 3;
3670 code
->unw_op_count
= 0;
3671 code
->unw_op_pos
= 0;
3672 code
->region_start
= code
->buf
;
3678 ia64_patch (unsigned char* code
, gpointer target
)
3681 guint64 instructions
[3];
3682 guint8 gen_buf
[16];
3683 Ia64CodegenState gen
;
3688 * code encodes both the position inside the buffer and code.nins when
3689 * the instruction was emitted.
3691 ins_to_skip
= (guint64
)code
% 16;
3692 code
= (unsigned char*)((guint64
)code
& ~15);
3695 * Search for the first instruction which is 'patchable', skipping
3696 * ins_to_skip instructions.
3701 template = ia64_bundle_template (code
);
3702 instructions
[0] = ia64_bundle_ins1 (code
);
3703 instructions
[1] = ia64_bundle_ins2 (code
);
3704 instructions
[2] = ia64_bundle_ins3 (code
);
3706 ia64_codegen_init (gen
, gen_buf
);
3709 for (i
= 0; i
< 3; ++i
) {
3710 guint64 ins
= instructions
[i
];
3711 int opcode
= ia64_ins_opcode (ins
);
3713 if (ins
== nops_for_ins_types
[ins_types_in_template
[template][i
]])
3721 switch (ins_types_in_template
[template][i
]) {
3722 case IA64_INS_TYPE_A
:
3723 case IA64_INS_TYPE_M
:
3724 if ((opcode
== 8) && (ia64_ins_x2a (ins
) == 2) && (ia64_ins_ve (ins
) == 0)) {
3726 ia64_adds_imm_pred (gen
, ia64_ins_qp (ins
), ia64_ins_r1 (ins
), (guint64
)target
, ia64_ins_r3 (ins
));
3727 instructions
[i
] = gen
.instructions
[0];
3733 case IA64_INS_TYPE_B
:
3734 if ((opcode
== 4) && (ia64_ins_btype (ins
) == 0)) {
3736 gint64 disp
= ((guint8
*)target
- code
) >> 4;
3739 ia64_br_cond_hint_pred (gen
, ia64_ins_qp (ins
), disp
, 0, 0, 0);
3741 instructions
[i
] = gen
.instructions
[0];
3744 else if (opcode
== 5) {
3746 gint64 disp
= ((guint8
*)target
- code
) >> 4;
3749 ia64_br_call_hint_pred (gen
, ia64_ins_qp (ins
), ia64_ins_b1 (ins
), disp
, 0, 0, 0);
3750 instructions
[i
] = gen
.instructions
[0];
3756 case IA64_INS_TYPE_LX
:
3760 if ((opcode
== 6) && (ia64_ins_vc (ins
) == 0)) {
3762 ia64_movl_pred (gen
, ia64_ins_qp (ins
), ia64_ins_r1 (ins
), target
);
3763 instructions
[1] = gen
.instructions
[0];
3764 instructions
[2] = gen
.instructions
[1];
3777 ia64_codegen_init (gen
, code
);
3778 ia64_emit_bundle_template (&gen
, template, instructions
[0], instructions
[1], instructions
[2]);
3788 mono_arch_patch_code (MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gboolean run_cctors
)
3790 MonoJumpInfo
*patch_info
;
3792 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
3793 unsigned char *ip
= patch_info
->ip
.i
+ code
;
3794 const unsigned char *target
;
3796 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
);
3798 if (patch_info
->type
== MONO_PATCH_INFO_NONE
)
3800 if (mono_compile_aot
) {
3804 ia64_patch (ip
, (gpointer
)target
);
3809 mono_arch_emit_prolog (MonoCompile
*cfg
)
3811 MonoMethod
*method
= cfg
->method
;
3812 MonoMethodSignature
*sig
;
3814 int alloc_size
, pos
, i
;
3815 Ia64CodegenState code
;
3818 sig
= mono_method_signature (method
);
3821 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
3823 cfg
->code_size
= MAX (cfg
->header
->code_size
* 4, 512);
3825 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
3826 cfg
->code_size
+= 1024;
3827 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
3828 cfg
->code_size
+= 1024;
3830 cfg
->native_code
= g_malloc (cfg
->code_size
);
3832 ia64_codegen_init (code
, cfg
->native_code
);
3834 alloc_size
= ALIGN_TO (cfg
->stack_offset
, MONO_ARCH_FRAME_ALIGNMENT
);
3835 if (cfg
->param_area
)
3836 alloc_size
+= cfg
->param_area
;
3840 alloc_size
= ALIGN_TO (alloc_size
, MONO_ARCH_FRAME_ALIGNMENT
);
3842 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
3843 /* Force sp to be saved/restored */
3844 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
;
3846 cfg
->arch
.stack_alloc_size
= alloc_size
;
3850 if (method
->save_lmf
) {
3851 /* No LMF on IA64 */
3856 ia64_unw_save_reg (code
, UNW_IA64_AR_PFS
, UNW_IA64_GR
+ cfg
->arch
.reg_saved_ar_pfs
);
3857 ia64_alloc (code
, cfg
->arch
.reg_saved_ar_pfs
, cfg
->arch
.reg_local0
- cfg
->arch
.reg_in0
, cfg
->arch
.reg_out0
- cfg
->arch
.reg_local0
, cfg
->arch
.n_out_regs
, 0);
3858 ia64_unw_save_reg (code
, UNW_IA64_RP
, UNW_IA64_GR
+ cfg
->arch
.reg_saved_b0
);
3859 ia64_mov_from_br (code
, cfg
->arch
.reg_saved_b0
, IA64_B0
);
3861 if ((alloc_size
|| cinfo
->stack_usage
) && !cfg
->arch
.omit_fp
) {
3862 ia64_unw_save_reg (code
, UNW_IA64_SP
, UNW_IA64_GR
+ cfg
->arch
.reg_saved_sp
);
3863 ia64_mov (code
, cfg
->arch
.reg_saved_sp
, IA64_SP
);
3864 if (cfg
->frame_reg
!= cfg
->arch
.reg_saved_sp
)
3865 ia64_mov (code
, cfg
->frame_reg
, IA64_SP
);
3869 #if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
3870 int pagesize
= getpagesize ();
3872 if (alloc_size
>= pagesize
) {
3873 gint32 remaining_size
= alloc_size
;
3875 /* Generate stack touching code */
3876 ia64_mov (code
, GP_SCRATCH_REG
, IA64_SP
);
3877 while (remaining_size
>= pagesize
) {
3878 ia64_movl (code
, GP_SCRATCH_REG2
, pagesize
);
3879 ia64_sub (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
3880 ia64_ld8 (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
);
3881 remaining_size
-= pagesize
;
3885 if (ia64_is_imm14 (-alloc_size
)) {
3886 if (cfg
->arch
.omit_fp
)
3887 ia64_unw_add (code
, UNW_IA64_SP
, (-alloc_size
));
3888 ia64_adds_imm (code
, IA64_SP
, (-alloc_size
), IA64_SP
);
3891 ia64_movl (code
, GP_SCRATCH_REG
, -alloc_size
);
3892 if (cfg
->arch
.omit_fp
)
3893 ia64_unw_add (code
, UNW_IA64_SP
, (-alloc_size
));
3894 ia64_add (code
, IA64_SP
, GP_SCRATCH_REG
, IA64_SP
);
3898 ia64_begin_bundle (code
);
3900 /* Initialize unwind info */
3901 cfg
->arch
.r_pro
= mono_ia64_create_unwind_region (&code
);
3903 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
3904 if ((cinfo
->ret
.storage
== ArgInIReg
) && (cfg
->ret
->opcode
!= OP_REGVAR
)) {
3905 /* Save volatile arguments to the stack */
3910 /* Keep this in sync with emit_load_volatile_arguments */
3911 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3912 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3913 gint32 stack_offset
;
3916 inst
= cfg
->args
[i
];
3918 if (sig
->hasthis
&& (i
== 0))
3919 arg_type
= &mono_defaults
.object_class
->byval_arg
;
3921 arg_type
= sig
->params
[i
- sig
->hasthis
];
3923 arg_type
= mono_type_get_underlying_type (arg_type
);
3925 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3928 * FIXME: Native code might pass non register sized integers
3929 * without initializing the upper bits.
3931 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
&& !arg_type
->byref
&& ainfo
->storage
== ArgInIReg
) {
3932 int reg
= cfg
->arch
.reg_in0
+ ainfo
->reg
;
3934 switch (mono_type_to_load_membase (cfg
, arg_type
)) {
3935 case OP_LOADI1_MEMBASE
:
3936 ia64_sxt1 (code
, reg
, reg
);
3938 case OP_LOADU1_MEMBASE
:
3939 ia64_zxt1 (code
, reg
, reg
);
3941 case OP_LOADI2_MEMBASE
:
3942 ia64_sxt2 (code
, reg
, reg
);
3944 case OP_LOADU2_MEMBASE
:
3945 ia64_zxt2 (code
, reg
, reg
);
3952 /* Save volatile arguments to the stack */
3953 if (inst
->opcode
!= OP_REGVAR
) {
3954 switch (ainfo
->storage
) {
3957 case ArgInFloatRegR4
:
3958 g_assert (inst
->opcode
== OP_REGOFFSET
);
3959 if (ia64_is_adds_imm (inst
->inst_offset
))
3960 ia64_adds_imm (code
, GP_SCRATCH_REG
, inst
->inst_offset
, inst
->inst_basereg
);
3962 ia64_movl (code
, GP_SCRATCH_REG2
, inst
->inst_offset
);
3963 ia64_add (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
3965 if (arg_type
->byref
)
3966 ia64_st8_hint (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_in0
+ ainfo
->reg
, 0);
3968 switch (arg_type
->type
) {
3970 ia64_stfs_hint (code
, GP_SCRATCH_REG
, ainfo
->reg
, 0);
3973 ia64_stfd_hint (code
, GP_SCRATCH_REG
, ainfo
->reg
, 0);
3976 ia64_st8_hint (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_in0
+ ainfo
->reg
, 0);
3984 if (ainfo
->nslots
!= ainfo
->nregs
)
3987 g_assert (inst
->opcode
== OP_REGOFFSET
);
3988 ia64_adds_imm (code
, GP_SCRATCH_REG
, inst
->inst_offset
, inst
->inst_basereg
);
3989 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
3990 switch (ainfo
->atype
) {
3991 case AggregateNormal
:
3992 ia64_st8_inc_imm_hint (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_in0
+ ainfo
->reg
+ i
, sizeof (gpointer
), 0);
3994 case AggregateSingleHFA
:
3995 ia64_stfs_inc_imm_hint (code
, GP_SCRATCH_REG
, ainfo
->reg
+ i
, 4, 0);
3997 case AggregateDoubleHFA
:
3998 ia64_stfd_inc_imm_hint (code
, GP_SCRATCH_REG
, ainfo
->reg
+ i
, sizeof (gpointer
), 0);
4006 g_assert_not_reached ();
4010 if (inst
->opcode
== OP_REGVAR
) {
4011 /* Argument allocated to (non-volatile) register */
4012 switch (ainfo
->storage
) {
4014 if (inst
->dreg
!= cfg
->arch
.reg_in0
+ ainfo
->reg
)
4015 ia64_mov (code
, inst
->dreg
, cfg
->arch
.reg_in0
+ ainfo
->reg
);
4018 ia64_adds_imm (code
, GP_SCRATCH_REG
, 16 + ainfo
->offset
, cfg
->frame_reg
);
4019 ia64_ld8 (code
, inst
->dreg
, GP_SCRATCH_REG
);
4027 if (method
->save_lmf
) {
4028 /* No LMF on IA64 */
4031 ia64_codegen_close (code
);
4033 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4034 code
.buf
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
.buf
, TRUE
);
4036 cfg
->code_len
= code
.buf
- cfg
->native_code
;
4038 g_assert (cfg
->code_len
< cfg
->code_size
);
4040 cfg
->arch
.prolog_end_offset
= cfg
->code_len
;
4046 mono_arch_emit_epilog (MonoCompile
*cfg
)
4048 MonoMethod
*method
= cfg
->method
;
4050 int max_epilog_size
= 16 * 4;
4051 Ia64CodegenState code
;
4056 if (mono_jit_trace_calls
!= NULL
)
4057 max_epilog_size
+= 1024;
4059 cfg
->arch
.epilog_begin_offset
= cfg
->code_len
;
4061 while (cfg
->code_len
+ max_epilog_size
> cfg
->code_size
) {
4062 cfg
->code_size
*= 2;
4063 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4064 mono_jit_stats
.code_reallocs
++;
4067 /* FIXME: Emit unwind info */
4069 buf
= cfg
->native_code
+ cfg
->code_len
;
4071 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4072 buf
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, buf
, TRUE
);
4074 ia64_codegen_init (code
, buf
);
4076 /* the code restoring the registers must be kept in sync with OP_JMP */
4079 if (method
->save_lmf
) {
4080 /* No LMF on IA64 */
4083 /* Load returned vtypes into registers if needed */
4084 cinfo
= get_call_info (cfg
, cfg
->mempool
, mono_method_signature (method
), FALSE
);
4085 ainfo
= &cinfo
->ret
;
4086 switch (ainfo
->storage
) {
4088 if (ainfo
->nslots
!= ainfo
->nregs
)
4091 g_assert (cfg
->ret
->opcode
== OP_REGOFFSET
);
4092 ia64_adds_imm (code
, GP_SCRATCH_REG
, cfg
->ret
->inst_offset
, cfg
->ret
->inst_basereg
);
4093 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
4094 switch (ainfo
->atype
) {
4095 case AggregateNormal
:
4096 ia64_ld8_inc_imm_hint (code
, ainfo
->reg
+ i
, GP_SCRATCH_REG
, sizeof (gpointer
), 0);
4098 case AggregateSingleHFA
:
4099 ia64_ldfs_inc_imm_hint (code
, ainfo
->reg
+ i
, GP_SCRATCH_REG
, 4, 0);
4101 case AggregateDoubleHFA
:
4102 ia64_ldfd_inc_imm_hint (code
, ainfo
->reg
+ i
, GP_SCRATCH_REG
, sizeof (gpointer
), 0);
4105 g_assert_not_reached ();
4113 ia64_begin_bundle (code
);
4115 code
.region_start
= cfg
->native_code
;
4117 /* Label the unwind state at the start of the exception throwing region */
4118 //ia64_unw_label_state (code, 1234);
4120 if (cfg
->arch
.stack_alloc_size
) {
4121 if (cfg
->arch
.omit_fp
) {
4122 if (ia64_is_imm14 (cfg
->arch
.stack_alloc_size
)) {
4123 ia64_unw_pop_frames (code
, 1);
4124 ia64_adds_imm (code
, IA64_SP
, (cfg
->arch
.stack_alloc_size
), IA64_SP
);
4126 ia64_movl (code
, GP_SCRATCH_REG
, cfg
->arch
.stack_alloc_size
);
4127 ia64_unw_pop_frames (code
, 1);
4128 ia64_add (code
, IA64_SP
, GP_SCRATCH_REG
, IA64_SP
);
4132 ia64_unw_pop_frames (code
, 1);
4133 ia64_mov (code
, IA64_SP
, cfg
->arch
.reg_saved_sp
);
4136 ia64_mov_to_ar_i (code
, IA64_PFS
, cfg
->arch
.reg_saved_ar_pfs
);
4137 ia64_mov_ret_to_br (code
, IA64_B0
, cfg
->arch
.reg_saved_b0
);
4138 ia64_br_ret_reg (code
, IA64_B0
);
4140 ia64_codegen_close (code
);
4142 cfg
->arch
.r_epilog
= mono_ia64_create_unwind_region (&code
);
4143 cfg
->arch
.r_pro
->next
= cfg
->arch
.r_epilog
;
4145 cfg
->code_len
= code
.buf
- cfg
->native_code
;
4147 g_assert (cfg
->code_len
< cfg
->code_size
);
4151 mono_arch_emit_exceptions (MonoCompile
*cfg
)
4153 MonoJumpInfo
*patch_info
;
4155 Ia64CodegenState code
;
4156 gboolean empty
= TRUE
;
4157 //unw_dyn_region_info_t *r_exceptions;
4158 MonoClass
*exc_classes
[16];
4159 guint8
*exc_throw_start
[16], *exc_throw_end
[16];
4160 guint32 code_size
= 0;
4162 /* Compute needed space */
4163 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4164 if (patch_info
->type
== MONO_PATCH_INFO_EXC
)
4166 if (patch_info
->type
== MONO_PATCH_INFO_R8
)
4167 code_size
+= 8 + 7; /* sizeof (double) + alignment */
4168 if (patch_info
->type
== MONO_PATCH_INFO_R4
)
4169 code_size
+= 4 + 7; /* sizeof (float) + alignment */
4175 while (cfg
->code_len
+ code_size
> (cfg
->code_size
- 16)) {
4176 cfg
->code_size
*= 2;
4177 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4178 mono_jit_stats
.code_reallocs
++;
4181 ia64_codegen_init (code
, cfg
->native_code
+ cfg
->code_len
);
4183 /* The unwind state here is the same as before the epilog */
4184 //ia64_unw_copy_state (code, 1234);
4186 /* add code to raise exceptions */
4187 /* FIXME: Optimize this */
4189 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4190 switch (patch_info
->type
) {
4191 case MONO_PATCH_INFO_EXC
: {
4192 MonoClass
*exc_class
;
4195 guint64 exc_token_index
;
4197 exc_class
= mono_class_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
4198 g_assert (exc_class
);
4199 exc_token_index
= mono_metadata_token_index (exc_class
->type_token
);
4200 throw_ip
= cfg
->native_code
+ patch_info
->ip
.i
;
4202 ia64_begin_bundle (code
);
4204 ia64_patch (cfg
->native_code
+ patch_info
->ip
.i
, code
.buf
);
4206 /* Find a throw sequence for the same exception class */
4207 for (i
= 0; i
< nthrows
; ++i
)
4208 if (exc_classes
[i
] == exc_class
)
4212 gint64 offset
= exc_throw_end
[i
] - 16 - throw_ip
;
4214 if (ia64_is_adds_imm (offset
))
4215 ia64_adds_imm (code
, cfg
->arch
.reg_out0
+ 1, offset
, IA64_R0
);
4217 ia64_movl (code
, cfg
->arch
.reg_out0
+ 1, offset
);
4219 buf
= code
.buf
+ code
.nins
;
4220 ia64_br_cond_pred (code
, 0, 0);
4221 ia64_begin_bundle (code
);
4222 ia64_patch (buf
, exc_throw_start
[i
]);
4224 patch_info
->type
= MONO_PATCH_INFO_NONE
;
4229 ia64_movl (code
, cfg
->arch
.reg_out0
+ 1, 0);
4231 ia64_begin_bundle (code
);
4234 exc_classes
[nthrows
] = exc_class
;
4235 exc_throw_start
[nthrows
] = code
.buf
;
4239 if (ia64_is_adds_imm (exc_token_index
))
4240 ia64_adds_imm (code
, cfg
->arch
.reg_out0
+ 0, exc_token_index
, IA64_R0
);
4242 ia64_movl (code
, cfg
->arch
.reg_out0
+ 0, exc_token_index
);
4244 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
4245 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
4246 patch_info
->ip
.i
= code
.buf
+ code
.nins
- cfg
->native_code
;
4249 ia64_movl (code
, GP_SCRATCH_REG
, 0);
4250 ia64_ld8_inc_imm (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
, 8);
4251 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG2
);
4252 ia64_ld8 (code
, IA64_GP
, GP_SCRATCH_REG
);
4254 ia64_br_call_reg (code
, IA64_B0
, IA64_B6
);
4256 /* Patch up the throw offset */
4257 ia64_begin_bundle (code
);
4259 ia64_patch (buf
, (gpointer
)(code
.buf
- 16 - throw_ip
));
4262 exc_throw_end
[nthrows
] = code
.buf
;
4276 /* The unwinder needs this to work */
4277 ia64_break_i (code
, 0);
4279 ia64_codegen_close (code
);
4282 //r_exceptions = mono_ia64_create_unwind_region (&code);
4283 //cfg->arch.r_epilog = r_exceptions;
4285 cfg
->code_len
= code
.buf
- cfg
->native_code
;
4287 g_assert (cfg
->code_len
< cfg
->code_size
);
4291 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
4293 Ia64CodegenState code
;
4294 CallInfo
*cinfo
= NULL
;
4295 MonoMethodSignature
*sig
;
4297 int i
, n
, stack_area
= 0;
4299 ia64_codegen_init (code
, p
);
4301 /* Keep this in sync with mono_arch_get_argument_info */
4303 if (enable_arguments
) {
4304 /* Allocate a new area on the stack and save arguments there */
4305 sig
= mono_method_signature (cfg
->method
);
4307 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
4309 n
= sig
->param_count
+ sig
->hasthis
;
4311 stack_area
= ALIGN_TO (n
* 8, 16);
4314 ia64_movl (code
, GP_SCRATCH_REG
, stack_area
);
4316 ia64_sub (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG
);
4318 /* FIXME: Allocate out registers */
4320 ia64_mov (code
, cfg
->arch
.reg_out0
+ 1, IA64_SP
);
4322 /* Required by the ABI */
4323 ia64_adds_imm (code
, IA64_SP
, -16, IA64_SP
);
4325 add_patch_info (cfg
, code
, MONO_PATCH_INFO_METHODCONST
, cfg
->method
);
4326 ia64_movl (code
, cfg
->arch
.reg_out0
+ 0, 0);
4328 /* Save arguments to the stack */
4329 for (i
= 0; i
< n
; ++i
) {
4330 ins
= cfg
->args
[i
];
4332 if (ins
->opcode
== OP_REGVAR
) {
4333 ia64_movl (code
, GP_SCRATCH_REG
, (i
* 8));
4334 ia64_add (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_out0
+ 1, GP_SCRATCH_REG
);
4335 ia64_st8 (code
, GP_SCRATCH_REG
, ins
->dreg
);
4338 ia64_movl (code
, GP_SCRATCH_REG
, ins
->inst_offset
);
4339 ia64_add (code
, GP_SCRATCH_REG
, ins
->inst_basereg
, GP_SCRATCH_REG
);
4340 ia64_ld8 (code
, GP_SCRATCH_REG2
, GP_SCRATCH_REG
);
4341 ia64_movl (code
, GP_SCRATCH_REG
, (i
* 8));
4342 ia64_add (code
, GP_SCRATCH_REG
, cfg
->arch
.reg_out0
+ 1, GP_SCRATCH_REG
);
4343 ia64_st8 (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG2
);
4348 ia64_mov (code
, cfg
->arch
.reg_out0
+ 1, IA64_R0
);
4351 ia64_mov (code
, cfg
->arch
.reg_out0
+ 1, IA64_R0
);
4353 add_patch_info (cfg
, code
, MONO_PATCH_INFO_METHODCONST
, cfg
->method
);
4354 ia64_movl (code
, cfg
->arch
.reg_out0
+ 0, 0);
4356 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, (gpointer
)func
);
4358 if (enable_arguments
&& stack_area
) {
4359 ia64_movl (code
, GP_SCRATCH_REG
, stack_area
);
4361 ia64_add (code
, IA64_SP
, IA64_SP
, GP_SCRATCH_REG
);
4363 ia64_adds_imm (code
, IA64_SP
, 16, IA64_SP
);
4366 ia64_codegen_close (code
);
4372 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
4374 Ia64CodegenState code
;
4375 CallInfo
*cinfo
= NULL
;
4376 MonoMethod
*method
= cfg
->method
;
4377 MonoMethodSignature
*sig
= mono_method_signature (cfg
->method
);
4379 ia64_codegen_init (code
, p
);
4381 cinfo
= get_call_info (cfg
, cfg
->mempool
, sig
, FALSE
);
4383 /* Save return value + pass it to func */
4384 switch (cinfo
->ret
.storage
) {
4388 ia64_mov (code
, cfg
->arch
.reg_saved_return_val
, cinfo
->ret
.reg
);
4389 ia64_mov (code
, cfg
->arch
.reg_out0
+ 1, cinfo
->ret
.reg
);
4392 ia64_adds_imm (code
, IA64_SP
, -16, IA64_SP
);
4393 ia64_adds_imm (code
, GP_SCRATCH_REG
, 16, IA64_SP
);
4394 ia64_stfd_hint (code
, GP_SCRATCH_REG
, cinfo
->ret
.reg
, 0);
4395 ia64_fmov (code
, 8 + 1, cinfo
->ret
.reg
);
4397 case ArgValuetypeAddrInIReg
:
4398 ia64_mov (code
, cfg
->arch
.reg_out0
+ 1, cfg
->arch
.reg_in0
+ cinfo
->ret
.reg
);
4407 add_patch_info (cfg
, code
, MONO_PATCH_INFO_METHODCONST
, method
);
4408 ia64_movl (code
, cfg
->arch
.reg_out0
+ 0, 0);
4409 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, (gpointer
)func
);
4411 /* Restore return value */
4412 switch (cinfo
->ret
.storage
) {
4416 ia64_mov (code
, cinfo
->ret
.reg
, cfg
->arch
.reg_saved_return_val
);
4419 ia64_adds_imm (code
, GP_SCRATCH_REG
, 16, IA64_SP
);
4420 ia64_ldfd (code
, cinfo
->ret
.reg
, GP_SCRATCH_REG
);
4422 case ArgValuetypeAddrInIReg
:
4430 ia64_codegen_close (code
);
4436 mono_arch_save_unwind_info (MonoCompile
*cfg
)
4440 /* FIXME: Unregister this for dynamic methods */
4442 di
= g_malloc0 (sizeof (unw_dyn_info_t
));
4443 di
->start_ip
= (unw_word_t
) cfg
->native_code
;
4444 di
->end_ip
= (unw_word_t
) cfg
->native_code
+ cfg
->code_len
;
4446 di
->format
= UNW_INFO_FORMAT_DYNAMIC
;
4447 di
->u
.pi
.name_ptr
= (unw_word_t
)mono_method_full_name (cfg
->method
, TRUE
);
4448 di
->u
.pi
.regions
= cfg
->arch
.r_pro
;
4450 _U_dyn_register (di
);
4454 unw_dyn_region_info_t *region = di->u.pi.regions;
4456 printf ("Unwind info for method %s:\n", mono_method_full_name (cfg->method, TRUE));
4458 printf (" [Region: %d]\n", region->insn_count);
4459 region = region->next;
4466 mono_arch_flush_icache (guint8
*code
, gint size
)
4468 guint8
* p
= (guint8
*)((guint64
)code
& ~(0x3f));
4469 guint8
* end
= (guint8
*)((guint64
)code
+ size
);
4471 #ifdef __INTEL_COMPILER
4472 /* icc doesn't define an fc.i instrinsic, but fc==fc.i on itanium 2 */
4479 __asm__
__volatile__ ("fc.i %0"::"r"(p
));
4480 /* FIXME: This could be increased to 128 on some cpus */
/* Flush the IA64 register stack to memory — a no-op here because libunwind
 * handles the register backing store for us. */
void
mono_arch_flush_register_windows (void)
{
	/* Not needed because of libunwind */
}
4493 mono_arch_is_inst_imm (gint64 imm
)
4495 /* The lowering pass will take care of it */
4501 * Determine whenever the trap whose info is in SIGINFO is caused by
4505 mono_arch_is_int_overflow (void *sigctx
, void *info
)
4507 /* Division is emulated with explicit overflow checks */
4512 mono_arch_get_patch_offset (guint8
*code
)
4520 mono_arch_get_delegate_method_ptr_addr (guint8
* code
, mgreg_t
*regs
)
4528 mono_arch_setup_jit_tls_data (MonoJitTlsData
*tls
)
4533 mono_arch_free_jit_tls_data (MonoJitTlsData
*tls
)
4537 #ifdef MONO_ARCH_HAVE_IMT
4540 * LOCKING: called with the domain lock held
4543 mono_arch_build_imt_thunk (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
4544 gpointer fail_tramp
)
4548 guint8
*start
, *buf
;
4549 Ia64CodegenState code
;
4552 buf
= g_malloc0 (size
);
4553 ia64_codegen_init (code
, buf
);
4555 /* IA64_R9 contains the IMT method */
4557 for (i
= 0; i
< count
; ++i
) {
4558 MonoIMTCheckItem
*item
= imt_entries
[i
];
4559 ia64_begin_bundle (code
);
4560 item
->code_target
= (guint8
*)code
.buf
+ code
.nins
;
4561 if (item
->is_equals
) {
4562 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
4564 if (item
->check_target_idx
|| fail_case
) {
4565 if (!item
->compare_done
|| fail_case
) {
4566 ia64_movl (code
, GP_SCRATCH_REG
, item
->key
);
4567 ia64_cmp_eq (code
, 6, 7, IA64_R9
, GP_SCRATCH_REG
);
4569 item
->jmp_code
= (guint8
*)code
.buf
+ code
.nins
;
4570 ia64_br_cond_pred (code
, 7, 0);
4572 if (item
->has_target_code
) {
4573 ia64_movl (code
, GP_SCRATCH_REG
, item
->value
.target_code
);
4575 ia64_movl (code
, GP_SCRATCH_REG
, &(vtable
->vtable
[item
->value
.vtable_slot
]));
4576 ia64_ld8 (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
);
4578 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
4579 ia64_br_cond_reg (code
, IA64_B6
);
4582 ia64_begin_bundle (code
);
4583 ia64_patch (item
->jmp_code
, (guint8
*)code
.buf
+ code
.nins
);
4584 ia64_movl (code
, GP_SCRATCH_REG
, fail_tramp
);
4585 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
4586 ia64_br_cond_reg (code
, IA64_B6
);
4587 item
->jmp_code
= NULL
;
4590 /* enable the commented code to assert on wrong method */
4591 #if ENABLE_WRONG_METHOD_CHECK
4592 g_assert_not_reached ();
4594 ia64_movl (code
, GP_SCRATCH_REG
, &(vtable
->vtable
[item
->value
.vtable_slot
]));
4595 ia64_ld8 (code
, GP_SCRATCH_REG
, GP_SCRATCH_REG
);
4596 ia64_mov_to_br (code
, IA64_B6
, GP_SCRATCH_REG
);
4597 ia64_br_cond_reg (code
, IA64_B6
);
4598 #if ENABLE_WRONG_METHOD_CHECK
4599 g_assert_not_reached ();
4603 ia64_movl (code
, GP_SCRATCH_REG
, item
->key
);
4604 ia64_cmp_geu (code
, 6, 7, IA64_R9
, GP_SCRATCH_REG
);
4605 item
->jmp_code
= (guint8
*)code
.buf
+ code
.nins
;
4606 ia64_br_cond_pred (code
, 6, 0);
4609 /* patch the branches to get to the target items */
4610 for (i
= 0; i
< count
; ++i
) {
4611 MonoIMTCheckItem
*item
= imt_entries
[i
];
4612 if (item
->jmp_code
) {
4613 if (item
->check_target_idx
) {
4614 ia64_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
4619 ia64_codegen_close (code
);
4620 g_assert (code
.buf
- buf
<= size
);
4622 size
= code
.buf
- buf
;
4624 start
= mono_method_alloc_generic_virtual_thunk (domain
, size
+ 16);
4625 start
= (gpointer
)ALIGN_TO (start
, 16);
4627 start
= mono_domain_code_reserve (domain
, size
);
4629 memcpy (start
, buf
, size
);
4631 mono_arch_flush_icache (start
, size
);
4633 mono_stats
.imt_thunks_size
+= size
;
4639 mono_arch_find_imt_method (mgreg_t
*regs
, guint8
*code
)
4641 return (MonoMethod
*)regs
[IA64_R9
];
4645 mono_arch_emit_imt_argument (MonoCompile
*cfg
, MonoCallInst
*call
, MonoInst
*imt_arg
)
4647 /* Done by the implementation of the CALL_MEMBASE opcodes */
4652 mono_arch_get_this_arg_from_call (mgreg_t
*regs
, guint8
*code
)
4654 return (gpointer
)regs
[IA64_R10
];
4658 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
4664 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4666 MonoInst
*ins
= NULL
;
4668 if (cmethod
->klass
->image
== mono_defaults
.corlib
&&
4669 (strcmp (cmethod
->klass
->name_space
, "System.Threading") == 0) &&
4670 (strcmp (cmethod
->klass
->name
, "Interlocked") == 0)) {
4673 * We don't use the generic version in mini_emit_inst_for_method () since we
4674 * ia64 has atomic_add_imm opcodes.
4676 if (strcmp (cmethod
->name
, "Increment") == 0) {
4679 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4680 opcode
= OP_ATOMIC_ADD_IMM_NEW_I4
;
4681 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4682 opcode
= OP_ATOMIC_ADD_IMM_NEW_I8
;
4684 g_assert_not_reached ();
4685 MONO_INST_NEW (cfg
, ins
, opcode
);
4686 ins
->dreg
= mono_alloc_preg (cfg
);
4688 ins
->inst_basereg
= args
[0]->dreg
;
4689 ins
->inst_offset
= 0;
4690 MONO_ADD_INS (cfg
->cbb
, ins
);
4691 } else if (strcmp (cmethod
->name
, "Decrement") == 0) {
4694 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4695 opcode
= OP_ATOMIC_ADD_IMM_NEW_I4
;
4696 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4697 opcode
= OP_ATOMIC_ADD_IMM_NEW_I8
;
4699 g_assert_not_reached ();
4700 MONO_INST_NEW (cfg
, ins
, opcode
);
4701 ins
->dreg
= mono_alloc_preg (cfg
);
4703 ins
->inst_basereg
= args
[0]->dreg
;
4704 ins
->inst_offset
= 0;
4705 MONO_ADD_INS (cfg
->cbb
, ins
);
4706 } else if (strcmp (cmethod
->name
, "Add") == 0) {
4708 gboolean is_imm
= FALSE
;
4711 if ((args
[1]->opcode
== OP_ICONST
) || (args
[1]->opcode
== OP_I8CONST
)) {
4712 imm
= (args
[1]->opcode
== OP_ICONST
) ? args
[1]->inst_c0
: args
[1]->inst_l
;
4714 is_imm
= (imm
== 1 || imm
== 4 || imm
== 8 || imm
== 16 || imm
== -1 || imm
== -4 || imm
== -8 || imm
== -16);
4718 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4719 opcode
= OP_ATOMIC_ADD_IMM_NEW_I4
;
4720 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4721 opcode
= OP_ATOMIC_ADD_IMM_NEW_I8
;
4723 g_assert_not_reached ();
4725 MONO_INST_NEW (cfg
, ins
, opcode
);
4726 ins
->dreg
= mono_alloc_ireg (cfg
);
4727 ins
->inst_basereg
= args
[0]->dreg
;
4728 ins
->inst_offset
= 0;
4729 ins
->inst_imm
= imm
;
4730 ins
->type
= (opcode
== OP_ATOMIC_ADD_IMM_NEW_I4
) ? STACK_I4
: STACK_I8
;
4732 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4733 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4734 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4735 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4737 g_assert_not_reached ();
4739 MONO_INST_NEW (cfg
, ins
, opcode
);
4740 ins
->dreg
= mono_alloc_ireg (cfg
);
4741 ins
->inst_basereg
= args
[0]->dreg
;
4742 ins
->inst_offset
= 0;
4743 ins
->sreg2
= args
[1]->dreg
;
4744 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4746 MONO_ADD_INS (cfg
->cbb
, ins
);
4754 mono_arch_print_tree (MonoInst
*tree
, int arity
)
4760 mono_arch_get_domain_intrinsic (MonoCompile
* cfg
)
4762 return mono_get_domain_intrinsic (cfg
);
4766 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
4768 /* FIXME: implement */
4769 g_assert_not_reached ();