/*
 * Sparc backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Christopher Taylor (ct@gentoo.org)
 *   Mark Crichton (crichton@gimp.org)
 *   Zoltan Varga (vargaz@freemail.hu)
 *
 * (C) 2003 Ximian, Inc.
 */
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/debug-helpers.h>
30 #include <mono/metadata/tokentype.h>
31 #include <mono/utils/mono-math.h>
32 #include <mono/utils/mono-hwcap.h>
33 #include <mono/utils/unlocked.h>
35 #include "mini-sparc.h"
37 #include "cpu-sparc.h"
38 #include "jit-icalls.h"
40 #include "mono/utils/mono-tls-inline.h"
43 * Sparc V9 means two things:
44 * - the instruction set
47 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
48 * processors in use are 64 bit processors. The V9 ABI is only usable if the
49 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
50 * instructions without using the 64 bit ABI.
55 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
56 * code. Unused input registers are used for global register allocation.
57 * - %o0..%o5 and %l7 is used for local register allocation and passing arguments
58 * - %l0..%l6 is used for global register allocation
59 * - %o7 and %g1 is used as scratch registers in opcodes
60 * - all floating point registers are used for local register allocation except %f0.
61 * Only double precision registers are used.
63 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
64 * used for local allocation.
69 * - doubles and longs must be stored in dword aligned locations
73 * The following things are not implemented or do not work:
74 * - some fp arithmetic corner cases
75 * The following tests in mono/mini are expected to fail:
76 * - test_0_simple_double_casts
77 * This test casts (guint64)-1 to double and then back to guint64 again.
78 * Under x86, it returns 0, while under sparc it returns -1.
80 * In addition to this, the runtime requires the trunc function, or its
81 * solaris counterpart, aintl, to do some double->int conversions. If this
82 * function is not available, it is emulated somewhat, but the results can be
88 * - optimize sparc_set according to the memory model
89 * - when non-AOT compiling, compute patch targets immediately so we don't
90 * have to emit the 6 byte template.
92 * - struct arguments/returns
97 * - sparc_call_simple can't be used in a lot of places since the displacement
98 * might not fit into an imm30.
99 * - g1 can't be used in a lot of places since it is used as a scratch reg in
101 * - sparc_f0 can't be used as a scratch register on V9
102 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
104 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
105 * - ins->dreg can't be used as a scatch register in r4 opcodes since it might
106 * be a double precision register which has no single precision part.
107 * - passing/returning structs is hard to implement, because:
108 * - the spec is very hard to understand
109 * - it requires knowledge about the fields of structure, needs to handle
110 * nested structures etc.
114 * Possible optimizations:
115 * - delay slot scheduling
116 * - allocate large constants to registers
117 * - add more mul/div/rem optimizations
121 #define MONO_SPARC_THR_TLS 1
125 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
126 * causing infinite loops in dominator computation. So glib-2.4 is required.
129 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
130 #error "glib 2.4 or later is required for 64 bit mode."
134 #define SIGNAL_STACK_SIZE (64 * 1024)
136 #define STACK_BIAS MONO_SPARC_STACK_BIAS
140 /* %g1 is used by sparc_set */
141 #define GP_SCRATCH_REG sparc_g4
142 /* %f0 is used for parameter passing */
143 #define FP_SCRATCH_REG sparc_f30
144 #define ARGS_OFFSET (STACK_BIAS + 128)
148 #define FP_SCRATCH_REG sparc_f0
149 #define ARGS_OFFSET 68
150 #define GP_SCRATCH_REG sparc_g1
154 /* Whenever this is a 64bit executable */
156 static gboolean v64
= TRUE
;
158 static gboolean v64
= FALSE
;
161 static gpointer
mono_arch_get_lmf_addr (void);
/*
 * mono_arch_regname:
 *
 *   Return a human-readable name for integer register number REG
 * (0..31), for use in debug output. Out-of-range values yield
 * "unknown". The returned string is static and must not be freed.
 */
const char*
mono_arch_regname (int reg)
{
	static const char *const int_reg_names [] = {
		"sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
		"sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
		"sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
		"sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
		"sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
		"sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
		"sparc_fp", "sparc_retadr"
	};

	if (reg >= 0 && reg < 32)
		return int_reg_names [reg];
	/* NOTE(review): fallback reconstructed from upstream Mono — confirm */
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a human-readable name for floating point register number
 * REG (0..31), for use in debug output. Out-of-range values yield
 * "unknown". The returned string is static and must not be freed.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *const fp_reg_names [] = {
		"sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
		"sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
		"sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
		"sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
		"sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
		"sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
		"sparc_f30", "sparc_f31"
	};

	if (reg >= 0 && reg < 32)
		return fp_reg_names [reg];
	/* NOTE(review): fallback reconstructed from upstream Mono — confirm */
	return "unknown";
}
198 * Initialize the cpu to execute managed code.
201 mono_arch_cpu_init (void)
206 * Initialize architecture specific code.
209 mono_arch_init (void)
214 * Cleanup architecture specific code.
217 mono_arch_cleanup (void)
222 mono_arch_have_fast_tls (void)
228 * This function returns the optimizations supported on this cpu.
231 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
238 * On some processors, the cmov instructions are even slower than the
241 if (mono_hwcap_sparc_is_v9
)
242 opts
|= MONO_OPT_CMOV
| MONO_OPT_FCMOV
;
244 *exclude_mask
|= MONO_OPT_CMOV
| MONO_OPT_FCMOV
;
250 * This function test for all SIMD functions supported.
252 * Returns a bitmask corresponding to all supported versions.
256 mono_arch_cpu_enumerate_simd_versions (void)
258 /* SIMD is currently unimplemented */
263 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
264 #else /* assume Sun's compiler */
265 static void flushi(void *addr
)
272 void sync_instruction_memory(caddr_t addr
, int len
);
276 mono_arch_flush_icache (guint8
*code
, gint size
)
279 /* Hopefully this is optimized based on the actual CPU */
280 sync_instruction_memory (code
, size
);
282 gulong start
= (gulong
) code
;
283 gulong end
= start
+ size
;
286 /* Sparcv9 chips only need flushes on 32 byte
287 * cacheline boundaries.
289 * Sparcv8 needs a flush every 8 bytes.
291 align
= (mono_hwcap_sparc_is_v9
? 32 : 8);
293 start
&= ~(align
- 1);
294 end
= (end
+ (align
- 1)) & ~(align
- 1);
296 while (start
< end
) {
298 __asm__
__volatile__ ("iflush %0"::"r"(start
));
310 * Flush all register windows to memory. Every register window is saved to
311 * a 16 word area on the stack pointed to by its %sp register.
314 mono_sparc_flushw (void)
316 static guint32 start
[64];
317 static int inited
= 0;
319 static void (*flushw
) (void);
324 sparc_save_imm (code
, sparc_sp
, -160, sparc_sp
);
327 sparc_restore_simple (code
);
329 g_assert ((code
- start
) < 64);
331 mono_arch_flush_icache ((guint8
*)start
, (guint8
*)code
- (guint8
*)start
);
333 flushw
= (gpointer
)start
;
/*
 * mono_arch_flush_register_windows:
 *
 *   Arch-independent entry point for flushing all SPARC register
 * windows to memory; simply delegates to mono_sparc_flushw ().
 */
void
mono_arch_flush_register_windows (void)
{
	mono_sparc_flushw ();
}
348 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
350 return sparc_is_imm13 (imm
);
354 mono_sparc_is_v9 (void) {
355 return mono_hwcap_sparc_is_v9
;
359 mono_sparc_is_sparc64 (void) {
371 ArgInFloatReg
, /* V9 only */
372 ArgInDoubleReg
/* V9 only */
377 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
380 guint32 vt_offset
; /* for valuetypes */
398 add_general (guint32
*gr
, guint32
*stack_size
, ArgInfo
*ainfo
, gboolean pair
)
400 ainfo
->offset
= *stack_size
;
403 if (*gr
>= PARAM_REGS
) {
404 ainfo
->storage
= ArgOnStack
;
407 ainfo
->storage
= ArgInIReg
;
412 /* Allways reserve stack space for parameters passed in registers */
413 (*stack_size
) += sizeof (target_mgreg_t
);
416 if (*gr
< PARAM_REGS
- 1) {
417 /* A pair of registers */
418 ainfo
->storage
= ArgInIRegPair
;
422 else if (*gr
>= PARAM_REGS
) {
423 /* A pair of stack locations */
424 ainfo
->storage
= ArgOnStackPair
;
427 ainfo
->storage
= ArgInSplitRegStack
;
432 (*stack_size
) += 2 * sizeof (target_mgreg_t
);
438 #define FLOAT_PARAM_REGS 32
441 add_float (guint32
*gr
, guint32
*stack_size
, ArgInfo
*ainfo
, gboolean single
)
443 ainfo
->offset
= *stack_size
;
446 if (*gr
>= FLOAT_PARAM_REGS
) {
447 ainfo
->storage
= ArgOnStack
;
450 /* A single is passed in an even numbered fp register */
451 ainfo
->storage
= ArgInFloatReg
;
452 ainfo
->reg
= *gr
+ 1;
457 if (*gr
< FLOAT_PARAM_REGS
) {
458 /* A double register */
459 ainfo
->storage
= ArgInDoubleReg
;
464 ainfo
->storage
= ArgOnStack
;
468 (*stack_size
) += sizeof (target_mgreg_t
);
476 * Obtain information about a call according to the calling convention.
477 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
478 * document for more information.
479 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
480 * the 'Sparc Compliance Definition 2.4' document.
483 get_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
, gboolean is_pinvoke
)
486 int n
= sig
->hasthis
+ sig
->param_count
;
487 guint32 stack_size
= 0;
491 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
497 if (MONO_TYPE_ISSTRUCT ((sig
->ret
))) {
498 /* The address of the return value is passed in %o0 */
499 add_general (&gr
, &stack_size
, &cinfo
->ret
, FALSE
);
500 cinfo
->ret
.reg
+= sparc_i0
;
501 /* FIXME: Pass this after this as on other platforms */
508 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, FALSE
);
510 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (n
== 0)) {
513 /* Emit the signature cookie just before the implicit arguments */
514 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, FALSE
);
517 for (i
= 0; i
< sig
->param_count
; ++i
) {
518 ArgInfo
*ainfo
= &cinfo
->args
[sig
->hasthis
+ i
];
521 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
524 /* Emit the signature cookie just before the implicit arguments */
525 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, FALSE
);
528 DEBUG(printf("param %d: ", i
));
529 if (sig
->params
[i
]->byref
) {
530 DEBUG(printf("byref\n"));
532 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
535 ptype
= mini_get_underlying_type (sig
->params
[i
]);
536 switch (ptype
->type
) {
537 case MONO_TYPE_BOOLEAN
:
540 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
541 /* the value is in the ls byte */
542 ainfo
->offset
+= sizeof (target_mgreg_t
) - 1;
547 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
548 /* the value is in the ls word */
549 ainfo
->offset
+= sizeof (target_mgreg_t
) - 2;
553 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
554 /* the value is in the ls dword */
555 ainfo
->offset
+= sizeof (target_mgreg_t
) - 4;
560 case MONO_TYPE_FNPTR
:
561 case MONO_TYPE_CLASS
:
562 case MONO_TYPE_OBJECT
:
563 case MONO_TYPE_STRING
:
564 case MONO_TYPE_SZARRAY
:
565 case MONO_TYPE_ARRAY
:
566 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
568 case MONO_TYPE_GENERICINST
:
569 if (!mono_type_generic_inst_is_valuetype (ptype
)) {
570 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
574 case MONO_TYPE_VALUETYPE
:
579 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
581 case MONO_TYPE_TYPEDBYREF
:
582 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
587 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
589 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
594 add_float (&fr
, &stack_size
, ainfo
, TRUE
);
597 /* single precision values are passed in integer registers */
598 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
603 add_float (&fr
, &stack_size
, ainfo
, FALSE
);
606 /* double precision values are passed in a pair of registers */
607 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
611 g_assert_not_reached ();
615 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
> 0) && (sig
->sentinelpos
== sig
->param_count
)) {
618 /* Emit the signature cookie just before the implicit arguments */
619 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, FALSE
);
623 ret_type
= mini_get_underlying_type (sig
->ret
);
624 switch (ret_type
->type
) {
625 case MONO_TYPE_BOOLEAN
:
636 case MONO_TYPE_FNPTR
:
637 case MONO_TYPE_CLASS
:
638 case MONO_TYPE_OBJECT
:
639 case MONO_TYPE_SZARRAY
:
640 case MONO_TYPE_ARRAY
:
641 case MONO_TYPE_STRING
:
642 cinfo
->ret
.storage
= ArgInIReg
;
643 cinfo
->ret
.reg
= sparc_i0
;
650 cinfo
->ret
.storage
= ArgInIReg
;
651 cinfo
->ret
.reg
= sparc_i0
;
655 cinfo
->ret
.storage
= ArgInIRegPair
;
656 cinfo
->ret
.reg
= sparc_i0
;
663 cinfo
->ret
.storage
= ArgInFReg
;
664 cinfo
->ret
.reg
= sparc_f0
;
666 case MONO_TYPE_GENERICINST
:
667 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
668 cinfo
->ret
.storage
= ArgInIReg
;
669 cinfo
->ret
.reg
= sparc_i0
;
675 case MONO_TYPE_VALUETYPE
:
684 cinfo
->ret
.storage
= ArgOnStack
;
686 case MONO_TYPE_TYPEDBYREF
:
689 /* Same as a valuetype with size 24 */
696 cinfo
->ret
.storage
= ArgOnStack
;
701 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
704 cinfo
->stack_usage
= stack_size
;
705 cinfo
->reg_usage
= gr
;
710 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
716 * FIXME: If an argument is allocated to a register, then load it from the
717 * stack in the prolog.
720 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
721 MonoInst
*ins
= cfg
->varinfo
[i
];
722 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
725 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
728 /* FIXME: Make arguments on stack allocateable to registers */
729 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
== OP_REGVAR
) || (ins
->opcode
== OP_ARG
))
732 if (mono_is_regsize_var (ins
->inst_vtype
)) {
733 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
734 g_assert (i
== vmv
->idx
);
736 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
744 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
748 MonoMethodSignature
*sig
;
751 sig
= mono_method_signature_internal (cfg
->method
);
753 cinfo
= get_call_info (cfg
, sig
, FALSE
);
755 /* Use unused input registers */
756 for (i
= cinfo
->reg_usage
; i
< 6; ++i
)
757 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (sparc_i0
+ i
));
759 /* Use %l0..%l6 as global registers */
760 for (i
= sparc_l0
; i
< sparc_l7
; ++i
)
761 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (i
));
769 * mono_arch_regalloc_cost:
771 * Return the cost, in number of memory references, of the action of
772 * allocating the variable VMV into a register during global register
776 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
782 * Set var information according to the calling convention. sparc version.
783 * The locals var stuff should most likely be split in another method.
787 mono_arch_allocate_vars (MonoCompile
*cfg
)
789 MonoMethodSignature
*sig
;
790 MonoMethodHeader
*header
;
792 int i
, offset
, size
, align
, curinst
;
795 header
= cfg
->header
;
797 sig
= mono_method_signature_internal (cfg
->method
);
799 cinfo
= get_call_info (cfg
, sig
, FALSE
);
801 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
802 switch (cinfo
->ret
.storage
) {
805 cfg
->ret
->opcode
= OP_REGVAR
;
806 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
808 case ArgInIRegPair
: {
809 MonoType
*t
= mini_get_underlying_type (sig
->ret
);
810 if (((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
811 MonoInst
*low
= get_vreg_to_inst (cfg
, MONO_LVREG_LS (cfg
->ret
->dreg
));
812 MonoInst
*high
= get_vreg_to_inst (cfg
, MONO_LVREG_MS (cfg
->ret
->dreg
));
814 low
->opcode
= OP_REGVAR
;
815 low
->dreg
= cinfo
->ret
.reg
+ 1;
816 high
->opcode
= OP_REGVAR
;
817 high
->dreg
= cinfo
->ret
.reg
;
819 cfg
->ret
->opcode
= OP_REGVAR
;
820 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
825 g_assert_not_reached ();
828 cfg
->vret_addr
->opcode
= OP_REGOFFSET
;
829 cfg
->vret_addr
->inst_basereg
= sparc_fp
;
830 cfg
->vret_addr
->inst_offset
= 64;
836 cfg
->ret
->dreg
= cfg
->ret
->inst_c0
;
840 * We use the ABI calling conventions for managed code as well.
841 * Exception: valuetypes are never returned in registers on V9.
842 * FIXME: Use something more optimized.
845 /* Locals are allocated backwards from %fp */
846 cfg
->frame_reg
= sparc_fp
;
850 * Reserve a stack slot for holding information used during exception
853 if (header
->num_clauses
)
854 offset
+= sizeof (target_mgreg_t
) * 2;
856 if (cfg
->method
->save_lmf
) {
857 offset
+= sizeof (MonoLMF
);
858 cfg
->arch
.lmf_offset
= offset
;
861 curinst
= cfg
->locals_start
;
862 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
863 inst
= cfg
->varinfo
[i
];
865 if ((inst
->opcode
== OP_REGVAR
) || (inst
->opcode
== OP_REGOFFSET
)) {
866 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
870 if (inst
->flags
& MONO_INST_IS_DEAD
)
873 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
874 * pinvoke wrappers when they call functions returning structure */
875 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (inst
->inst_vtype
) && inst
->inst_vtype
->type
!= MONO_TYPE_TYPEDBYREF
)
876 size
= mono_class_native_size (mono_class_from_mono_type_internal (inst
->inst_vtype
), &align
);
878 size
= mini_type_stack_size (inst
->inst_vtype
, &align
);
881 * This is needed since structures containing doubles must be doubleword
883 * FIXME: Do this only if needed.
885 if (MONO_TYPE_ISSTRUCT (inst
->inst_vtype
))
889 * variables are accessed as negative offsets from %fp, so increase
890 * the offset before assigning it to a variable
895 offset
&= ~(align
- 1);
896 inst
->opcode
= OP_REGOFFSET
;
897 inst
->inst_basereg
= sparc_fp
;
898 inst
->inst_offset
= STACK_BIAS
+ -offset
;
900 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
903 if (sig
->call_convention
== MONO_CALL_VARARG
) {
904 cfg
->sig_cookie
= cinfo
->sig_cookie
.offset
+ ARGS_OFFSET
;
907 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
908 inst
= cfg
->args
[i
];
909 if (inst
->opcode
!= OP_REGVAR
) {
910 ArgInfo
*ainfo
= &cinfo
->args
[i
];
911 gboolean inreg
= TRUE
;
915 if (sig
->hasthis
&& (i
== 0))
916 arg_type
= mono_get_object_type ();
918 arg_type
= sig
->params
[i
- sig
->hasthis
];
921 if (!arg_type
->byref
&& ((arg_type
->type
== MONO_TYPE_R4
)
922 || (arg_type
->type
== MONO_TYPE_R8
)))
924 * Since float arguments are passed in integer registers, we need to
925 * save them to the stack in the prolog.
930 /* FIXME: Allocate volatile arguments to registers */
931 /* FIXME: This makes the argument holding a vtype address into volatile */
932 if (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))
935 if (MONO_TYPE_ISSTRUCT (arg_type
))
936 /* FIXME: this isn't needed */
939 inst
->opcode
= OP_REGOFFSET
;
942 storage
= ArgOnStack
;
944 storage
= ainfo
->storage
;
948 inst
->opcode
= OP_REGVAR
;
949 inst
->dreg
= sparc_i0
+ ainfo
->reg
;
952 if (inst
->type
== STACK_I8
) {
953 MonoInst
*low
= get_vreg_to_inst (cfg
, MONO_LVREG_LS (inst
->dreg
));
954 MonoInst
*high
= get_vreg_to_inst (cfg
, MONO_LVREG_MS (inst
->dreg
));
956 low
->opcode
= OP_REGVAR
;
957 low
->dreg
= sparc_i0
+ ainfo
->reg
+ 1;
958 high
->opcode
= OP_REGVAR
;
959 high
->dreg
= sparc_i0
+ ainfo
->reg
;
961 inst
->opcode
= OP_REGVAR
;
962 inst
->dreg
= sparc_i0
+ ainfo
->reg
;
967 * Since float regs are volatile, we save the arguments to
968 * the stack in the prolog.
969 * FIXME: Avoid this if the method contains no calls.
973 case ArgInSplitRegStack
:
974 /* Split arguments are saved to the stack in the prolog */
975 inst
->opcode
= OP_REGOFFSET
;
976 /* in parent frame */
977 inst
->inst_basereg
= sparc_fp
;
978 inst
->inst_offset
= ainfo
->offset
+ ARGS_OFFSET
;
980 if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
982 * It is very hard to load doubles from non-doubleword aligned
983 * memory locations. So if the offset is misaligned, we copy the
984 * argument to a stack location in the prolog.
986 if ((inst
->inst_offset
- STACK_BIAS
) % 8) {
987 inst
->inst_basereg
= sparc_fp
;
991 offset
&= ~(align
- 1);
992 inst
->inst_offset
= STACK_BIAS
+ -offset
;
1001 if (MONO_TYPE_ISSTRUCT (arg_type
)) {
1002 /* Add a level of indirection */
1004 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1005 * are destructively modified in a lot of places in inssel.brg.
1008 MONO_INST_NEW (cfg
, indir
, 0);
1010 inst
->opcode
= OP_VTARG_ADDR
;
1011 inst
->inst_left
= indir
;
1017 * spillvars are stored between the normal locals and the storage reserved
1021 cfg
->stack_offset
= offset
;
1027 mono_arch_create_vars (MonoCompile
*cfg
)
1029 MonoMethodSignature
*sig
;
1031 sig
= mono_method_signature_internal (cfg
->method
);
1033 if (MONO_TYPE_ISSTRUCT ((sig
->ret
))) {
1034 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_ARG
);
1035 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1036 printf ("vret_addr = ");
1037 mono_print_ins (cfg
->vret_addr
);
1041 if (!sig
->ret
->byref
&& (sig
->ret
->type
== MONO_TYPE_I8
|| sig
->ret
->type
== MONO_TYPE_U8
)) {
1042 MonoInst
*low
= get_vreg_to_inst (cfg
, MONO_LVREG_LS (cfg
->ret
->dreg
));
1043 MonoInst
*high
= get_vreg_to_inst (cfg
, MONO_LVREG_MS (cfg
->ret
->dreg
));
1045 low
->flags
|= MONO_INST_VOLATILE
;
1046 high
->flags
|= MONO_INST_VOLATILE
;
1049 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1050 cfg
->arch
.float_spill_slot
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.double_class
), OP_ARG
);
1051 ((MonoInst
*)cfg
->arch
.float_spill_slot
)->flags
|= MONO_INST_VOLATILE
;
1055 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, guint32 sreg
)
1059 MONO_INST_NEW (cfg
, arg
, 0);
1065 arg
->opcode
= OP_MOVE
;
1066 arg
->dreg
= mono_alloc_ireg (cfg
);
1068 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, FALSE
);
1071 arg
->opcode
= OP_FMOVE
;
1072 arg
->dreg
= mono_alloc_freg (cfg
);
1074 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, TRUE
);
1077 g_assert_not_reached ();
1080 MONO_ADD_INS (cfg
->cbb
, arg
);
1084 add_outarg_load (MonoCompile
*cfg
, MonoCallInst
*call
, int opcode
, int basereg
, int offset
, int reg
)
1086 int dreg
= mono_alloc_ireg (cfg
);
1088 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, sparc_sp
, offset
);
1090 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, reg
, FALSE
);
1094 emit_pass_long (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1096 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1098 switch (ainfo
->storage
) {
1100 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
+ 1, MONO_LVREG_LS (in
->dreg
));
1101 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, MONO_LVREG_MS (in
->dreg
));
1103 case ArgOnStackPair
:
1104 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
, MONO_LVREG_MS (in
->dreg
));
1105 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
+ 4, MONO_LVREG_LS (in
->dreg
));
1107 case ArgInSplitRegStack
:
1108 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, MONO_LVREG_MS (in
->dreg
));
1109 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
+ 4, MONO_LVREG_LS (in
->dreg
));
1112 g_assert_not_reached ();
1117 emit_pass_double (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1119 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1121 switch (ainfo
->storage
) {
1123 /* floating-point <-> integer transfer must go through memory */
1124 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1126 /* Load into a register pair */
1127 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1128 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
+ 4, sparc_o0
+ ainfo
->reg
+ 1);
1130 case ArgOnStackPair
:
1131 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1133 case ArgInSplitRegStack
:
1134 /* floating-point <-> integer transfer must go through memory */
1135 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1136 /* Load most significant word into register */
1137 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1140 g_assert_not_reached ();
1145 emit_pass_float (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1147 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1149 switch (ainfo
->storage
) {
1151 /* floating-point <-> integer transfer must go through memory */
1152 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1153 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1156 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1159 g_assert_not_reached ();
1164 emit_pass_other (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
);
1167 emit_pass_vtype (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
, gboolean pinvoke
)
1170 guint32 align
, offset
, pad
, size
;
1172 if (arg_type
->type
== MONO_TYPE_TYPEDBYREF
) {
1173 size
= MONO_ABI_SIZEOF (MonoTypedRef
);
1174 align
= sizeof (target_mgreg_t
);
1177 size
= mono_type_native_stack_size (m_class_get_byval_arg (in
->klass
), &align
);
1180 * Other backends use mono_type_stack_size (), but that
1181 * aligns the size to 8, which is larger than the size of
1182 * the source, leading to reads of invalid memory if the
1183 * source is at the end of address space.
1185 size
= mono_class_value_size (in
->klass
, &align
);
1188 /* The first 6 argument locations are reserved */
1189 if (cinfo
->stack_usage
< 6 * sizeof (target_mgreg_t
))
1190 cinfo
->stack_usage
= 6 * sizeof (target_mgreg_t
);
1192 offset
= ALIGN_TO ((ARGS_OFFSET
- STACK_BIAS
) + cinfo
->stack_usage
, align
);
1193 pad
= offset
- ((ARGS_OFFSET
- STACK_BIAS
) + cinfo
->stack_usage
);
1195 cinfo
->stack_usage
+= size
;
1196 cinfo
->stack_usage
+= pad
;
1199 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1200 * use the normal OUTARG opcodes to pass the address of the location to
1204 MONO_INST_NEW (cfg
, arg
, OP_OUTARG_VT
);
1205 arg
->sreg1
= in
->dreg
;
1206 arg
->klass
= in
->klass
;
1207 arg
->backend
.size
= size
;
1208 arg
->inst_p0
= call
;
1209 arg
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1210 memcpy (arg
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1211 ((ArgInfo
*)(arg
->inst_p1
))->offset
= STACK_BIAS
+ offset
;
1212 MONO_ADD_INS (cfg
->cbb
, arg
);
1214 MONO_INST_NEW (cfg
, arg
, OP_ADD_IMM
);
1215 arg
->dreg
= mono_alloc_preg (cfg
);
1216 arg
->sreg1
= sparc_sp
;
1217 arg
->inst_imm
= STACK_BIAS
+ offset
;
1218 MONO_ADD_INS (cfg
->cbb
, arg
);
1220 emit_pass_other (cfg
, call
, ainfo
, NULL
, arg
);
1225 emit_pass_other (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
)
1227 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1230 switch (ainfo
->storage
) {
1232 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, in
->dreg
);
1239 opcode
= OP_STOREI1_MEMBASE_REG
;
1240 else if (offset
& 0x2)
1241 opcode
= OP_STOREI2_MEMBASE_REG
;
1243 opcode
= OP_STOREI4_MEMBASE_REG
;
1244 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, opcode
, sparc_sp
, offset
, in
->dreg
);
1248 g_assert_not_reached ();
1253 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1255 MonoMethodSignature
*tmp_sig
;
1258 * mono_ArgIterator_Setup assumes the signature cookie is
1259 * passed first and all the arguments which were before it are
1260 * passed on the stack after the signature. So compensate by
1261 * passing a different signature.
1263 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
1264 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
1265 tmp_sig
->sentinelpos
= 0;
1266 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
1268 /* FIXME: Add support for signature tokens to AOT */
1269 cfg
->disable_aot
= TRUE
;
1270 /* We allways pass the signature on the stack for simplicity */
1271 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sparc_sp
, ARGS_OFFSET
+ cinfo
->sig_cookie
.offset
, tmp_sig
);
1275 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1278 MonoMethodSignature
*sig
;
1282 guint32 extra_space
= 0;
1284 sig
= call
->signature
;
1285 n
= sig
->param_count
+ sig
->hasthis
;
1287 cinfo
= get_call_info (cfg
, sig
, sig
->pinvoke
);
1289 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1290 /* Set the 'struct/union return pointer' location on the stack */
1291 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, 64, call
->vret_var
->dreg
);
1294 for (i
= 0; i
< n
; ++i
) {
1297 ainfo
= cinfo
->args
+ i
;
1299 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1300 /* Emit the signature cookie just before the first implicit argument */
1301 emit_sig_cookie (cfg
, call
, cinfo
);
1304 in
= call
->args
[i
];
1306 if (sig
->hasthis
&& (i
== 0))
1307 arg_type
= mono_get_object_type ();
1309 arg_type
= sig
->params
[i
- sig
->hasthis
];
1311 arg_type
= mini_get_underlying_type (arg_type
);
1312 if ((i
>= sig
->hasthis
) && (MONO_TYPE_ISSTRUCT(sig
->params
[i
- sig
->hasthis
])))
1313 emit_pass_vtype (cfg
, call
, cinfo
, ainfo
, arg_type
, in
, sig
->pinvoke
);
1314 else if (!arg_type
->byref
&& ((arg_type
->type
== MONO_TYPE_I8
) || (arg_type
->type
== MONO_TYPE_U8
)))
1315 emit_pass_long (cfg
, call
, ainfo
, in
);
1316 else if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
))
1317 emit_pass_double (cfg
, call
, ainfo
, in
);
1318 else if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R4
))
1319 emit_pass_float (cfg
, call
, ainfo
, in
);
1321 emit_pass_other (cfg
, call
, ainfo
, arg_type
, in
);
1324 /* Handle the case where there are no implicit arguments */
1325 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
)) {
1326 emit_sig_cookie (cfg
, call
, cinfo
);
1329 call
->stack_usage
= cinfo
->stack_usage
+ extra_space
;
1335 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1337 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
1338 int size
= ins
->backend
.size
;
1340 mini_emit_memcpy (cfg
, sparc_sp
, ainfo
->offset
, src
->dreg
, 0, size
, TARGET_SIZEOF_VOID_P
);
1344 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1346 CallInfo
*cinfo
= get_call_info (cfg
, mono_method_signature_internal (method
), FALSE
);
1347 MonoType
*ret
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
1349 switch (cinfo
->ret
.storage
) {
1351 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1354 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1355 MONO_EMIT_NEW_UNALU (cfg
, OP_LMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1357 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, MONO_LVREG_MS (cfg
->ret
->dreg
), MONO_LVREG_MS (val
->dreg
));
1358 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, MONO_LVREG_LS (cfg
->ret
->dreg
), MONO_LVREG_LS (val
->dreg
));
1362 if (ret
->type
== MONO_TYPE_R4
)
1363 MONO_EMIT_NEW_UNALU (cfg
, OP_SETFRET
, cfg
->ret
->dreg
, val
->dreg
);
1365 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1368 g_assert_not_reached ();
1374 int cond_to_sparc_cond
[][3] = {
1375 {sparc_be
, sparc_be
, sparc_fbe
},
1376 {sparc_bne
, sparc_bne
, 0},
1377 {sparc_ble
, sparc_ble
, sparc_fble
},
1378 {sparc_bge
, sparc_bge
, sparc_fbge
},
1379 {sparc_bl
, sparc_bl
, sparc_fbl
},
1380 {sparc_bg
, sparc_bg
, sparc_fbg
},
1381 {sparc_bleu
, sparc_bleu
, 0},
1382 {sparc_beu
, sparc_beu
, 0},
1383 {sparc_blu
, sparc_blu
, sparc_fbl
},
1384 {sparc_bgu
, sparc_bgu
, sparc_fbg
}
1387 /* Map opcode to the sparc condition codes */
1389 opcode_to_sparc_cond (int opcode
)
1395 case OP_COND_EXC_OV
:
1396 case OP_COND_EXC_IOV
:
1399 case OP_COND_EXC_IC
:
1401 case OP_COND_EXC_NO
:
1402 case OP_COND_EXC_NC
:
1405 rel
= mono_opcode_to_cond (opcode
);
1406 t
= mono_opcode_to_type (opcode
, -1);
1408 return cond_to_sparc_cond
[rel
][t
];
1415 #define COMPUTE_DISP(ins) \
1416 if (ins->inst_true_bb->native_offset) \
1417 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1420 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1424 #define DEFAULT_ICC sparc_xcc_short
1426 #define DEFAULT_ICC sparc_icc_short
1430 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1434 COMPUTE_DISP(ins); \
1435 predict = (disp != 0) ? 1 : 0; \
1436 g_assert (sparc_is_imm19 (disp)); \
1437 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1438 if (filldelay) sparc_nop (code); \
1440 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1441 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1445 COMPUTE_DISP(ins); \
1446 predict = (disp != 0) ? 1 : 0; \
1447 g_assert (sparc_is_imm19 (disp)); \
1448 sparc_fbranch (code, (annul), cond, disp); \
1449 if (filldelay) sparc_nop (code); \
1452 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1453 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1456 COMPUTE_DISP(ins); \
1457 g_assert (sparc_is_imm22 (disp)); \
1458 sparc_ ## bop (code, (annul), cond, disp); \
1459 if (filldelay) sparc_nop (code); \
1461 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1462 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1465 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1469 COMPUTE_DISP(ins); \
1470 predict = (disp != 0) ? 1 : 0; \
1471 g_assert (sparc_is_imm19 (disp)); \
1472 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1473 if (filldelay) sparc_nop (code); \
1476 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1479 COMPUTE_DISP(ins); \
1480 g_assert (sparc_is_imm22 (disp)); \
1481 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1482 if (filldelay) sparc_nop (code); \
1485 /* emit an exception if condition is fail */
1487 * We put the exception throwing code out-of-line, at the end of the method
1489 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1490 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1491 MONO_PATCH_INFO_EXC, sexc_name); \
1492 if (mono_hwcap_sparc_is_v9 && ((icc) != sparc_icc_short)) { \
1493 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1496 sparc_branch (code, 0, cond, 0); \
1498 if (filldelay) sparc_nop (code); \
1501 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1503 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1504 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1505 MONO_PATCH_INFO_EXC, sexc_name); \
1506 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1510 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1511 if (sparc_is_imm13 ((ins)->inst_imm)) \
1512 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1514 sparc_set (code, ins->inst_imm, sparc_o7); \
1515 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1519 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1520 if (sparc_is_imm13 (ins->inst_offset)) \
1521 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1523 sparc_set (code, ins->inst_offset, sparc_o7); \
1524 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1529 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1531 if (ins->inst_imm == 0) \
1534 sparc_set (code, ins->inst_imm, sparc_o7); \
1537 if (!sparc_is_imm13 (ins->inst_offset)) { \
1538 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1539 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1542 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1545 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1546 if (!sparc_is_imm13 (ins->inst_offset)) { \
1547 sparc_set (code, ins->inst_offset, sparc_o7); \
1548 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1551 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1554 #define EMIT_CALL() do { \
1556 sparc_set_template (code, sparc_o7); \
1557 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1560 sparc_call_simple (code, 0); \
1566 * A call template is 7 instructions long, so we want to avoid it if possible.
1569 emit_call (MonoCompile
*cfg
, guint32
*code
, guint32 patch_type
, gconstpointer data
)
1574 /* FIXME: This only works if the target method is already compiled */
1575 if (0 && v64
&& !cfg
->compile_aot
) {
1576 MonoJumpInfo patch_info
;
1578 patch_info
.type
= patch_type
;
1579 patch_info
.data
.target
= data
;
1581 target
= mono_resolve_patch_target (cfg
->method
, cfg
->domain
, NULL
, &patch_info
, FALSE
, error
);
1582 mono_error_raise_exception_deprecated (error
); /* FIXME: don't raise here */
1584 /* FIXME: Add optimizations if the target is close enough */
1585 sparc_set (code
, target
, sparc_o7
);
1586 sparc_jmpl (code
, sparc_o7
, sparc_g0
, sparc_o7
);
1590 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, patch_type
, data
);
1598 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1603 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1605 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1608 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1609 switch (ins
->opcode
) {
1611 /* remove unnecessary multiplication with 1 */
1612 if (ins
->inst_imm
== 1) {
1613 if (ins
->dreg
!= ins
->sreg1
) {
1614 ins
->opcode
= OP_MOVE
;
1616 MONO_DELETE_INS (bb
, ins
);
1622 case OP_LOAD_MEMBASE
:
1623 case OP_LOADI4_MEMBASE
:
1625 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1626 * OP_LOAD_MEMBASE offset(basereg), reg
1628 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
1629 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
1630 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1631 ins
->inst_offset
== last_ins
->inst_offset
) {
1632 if (ins
->dreg
== last_ins
->sreg1
) {
1633 MONO_DELETE_INS (bb
, ins
);
1636 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1637 ins
->opcode
= OP_MOVE
;
1638 ins
->sreg1
= last_ins
->sreg1
;
1642 * Note: reg1 must be different from the basereg in the second load
1643 * OP_LOAD_MEMBASE offset(basereg), reg1
1644 * OP_LOAD_MEMBASE offset(basereg), reg2
1646 * OP_LOAD_MEMBASE offset(basereg), reg1
1647 * OP_MOVE reg1, reg2
1649 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
1650 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
1651 ins
->inst_basereg
!= last_ins
->dreg
&&
1652 ins
->inst_basereg
== last_ins
->inst_basereg
&&
1653 ins
->inst_offset
== last_ins
->inst_offset
) {
1655 if (ins
->dreg
== last_ins
->dreg
) {
1656 MONO_DELETE_INS (bb
, ins
);
1659 ins
->opcode
= OP_MOVE
;
1660 ins
->sreg1
= last_ins
->dreg
;
1663 //g_assert_not_reached ();
1667 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1668 * OP_LOAD_MEMBASE offset(basereg), reg
1670 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1671 * OP_ICONST reg, imm
1673 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
1674 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
1675 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1676 ins
->inst_offset
== last_ins
->inst_offset
) {
1677 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1678 ins
->opcode
= OP_ICONST
;
1679 ins
->inst_c0
= last_ins
->inst_imm
;
1680 g_assert_not_reached (); // check this rule
1685 case OP_LOADI1_MEMBASE
:
1686 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
1687 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1688 ins
->inst_offset
== last_ins
->inst_offset
) {
1689 if (ins
->dreg
== last_ins
->sreg1
) {
1690 MONO_DELETE_INS (bb
, ins
);
1693 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1694 ins
->opcode
= OP_MOVE
;
1695 ins
->sreg1
= last_ins
->sreg1
;
1699 case OP_LOADI2_MEMBASE
:
1700 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
1701 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1702 ins
->inst_offset
== last_ins
->inst_offset
) {
1703 if (ins
->dreg
== last_ins
->sreg1
) {
1704 MONO_DELETE_INS (bb
, ins
);
1707 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1708 ins
->opcode
= OP_MOVE
;
1709 ins
->sreg1
= last_ins
->sreg1
;
1713 case OP_STOREI4_MEMBASE_IMM
:
1714 /* Convert pairs of 0 stores to a dword 0 store */
1715 /* Used when initializing temporaries */
1716 /* We know sparc_fp is dword aligned */
1717 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
) &&
1718 (ins
->inst_destbasereg
== last_ins
->inst_destbasereg
) &&
1719 (ins
->inst_destbasereg
== sparc_fp
) &&
1720 (ins
->inst_offset
< 0) &&
1721 ((ins
->inst_offset
% 8) == 0) &&
1722 ((ins
->inst_offset
== last_ins
->inst_offset
- 4)) &&
1723 (ins
->inst_imm
== 0) &&
1724 (last_ins
->inst_imm
== 0)) {
1725 if (mono_hwcap_sparc_is_v9
) {
1726 last_ins
->opcode
= OP_STOREI8_MEMBASE_IMM
;
1727 last_ins
->inst_offset
= ins
->inst_offset
;
1728 MONO_DELETE_INS (bb
, ins
);
1739 case OP_COND_EXC_EQ
:
1740 case OP_COND_EXC_GE
:
1741 case OP_COND_EXC_GT
:
1742 case OP_COND_EXC_LE
:
1743 case OP_COND_EXC_LT
:
1744 case OP_COND_EXC_NE_UN
:
1746 * Convert compare with zero+branch to BRcc
1749 * This only works in 64 bit mode, since it examines all 64
1750 * bits of the register.
1751 * Only do this if the method is small since BPr only has a 16bit
1754 if (v64
&& (cfg
->header
->code_size
< 10000) && last_ins
&&
1755 (last_ins
->opcode
== OP_COMPARE_IMM
) &&
1756 (last_ins
->inst_imm
== 0)) {
1757 switch (ins
->opcode
) {
1759 ins
->opcode
= OP_SPARC_BRZ
;
1762 ins
->opcode
= OP_SPARC_BRNZ
;
1765 ins
->opcode
= OP_SPARC_BRLZ
;
1768 ins
->opcode
= OP_SPARC_BRGZ
;
1771 ins
->opcode
= OP_SPARC_BRGEZ
;
1774 ins
->opcode
= OP_SPARC_BRLEZ
;
1776 case OP_COND_EXC_EQ
:
1777 ins
->opcode
= OP_SPARC_COND_EXC_EQZ
;
1779 case OP_COND_EXC_GE
:
1780 ins
->opcode
= OP_SPARC_COND_EXC_GEZ
;
1782 case OP_COND_EXC_GT
:
1783 ins
->opcode
= OP_SPARC_COND_EXC_GTZ
;
1785 case OP_COND_EXC_LE
:
1786 ins
->opcode
= OP_SPARC_COND_EXC_LEZ
;
1788 case OP_COND_EXC_LT
:
1789 ins
->opcode
= OP_SPARC_COND_EXC_LTZ
;
1791 case OP_COND_EXC_NE_UN
:
1792 ins
->opcode
= OP_SPARC_COND_EXC_NEZ
;
1795 g_assert_not_reached ();
1797 ins
->sreg1
= last_ins
->sreg1
;
1799 MONO_DELETE_INS (bb
, ins
);
1807 if (ins
->dreg
== ins
->sreg1
) {
1808 MONO_DELETE_INS (bb
, ins
);
1812 * OP_MOVE sreg, dreg
1813 * OP_MOVE dreg, sreg
1815 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
1816 ins
->sreg1
== last_ins
->dreg
&&
1817 ins
->dreg
== last_ins
->sreg1
) {
1818 MONO_DELETE_INS (bb
, ins
);
1826 bb
->last_ins
= last_ins
;
1830 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*ins
)
1832 switch (ins
->opcode
) {
1834 MONO_EMIT_NEW_BIALU (cfg
, OP_SUBCC
, MONO_LVREG_LS (ins
->dreg
), 0, MONO_LVREG_LS (ins
->sreg1
));
1835 MONO_EMIT_NEW_BIALU (cfg
, OP_SBB
, MONO_LVREG_MS (ins
->dreg
), 0, MONO_LVREG_MS (ins
->sreg1
));
1844 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1848 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
1851 sparc_patch (guint32
*code
, const gpointer target
)
1854 guint32 ins
= *code
;
1855 guint32 op
= ins
>> 30;
1856 guint32 op2
= (ins
>> 22) & 0x7;
1857 guint32 rd
= (ins
>> 25) & 0x1f;
1858 guint8
* target8
= (guint8
*)target
;
1859 gint64 disp
= (target8
- (guint8
*)code
) >> 2;
1862 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1864 if ((op
== 0) && (op2
== 2)) {
1865 if (!sparc_is_imm22 (disp
))
1868 *code
= ((ins
>> 22) << 22) | (disp
& 0x3fffff);
1870 else if ((op
== 0) && (op2
== 1)) {
1871 if (!sparc_is_imm19 (disp
))
1874 *code
= ((ins
>> 19) << 19) | (disp
& 0x7ffff);
1876 else if ((op
== 0) && (op2
== 3)) {
1877 if (!sparc_is_imm16 (disp
))
1880 *code
&= ~(0x180000 | 0x3fff);
1881 *code
|= ((disp
<< 21) & (0x180000)) | (disp
& 0x3fff);
1883 else if ((op
== 0) && (op2
== 6)) {
1884 if (!sparc_is_imm22 (disp
))
1887 *code
= ((ins
>> 22) << 22) | (disp
& 0x3fffff);
1889 else if ((op
== 0) && (op2
== 4)) {
1890 guint32 ins2
= code
[1];
1892 if (((ins2
>> 30) == 2) && (((ins2
>> 19) & 0x3f) == 2)) {
1893 /* sethi followed by or */
1895 sparc_set (p
, target8
, rd
);
1896 while (p
<= (code
+ 1))
1899 else if (ins2
== 0x01000000) {
1900 /* sethi followed by nop */
1902 sparc_set (p
, target8
, rd
);
1903 while (p
<= (code
+ 1))
1906 else if ((sparc_inst_op (ins2
) == 3) && (sparc_inst_imm (ins2
))) {
1907 /* sethi followed by load/store */
1909 guint32 t
= (guint32
)target8
;
1910 *code
&= ~(0x3fffff);
1912 *(code
+ 1) &= ~(0x3ff);
1913 *(code
+ 1) |= (t
& 0x3ff);
1917 (sparc_inst_rd (ins
) == sparc_g1
) &&
1918 (sparc_inst_op (c
[1]) == 0) && (sparc_inst_op2 (c
[1]) == 4) &&
1919 (sparc_inst_op (c
[2]) == 2) && (sparc_inst_op3 (c
[2]) == 2) &&
1920 (sparc_inst_op (c
[3]) == 2) && (sparc_inst_op3 (c
[3]) == 2))
1924 reg
= sparc_inst_rd (c
[1]);
1925 sparc_set (p
, target8
, reg
);
1929 else if ((sparc_inst_op (ins2
) == 2) && (sparc_inst_op3 (ins2
) == 0x38) &&
1930 (sparc_inst_imm (ins2
))) {
1931 /* sethi followed by jmpl */
1933 guint32 t
= (guint32
)target8
;
1934 *code
&= ~(0x3fffff);
1936 *(code
+ 1) &= ~(0x3ff);
1937 *(code
+ 1) |= (t
& 0x3ff);
1943 else if (op
== 01) {
1944 gint64 disp
= (target8
- (guint8
*)code
) >> 2;
1946 if (!sparc_is_imm30 (disp
))
1948 sparc_call_simple (code
, target8
- (guint8
*)code
);
1950 else if ((op
== 2) && (sparc_inst_op3 (ins
) == 0x2) && sparc_inst_imm (ins
)) {
1952 g_assert (sparc_is_imm13 (target8
));
1954 *code
|= (guint32
)target8
;
1956 else if ((sparc_inst_op (ins
) == 2) && (sparc_inst_op3 (ins
) == 0x7)) {
1957 /* sparc_set case 5. */
1961 reg
= sparc_inst_rd (c
[3]);
1962 sparc_set (p
, target
, reg
);
1969 // g_print ("patched with 0x%08x\n", ins);
1973 * mono_sparc_emit_save_lmf:
1975 * Emit the code neccesary to push a new entry onto the lmf stack. Used by
1976 * trampolines as well.
1979 mono_sparc_emit_save_lmf (guint32
*code
, guint32 lmf_offset
)
1982 sparc_sti_imm (code
, sparc_o0
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
1983 /* Save previous_lmf */
1984 sparc_ldi (code
, sparc_o0
, sparc_g0
, sparc_o7
);
1985 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
1987 sparc_add_imm (code
, FALSE
, sparc_fp
, lmf_offset
, sparc_o7
);
1988 sparc_sti (code
, sparc_o7
, sparc_o0
, sparc_g0
);
1994 mono_sparc_emit_restore_lmf (guint32
*code
, guint32 lmf_offset
)
1996 /* Load previous_lmf */
1997 sparc_ldi_imm (code
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, previous_lmf
), sparc_l0
);
1999 sparc_ldi_imm (code
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, lmf_addr
), sparc_l1
);
2000 /* *(lmf) = previous_lmf */
2001 sparc_sti (code
, sparc_l0
, sparc_l1
, sparc_g0
);
2006 emit_save_sp_to_lmf (MonoCompile
*cfg
, guint32
*code
)
2009 * Since register windows are saved to the current value of %sp, we need to
2010 * set the sp field in the lmf before the call, not in the prolog.
2012 if (cfg
->method
->save_lmf
) {
2013 gint32 lmf_offset
= MONO_SPARC_STACK_BIAS
- cfg
->arch
.lmf_offset
;
2016 sparc_sti_imm (code
, sparc_sp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, sp
));
2023 emit_vret_token (MonoInst
*ins
, guint32
*code
)
2025 MonoCallInst
*call
= (MonoCallInst
*)ins
;
2029 * The sparc ABI requires that calls to functions which return a structure
2030 * contain an additional unimpl instruction which is checked by the callee.
2032 if (call
->signature
->pinvoke
&& MONO_TYPE_ISSTRUCT(call
->signature
->ret
)) {
2033 if (call
->signature
->ret
->type
== MONO_TYPE_TYPEDBYREF
)
2034 size
= mini_type_stack_size (call
->signature
->ret
, NULL
);
2036 size
= mono_class_native_size (call
->signature
->ret
->data
.klass
, NULL
);
2037 sparc_unimp (code
, size
& 0xfff);
2044 emit_move_return_value (MonoInst
*ins
, guint32
*code
)
2046 /* Move return value to the target register */
2047 /* FIXME: do more things in the local reg allocator */
2048 switch (ins
->opcode
) {
2050 case OP_VOIDCALL_REG
:
2051 case OP_VOIDCALL_MEMBASE
:
2055 case OP_CALL_MEMBASE
:
2056 g_assert (ins
->dreg
== sparc_o0
);
2060 case OP_LCALL_MEMBASE
:
2062 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2063 * in inssel-long32.brg.
2066 sparc_mov_reg_reg (code
, sparc_o0
, ins
->dreg
);
2068 g_assert (ins
->dreg
== sparc_o1
);
2073 case OP_FCALL_MEMBASE
:
2075 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
) {
2076 sparc_fmovs (code
, sparc_f0
, ins
->dreg
);
2077 sparc_fstod (code
, ins
->dreg
, ins
->dreg
);
2080 sparc_fmovd (code
, sparc_f0
, ins
->dreg
);
2082 sparc_fmovs (code
, sparc_f0
, ins
->dreg
);
2083 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
)
2084 sparc_fstod (code
, ins
->dreg
, ins
->dreg
);
2086 sparc_fmovs (code
, sparc_f1
, ins
->dreg
+ 1);
2091 case OP_VCALL_MEMBASE
:
2094 case OP_VCALL2_MEMBASE
:
2104 * emit_load_volatile_arguments:
2106 * Load volatile arguments from the stack to the original input registers.
2107 * Required before a tailcall.
2110 emit_load_volatile_arguments (MonoCompile
*cfg
, guint32
*code
)
2112 MonoMethod
*method
= cfg
->method
;
2113 MonoMethodSignature
*sig
;
2118 /* FIXME: Generate intermediate code instead */
2120 sig
= mono_method_signature_internal (method
);
2122 cinfo
= get_call_info (cfg
, sig
, FALSE
);
2124 /* This is the opposite of the code in emit_prolog */
2126 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2127 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2128 gint32 stack_offset
;
2131 inst
= cfg
->args
[i
];
2133 if (sig
->hasthis
&& (i
== 0))
2134 arg_type
= mono_get_object_type ();
2136 arg_type
= sig
->params
[i
- sig
->hasthis
];
2138 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
2139 ireg
= sparc_i0
+ ainfo
->reg
;
2141 if (ainfo
->storage
== ArgInSplitRegStack
) {
2142 g_assert (inst
->opcode
== OP_REGOFFSET
);
2144 if (!sparc_is_imm13 (stack_offset
))
2146 sparc_st_imm (code
, inst
->inst_basereg
, stack_offset
, sparc_i5
);
2149 if (!v64
&& !arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
2150 if (ainfo
->storage
== ArgInIRegPair
) {
2151 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
2153 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, ireg
);
2154 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, ireg
+ 1);
2157 if (ainfo
->storage
== ArgInSplitRegStack
) {
2158 if (stack_offset
!= inst
->inst_offset
) {
2159 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, sparc_i5
);
2160 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, sparc_o7
);
2161 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
+ 4);
2166 if (ainfo
->storage
== ArgOnStackPair
) {
2167 if (stack_offset
!= inst
->inst_offset
) {
2168 /* stack_offset is not dword aligned, so we need to make a copy */
2169 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, sparc_o7
);
2170 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
);
2172 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, sparc_o7
);
2173 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
+ 4);
2178 g_assert_not_reached ();
2181 if ((ainfo
->storage
== ArgInIReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2182 /* Argument in register, but need to be saved to stack */
2183 if (!sparc_is_imm13 (stack_offset
))
2185 if ((stack_offset
- ARGS_OFFSET
) & 0x1)
2186 /* FIXME: Is this ldsb or ldub ? */
2187 sparc_ldsb_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2189 if ((stack_offset
- ARGS_OFFSET
) & 0x2)
2190 sparc_ldsh_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2192 if ((stack_offset
- ARGS_OFFSET
) & 0x4)
2193 sparc_ld_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2196 sparc_ldx_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2198 sparc_ld_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2201 else if ((ainfo
->storage
== ArgInIRegPair
) && (inst
->opcode
!= OP_REGVAR
)) {
2202 /* Argument in regpair, but need to be saved to stack */
2203 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
2205 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, ireg
);
2206 sparc_st_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, ireg
+ 1);
2208 else if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2211 else if ((ainfo
->storage
== ArgInDoubleReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2215 if ((ainfo
->storage
== ArgInSplitRegStack
) || (ainfo
->storage
== ArgOnStack
))
2216 if (inst
->opcode
== OP_REGVAR
)
2217 /* FIXME: Load the argument into memory */
2227 * mono_sparc_is_virtual_call:
2229 * Determine whenever the instruction at CODE is a virtual call.
2232 mono_sparc_is_virtual_call (guint32
*code
)
2239 if ((sparc_inst_op (*code
) == 0x2) && (sparc_inst_op3 (*code
) == 0x38)) {
2241 * Register indirect call. If it is a virtual call, then the
2242 * instruction in the delay slot is a special kind of nop.
2245 /* Construct special nop */
2246 sparc_or_imm (p
, FALSE
, sparc_g0
, 0xca, sparc_g0
);
2249 if (code
[1] == p
[0])
2257 #define BR_SMALL_SIZE 2
2258 #define BR_LARGE_SIZE 2
2259 #define JUMP_IMM_SIZE 5
2260 #define ENABLE_WRONG_METHOD_CHECK 0
2263 * LOCKING: called with the domain lock held
2266 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
2267 gpointer fail_tramp
)
2271 guint32
*code
, *start
;
2273 for (i
= 0; i
< count
; ++i
) {
2274 MonoIMTCheckItem
*item
= imt_entries
[i
];
2275 if (item
->is_equals
) {
2276 if (item
->check_target_idx
) {
2277 if (!item
->compare_done
)
2278 item
->chunk_size
+= CMP_SIZE
;
2279 item
->chunk_size
+= BR_SMALL_SIZE
+ JUMP_IMM_SIZE
;
2282 item
->chunk_size
+= 16;
2283 item
->chunk_size
+= JUMP_IMM_SIZE
;
2284 #if ENABLE_WRONG_METHOD_CHECK
2285 item
->chunk_size
+= CMP_SIZE
+ BR_SMALL_SIZE
+ 1;
2289 item
->chunk_size
+= CMP_SIZE
+ BR_LARGE_SIZE
;
2290 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
2292 size
+= item
->chunk_size
;
2295 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
* 4);
2297 code
= mono_domain_code_reserve (domain
, size
* 4);
2299 for (i
= 0; i
< count
; ++i
) {
2300 MonoIMTCheckItem
*item
= imt_entries
[i
];
2301 item
->code_target
= (guint8
*)code
;
2302 if (item
->is_equals
) {
2303 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
2305 if (item
->check_target_idx
|| fail_case
) {
2306 if (!item
->compare_done
|| fail_case
) {
2307 sparc_set (code
, (guint32
)item
->key
, sparc_g5
);
2308 sparc_cmp (code
, MONO_ARCH_IMT_REG
, sparc_g5
);
2310 item
->jmp_code
= (guint8
*)code
;
2311 sparc_branch (code
, 0, sparc_bne
, 0);
2313 if (item
->has_target_code
) {
2314 sparc_set (code
, item
->value
.target_code
, sparc_f5
);
2316 sparc_set (code
, ((guint32
)(&(vtable
->vtable
[item
->value
.vtable_slot
]))), sparc_g5
);
2317 sparc_ld (code
, sparc_g5
, 0, sparc_g5
);
2319 sparc_jmpl (code
, sparc_g5
, sparc_g0
, sparc_g0
);
2323 sparc_patch (item
->jmp_code
, code
);
2324 sparc_set (code
, fail_tramp
, sparc_g5
);
2325 sparc_jmpl (code
, sparc_g5
, sparc_g0
, sparc_g0
);
2327 item
->jmp_code
= NULL
;
2330 /* enable the commented code to assert on wrong method */
2331 #if ENABLE_WRONG_METHOD_CHECK
2332 g_assert_not_reached ();
2334 sparc_set (code
, ((guint32
)(&(vtable
->vtable
[item
->value
.vtable_slot
]))), sparc_g5
);
2335 sparc_ld (code
, sparc_g5
, 0, sparc_g5
);
2336 sparc_jmpl (code
, sparc_g5
, sparc_g0
, sparc_g0
);
2338 #if ENABLE_WRONG_METHOD_CHECK
2339 g_assert_not_reached ();
2343 sparc_set (code
, (guint32
)item
->key
, sparc_g5
);
2344 sparc_cmp (code
, MONO_ARCH_IMT_REG
, sparc_g5
);
2345 item
->jmp_code
= (guint8
*)code
;
2346 sparc_branch (code
, 0, sparc_beu
, 0);
2350 /* patch the branches to get to the target items */
2351 for (i
= 0; i
< count
; ++i
) {
2352 MonoIMTCheckItem
*item
= imt_entries
[i
];
2353 if (item
->jmp_code
) {
2354 if (item
->check_target_idx
) {
2355 sparc_patch ((guint32
*)item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
2360 mono_arch_flush_icache ((guint8
*)start
, (code
- start
));
2361 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
2363 UnlockedAdd (&mono_stats
.imt_trampolines_size
, (code
- start
));
2364 g_assert (code
- start
<= size
);
2366 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, NULL
), domain
);
2372 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
2375 g_assert_not_reached ();
2378 return (MonoMethod
*)regs
[sparc_g1
];
2382 mono_arch_get_this_arg_from_call (host_mgreg_t
*regs
, guint8
*code
)
2384 mono_sparc_flushw ();
2386 return (gpointer
)regs
[sparc_o0
];
2390 * Some conventions used in the following code.
2391 * 2) The only scratch registers we have are o7 and g1. We try to
2392 * stick to o7 when we can, and use g1 when necessary.
2396 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2401 guint32
*code
= (guint32
*)(cfg
->native_code
+ cfg
->code_len
);
2402 MonoInst
*last_ins
= NULL
;
2406 if (cfg
->verbose_level
> 2)
2407 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
2409 cpos
= bb
->max_offset
;
2411 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2414 offset
= (guint8
*)code
- cfg
->native_code
;
2415 spec
= ins_get_spec (ins
->opcode
);
2416 max_len
= ins_get_size (ins
->opcode
);
2417 code
= realloc_code (cfg
, max_len
);
2418 code_start
= (guint8
*)code
;
2419 // if (ins->cil_code)
2420 // g_print ("cil code\n");
2421 mono_debug_record_line_number (cfg
, ins
, offset
);
2423 switch (ins
->opcode
) {
2424 case OP_STOREI1_MEMBASE_IMM
:
2425 EMIT_STORE_MEMBASE_IMM (ins
, stb
);
2427 case OP_STOREI2_MEMBASE_IMM
:
2428 EMIT_STORE_MEMBASE_IMM (ins
, sth
);
2430 case OP_STORE_MEMBASE_IMM
:
2431 EMIT_STORE_MEMBASE_IMM (ins
, sti
);
2433 case OP_STOREI4_MEMBASE_IMM
:
2434 EMIT_STORE_MEMBASE_IMM (ins
, st
);
2436 case OP_STOREI8_MEMBASE_IMM
:
2438 EMIT_STORE_MEMBASE_IMM (ins
, stx
);
2440 /* Only generated by peephole opts */
2441 g_assert ((ins
->inst_offset
% 8) == 0);
2442 g_assert (ins
->inst_imm
== 0);
2443 EMIT_STORE_MEMBASE_IMM (ins
, stx
);
2446 case OP_STOREI1_MEMBASE_REG
:
2447 EMIT_STORE_MEMBASE_REG (ins
, stb
);
2449 case OP_STOREI2_MEMBASE_REG
:
2450 EMIT_STORE_MEMBASE_REG (ins
, sth
);
2452 case OP_STOREI4_MEMBASE_REG
:
2453 EMIT_STORE_MEMBASE_REG (ins
, st
);
2455 case OP_STOREI8_MEMBASE_REG
:
2457 EMIT_STORE_MEMBASE_REG (ins
, stx
);
2459 /* Only used by OP_MEMSET */
2460 EMIT_STORE_MEMBASE_REG (ins
, std
);
2463 case OP_STORE_MEMBASE_REG
:
2464 EMIT_STORE_MEMBASE_REG (ins
, sti
);
2467 sparc_set (code
, ins
->inst_c0
, ins
->dreg
);
2468 sparc_ld (code
, ins
->dreg
, sparc_g0
, ins
->dreg
);
2470 case OP_LOADI4_MEMBASE
:
2472 EMIT_LOAD_MEMBASE (ins
, ldsw
);
2474 EMIT_LOAD_MEMBASE (ins
, ld
);
2477 case OP_LOADU4_MEMBASE
:
2478 EMIT_LOAD_MEMBASE (ins
, ld
);
2480 case OP_LOADU1_MEMBASE
:
2481 EMIT_LOAD_MEMBASE (ins
, ldub
);
2483 case OP_LOADI1_MEMBASE
:
2484 EMIT_LOAD_MEMBASE (ins
, ldsb
);
2486 case OP_LOADU2_MEMBASE
:
2487 EMIT_LOAD_MEMBASE (ins
, lduh
);
2489 case OP_LOADI2_MEMBASE
:
2490 EMIT_LOAD_MEMBASE (ins
, ldsh
);
2492 case OP_LOAD_MEMBASE
:
2494 EMIT_LOAD_MEMBASE (ins
, ldx
);
2496 EMIT_LOAD_MEMBASE (ins
, ld
);
2500 case OP_LOADI8_MEMBASE
:
2501 EMIT_LOAD_MEMBASE (ins
, ldx
);
2504 case OP_ICONV_TO_I1
:
2505 sparc_sll_imm (code
, ins
->sreg1
, 24, sparc_o7
);
2506 sparc_sra_imm (code
, sparc_o7
, 24, ins
->dreg
);
2508 case OP_ICONV_TO_I2
:
2509 sparc_sll_imm (code
, ins
->sreg1
, 16, sparc_o7
);
2510 sparc_sra_imm (code
, sparc_o7
, 16, ins
->dreg
);
2512 case OP_ICONV_TO_U1
:
2513 sparc_and_imm (code
, FALSE
, ins
->sreg1
, 0xff, ins
->dreg
);
2515 case OP_ICONV_TO_U2
:
2516 sparc_sll_imm (code
, ins
->sreg1
, 16, sparc_o7
);
2517 sparc_srl_imm (code
, sparc_o7
, 16, ins
->dreg
);
2519 case OP_LCONV_TO_OVF_U4
:
2520 case OP_ICONV_TO_OVF_U4
:
2521 /* Only used on V9 */
2522 sparc_cmp_imm (code
, ins
->sreg1
, 0);
2523 mono_add_patch_info (cfg
, (guint8
*)(code
) - (cfg
)->native_code
,
2524 MONO_PATCH_INFO_EXC
, "OverflowException");
2525 sparc_branchp (code
, 0, sparc_bl
, sparc_xcc_short
, 0, 0);
2527 sparc_set (code
, 1, sparc_o7
);
2528 sparc_sllx_imm (code
, sparc_o7
, 32, sparc_o7
);
2529 sparc_cmp (code
, ins
->sreg1
, sparc_o7
);
2530 mono_add_patch_info (cfg
, (guint8
*)(code
) - (cfg
)->native_code
,
2531 MONO_PATCH_INFO_EXC
, "OverflowException");
2532 sparc_branchp (code
, 0, sparc_bge
, sparc_xcc_short
, 0, 0);
2534 sparc_mov_reg_reg (code
, ins
->sreg1
, ins
->dreg
);
2536 case OP_LCONV_TO_OVF_I4_UN
:
2537 case OP_ICONV_TO_OVF_I4_UN
:
2538 /* Only used on V9 */
2544 sparc_cmp (code
, ins
->sreg1
, ins
->sreg2
);
2546 case OP_COMPARE_IMM
:
2547 case OP_ICOMPARE_IMM
:
2548 if (sparc_is_imm13 (ins
->inst_imm
))
2549 sparc_cmp_imm (code
, ins
->sreg1
, ins
->inst_imm
);
2551 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2552 sparc_cmp (code
, ins
->sreg1
, sparc_o7
);
2557 * gdb does not like encountering 'ta 1' in the debugged code. So
2558 * instead of emitting a trap, we emit a call a C function and place a
2561 //sparc_ta (code, 1);
2562 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_JIT_ICALL_ID
,
2563 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
2568 sparc_add (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2571 sparc_add (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2576 /* according to inssel-long32.brg, this should set cc */
2577 EMIT_ALU_IMM (ins
, add
, TRUE
);
2581 /* according to inssel-long32.brg, this should set cc */
2582 sparc_addx (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2586 EMIT_ALU_IMM (ins
, addx
, TRUE
);
2590 sparc_sub (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2593 sparc_sub (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2598 /* according to inssel-long32.brg, this should set cc */
2599 EMIT_ALU_IMM (ins
, sub
, TRUE
);
2603 /* according to inssel-long32.brg, this should set cc */
2604 sparc_subx (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2608 EMIT_ALU_IMM (ins
, subx
, TRUE
);
2611 sparc_and (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2615 EMIT_ALU_IMM (ins
, and, FALSE
);
2618 /* Sign extend sreg1 into %y */
2619 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2620 sparc_wry (code
, sparc_o7
, sparc_g0
);
2621 sparc_sdiv (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2622 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2625 sparc_wry (code
, sparc_g0
, sparc_g0
);
2626 sparc_udiv (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2632 /* Transform division into a shift */
2633 for (i
= 1; i
< 30; ++i
) {
2635 if (ins
->inst_imm
== imm
)
2641 sparc_srl_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2642 sparc_add (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2643 sparc_sra_imm (code
, ins
->dreg
, 1, ins
->dreg
);
2646 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2647 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2648 sparc_srl_imm (code
, sparc_o7
, 32 - i
, sparc_o7
);
2649 sparc_add (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2650 sparc_sra_imm (code
, ins
->dreg
, i
, ins
->dreg
);
2654 /* Sign extend sreg1 into %y */
2655 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2656 sparc_wry (code
, sparc_o7
, sparc_g0
);
2657 EMIT_ALU_IMM (ins
, sdiv
, TRUE
);
2658 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2662 case OP_IDIV_UN_IMM
:
2663 sparc_wry (code
, sparc_g0
, sparc_g0
);
2664 EMIT_ALU_IMM (ins
, udiv
, FALSE
);
2667 /* Sign extend sreg1 into %y */
2668 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2669 sparc_wry (code
, sparc_o7
, sparc_g0
);
2670 sparc_sdiv (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, sparc_o7
);
2671 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2672 sparc_smul (code
, FALSE
, ins
->sreg2
, sparc_o7
, sparc_o7
);
2673 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2676 sparc_wry (code
, sparc_g0
, sparc_g0
);
2677 sparc_udiv (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, sparc_o7
);
2678 sparc_umul (code
, FALSE
, ins
->sreg2
, sparc_o7
, sparc_o7
);
2679 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2683 /* Sign extend sreg1 into %y */
2684 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2685 sparc_wry (code
, sparc_o7
, sparc_g0
);
2686 if (!sparc_is_imm13 (ins
->inst_imm
)) {
2687 sparc_set (code
, ins
->inst_imm
, GP_SCRATCH_REG
);
2688 sparc_sdiv (code
, TRUE
, ins
->sreg1
, GP_SCRATCH_REG
, sparc_o7
);
2689 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2690 sparc_smul (code
, FALSE
, sparc_o7
, GP_SCRATCH_REG
, sparc_o7
);
2693 sparc_sdiv_imm (code
, TRUE
, ins
->sreg1
, ins
->inst_imm
, sparc_o7
);
2694 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2695 sparc_smul_imm (code
, FALSE
, sparc_o7
, ins
->inst_imm
, sparc_o7
);
2697 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2699 case OP_IREM_UN_IMM
:
2700 sparc_set (code
, ins
->inst_imm
, GP_SCRATCH_REG
);
2701 sparc_wry (code
, sparc_g0
, sparc_g0
);
2702 sparc_udiv (code
, FALSE
, ins
->sreg1
, GP_SCRATCH_REG
, sparc_o7
);
2703 sparc_umul (code
, FALSE
, GP_SCRATCH_REG
, sparc_o7
, sparc_o7
);
2704 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2707 sparc_or (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2711 EMIT_ALU_IMM (ins
, or, FALSE
);
2714 sparc_xor (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2718 EMIT_ALU_IMM (ins
, xor, FALSE
);
2721 sparc_sll (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2725 if (ins
->inst_imm
< (1 << 5))
2726 sparc_sll_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2728 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2729 sparc_sll (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2733 sparc_sra (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2737 if (ins
->inst_imm
< (1 << 5))
2738 sparc_sra_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2740 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2741 sparc_sra (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2745 case OP_ISHR_UN_IMM
:
2746 if (ins
->inst_imm
< (1 << 5))
2747 sparc_srl_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2749 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2750 sparc_srl (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2754 sparc_srl (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2757 sparc_sllx (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2760 if (ins
->inst_imm
< (1 << 6))
2761 sparc_sllx_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2763 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2764 sparc_sllx (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2768 sparc_srax (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2771 if (ins
->inst_imm
< (1 << 6))
2772 sparc_srax_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2774 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2775 sparc_srax (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2779 sparc_srlx (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2781 case OP_LSHR_UN_IMM
:
2782 if (ins
->inst_imm
< (1 << 6))
2783 sparc_srlx_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2785 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2786 sparc_srlx (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2790 /* can't use sparc_not */
2791 sparc_xnor (code
, FALSE
, ins
->sreg1
, sparc_g0
, ins
->dreg
);
2794 /* can't use sparc_neg */
2795 sparc_sub (code
, FALSE
, sparc_g0
, ins
->sreg1
, ins
->dreg
);
2798 sparc_smul (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2804 if ((ins
->inst_imm
== 1) && (ins
->sreg1
== ins
->dreg
))
2807 /* Transform multiplication into a shift */
2808 for (i
= 0; i
< 30; ++i
) {
2810 if (ins
->inst_imm
== imm
)
2814 sparc_sll_imm (code
, ins
->sreg1
, i
, ins
->dreg
);
2816 EMIT_ALU_IMM (ins
, smul
, FALSE
);
2820 sparc_smul (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2821 sparc_rdy (code
, sparc_g1
);
2822 sparc_sra_imm (code
, ins
->dreg
, 31, sparc_o7
);
2823 sparc_cmp (code
, sparc_g1
, sparc_o7
);
2824 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins
, sparc_bne
, "OverflowException", TRUE
, sparc_icc_short
);
2826 case OP_IMUL_OVF_UN
:
2827 sparc_umul (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2828 sparc_rdy (code
, sparc_o7
);
2829 sparc_cmp (code
, sparc_o7
, sparc_g0
);
2830 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins
, sparc_bne
, "OverflowException", TRUE
, sparc_icc_short
);
2833 sparc_set (code
, ins
->inst_c0
, ins
->dreg
);
2836 sparc_set (code
, ins
->inst_l
, ins
->dreg
);
2839 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
2840 sparc_set_template (code
, ins
->dreg
);
2843 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
2844 sparc_set_template (code
, ins
->dreg
);
2846 case OP_ICONV_TO_I4
:
2847 case OP_ICONV_TO_U4
:
2849 if (ins
->sreg1
!= ins
->dreg
)
2850 sparc_mov_reg_reg (code
, ins
->sreg1
, ins
->dreg
);
2854 if (ins
->sreg1
!= ins
->dreg
)
2855 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
2857 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
2858 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
2862 /* ensure ins->sreg1 is not NULL */
2863 /* Might be misaligned in case of vtypes so use a byte load */
2864 sparc_ldsb_imm (code
, ins
->sreg1
, 0, sparc_g0
);
2867 sparc_add_imm (code
, FALSE
, sparc_fp
, cfg
->sig_cookie
, sparc_o7
);
2868 sparc_sti_imm (code
, sparc_o7
, ins
->sreg1
, 0);
2876 call
= (MonoCallInst
*)ins
;
2877 g_assert (!call
->virtual);
2878 code
= emit_save_sp_to_lmf (cfg
, code
);
2880 const MonoJumpInfoTarget patch
= mono_call_to_patch (call
);
2881 code
= emit_call (cfg
, code
, patch
.type
, patch
.target
);
2882 code
= emit_vret_token (ins
, code
);
2883 code
= emit_move_return_value (ins
, code
);
2890 case OP_VOIDCALL_REG
:
2892 call
= (MonoCallInst
*)ins
;
2893 code
= emit_save_sp_to_lmf (cfg
, code
);
2894 sparc_jmpl (code
, ins
->sreg1
, sparc_g0
, sparc_callsite
);
2896 * We emit a special kind of nop in the delay slot to tell the
2897 * trampoline code that this is a virtual call, thus an unbox
2898 * trampoline might need to be called.
2901 sparc_or_imm (code
, FALSE
, sparc_g0
, 0xca, sparc_g0
);
2905 code
= emit_vret_token (ins
, code
);
2906 code
= emit_move_return_value (ins
, code
);
2908 case OP_FCALL_MEMBASE
:
2909 case OP_LCALL_MEMBASE
:
2910 case OP_VCALL_MEMBASE
:
2911 case OP_VCALL2_MEMBASE
:
2912 case OP_VOIDCALL_MEMBASE
:
2913 case OP_CALL_MEMBASE
:
2914 call
= (MonoCallInst
*)ins
;
2915 code
= emit_save_sp_to_lmf (cfg
, code
);
2916 if (sparc_is_imm13 (ins
->inst_offset
)) {
2917 sparc_ldi_imm (code
, ins
->inst_basereg
, ins
->inst_offset
, sparc_o7
);
2919 sparc_set (code
, ins
->inst_offset
, sparc_o7
);
2920 sparc_ldi (code
, ins
->inst_basereg
, sparc_o7
, sparc_o7
);
2922 sparc_jmpl (code
, sparc_o7
, sparc_g0
, sparc_callsite
);
2924 sparc_or_imm (code
, FALSE
, sparc_g0
, 0xca, sparc_g0
);
2928 code
= emit_vret_token (ins
, code
);
2929 code
= emit_move_return_value (ins
, code
);
2932 if (mono_method_signature_internal (cfg
->method
)->ret
->type
== MONO_TYPE_R4
)
2933 sparc_fdtos (code
, ins
->sreg1
, sparc_f0
);
2936 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
2938 /* FIXME: Why not use fmovd ? */
2939 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
2940 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
2948 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2949 /* Perform stack touching */
2953 /* Keep alignment */
2954 /* Add 4 to compensate for the rounding of localloc_offset */
2955 sparc_add_imm (code
, FALSE
, ins
->sreg1
, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT
- 1, ins
->dreg
);
2956 sparc_set (code
, ~(MONO_ARCH_LOCALLOC_ALIGNMENT
- 1), sparc_o7
);
2957 sparc_and (code
, FALSE
, ins
->dreg
, sparc_o7
, ins
->dreg
);
2959 if ((ins
->flags
& MONO_INST_INIT
) && (ins
->sreg1
== ins
->dreg
)) {
2961 size_reg
= sparc_g4
;
2963 size_reg
= sparc_g1
;
2965 sparc_mov_reg_reg (code
, ins
->dreg
, size_reg
);
2968 size_reg
= ins
->sreg1
;
2970 sparc_sub (code
, FALSE
, sparc_sp
, ins
->dreg
, ins
->dreg
);
2971 /* Keep %sp valid at all times */
2972 sparc_mov_reg_reg (code
, ins
->dreg
, sparc_sp
);
2973 /* Round localloc_offset too so the result is at least 8 aligned */
2974 offset2
= ALIGN_TO (cfg
->arch
.localloc_offset
, 8);
2975 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS
+ offset2
));
2976 sparc_add_imm (code
, FALSE
, ins
->dreg
, MONO_SPARC_STACK_BIAS
+ offset2
, ins
->dreg
);
2978 if (ins
->flags
& MONO_INST_INIT
) {
2980 /* Initialize memory region */
2981 sparc_cmp_imm (code
, size_reg
, 0);
2983 sparc_branch (code
, 0, sparc_be
, 0);
2985 sparc_set (code
, 0, sparc_o7
);
2986 sparc_sub_imm (code
, 0, size_reg
, mono_hwcap_sparc_is_v9
? 8 : 4, size_reg
);
2989 if (mono_hwcap_sparc_is_v9
)
2990 sparc_stx (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
2992 sparc_st (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
2993 sparc_cmp (code
, sparc_o7
, size_reg
);
2995 sparc_branch (code
, 0, sparc_bl
, 0);
2996 sparc_patch (br
[2], br
[1]);
2998 sparc_add_imm (code
, 0, sparc_o7
, mono_hwcap_sparc_is_v9
? 8 : 4, sparc_o7
);
2999 sparc_patch (br
[0], code
);
3003 case OP_LOCALLOC_IMM
: {
3004 gint32 offset
= ins
->inst_imm
;
3007 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3008 /* Perform stack touching */
3012 /* To compensate for the rounding of localloc_offset */
3013 offset
+= sizeof (target_mgreg_t
);
3014 offset
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
3015 if (sparc_is_imm13 (offset
))
3016 sparc_sub_imm (code
, FALSE
, sparc_sp
, offset
, sparc_sp
);
3018 sparc_set (code
, offset
, sparc_o7
);
3019 sparc_sub (code
, FALSE
, sparc_sp
, sparc_o7
, sparc_sp
);
3021 /* Round localloc_offset too so the result is at least 8 aligned */
3022 offset2
= ALIGN_TO (cfg
->arch
.localloc_offset
, 8);
3023 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS
+ offset2
));
3024 sparc_add_imm (code
, FALSE
, sparc_sp
, MONO_SPARC_STACK_BIAS
+ offset2
, ins
->dreg
);
3025 if ((ins
->flags
& MONO_INST_INIT
) && (offset
> 0)) {
3031 while (i
< offset
) {
3032 if (mono_hwcap_sparc_is_v9
) {
3033 sparc_stx_imm (code
, sparc_g0
, ins
->dreg
, i
);
3037 sparc_st_imm (code
, sparc_g0
, ins
->dreg
, i
);
3043 sparc_set (code
, offset
, sparc_o7
);
3044 sparc_sub_imm (code
, 0, sparc_o7
, mono_hwcap_sparc_is_v9
? 8 : 4, sparc_o7
);
3045 /* beginning of loop */
3047 if (mono_hwcap_sparc_is_v9
)
3048 sparc_stx (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3050 sparc_st (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3051 sparc_cmp_imm (code
, sparc_o7
, 0);
3053 sparc_branch (code
, 0, sparc_bne
, 0);
3055 sparc_sub_imm (code
, 0, sparc_o7
, mono_hwcap_sparc_is_v9
? 8 : 4, sparc_o7
);
3056 sparc_patch (br
[1], br
[0]);
3062 sparc_mov_reg_reg (code
, ins
->sreg1
, sparc_o0
);
3063 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
3064 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
3068 sparc_mov_reg_reg (code
, ins
->sreg1
, sparc_o0
);
3069 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
3070 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
3073 case OP_START_HANDLER
: {
3075 * The START_HANDLER instruction marks the beginning of a handler
3076 * block. It is called using a call instruction, so %o7 contains
3077 * the return address. Since the handler executes in the same stack
3078 * frame as the method itself, we can't use save/restore to save
3079 * the return address. Instead, we save it into a dedicated
3082 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3083 if (!sparc_is_imm13 (spvar
->inst_offset
)) {
3084 sparc_set (code
, spvar
->inst_offset
, GP_SCRATCH_REG
);
3085 sparc_sti (code
, sparc_o7
, spvar
->inst_basereg
, GP_SCRATCH_REG
);
3088 sparc_sti_imm (code
, sparc_o7
, spvar
->inst_basereg
, spvar
->inst_offset
);
3091 case OP_ENDFILTER
: {
3092 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3093 if (!sparc_is_imm13 (spvar
->inst_offset
)) {
3094 sparc_set (code
, spvar
->inst_offset
, GP_SCRATCH_REG
);
3095 sparc_ldi (code
, spvar
->inst_basereg
, GP_SCRATCH_REG
, sparc_o7
);
3098 sparc_ldi_imm (code
, spvar
->inst_basereg
, spvar
->inst_offset
, sparc_o7
);
3099 sparc_jmpl_imm (code
, sparc_o7
, 8, sparc_g0
);
3101 sparc_mov_reg_reg (code
, ins
->sreg1
, sparc_o0
);
3104 case OP_ENDFINALLY
: {
3105 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3106 if (!sparc_is_imm13 (spvar
->inst_offset
)) {
3107 sparc_set (code
, spvar
->inst_offset
, GP_SCRATCH_REG
);
3108 sparc_ldi (code
, spvar
->inst_basereg
, GP_SCRATCH_REG
, sparc_o7
);
3111 sparc_ldi_imm (code
, spvar
->inst_basereg
, spvar
->inst_offset
, sparc_o7
);
3112 sparc_jmpl_imm (code
, sparc_o7
, 8, sparc_g0
);
3116 case OP_CALL_HANDLER
:
3117 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3118 /* This is a jump inside the method, so call_simple works even on V9 */
3119 sparc_call_simple (code
, 0);
3121 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
3122 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
3125 ins
->inst_c0
= (guint8
*)code
- cfg
->native_code
;
3127 case OP_RELAXED_NOP
:
3130 case OP_DUMMY_ICONST
:
3131 case OP_DUMMY_I8CONST
:
3132 case OP_DUMMY_R8CONST
:
3133 case OP_DUMMY_R4CONST
:
3134 case OP_NOT_REACHED
:
3138 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3139 if ((ins
->inst_target_bb
== bb
->next_bb
) && ins
== bb
->last_ins
)
3141 if (ins
->inst_target_bb
->native_offset
) {
3142 gint32 disp
= (ins
->inst_target_bb
->native_offset
- ((guint8
*)code
- cfg
->native_code
)) >> 2;
3143 g_assert (sparc_is_imm22 (disp
));
3144 sparc_branch (code
, 1, sparc_ba
, disp
);
3146 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3147 sparc_branch (code
, 1, sparc_ba
, 0);
3152 sparc_jmp (code
, ins
->sreg1
, sparc_g0
);
3160 if (v64
&& (cfg
->opt
& MONO_OPT_CMOV
)) {
3161 sparc_clr_reg (code
, ins
->dreg
);
3162 sparc_movcc_imm (code
, sparc_xcc
, opcode_to_sparc_cond (ins
->opcode
), 1, ins
->dreg
);
3165 sparc_clr_reg (code
, ins
->dreg
);
3167 sparc_branchp (code
, 1, opcode_to_sparc_cond (ins
->opcode
), DEFAULT_ICC
, 0, 2);
3169 sparc_branch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 2);
3172 sparc_set (code
, 1, ins
->dreg
);
3180 if (v64
&& (cfg
->opt
& MONO_OPT_CMOV
)) {
3181 sparc_clr_reg (code
, ins
->dreg
);
3182 sparc_movcc_imm (code
, sparc_icc
, opcode_to_sparc_cond (ins
->opcode
), 1, ins
->dreg
);
3185 sparc_clr_reg (code
, ins
->dreg
);
3186 sparc_branchp (code
, 1, opcode_to_sparc_cond (ins
->opcode
), sparc_icc_short
, 0, 2);
3188 sparc_set (code
, 1, ins
->dreg
);
3191 case OP_COND_EXC_EQ
:
3192 case OP_COND_EXC_NE_UN
:
3193 case OP_COND_EXC_LT
:
3194 case OP_COND_EXC_LT_UN
:
3195 case OP_COND_EXC_GT
:
3196 case OP_COND_EXC_GT_UN
:
3197 case OP_COND_EXC_GE
:
3198 case OP_COND_EXC_GE_UN
:
3199 case OP_COND_EXC_LE
:
3200 case OP_COND_EXC_LE_UN
:
3201 case OP_COND_EXC_OV
:
3202 case OP_COND_EXC_NO
:
3204 case OP_COND_EXC_NC
:
3205 case OP_COND_EXC_IEQ
:
3206 case OP_COND_EXC_INE_UN
:
3207 case OP_COND_EXC_ILT
:
3208 case OP_COND_EXC_ILT_UN
:
3209 case OP_COND_EXC_IGT
:
3210 case OP_COND_EXC_IGT_UN
:
3211 case OP_COND_EXC_IGE
:
3212 case OP_COND_EXC_IGE_UN
:
3213 case OP_COND_EXC_ILE
:
3214 case OP_COND_EXC_ILE_UN
:
3215 case OP_COND_EXC_IOV
:
3216 case OP_COND_EXC_INO
:
3217 case OP_COND_EXC_IC
:
3218 case OP_COND_EXC_INC
:
3222 EMIT_COND_SYSTEM_EXCEPTION (ins
, opcode_to_sparc_cond (ins
->opcode
), ins
->inst_p1
);
3225 case OP_SPARC_COND_EXC_EQZ
:
3226 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brz
, ins
->inst_p1
);
3228 case OP_SPARC_COND_EXC_GEZ
:
3229 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brgez
, ins
->inst_p1
);
3231 case OP_SPARC_COND_EXC_GTZ
:
3232 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brgz
, ins
->inst_p1
);
3234 case OP_SPARC_COND_EXC_LEZ
:
3235 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brlez
, ins
->inst_p1
);
3237 case OP_SPARC_COND_EXC_LTZ
:
3238 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brlz
, ins
->inst_p1
);
3240 case OP_SPARC_COND_EXC_NEZ
:
3241 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brnz
, ins
->inst_p1
);
3254 if (mono_hwcap_sparc_is_v9
)
3255 EMIT_COND_BRANCH_PREDICTED (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3257 EMIT_COND_BRANCH (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3262 EMIT_COND_BRANCH_BPR (ins
, brz
, 1, 1, 1);
3264 case OP_SPARC_BRLEZ
:
3265 EMIT_COND_BRANCH_BPR (ins
, brlez
, 1, 1, 1);
3268 EMIT_COND_BRANCH_BPR (ins
, brlz
, 1, 1, 1);
3271 EMIT_COND_BRANCH_BPR (ins
, brnz
, 1, 1, 1);
3274 EMIT_COND_BRANCH_BPR (ins
, brgz
, 1, 1, 1);
3276 case OP_SPARC_BRGEZ
:
3277 EMIT_COND_BRANCH_BPR (ins
, brgez
, 1, 1, 1);
3280 /* floating point opcodes */
3282 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_R8
, ins
->inst_p0
);
3284 sparc_set_template (code
, sparc_o7
);
3286 sparc_sethi (code
, 0, sparc_o7
);
3288 sparc_lddf_imm (code
, sparc_o7
, 0, ins
->dreg
);
3291 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_R4
, ins
->inst_p0
);
3293 sparc_set_template (code
, sparc_o7
);
3295 sparc_sethi (code
, 0, sparc_o7
);
3297 sparc_ldf_imm (code
, sparc_o7
, 0, FP_SCRATCH_REG
);
3299 /* Extend to double */
3300 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3302 case OP_STORER8_MEMBASE_REG
:
3303 if (!sparc_is_imm13 (ins
->inst_offset
+ 4)) {
3304 sparc_set (code
, ins
->inst_offset
, sparc_o7
);
3305 /* SPARCV9 handles misaligned fp loads/stores */
3306 if (!v64
&& (ins
->inst_offset
% 8)) {
3308 sparc_add (code
, FALSE
, ins
->inst_destbasereg
, sparc_o7
, sparc_o7
);
3309 sparc_stf (code
, ins
->sreg1
, sparc_o7
, sparc_g0
);
3310 sparc_stf_imm (code
, ins
->sreg1
+ 1, sparc_o7
, 4);
3312 sparc_stdf (code
, ins
->sreg1
, ins
->inst_destbasereg
, sparc_o7
);
3315 if (!v64
&& (ins
->inst_offset
% 8)) {
3317 sparc_stf_imm (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3318 sparc_stf_imm (code
, ins
->sreg1
+ 1, ins
->inst_destbasereg
, ins
->inst_offset
+ 4);
3320 sparc_stdf_imm (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3323 case OP_LOADR8_MEMBASE
:
3324 EMIT_LOAD_MEMBASE (ins
, lddf
);
3326 case OP_STORER4_MEMBASE_REG
:
3327 /* This requires a double->single conversion */
3328 sparc_fdtos (code
, ins
->sreg1
, FP_SCRATCH_REG
);
3329 if (!sparc_is_imm13 (ins
->inst_offset
)) {
3330 sparc_set (code
, ins
->inst_offset
, sparc_o7
);
3331 sparc_stf (code
, FP_SCRATCH_REG
, ins
->inst_destbasereg
, sparc_o7
);
3334 sparc_stf_imm (code
, FP_SCRATCH_REG
, ins
->inst_destbasereg
, ins
->inst_offset
);
3336 case OP_LOADR4_MEMBASE
: {
3337 /* ldf needs a single precision register */
3338 int dreg
= ins
->dreg
;
3339 ins
->dreg
= FP_SCRATCH_REG
;
3340 EMIT_LOAD_MEMBASE (ins
, ldf
);
3342 /* Extend to double */
3343 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3346 case OP_ICONV_TO_R4
: {
3347 MonoInst
*spill
= cfg
->arch
.float_spill_slot
;
3348 gint32 reg
= spill
->inst_basereg
;
3349 gint32 offset
= spill
->inst_offset
;
3351 g_assert (spill
->opcode
== OP_REGOFFSET
);
3353 if (!sparc_is_imm13 (offset
)) {
3354 sparc_set (code
, offset
, sparc_o7
);
3355 sparc_stx (code
, ins
->sreg1
, reg
, offset
);
3356 sparc_lddf (code
, reg
, offset
, FP_SCRATCH_REG
);
3358 sparc_stx_imm (code
, ins
->sreg1
, reg
, offset
);
3359 sparc_lddf_imm (code
, reg
, offset
, FP_SCRATCH_REG
);
3361 sparc_fxtos (code
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3363 if (!sparc_is_imm13 (offset
)) {
3364 sparc_set (code
, offset
, sparc_o7
);
3365 sparc_st (code
, ins
->sreg1
, reg
, sparc_o7
);
3366 sparc_ldf (code
, reg
, sparc_o7
, FP_SCRATCH_REG
);
3368 sparc_st_imm (code
, ins
->sreg1
, reg
, offset
);
3369 sparc_ldf_imm (code
, reg
, offset
, FP_SCRATCH_REG
);
3371 sparc_fitos (code
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3373 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3376 case OP_ICONV_TO_R8
: {
3377 MonoInst
*spill
= cfg
->arch
.float_spill_slot
;
3378 gint32 reg
= spill
->inst_basereg
;
3379 gint32 offset
= spill
->inst_offset
;
3381 g_assert (spill
->opcode
== OP_REGOFFSET
);
3384 if (!sparc_is_imm13 (offset
)) {
3385 sparc_set (code
, offset
, sparc_o7
);
3386 sparc_stx (code
, ins
->sreg1
, reg
, sparc_o7
);
3387 sparc_lddf (code
, reg
, sparc_o7
, FP_SCRATCH_REG
);
3389 sparc_stx_imm (code
, ins
->sreg1
, reg
, offset
);
3390 sparc_lddf_imm (code
, reg
, offset
, FP_SCRATCH_REG
);
3392 sparc_fxtod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3394 if (!sparc_is_imm13 (offset
)) {
3395 sparc_set (code
, offset
, sparc_o7
);
3396 sparc_st (code
, ins
->sreg1
, reg
, sparc_o7
);
3397 sparc_ldf (code
, reg
, sparc_o7
, FP_SCRATCH_REG
);
3399 sparc_st_imm (code
, ins
->sreg1
, reg
, offset
);
3400 sparc_ldf_imm (code
, reg
, offset
, FP_SCRATCH_REG
);
3402 sparc_fitod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3406 case OP_FCONV_TO_I1
:
3407 case OP_FCONV_TO_U1
:
3408 case OP_FCONV_TO_I2
:
3409 case OP_FCONV_TO_U2
:
3414 case OP_FCONV_TO_I4
:
3415 case OP_FCONV_TO_U4
: {
3416 MonoInst
*spill
= cfg
->arch
.float_spill_slot
;
3417 gint32 reg
= spill
->inst_basereg
;
3418 gint32 offset
= spill
->inst_offset
;
3420 g_assert (spill
->opcode
== OP_REGOFFSET
);
3422 sparc_fdtoi (code
, ins
->sreg1
, FP_SCRATCH_REG
);
3423 if (!sparc_is_imm13 (offset
)) {
3424 sparc_set (code
, offset
, sparc_o7
);
3425 sparc_stdf (code
, FP_SCRATCH_REG
, reg
, sparc_o7
);
3426 sparc_ld (code
, reg
, sparc_o7
, ins
->dreg
);
3428 sparc_stdf_imm (code
, FP_SCRATCH_REG
, reg
, offset
);
3429 sparc_ld_imm (code
, reg
, offset
, ins
->dreg
);
3432 switch (ins
->opcode
) {
3433 case OP_FCONV_TO_I1
:
3434 case OP_FCONV_TO_U1
:
3435 sparc_and_imm (code
, 0, ins
->dreg
, 0xff, ins
->dreg
);
3437 case OP_FCONV_TO_I2
:
3438 case OP_FCONV_TO_U2
:
3439 sparc_set (code
, 0xffff, sparc_o7
);
3440 sparc_and (code
, 0, ins
->dreg
, sparc_o7
, ins
->dreg
);
3447 case OP_FCONV_TO_I8
:
3448 case OP_FCONV_TO_U8
:
3450 g_assert_not_reached ();
3452 case OP_FCONV_TO_R4
:
3453 /* FIXME: Change precision ? */
3455 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
3457 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
3458 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
3461 case OP_LCONV_TO_R_UN
: {
3463 g_assert_not_reached ();
3466 case OP_LCONV_TO_OVF_I
:
3467 case OP_LCONV_TO_OVF_I4_2
: {
3468 guint32
*br
[3], *label
[1];
3471 * Valid ints: 0xffffffff:80000000 to 0x00000000:0x7fffffff
3473 sparc_cmp_imm (code
, ins
->sreg1
, 0);
3475 sparc_branch (code
, 1, sparc_bneg
, 0);
3479 /* ms word must be 0 */
3480 sparc_cmp_imm (code
, ins
->sreg2
, 0);
3482 sparc_branch (code
, 1, sparc_be
, 0);
3487 EMIT_COND_SYSTEM_EXCEPTION (ins
, sparc_ba
, "OverflowException");
3490 sparc_patch (br
[0], code
);
3492 /* ms word must be 0xffffffff */
3493 sparc_cmp_imm (code
, ins
->sreg2
, -1);
3495 sparc_branch (code
, 1, sparc_bne
, 0);
3497 sparc_patch (br
[2], label
[0]);
3500 sparc_patch (br
[1], code
);
3501 if (ins
->sreg1
!= ins
->dreg
)
3502 sparc_mov_reg_reg (code
, ins
->sreg1
, ins
->dreg
);
3506 sparc_faddd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3509 sparc_fsubd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3512 sparc_fmuld (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3515 sparc_fdivd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3519 sparc_fnegd (code
, ins
->sreg1
, ins
->dreg
);
3521 /* FIXME: why don't use fnegd ? */
3522 sparc_fnegs (code
, ins
->sreg1
, ins
->dreg
);
3526 sparc_fdivd (code
, ins
->sreg1
, ins
->sreg2
, FP_SCRATCH_REG
);
3527 sparc_fmuld (code
, ins
->sreg2
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3528 sparc_fsubd (code
, ins
->sreg1
, FP_SCRATCH_REG
, ins
->dreg
);
3531 sparc_fcmpd (code
, ins
->sreg1
, ins
->sreg2
);
3538 sparc_fcmpd (code
, ins
->sreg1
, ins
->sreg2
);
3539 sparc_clr_reg (code
, ins
->dreg
);
3540 switch (ins
->opcode
) {
3543 sparc_fbranch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 4);
3545 sparc_set (code
, 1, ins
->dreg
);
3546 sparc_fbranch (code
, 1, sparc_fbu
, 2);
3548 sparc_set (code
, 1, ins
->dreg
);
3551 sparc_fbranch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 2);
3553 sparc_set (code
, 1, ins
->dreg
);
3559 EMIT_FLOAT_COND_BRANCH (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3562 /* clt.un + brfalse */
3564 sparc_fbranch (code
, 1, sparc_fbul
, 0);
3567 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fba
, 1, 1);
3568 sparc_patch (p
, (guint8
*)code
);
3572 /* cgt.un + brfalse */
3574 sparc_fbranch (code
, 1, sparc_fbug
, 0);
3577 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fba
, 1, 1);
3578 sparc_patch (p
, (guint8
*)code
);
3582 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbne
, 1, 1);
3583 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3586 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbl
, 1, 1);
3587 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3590 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbg
, 1, 1);
3591 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3594 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbge
, 1, 1);
3595 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3598 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fble
, 1, 1);
3599 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3602 MonoInst
*spill
= cfg
->arch
.float_spill_slot
;
3603 gint32 reg
= spill
->inst_basereg
;
3604 gint32 offset
= spill
->inst_offset
;
3606 g_assert (spill
->opcode
== OP_REGOFFSET
);
3608 if (!sparc_is_imm13 (offset
)) {
3609 sparc_set (code
, offset
, sparc_o7
);
3610 sparc_stdf (code
, ins
->sreg1
, reg
, sparc_o7
);
3611 sparc_lduh (code
, reg
, sparc_o7
, sparc_o7
);
3613 sparc_stdf_imm (code
, ins
->sreg1
, reg
, offset
);
3614 sparc_lduh_imm (code
, reg
, offset
, sparc_o7
);
3616 sparc_srl_imm (code
, sparc_o7
, 4, sparc_o7
);
3617 sparc_and_imm (code
, FALSE
, sparc_o7
, 2047, sparc_o7
);
3618 sparc_cmp_imm (code
, sparc_o7
, 2047);
3619 EMIT_COND_SYSTEM_EXCEPTION (ins
, sparc_be
, "OverflowException");
3621 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
3623 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
3624 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
3629 case OP_MEMORY_BARRIER
:
3630 sparc_membar (code
, sparc_membar_all
);
3632 case OP_LIVERANGE_START
: {
3633 if (cfg
->verbose_level
> 1)
3634 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
3635 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
3638 case OP_LIVERANGE_END
: {
3639 if (cfg
->verbose_level
> 1)
3640 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
3641 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
3644 case OP_GC_SAFE_POINT
:
3649 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
3651 g_warning ("%s:%d: unknown opcode %s\n", __FILE__
, __LINE__
, mono_inst_name (ins
->opcode
));
3653 g_assert_not_reached ();
3656 if ((((guint8
*)code
) - code_start
) > max_len
) {
3657 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3658 mono_inst_name (ins
->opcode
), max_len
, ((guint8
*)code
) - code_start
);
3659 g_assert_not_reached ();
3666 set_code_cursor (cfg
, code
);
3670 mono_arch_register_lowlevel_calls (void)
3672 mono_register_jit_icall (mono_arch_get_lmf_addr
, NULL
, TRUE
);
3676 mono_arch_patch_code (MonoCompile
*cfg
, MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gboolean run_cctors
, MonoError
*error
)
3678 MonoJumpInfo
*patch_info
;
3682 /* FIXME: Move part of this to arch independent code */
3683 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
3684 unsigned char *ip
= patch_info
->ip
.i
+ code
;
3687 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
, error
);
3688 return_if_nok (error
);
3690 switch (patch_info
->type
) {
3691 case MONO_PATCH_INFO_NONE
:
3693 case MONO_PATCH_INFO_METHOD_JUMP
: {
3694 guint32
*ip2
= (guint32
*)ip
;
3695 /* Might already been patched */
3696 sparc_set_template (ip2
, sparc_o7
);
3702 sparc_patch ((guint32
*)ip
, target
);
3706 #error obsolete tracing?
3708 mono_arch_instrument_prolog (MonoCompile
*cfg
, MonoJitICallId func
, void *p
, gboolean enable_arguments
)
3711 guint32
*code
= (guint32
*)p
;
3712 MonoMethodSignature
*sig
= mono_method_signature_internal (cfg
->method
);
3715 /* Save registers to stack */
3716 for (i
= 0; i
< 6; ++i
)
3717 sparc_sti_imm (code
, sparc_i0
+ i
, sparc_fp
, ARGS_OFFSET
+ (i
* sizeof (target_mgreg_t
)));
3719 cinfo
= get_call_info (cfg
, sig
, FALSE
);
3721 /* Save float regs on V9, since they are caller saved */
3722 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3723 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3724 gint32 stack_offset
;
3726 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3728 if (ainfo
->storage
== ArgInFloatReg
) {
3729 if (!sparc_is_imm13 (stack_offset
))
3731 sparc_stf_imm (code
, ainfo
->reg
, sparc_fp
, stack_offset
);
3733 else if (ainfo
->storage
== ArgInDoubleReg
) {
3734 /* The offset is guaranteed to be aligned by the ABI rules */
3735 sparc_stdf_imm (code
, ainfo
->reg
, sparc_fp
, stack_offset
);
3739 sparc_set (code
, cfg
->method
, sparc_o0
);
3740 sparc_add_imm (code
, FALSE
, sparc_fp
, MONO_SPARC_STACK_BIAS
, sparc_o1
);
3742 mono_add_patch_info (cfg
, (guint8
*)code
-cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (func
));
3745 /* Restore float regs on V9 */
3746 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3747 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3748 gint32 stack_offset
;
3750 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3752 if (ainfo
->storage
== ArgInFloatReg
) {
3753 if (!sparc_is_imm13 (stack_offset
))
3755 sparc_ldf_imm (code
, sparc_fp
, stack_offset
, ainfo
->reg
);
3757 else if (ainfo
->storage
== ArgInDoubleReg
) {
3758 /* The offset is guaranteed to be aligned by the ABI rules */
3759 sparc_lddf_imm (code
, sparc_fp
, stack_offset
, ainfo
->reg
);
3776 #error obsolete tracing?
3778 mono_arch_instrument_epilog (MonoCompile
*cfg
, MonoJitICallId func
, void *p
, gboolean enable_arguments
)
3780 guint32
*code
= (guint32
*)p
;
3781 int save_mode
= SAVE_NONE
;
3782 MonoMethod
*method
= cfg
->method
;
3784 switch (mini_get_underlying_type (mono_method_signature_internal (method
)->ret
)->type
) {
3785 case MONO_TYPE_VOID
:
3786 /* special case string .ctor icall */
3787 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
3788 save_mode
= SAVE_ONE
;
3790 save_mode
= SAVE_NONE
;
3795 save_mode
= SAVE_ONE
;
3797 save_mode
= SAVE_TWO
;
3802 save_mode
= SAVE_FP
;
3804 case MONO_TYPE_VALUETYPE
:
3805 save_mode
= SAVE_STRUCT
;
3808 save_mode
= SAVE_ONE
;
3812 /* Save the result to the stack and also put it into the output registers */
3814 switch (save_mode
) {
3817 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 68);
3818 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 72);
3819 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3820 sparc_mov_reg_reg (code
, sparc_i1
, sparc_o2
);
3823 sparc_sti_imm (code
, sparc_i0
, sparc_fp
, ARGS_OFFSET
);
3824 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3828 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, ARGS_OFFSET
);
3830 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, 72);
3831 sparc_ld_imm (code
, sparc_fp
, 72, sparc_o1
);
3832 sparc_ld_imm (code
, sparc_fp
, 72 + 4, sparc_o2
);
3837 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3839 sparc_ld_imm (code
, sparc_fp
, 64, sparc_o1
);
3847 sparc_set (code
, cfg
->method
, sparc_o0
);
3849 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (func
));
3852 /* Restore result */
3854 switch (save_mode
) {
3856 sparc_ld_imm (code
, sparc_fp
, 68, sparc_i0
);
3857 sparc_ld_imm (code
, sparc_fp
, 72, sparc_i0
);
3860 sparc_ldi_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_i0
);
3863 sparc_lddf_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_f0
);
3874 mono_arch_emit_prolog (MonoCompile
*cfg
)
3876 MonoMethod
*method
= cfg
->method
;
3877 MonoMethodSignature
*sig
;
3883 cfg
->code_size
= 256;
3884 cfg
->native_code
= g_malloc (cfg
->code_size
);
3885 code
= (guint32
*)cfg
->native_code
;
3887 /* FIXME: Generate intermediate code instead */
3889 offset
= cfg
->stack_offset
;
3890 offset
+= (16 * sizeof (target_mgreg_t
)); /* register save area */
3892 offset
+= 4; /* struct/union return pointer */
3895 /* add parameter area size for called functions */
3896 if (cfg
->param_area
< (6 * sizeof (target_mgreg_t
)))
3897 /* Reserve space for the first 6 arguments even if it is unused */
3898 offset
+= 6 * sizeof (target_mgreg_t
);
3900 offset
+= cfg
->param_area
;
3902 /* align the stack size */
3903 offset
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
3906 * localloc'd memory is stored between the local variables (whose
3907 * size is given by cfg->stack_offset), and between the space reserved
3910 cfg
->arch
.localloc_offset
= offset
- cfg
->stack_offset
;
3912 cfg
->stack_offset
= offset
;
3914 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3915 /* Perform stack touching */
3919 if (!sparc_is_imm13 (- cfg
->stack_offset
)) {
3920 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3921 sparc_set (code
, (- cfg
->stack_offset
), GP_SCRATCH_REG
);
3922 sparc_save (code
, sparc_sp
, GP_SCRATCH_REG
, sparc_sp
);
3925 sparc_save_imm (code
, sparc_sp
, - cfg
->stack_offset
, sparc_sp
);
3928 if (strstr (cfg->method->name, "foo")) {
3929 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
3930 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_sparc_break));
3931 sparc_call_simple (code, 0);
3936 sig
= mono_method_signature_internal (method
);
3938 cinfo
= get_call_info (cfg
, sig
, FALSE
);
3940 /* Keep in sync with emit_load_volatile_arguments */
3941 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3942 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3943 gint32 stack_offset
;
3945 inst
= cfg
->args
[i
];
3947 if (sig
->hasthis
&& (i
== 0))
3948 arg_type
= mono_get_object_type ();
3950 arg_type
= sig
->params
[i
- sig
->hasthis
];
3952 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3954 /* Save the split arguments so they will reside entirely on the stack */
3955 if (ainfo
->storage
== ArgInSplitRegStack
) {
3956 /* Save the register to the stack */
3957 g_assert (inst
->opcode
== OP_REGOFFSET
);
3958 if (!sparc_is_imm13 (stack_offset
))
3960 sparc_st_imm (code
, sparc_i5
, inst
->inst_basereg
, stack_offset
);
3963 if (!v64
&& !arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
3964 /* Save the argument to a dword aligned stack location */
3966 * stack_offset contains the offset of the argument on the stack.
3967 * inst->inst_offset contains the dword aligned offset where the value
3970 if (ainfo
->storage
== ArgInIRegPair
) {
3971 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
3973 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3974 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
3977 if (ainfo
->storage
== ArgInSplitRegStack
) {
3979 g_assert_not_reached ();
3981 if (stack_offset
!= inst
->inst_offset
) {
3982 /* stack_offset is not dword aligned, so we need to make a copy */
3983 sparc_st_imm (code
, sparc_i5
, inst
->inst_basereg
, inst
->inst_offset
);
3984 sparc_ld_imm (code
, sparc_fp
, stack_offset
+ 4, sparc_o7
);
3985 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
3989 if (ainfo
->storage
== ArgOnStackPair
) {
3991 g_assert_not_reached ();
3993 if (stack_offset
!= inst
->inst_offset
) {
3994 /* stack_offset is not dword aligned, so we need to make a copy */
3995 sparc_ld_imm (code
, sparc_fp
, stack_offset
, sparc_o7
);
3996 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
);
3997 sparc_ld_imm (code
, sparc_fp
, stack_offset
+ 4, sparc_o7
);
3998 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4002 g_assert_not_reached ();
4005 if ((ainfo
->storage
== ArgInIReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4006 /* Argument in register, but need to be saved to stack */
4007 if (!sparc_is_imm13 (stack_offset
))
4009 if ((stack_offset
- ARGS_OFFSET
) & 0x1)
4010 sparc_stb_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4012 if ((stack_offset
- ARGS_OFFSET
) & 0x2)
4013 sparc_sth_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4015 if ((stack_offset
- ARGS_OFFSET
) & 0x4)
4016 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4019 sparc_stx_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4021 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4025 if ((ainfo
->storage
== ArgInIRegPair
) && (inst
->opcode
!= OP_REGVAR
)) {
4029 /* Argument in regpair, but need to be saved to stack */
4030 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
4032 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4033 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4035 else if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4036 if (!sparc_is_imm13 (stack_offset
))
4038 sparc_stf_imm (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4040 else if ((ainfo
->storage
== ArgInDoubleReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4041 /* The offset is guaranteed to be aligned by the ABI rules */
4042 sparc_stdf_imm (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4045 if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
== OP_REGVAR
)) {
4046 /* Need to move into the a double precision register */
4047 sparc_fstod (code
, ainfo
->reg
, ainfo
->reg
- 1);
4050 if ((ainfo
->storage
== ArgInSplitRegStack
) || (ainfo
->storage
== ArgOnStack
))
4051 if (inst
->opcode
== OP_REGVAR
)
4052 /* FIXME: Load the argument into memory */
4058 if (cfg
->method
->save_lmf
) {
4059 gint32 lmf_offset
= STACK_BIAS
- cfg
->arch
.lmf_offset
;
4062 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_IP
, NULL
);
4063 sparc_set_template (code
, sparc_o7
);
4064 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, ip
));
4066 sparc_sti_imm (code
, sparc_sp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, sp
));
4068 sparc_sti_imm (code
, sparc_fp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, ebp
));
4070 /* FIXME: add a relocation for this */
4071 sparc_set (code
, cfg
->method
, sparc_o7
);
4072 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, method
));
4074 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4075 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_get_lmf_addr
));
4078 code
= (guint32
*)mono_sparc_emit_save_lmf (code
, lmf_offset
);
4081 #error obsolete tracing?
4082 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4083 code
= (guint32
*)mono_arch_instrument_prolog (cfg
, MONO_JIT_ICALL_mono_trace_enter_method
, code
, TRUE
);
4085 set_code_cursor (cfg
, code
);
4087 return (guint8
*)code
;
4091 mono_arch_emit_epilog (MonoCompile
*cfg
)
4093 MonoMethod
*method
= cfg
->method
;
4096 int max_epilog_size
= 16 + 20 * 4;
4098 if (cfg
->method
->save_lmf
)
4099 max_epilog_size
+= 128;
4101 if (mono_jit_trace_calls
!= NULL
)
4102 max_epilog_size
+= 50;
4104 code
= (guint32
*)realloc_code (cfg
, max_epilog_size
);
4106 #error obsolete tracing?
4107 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4108 code
= (guint32
*)mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
4110 if (cfg
->method
->save_lmf
) {
4111 gint32 lmf_offset
= STACK_BIAS
- cfg
->arch
.lmf_offset
;
4113 code
= mono_sparc_emit_restore_lmf (code
, lmf_offset
);
4117 * The V8 ABI requires that calls to functions which return a structure
4120 if (!v64
&& mono_method_signature_internal (cfg
->method
)->pinvoke
&& MONO_TYPE_ISSTRUCT(mono_method_signature_internal (cfg
->method
)->ret
))
4121 sparc_jmpl_imm (code
, sparc_i7
, 12, sparc_g0
);
4125 /* Only fold last instruction into the restore if the exit block has an in count of 1
4126 and the previous block hasn't been optimized away since it may have an in count > 1 */
4127 if (cfg
->bb_exit
->in_count
== 1 && cfg
->bb_exit
->in_bb
[0]->native_offset
!= cfg
->bb_exit
->native_offset
)
4131 * FIXME: The last instruction might have a branch pointing into it like in
4132 * int_ceq sparc_i0 <-
4136 /* Try folding last instruction into the restore */
4137 if (can_fold
&& (sparc_inst_op (code
[-2]) == 0x2) && (sparc_inst_op3 (code
[-2]) == 0x2) && sparc_inst_imm (code
[-2]) && (sparc_inst_rd (code
[-2]) == sparc_i0
)) {
4138 /* or reg, imm, %i0 */
4139 int reg
= sparc_inst_rs1 (code
[-2]);
4140 int imm
= (((gint32
)(sparc_inst_imm13 (code
[-2]))) << 19) >> 19;
4141 code
[-2] = code
[-1];
4143 sparc_restore_imm (code
, reg
, imm
, sparc_o0
);
4146 if (can_fold
&& (sparc_inst_op (code
[-2]) == 0x2) && (sparc_inst_op3 (code
[-2]) == 0x2) && (!sparc_inst_imm (code
[-2])) && (sparc_inst_rd (code
[-2]) == sparc_i0
)) {
4147 /* or reg, reg, %i0 */
4148 int reg1
= sparc_inst_rs1 (code
[-2]);
4149 int reg2
= sparc_inst_rs2 (code
[-2]);
4150 code
[-2] = code
[-1];
4152 sparc_restore (code
, reg1
, reg2
, sparc_o0
);
4155 sparc_restore_imm (code
, sparc_g0
, 0, sparc_g0
);
4157 set_code_cursor (cfg
, code
);
4161 mono_arch_emit_exceptions (MonoCompile
*cfg
)
4163 MonoJumpInfo
*patch_info
;
4168 MonoClass
*exc_classes
[16];
4169 guint8
*exc_throw_start
[16], *exc_throw_end
[16];
4171 /* Compute needed space */
4172 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4173 if (patch_info
->type
== MONO_PATCH_INFO_EXC
)
4178 * make sure we have enough space for exceptions
4181 code_size
= exc_count
* (20 * 4);
4183 code_size
= exc_count
* 24;
4185 code
= (guint32
*)realloc_code (cfg
, code_size
);
4187 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4188 switch (patch_info
->type
) {
4189 case MONO_PATCH_INFO_EXC
: {
4190 MonoClass
*exc_class
;
4191 guint32
*buf
, *buf2
;
4192 guint32 throw_ip
, type_idx
;
4195 sparc_patch ((guint32
*)(cfg
->native_code
+ patch_info
->ip
.i
), code
);
4197 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
4198 type_idx
= m_class_get_type_token (exc_class
) - MONO_TOKEN_TYPE_DEF
;
4199 throw_ip
= patch_info
->ip
.i
;
4201 /* Find a throw sequence for the same exception class */
4202 for (i
= 0; i
< nthrows
; ++i
)
4203 if (exc_classes
[i
] == exc_class
)
4207 guint32 throw_offset
= (((guint8
*)exc_throw_end
[i
] - cfg
->native_code
) - throw_ip
) >> 2;
4208 if (!sparc_is_imm13 (throw_offset
))
4209 sparc_set32 (code
, throw_offset
, sparc_o1
);
4211 disp
= (exc_throw_start
[i
] - (guint8
*)code
) >> 2;
4212 g_assert (sparc_is_imm22 (disp
));
4213 sparc_branch (code
, 0, sparc_ba
, disp
);
4214 if (sparc_is_imm13 (throw_offset
))
4215 sparc_set32 (code
, throw_offset
, sparc_o1
);
4218 patch_info
->type
= MONO_PATCH_INFO_NONE
;
4221 /* Emit the template for setting o1 */
4223 if (sparc_is_imm13 (((((guint8
*)code
- cfg
->native_code
) - throw_ip
) >> 2) - 8))
4224 /* Can use a short form */
4227 sparc_set_template (code
, sparc_o1
);
4231 exc_classes
[nthrows
] = exc_class
;
4232 exc_throw_start
[nthrows
] = (guint8
*)code
;
4236 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4237 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_sparc_break));
4241 /* first arg = type token */
4242 /* Pass the type index to reduce the size of the sparc_set */
4243 if (!sparc_is_imm13 (type_idx
))
4244 sparc_set32 (code
, type_idx
, sparc_o0
);
4246 /* second arg = offset between the throw ip and the current ip */
4247 /* On sparc, the saved ip points to the call instruction */
4248 disp
= (((guint8
*)code
- cfg
->native_code
) - throw_ip
) >> 2;
4249 sparc_set32 (buf
, disp
, sparc_o1
);
4254 exc_throw_end
[nthrows
] = (guint8
*)code
;
4258 patch_info
->data
.jit_icall_id
= MONO_JIT_ICALL_mono_arch_throw_corlib_exception
;
4259 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
4260 patch_info
->ip
.i
= (guint8
*)code
- cfg
->native_code
;
4264 if (sparc_is_imm13 (type_idx
)) {
4265 /* Put it into the delay slot */
4268 sparc_set32 (code
, type_idx
, sparc_o0
);
4269 g_assert (code
- buf
== 1);
4279 set_code_cursor (cfg
, code
);
4282 set_code_cursor (cfg
, code
);
4285 gboolean lmf_addr_key_inited
= FALSE
;
4287 #ifdef MONO_SPARC_THR_TLS
4288 thread_key_t lmf_addr_key
;
4290 pthread_key_t lmf_addr_key
;
4294 mono_arch_get_lmf_addr (void)
4296 /* This is perf critical so we bypass the IO layer */
4297 /* The thr_... functions seem to be somewhat faster */
4298 #ifdef MONO_SPARC_THR_TLS
4300 thr_getspecific (lmf_addr_key
, &res
);
4303 return pthread_getspecific (lmf_addr_key
);
4307 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4310 * There seems to be no way to determine stack boundaries under solaris,
4311 * so it's not possible to determine whenever a SIGSEGV is caused by stack
4314 #error "--with-sigaltstack=yes not supported on solaris"
4319 mono_arch_tls_init (void)
4321 MonoJitTlsData
*jit_tls
;
4323 if (!lmf_addr_key_inited
) {
4326 lmf_addr_key_inited
= TRUE
;
4328 #ifdef MONO_SPARC_THR_TLS
4329 res
= thr_keycreate (&lmf_addr_key
, NULL
);
4331 res
= pthread_key_create (&lmf_addr_key
, NULL
);
4333 g_assert (res
== 0);
4337 jit_tls
= mono_get_jit_tls ();
4339 #ifdef MONO_SPARC_THR_TLS
4340 thr_setspecific (lmf_addr_key
, &jit_tls
->lmf
);
4342 pthread_setspecific (lmf_addr_key
, &jit_tls
->lmf
);
4347 mono_arch_finish_init (void)
4352 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4354 MonoInst
*ins
= NULL
;
4360 * mono_arch_get_argument_info:
4361 * @csig: a method signature
4362 * @param_count: the number of parameters to consider
4363 * @arg_info: an array to store the result infos
4365 * Gathers information on parameters such as size, alignment and
4366 * padding. arg_info should be large enought to hold param_count + 1 entries.
4368 * Returns the size of the activation frame.
4371 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
4377 cinfo
= get_call_info (NULL
, csig
, FALSE
);
4379 if (csig
->hasthis
) {
4380 ainfo
= &cinfo
->args
[0];
4381 arg_info
[0].offset
= ARGS_OFFSET
- MONO_SPARC_STACK_BIAS
+ ainfo
->offset
;
4384 for (k
= 0; k
< param_count
; k
++) {
4385 ainfo
= &cinfo
->args
[k
+ csig
->hasthis
];
4387 arg_info
[k
+ 1].offset
= ARGS_OFFSET
- MONO_SPARC_STACK_BIAS
+ ainfo
->offset
;
4388 arg_info
[k
+ 1].size
= mono_type_size (csig
->params
[k
], &align
);
4397 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
4399 /* FIXME: implement */
4400 g_assert_not_reached ();
4404 mono_arch_opcode_supported (int opcode
)
4410 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
4416 mono_arch_load_function (MonoJitICallId jit_icall_id
)