 * mini-sparc.c: Sparc backend for the Mono code generator
 *
 * Paolo Molaro (lupus@ximian.com)
 * Dietmar Maurer (dietmar@ximian.com)
 * Christopher Taylor (ct@gentoo.org)
 * Mark Crichton (crichton@gimp.org)
 * Zoltan Varga (vargaz@freemail.hu)
 *
 * (C) 2003 Ximian, Inc.
#include <sys/systeminfo.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-math.h>

#include "mini-sparc.h"
#include "cpu-sparc.h"
#include "jit-icalls.h"
 * Sparc V9 means two things:
 * - the instruction set
 * - the ABI
 *
 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
 * processors in use are 64 bit processors. The V9 ABI is only usable if the
 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
 * instructions without using the 64 bit ABI.
 * - %i0..%i<n> hold the incoming arguments; these are never written by JITted
 *   code. Unused input registers are used for global register allocation.
 * - %o0..%o5 and %l7 are used for local register allocation and for passing arguments
 * - %l0..%l6 are used for global register allocation
 * - %o7 and %g1 are used as scratch registers in opcodes
 * - all floating point registers are used for local register allocation except %f0.
 *   Only double precision registers are used.
 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
 *   used for local allocation.
 *
 * - doubles and longs must be stored in dword aligned locations
 * The following things are not implemented or do not work:
 * - some fp arithmetic corner cases
 * The following tests in mono/mini are expected to fail:
 * - test_0_simple_double_casts
 *   This test casts (guint64)-1 to double and then back to guint64 again.
 *   Under x86, it returns 0, while under sparc it returns -1.
 *
 * In addition to this, the runtime requires the trunc function, or its
 * solaris counterpart, aintl, to do some double->int conversions. If this
 * function is not available, it is emulated somewhat, but the results can be
 * strange.
 *
 * - optimize sparc_set according to the memory model
 * - when non-AOT compiling, compute patch targets immediately so we don't
 *   have to emit the 6 byte template.
 * - struct arguments/returns
 * - sparc_call_simple can't be used in a lot of places since the displacement
 *   might not fit into an imm30.
 * - g1 can't be used in a lot of places since it is used as a scratch reg in
 *   sparc_set.
 * - sparc_f0 can't be used as a scratch register on V9
 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
 * - ldind.i4/u4 needs to sign extend/clear out the upper word -> slows things down
 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
 *   be a double precision register which has no single precision part.
 * - passing/returning structs is hard to implement, because:
 *   - the spec is very hard to understand
 *   - it requires knowledge about the fields of the structure, needs to handle
 *     nested structures etc.
 * Possible optimizations:
 * - delay slot scheduling
 * - allocate large constants to registers
 * - add more mul/div/rem optimizations
 */
#define MONO_SPARC_THR_TLS 1

/*
 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
 * causing infinite loops in dominator computation. So glib-2.4 is required.
 */
#if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
#error "glib 2.4 or later is required for 64 bit mode."
#endif

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
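/*
 * Illustrative note (not in the original source): ALIGN_TO rounds a value up
 * to the next multiple of a power-of-two alignment, e.g.
 *   ALIGN_TO (13, 8) == 16,  ALIGN_TO (16, 8) == 16,  ALIGN_TO (7, 4) == 8.
 * It is used below when stack slots and valuetype arguments need dword
 * alignment.
 */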
#define SIGNAL_STACK_SIZE (64 * 1024)

#define STACK_BIAS MONO_SPARC_STACK_BIAS
#ifdef SPARCV9

/* %g1 is used by sparc_set */
#define GP_SCRATCH_REG sparc_g4
/* %f0 is used for parameter passing */
#define FP_SCRATCH_REG sparc_f30
#define ARGS_OFFSET (STACK_BIAS + 128)

#else

#define FP_SCRATCH_REG sparc_f0
#define ARGS_OFFSET 68
#define GP_SCRATCH_REG sparc_g1

#endif
/* Whether the CPU supports v9 instructions */
static gboolean sparcv9 = FALSE;

/* Whether this is a 64 bit executable */
#ifdef SPARCV9
static gboolean v64 = TRUE;
#else
static gboolean v64 = FALSE;
#endif
static gpointer mono_arch_get_lmf_addr (void);
const char*
mono_arch_regname (int reg) {
	static const char * rnames [] = {
		"sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
		"sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
		"sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
		"sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
		"sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
		"sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
		"sparc_fp", "sparc_retadr"
	};

	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
const char*
mono_arch_fregname (int reg) {
	static const char *rnames [] = {
		"sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
		"sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
		"sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
		"sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
		"sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
		"sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
		"sparc_f30", "sparc_f31"
	};

	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
	guint32 dummy;

	/* make sure sparcv9 is initialized for embedded use */
	mono_arch_cpu_optimizazions (&dummy);
}
/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)

/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
	char buf [1024];
	guint32 opts = 0;

	if (!sysinfo (SI_ISALIST, buf, 1024))
		g_assert_not_reached ();

	/* From glibc. If the getpagesize is 8192, we're on sparc64, which
	 * (in)directly implies that we're a v9 or better.
	 * Improvements to this are greatly accepted...
	 * Also, we don't differentiate between v7 and v8. I sense SIGILL
	 * sniffing in my future.
	 */
	if (getpagesize () == 8192)
		strcpy (buf, "sparcv9");
	else
		strcpy (buf, "sparcv8");

	/*
	 * On some processors, the cmov instructions are even slower than the
	 */
	if (strstr (buf, "sparcv9")) {
		opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
	}
	else
		*exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;

	return opts;
}
#ifdef __GNUC__
#define flushi(addr)    __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
#else /* assume Sun's compiler */
static void flushi(void *addr);
#endif

void sync_instruction_memory(caddr_t addr, int len);
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#ifndef __linux__
	/* Hopefully this is optimized based on the actual CPU */
	sync_instruction_memory (code, size);
#else
	gulong start = (gulong) code;
	gulong end = start + size;
	gulong align;

	/* Sparcv9 chips only need flushes on 32 byte
	 * cacheline boundaries.
	 *
	 * Sparcv8 needs a flush every 8 bytes.
	 */
	align = (sparcv9 ? 32 : 8);

	start &= ~(align - 1);
	end = (end + (align - 1)) & ~(align - 1);

	while (start < end) {
		__asm__ __volatile__ ("iflush %0"::"r"(start));
		start += align;
	}
#endif
}
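/*
 * Worked example (not part of the original source): flushing 10 bytes of code
 * at address 0x1004 on a v9 chip (align == 32) rounds start down to 0x1000 and
 * end up to 0x1020, so a single iflush covers the whole range; on v8
 * (align == 8) the same range needs two iflush instructions, at 0x1000 and
 * 0x1008.
 */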
/*
 * Flush all register windows to memory. Every register window is saved to
 * a 16 word area on the stack pointed to by its %sp register.
 */
void
mono_sparc_flushw (void)
{
	static guint32 start [64];
	static int inited = 0;
	static void (*flushw) (void);

	sparc_save_imm (code, sparc_sp, -160, sparc_sp);
	sparc_restore_simple (code);

	g_assert ((code - start) < 64);

	mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);

	flushw = (gpointer)start;
void
mono_arch_flush_register_windows (void)
{
	mono_sparc_flushw ();
}
gboolean
mono_arch_is_inst_imm (gint64 imm)
{
	return sparc_is_imm13 (imm);
}
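/*
 * Note (not part of the original source): sparc_is_imm13 checks whether a
 * constant fits into the signed 13 bit immediate field of SPARC ALU and
 * load/store instructions, i.e. roughly -4096 <= imm <= 4095. Anything larger
 * must first be materialized into a scratch register with sparc_set
 * (sethi + or), which is why %o7/%g1 show up as scratch registers below.
 */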
gboolean
mono_sparc_is_v9 (void) {
	return sparcv9;
}

gboolean
mono_sparc_is_sparc64 (void) {
	return v64;
}
	ArgInFloatReg,   /* V9 only */
	ArgInDoubleReg   /* V9 only */

	/* This needs to be offset by %i0 or %o0 depending on caller/callee */
	guint32 vt_offset; /* for valuetypes */
static void
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
{
	ainfo->offset = *stack_size;

	if (!pair) {
		if (*gr >= PARAM_REGS) {
			ainfo->storage = ArgOnStack;
		}
		else {
			ainfo->storage = ArgInIReg;
		}

		/* Always reserve stack space for parameters passed in registers */
		(*stack_size) += sizeof (gpointer);
	}
	else {
		if (*gr < PARAM_REGS - 1) {
			/* A pair of registers */
			ainfo->storage = ArgInIRegPair;
		}
		else if (*gr >= PARAM_REGS) {
			/* A pair of stack locations */
			ainfo->storage = ArgOnStackPair;
		}
		else {
			ainfo->storage = ArgInSplitRegStack;
		}

		(*stack_size) += 2 * sizeof (gpointer);
	}
}
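/*
 * Illustrative example (not from the original source): for a call such as
 * f (int a, long long b, int c) under the V8 rules modelled above, 'a' lands
 * in %o0 (ArgInIReg), 'b' occupies the %o1/%o2 pair (ArgInIRegPair) and 'c'
 * goes to %o3; with enough earlier arguments a long long can end up split
 * between %o5 and the first stack slot (ArgInSplitRegStack) or entirely on
 * the stack (ArgOnStackPair). Stack space is reserved even for register
 * arguments so the callee can flush them to the caller's frame.
 */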
#define FLOAT_PARAM_REGS 32

static void
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
{
	ainfo->offset = *stack_size;

	if (single) {
		if (*gr >= FLOAT_PARAM_REGS) {
			ainfo->storage = ArgOnStack;
		}
		else {
			/* A single is passed in an even numbered fp register */
			ainfo->storage = ArgInFloatReg;
			ainfo->reg = *gr + 1;
		}
	}
	else {
		if (*gr < FLOAT_PARAM_REGS) {
			/* A double register */
			ainfo->storage = ArgInDoubleReg;
		}
		else {
			ainfo->storage = ArgOnStack;
		}
	}

	(*stack_size) += sizeof (gpointer);
}
/*
 * Obtain information about a call according to the calling convention.
 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
 * document for more information.
 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
 * the 'Sparc Compliance Definition 2.4' document.
 */
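/*
 * Reader's summary (not part of the original source): on V8 the first six
 * argument words travel in %o0..%o5 (the callee sees them as %i0..%i5),
 * longs and doubles take two consecutive words, small integers are passed in
 * the least significant part of a full word, and a struct return value is
 * handled through a hidden address stored at %sp + 64. get_call_info () below
 * records one ArgInfo per argument describing exactly which of these cases
 * applies.
 */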
static CallInfo*
get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
{
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;

	cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
		/* The address of the return value is passed in %o0 */
		add_general (&gr, &stack_size, &cinfo->ret, FALSE);
		cinfo->ret.reg += sparc_i0;
	}

	add_general (&gr, &stack_size, cinfo->args + 0, FALSE);

	if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
	}

	for (i = 0; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
		}

		DEBUG(printf("param %d: ", i));
		if (sig->params [i]->byref) {
			DEBUG(printf("byref\n"));
			add_general (&gr, &stack_size, ainfo, FALSE);
		}

		ptype = mono_type_get_underlying_type (sig->params [i]);
		ptype = mini_get_basic_type_from_generic (gsctx, ptype);
		switch (ptype->type) {
		case MONO_TYPE_BOOLEAN:
			add_general (&gr, &stack_size, ainfo, FALSE);
			/* the value is in the ls byte */
			ainfo->offset += sizeof (gpointer) - 1;

			add_general (&gr, &stack_size, ainfo, FALSE);
			/* the value is in the ls word */
			ainfo->offset += sizeof (gpointer) - 2;

			add_general (&gr, &stack_size, ainfo, FALSE);
			/* the value is in the ls dword */
			ainfo->offset += sizeof (gpointer) - 4;

		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			add_general (&gr, &stack_size, ainfo, FALSE);

		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ptype)) {
				add_general (&gr, &stack_size, ainfo, FALSE);
			}

		case MONO_TYPE_VALUETYPE:
			add_general (&gr, &stack_size, ainfo, FALSE);

		case MONO_TYPE_TYPEDBYREF:
			add_general (&gr, &stack_size, ainfo, FALSE);

			add_general (&gr, &stack_size, ainfo, FALSE);

			add_general (&gr, &stack_size, ainfo, TRUE);

			add_float (&fr, &stack_size, ainfo, TRUE);

			/* single precision values are passed in integer registers */
			add_general (&gr, &stack_size, ainfo, FALSE);

			add_float (&fr, &stack_size, ainfo, FALSE);

			/* double precision values are passed in a pair of registers */
			add_general (&gr, &stack_size, ainfo, TRUE);

			g_assert_not_reached ();
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
	}

	ret_type = mono_type_get_underlying_type (sig->ret);
	ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
	switch (ret_type->type) {
	case MONO_TYPE_BOOLEAN:

	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_STRING:
		cinfo->ret.storage = ArgInIReg;
		cinfo->ret.reg = sparc_i0;

		cinfo->ret.storage = ArgInIReg;
		cinfo->ret.reg = sparc_i0;

		cinfo->ret.storage = ArgInIRegPair;
		cinfo->ret.reg = sparc_i0;

		cinfo->ret.storage = ArgInFReg;
		cinfo->ret.reg = sparc_f0;

	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (ret_type)) {
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = sparc_i0;
		}

	case MONO_TYPE_VALUETYPE:
		cinfo->ret.storage = ArgOnStack;

	case MONO_TYPE_TYPEDBYREF:
		/* Same as a valuetype with size 24 */
		cinfo->ret.storage = ArgOnStack;

		g_error ("Can't handle as return value 0x%x", sig->ret->type);
	}

	cinfo->stack_usage = stack_size;
	cinfo->reg_usage = gr;

	return cinfo;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	/*
	 * FIXME: If an argument is allocated to a register, then load it from the
	 * stack in the prolog.
	 */
	for (i = 0; i < cfg->num_varinfo; i ++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		/* FIXME: Make arguments on stack allocatable to registers */
		if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);

			vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
		}
	}

	return vars;
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;
	int i;
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, sig, FALSE);

	/* Use unused input registers */
	for (i = cinfo->reg_usage; i < 6; ++i)
		regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));

	/* Use %l0..%l6 as global registers */
	for (i = sparc_l0; i < sparc_l7; ++i)
		regs = g_list_prepend (regs, GUINT_TO_POINTER (i));

	g_free (cinfo);

	return regs;
}
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)

/*
 * Set var information according to the calling convention. sparc version.
 * The locals var stuff should most likely be split in another method.
 */
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i, offset, size, align, curinst;

	header = mono_method_get_header (cfg->method);

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, sig, FALSE);
807 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
808 switch (cinfo
->ret
.storage
) {
811 cfg
->ret
->opcode
= OP_REGVAR
;
812 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
814 case ArgInIRegPair
: {
815 MonoType
*t
= mono_type_get_underlying_type (sig
->ret
);
816 if (((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
817 MonoInst
*low
= get_vreg_to_inst (cfg
, cfg
->ret
->dreg
+ 1);
818 MonoInst
*high
= get_vreg_to_inst (cfg
, cfg
->ret
->dreg
+ 2);
820 low
->opcode
= OP_REGVAR
;
821 low
->dreg
= cinfo
->ret
.reg
+ 1;
822 high
->opcode
= OP_REGVAR
;
823 high
->dreg
= cinfo
->ret
.reg
;
825 cfg
->ret
->opcode
= OP_REGVAR
;
826 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
831 g_assert_not_reached ();
834 cfg
->vret_addr
->opcode
= OP_REGOFFSET
;
835 cfg
->vret_addr
->inst_basereg
= sparc_fp
;
836 cfg
->vret_addr
->inst_offset
= 64;
842 cfg
->ret
->dreg
= cfg
->ret
->inst_c0
;
	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are never returned in registers on V9.
	 * FIXME: Use something more optimized.
	 */

	/* Locals are allocated backwards from %fp */
	cfg->frame_reg = sparc_fp;
856 * Reserve a stack slot for holding information used during exception
859 if (header
->num_clauses
)
860 offset
+= sizeof (gpointer
) * 2;
862 if (cfg
->method
->save_lmf
) {
863 offset
+= sizeof (MonoLMF
);
864 cfg
->arch
.lmf_offset
= offset
;
867 curinst
= cfg
->locals_start
;
868 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
869 inst
= cfg
->varinfo
[i
];
871 if ((inst
->opcode
== OP_REGVAR
) || (inst
->opcode
== OP_REGOFFSET
)) {
872 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
876 if (inst
->flags
& MONO_INST_IS_DEAD
)
		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structures */
881 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (inst
->inst_vtype
) && inst
->inst_vtype
->type
!= MONO_TYPE_TYPEDBYREF
)
882 size
= mono_class_native_size (mono_class_from_mono_type (inst
->inst_vtype
), &align
);
884 size
= mini_type_stack_size (cfg
->generic_sharing_context
, inst
->inst_vtype
, &align
);
		/*
		 * This is needed since structures containing doubles must be doubleword
		 * aligned.
		 * FIXME: Do this only if needed.
		 */
		if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))

		/*
		 * variables are accessed as negative offsets from %fp, so increase
		 * the offset before assigning it to a variable.
		 */
901 offset
&= ~(align
- 1);
902 inst
->opcode
= OP_REGOFFSET
;
903 inst
->inst_basereg
= sparc_fp
;
904 inst
->inst_offset
= STACK_BIAS
+ -offset
;
906 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
909 if (sig
->call_convention
== MONO_CALL_VARARG
) {
910 cfg
->sig_cookie
= cinfo
->sig_cookie
.offset
+ ARGS_OFFSET
;
913 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
914 inst
= cfg
->args
[i
];
915 if (inst
->opcode
!= OP_REGVAR
) {
916 ArgInfo
*ainfo
= &cinfo
->args
[i
];
917 gboolean inreg
= TRUE
;
921 if (sig
->hasthis
&& (i
== 0))
922 arg_type
= &mono_defaults
.object_class
->byval_arg
;
924 arg_type
= sig
->params
[i
- sig
->hasthis
];
927 if (!arg_type
->byref
&& ((arg_type
->type
== MONO_TYPE_R4
)
928 || (arg_type
->type
== MONO_TYPE_R8
)))
930 * Since float arguments are passed in integer registers, we need to
931 * save them to the stack in the prolog.
936 /* FIXME: Allocate volatile arguments to registers */
937 /* FIXME: This makes the argument holding a vtype address into volatile */
938 if (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))
941 if (MONO_TYPE_ISSTRUCT (arg_type
))
942 /* FIXME: this isn't needed */
945 inst
->opcode
= OP_REGOFFSET
;
948 storage
= ArgOnStack
;
950 storage
= ainfo
->storage
;
954 inst
->opcode
= OP_REGVAR
;
955 inst
->dreg
= sparc_i0
+ ainfo
->reg
;
958 if (inst
->type
== STACK_I8
) {
959 MonoInst
*low
= get_vreg_to_inst (cfg
, inst
->dreg
+ 1);
960 MonoInst
*high
= get_vreg_to_inst (cfg
, inst
->dreg
+ 2);
962 low
->opcode
= OP_REGVAR
;
963 low
->dreg
= sparc_i0
+ ainfo
->reg
+ 1;
964 high
->opcode
= OP_REGVAR
;
965 high
->dreg
= sparc_i0
+ ainfo
->reg
;
967 inst
->opcode
= OP_REGVAR
;
968 inst
->dreg
= sparc_i0
+ ainfo
->reg
;
973 * Since float regs are volatile, we save the arguments to
974 * the stack in the prolog.
975 * FIXME: Avoid this if the method contains no calls.
979 case ArgInSplitRegStack
:
980 /* Split arguments are saved to the stack in the prolog */
981 inst
->opcode
= OP_REGOFFSET
;
982 /* in parent frame */
983 inst
->inst_basereg
= sparc_fp
;
984 inst
->inst_offset
= ainfo
->offset
+ ARGS_OFFSET
;
986 if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
988 * It is very hard to load doubles from non-doubleword aligned
989 * memory locations. So if the offset is misaligned, we copy the
990 * argument to a stack location in the prolog.
992 if ((inst
->inst_offset
- STACK_BIAS
) % 8) {
993 inst
->inst_basereg
= sparc_fp
;
997 offset
&= ~(align
- 1);
998 inst
->inst_offset
= STACK_BIAS
+ -offset
;
1007 if (MONO_TYPE_ISSTRUCT (arg_type
)) {
1008 /* Add a level of indirection */
1010 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1011 * are destructively modified in a lot of places in inssel.brg.
1014 MONO_INST_NEW (cfg
, indir
, 0);
1016 inst
->opcode
= OP_VTARG_ADDR
;
1017 inst
->inst_left
= indir
;
1023 * spillvars are stored between the normal locals and the storage reserved
1027 cfg
->stack_offset
= offset
;
1033 mono_arch_create_vars (MonoCompile
*cfg
)
1035 MonoMethodSignature
*sig
;
1037 sig
= mono_method_signature (cfg
->method
);
1039 if (MONO_TYPE_ISSTRUCT ((sig
->ret
))) {
1040 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1041 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1042 printf ("vret_addr = ");
1043 mono_print_ins (cfg
->vret_addr
);
1047 if (!sig
->ret
->byref
&& (sig
->ret
->type
== MONO_TYPE_I8
|| sig
->ret
->type
== MONO_TYPE_U8
)) {
1048 MonoInst
*low
= get_vreg_to_inst (cfg
, cfg
->ret
->dreg
+ 1);
1049 MonoInst
*high
= get_vreg_to_inst (cfg
, cfg
->ret
->dreg
+ 2);
1051 low
->flags
|= MONO_INST_VOLATILE
;
1052 high
->flags
|= MONO_INST_VOLATILE
;
1055 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1056 cfg
->arch
.float_spill_slot
= mono_compile_create_var (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_ARG
);
1057 ((MonoInst
*)cfg
->arch
.float_spill_slot
)->flags
|= MONO_INST_VOLATILE
;
1061 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, guint32 sreg
)
1065 MONO_INST_NEW (cfg
, arg
, 0);
1071 arg
->opcode
= OP_MOVE
;
1072 arg
->dreg
= mono_alloc_ireg (cfg
);
1074 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, FALSE
);
1077 arg
->opcode
= OP_FMOVE
;
1078 arg
->dreg
= mono_alloc_freg (cfg
);
1080 mono_call_inst_add_outarg_reg (cfg
, call
, arg
->dreg
, reg
, TRUE
);
1083 g_assert_not_reached ();
1086 MONO_ADD_INS (cfg
->cbb
, arg
);
1090 add_outarg_load (MonoCompile
*cfg
, MonoCallInst
*call
, int opcode
, int basereg
, int offset
, int reg
)
1092 int dreg
= mono_alloc_ireg (cfg
);
1094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, sparc_sp
, offset
);
1096 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, reg
, FALSE
);
1100 emit_pass_long (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1102 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1104 switch (ainfo
->storage
) {
1106 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
+ 1, in
->dreg
+ 1);
1107 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, in
->dreg
+ 2);
1109 case ArgOnStackPair
:
1110 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
+ 2);
1111 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
+ 4, in
->dreg
+ 1);
1113 case ArgInSplitRegStack
:
1114 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, in
->dreg
+ 2);
1115 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, offset
+ 4, in
->dreg
+ 1);
1118 g_assert_not_reached ();
1123 emit_pass_double (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1125 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1127 switch (ainfo
->storage
) {
1129 /* floating-point <-> integer transfer must go through memory */
1130 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1132 /* Load into a register pair */
1133 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1134 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
+ 4, sparc_o0
+ ainfo
->reg
+ 1);
1136 case ArgOnStackPair
:
1137 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1139 case ArgInSplitRegStack
:
1140 /* floating-point <-> integer transfer must go through memory */
1141 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1142 /* Load most significant word into register */
1143 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1146 g_assert_not_reached ();
1151 emit_pass_float (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoInst
*in
)
1153 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1155 switch (ainfo
->storage
) {
1157 /* floating-point <-> integer transfer must go through memory */
1158 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1159 add_outarg_load (cfg
, call
, OP_LOADI4_MEMBASE
, sparc_sp
, offset
, sparc_o0
+ ainfo
->reg
);
1162 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, sparc_sp
, offset
, in
->dreg
);
1165 g_assert_not_reached ();
1170 emit_pass_other (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
);
1173 emit_pass_vtype (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
, gboolean pinvoke
)
1176 guint32 align
, offset
, pad
, size
;
1178 if (arg_type
->type
== MONO_TYPE_TYPEDBYREF
) {
1179 size
= sizeof (MonoTypedRef
);
1180 align
= sizeof (gpointer
);
1183 size
= mono_type_native_stack_size (&in
->klass
->byval_arg
, &align
);
1186 * Other backends use mono_type_stack_size (), but that
1187 * aligns the size to 8, which is larger than the size of
1188 * the source, leading to reads of invalid memory if the
1189 * source is at the end of address space.
1191 size
= mono_class_value_size (in
->klass
, &align
);
1194 /* The first 6 argument locations are reserved */
1195 if (cinfo
->stack_usage
< 6 * sizeof (gpointer
))
1196 cinfo
->stack_usage
= 6 * sizeof (gpointer
);
1198 offset
= ALIGN_TO ((ARGS_OFFSET
- STACK_BIAS
) + cinfo
->stack_usage
, align
);
1199 pad
= offset
- ((ARGS_OFFSET
- STACK_BIAS
) + cinfo
->stack_usage
);
1201 cinfo
->stack_usage
+= size
;
1202 cinfo
->stack_usage
+= pad
;
1205 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1206 * use the normal OUTARG opcodes to pass the address of the location to
1210 MONO_INST_NEW (cfg
, arg
, OP_OUTARG_VT
);
1211 arg
->sreg1
= in
->dreg
;
1212 arg
->klass
= in
->klass
;
1213 arg
->backend
.size
= size
;
1214 arg
->inst_p0
= call
;
1215 arg
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1216 memcpy (arg
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1217 ((ArgInfo
*)(arg
->inst_p1
))->offset
= STACK_BIAS
+ offset
;
1218 MONO_ADD_INS (cfg
->cbb
, arg
);
1220 MONO_INST_NEW (cfg
, arg
, OP_ADD_IMM
);
1221 arg
->dreg
= mono_alloc_preg (cfg
);
1222 arg
->sreg1
= sparc_sp
;
1223 arg
->inst_imm
= STACK_BIAS
+ offset
;
1224 MONO_ADD_INS (cfg
->cbb
, arg
);
1226 emit_pass_other (cfg
, call
, ainfo
, NULL
, arg
);
1231 emit_pass_other (MonoCompile
*cfg
, MonoCallInst
*call
, ArgInfo
*ainfo
, MonoType
*arg_type
, MonoInst
*in
)
1233 int offset
= ARGS_OFFSET
+ ainfo
->offset
;
1236 switch (ainfo
->storage
) {
1238 add_outarg_reg (cfg
, call
, ArgInIReg
, sparc_o0
+ ainfo
->reg
, in
->dreg
);
1245 opcode
= OP_STOREI1_MEMBASE_REG
;
1246 else if (offset
& 0x2)
1247 opcode
= OP_STOREI2_MEMBASE_REG
;
1249 opcode
= OP_STOREI4_MEMBASE_REG
;
1250 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, opcode
, sparc_sp
, offset
, in
->dreg
);
1254 g_assert_not_reached ();
1259 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1261 MonoMethodSignature
*tmp_sig
;
1264 * mono_ArgIterator_Setup assumes the signature cookie is
1265 * passed first and all the arguments which were before it are
1266 * passed on the stack after the signature. So compensate by
1267 * passing a different signature.
1269 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
1270 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
1271 tmp_sig
->sentinelpos
= 0;
1272 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
1274 /* FIXME: Add support for signature tokens to AOT */
1275 cfg
->disable_aot
= TRUE
;
	/* We always pass the signature on the stack for simplicity */
1277 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sparc_sp
, ARGS_OFFSET
+ cinfo
->sig_cookie
.offset
, tmp_sig
);
1281 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1284 MonoMethodSignature
*sig
;
1288 guint32 extra_space
= 0;
1290 sig
= call
->signature
;
1291 n
= sig
->param_count
+ sig
->hasthis
;
1293 cinfo
= get_call_info (cfg
, sig
, sig
->pinvoke
);
1295 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1296 /* Set the 'struct/union return pointer' location on the stack */
1297 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, sparc_sp
, 64, call
->vret_var
->dreg
);
1300 for (i
= 0; i
< n
; ++i
) {
1303 ainfo
= cinfo
->args
+ i
;
1305 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1306 /* Emit the signature cookie just before the first implicit argument */
1307 emit_sig_cookie (cfg
, call
, cinfo
);
1310 in
= call
->args
[i
];
1312 if (sig
->hasthis
&& (i
== 0))
1313 arg_type
= &mono_defaults
.object_class
->byval_arg
;
1315 arg_type
= sig
->params
[i
- sig
->hasthis
];
1317 arg_type
= mono_type_get_underlying_type (arg_type
);
1318 if ((i
>= sig
->hasthis
) && (MONO_TYPE_ISSTRUCT(sig
->params
[i
- sig
->hasthis
])))
1319 emit_pass_vtype (cfg
, call
, cinfo
, ainfo
, arg_type
, in
, sig
->pinvoke
);
1320 else if (!arg_type
->byref
&& ((arg_type
->type
== MONO_TYPE_I8
) || (arg_type
->type
== MONO_TYPE_U8
)))
1321 emit_pass_long (cfg
, call
, ainfo
, in
);
1322 else if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
))
1323 emit_pass_double (cfg
, call
, ainfo
, in
);
1324 else if (!arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R4
))
1325 emit_pass_float (cfg
, call
, ainfo
, in
);
1327 emit_pass_other (cfg
, call
, ainfo
, arg_type
, in
);
1330 /* Handle the case where there are no implicit arguments */
1331 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
)) {
1332 emit_sig_cookie (cfg
, call
, cinfo
);
1335 call
->stack_usage
= cinfo
->stack_usage
+ extra_space
;
1341 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1343 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
1344 int size
= ins
->backend
.size
;
1346 mini_emit_memcpy (cfg
, sparc_sp
, ainfo
->offset
, src
->dreg
, 0, size
, 0);
1350 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1352 CallInfo
*cinfo
= get_call_info (cfg
, mono_method_signature (method
), FALSE
);
1353 MonoType
*ret
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
);
1355 switch (cinfo
->ret
.storage
) {
1357 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1360 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1361 MONO_EMIT_NEW_UNALU (cfg
, OP_LMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1363 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
+ 2, val
->dreg
+ 2);
1364 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
+ 1, val
->dreg
+ 1);
1368 if (ret
->type
== MONO_TYPE_R4
)
1369 MONO_EMIT_NEW_UNALU (cfg
, OP_SETFRET
, cfg
->ret
->dreg
, val
->dreg
);
1371 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1374 g_assert_not_reached ();
1380 int cond_to_sparc_cond
[][3] = {
1381 {sparc_be
, sparc_be
, sparc_fbe
},
1382 {sparc_bne
, sparc_bne
, 0},
1383 {sparc_ble
, sparc_ble
, sparc_fble
},
1384 {sparc_bge
, sparc_bge
, sparc_fbge
},
1385 {sparc_bl
, sparc_bl
, sparc_fbl
},
1386 {sparc_bg
, sparc_bg
, sparc_fbg
},
1387 {sparc_bleu
, sparc_bleu
, 0},
1388 {sparc_beu
, sparc_beu
, 0},
1389 {sparc_blu
, sparc_blu
, sparc_fbl
},
1390 {sparc_bgu
, sparc_bgu
, sparc_fbg
}
1393 /* Map opcode to the sparc condition codes */
1394 static inline SparcCond
1395 opcode_to_sparc_cond (int opcode
)
1401 case OP_COND_EXC_OV
:
1402 case OP_COND_EXC_IOV
:
1405 case OP_COND_EXC_IC
:
1407 case OP_COND_EXC_NO
:
1408 case OP_COND_EXC_NC
:
1411 rel
= mono_opcode_to_cond (opcode
);
1412 t
= mono_opcode_to_type (opcode
, -1);
1414 return cond_to_sparc_cond
[rel
][t
];
#define COMPUTE_DISP(ins) \
	if (ins->inst_true_bb->native_offset) \
		disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
	else { \
		disp = 0; \
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	}

#ifdef SPARCV9
#define DEFAULT_ICC sparc_xcc_short
#else
#define DEFAULT_ICC sparc_icc_short
#endif

#ifdef SPARCV9
#define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
do { \
	gint32 disp; \
	guint32 predict; \
	COMPUTE_DISP(ins); \
	predict = (disp != 0) ? 1 : 0; \
	g_assert (sparc_is_imm19 (disp)); \
	sparc_branchp (code, (annul), cond, icc, (predict), disp); \
	if (filldelay) sparc_nop (code); \
} while (0)

#define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
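/*
 * Note (not part of the original source): COMPUTE_DISP produces a word
 * displacement (byte distance >> 2, since every SPARC instruction is 4 bytes).
 * A branch to code 200 bytes ahead therefore encodes disp == 50, which easily
 * fits the 19 bit field checked by sparc_is_imm19; a zero disp means the
 * target basic block has not been emitted yet and a patch entry was recorded
 * instead.
 */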
#define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
do { \
	gint32 disp; \
	guint32 predict; \
	COMPUTE_DISP(ins); \
	predict = (disp != 0) ? 1 : 0; \
	g_assert (sparc_is_imm19 (disp)); \
	sparc_fbranch (code, (annul), cond, disp); \
	if (filldelay) sparc_nop (code); \
} while (0)

#else /* !SPARCV9 */

#define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()

#define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
do { \
	gint32 disp; \
	COMPUTE_DISP(ins); \
	g_assert (sparc_is_imm22 (disp)); \
	sparc_ ## bop (code, (annul), cond, disp); \
	if (filldelay) sparc_nop (code); \
} while (0)

#define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
#define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)

#endif

#define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
do { \
	gint32 disp; \
	guint32 predict; \
	COMPUTE_DISP(ins); \
	predict = (disp != 0) ? 1 : 0; \
	g_assert (sparc_is_imm19 (disp)); \
	sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
	if (filldelay) sparc_nop (code); \
} while (0)

#define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
do { \
	gint32 disp; \
	COMPUTE_DISP(ins); \
	g_assert (sparc_is_imm22 (disp)); \
	sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
	if (filldelay) sparc_nop (code); \
} while (0)
/* emit an exception if the condition fails */
/*
 * We put the exception throwing code out-of-line, at the end of the method.
 */
#define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
		mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
				     MONO_PATCH_INFO_EXC, sexc_name); \
		if (sparcv9 && ((icc) != sparc_icc_short)) { \
			sparc_branchp (code, 0, (cond), (icc), 0, 0); \
		} \
		else \
			sparc_branch (code, 0, cond, 0); \
		if (filldelay) sparc_nop (code); \
} while (0)

#define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
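/*
 * Note (not part of the original source): these macros emit a conditional
 * branch whose target is left at 0 and recorded with MONO_PATCH_INFO_EXC, so
 * the branch is later patched to point at the out-of-line exception throwing
 * code appended at the end of the method, e.g. an overflow check only takes
 * the branch to the "OverflowException" throw sequence on the exceptional
 * path.
 */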
#define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
		mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
				     MONO_PATCH_INFO_EXC, sexc_name); \
		sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
} while (0)

#define EMIT_ALU_IMM(ins,op,setcc) do { \
		if (sparc_is_imm13 ((ins)->inst_imm)) \
			sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
		else { \
			sparc_set (code, ins->inst_imm, sparc_o7); \
			sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
		} \
} while (0)

#define EMIT_LOAD_MEMBASE(ins,op) do { \
		if (sparc_is_imm13 (ins->inst_offset)) \
			sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
		else { \
			sparc_set (code, ins->inst_offset, sparc_o7); \
			sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
		} \
} while (0)

#define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
		guint32 sreg; \
		if (ins->inst_imm == 0) \
			sreg = sparc_g0; \
		else { \
			sparc_set (code, ins->inst_imm, sparc_o7); \
			sreg = sparc_o7; \
		} \
		if (!sparc_is_imm13 (ins->inst_offset)) { \
			sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
			sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
		} \
		else \
			sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
} while (0)

#define EMIT_STORE_MEMBASE_REG(ins,op) do { \
		if (!sparc_is_imm13 (ins->inst_offset)) { \
			sparc_set (code, ins->inst_offset, sparc_o7); \
			sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
		} \
		else \
			sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
} while (0)

#define EMIT_CALL() do { \
		if (v64) { \
			sparc_set_template (code, sparc_o7); \
			sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
		} \
		else \
			sparc_call_simple (code, 0); \
} while (0)
1572 * A call template is 7 instructions long, so we want to avoid it if possible.
1575 emit_call (MonoCompile
*cfg
, guint32
*code
, guint32 patch_type
, gconstpointer data
)
1579 /* FIXME: This only works if the target method is already compiled */
1580 if (0 && v64
&& !cfg
->compile_aot
) {
1581 MonoJumpInfo patch_info
;
1583 patch_info
.type
= patch_type
;
1584 patch_info
.data
.target
= data
;
1586 target
= mono_resolve_patch_target (cfg
->method
, cfg
->domain
, NULL
, &patch_info
, FALSE
);
1588 /* FIXME: Add optimizations if the target is close enough */
1589 sparc_set (code
, target
, sparc_o7
);
1590 sparc_jmpl (code
, sparc_o7
, sparc_g0
, sparc_o7
);
1594 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, patch_type
, data
);
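/*
 * Note (not part of the original source): a direct sparc call instruction only
 * has a 30 bit word displacement, so it can reach about +/- 2GB from the call
 * site. When the target may be out of range (or is not known yet), the JIT
 * instead emits the longer sparc_set + jmpl template mentioned above and
 * records a patch entry, which is why emit_call () falls back to
 * mono_add_patch_info () here.
 */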
1602 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1607 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1609 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1612 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1613 switch (ins
->opcode
) {
1615 /* remove unnecessary multiplication with 1 */
1616 if (ins
->inst_imm
== 1) {
1617 if (ins
->dreg
!= ins
->sreg1
) {
1618 ins
->opcode
= OP_MOVE
;
1620 MONO_DELETE_INS (bb
, ins
);
1626 case OP_LOAD_MEMBASE
:
1627 case OP_LOADI4_MEMBASE
:
1629 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1630 * OP_LOAD_MEMBASE offset(basereg), reg
1632 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
1633 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
1634 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1635 ins
->inst_offset
== last_ins
->inst_offset
) {
1636 if (ins
->dreg
== last_ins
->sreg1
) {
1637 MONO_DELETE_INS (bb
, ins
);
1640 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1641 ins
->opcode
= OP_MOVE
;
1642 ins
->sreg1
= last_ins
->sreg1
;
1646 * Note: reg1 must be different from the basereg in the second load
1647 * OP_LOAD_MEMBASE offset(basereg), reg1
1648 * OP_LOAD_MEMBASE offset(basereg), reg2
1650 * OP_LOAD_MEMBASE offset(basereg), reg1
1651 * OP_MOVE reg1, reg2
1653 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
1654 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
1655 ins
->inst_basereg
!= last_ins
->dreg
&&
1656 ins
->inst_basereg
== last_ins
->inst_basereg
&&
1657 ins
->inst_offset
== last_ins
->inst_offset
) {
1659 if (ins
->dreg
== last_ins
->dreg
) {
1660 MONO_DELETE_INS (bb
, ins
);
1663 ins
->opcode
= OP_MOVE
;
1664 ins
->sreg1
= last_ins
->dreg
;
1667 //g_assert_not_reached ();
1671 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1672 * OP_LOAD_MEMBASE offset(basereg), reg
1674 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1675 * OP_ICONST reg, imm
1677 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
1678 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
1679 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1680 ins
->inst_offset
== last_ins
->inst_offset
) {
1681 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1682 ins
->opcode
= OP_ICONST
;
1683 ins
->inst_c0
= last_ins
->inst_imm
;
1684 g_assert_not_reached (); // check this rule
1689 case OP_LOADI1_MEMBASE
:
1690 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
1691 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1692 ins
->inst_offset
== last_ins
->inst_offset
) {
1693 if (ins
->dreg
== last_ins
->sreg1
) {
1694 MONO_DELETE_INS (bb
, ins
);
1697 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1698 ins
->opcode
= OP_MOVE
;
1699 ins
->sreg1
= last_ins
->sreg1
;
1703 case OP_LOADI2_MEMBASE
:
1704 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
1705 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1706 ins
->inst_offset
== last_ins
->inst_offset
) {
1707 if (ins
->dreg
== last_ins
->sreg1
) {
1708 MONO_DELETE_INS (bb
, ins
);
1711 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1712 ins
->opcode
= OP_MOVE
;
1713 ins
->sreg1
= last_ins
->sreg1
;
1717 case OP_STOREI4_MEMBASE_IMM
:
1718 /* Convert pairs of 0 stores to a dword 0 store */
1719 /* Used when initializing temporaries */
1720 /* We know sparc_fp is dword aligned */
1721 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
) &&
1722 (ins
->inst_destbasereg
== last_ins
->inst_destbasereg
) &&
1723 (ins
->inst_destbasereg
== sparc_fp
) &&
1724 (ins
->inst_offset
< 0) &&
1725 ((ins
->inst_offset
% 8) == 0) &&
1726 ((ins
->inst_offset
== last_ins
->inst_offset
- 4)) &&
1727 (ins
->inst_imm
== 0) &&
1728 (last_ins
->inst_imm
== 0)) {
1730 last_ins
->opcode
= OP_STOREI8_MEMBASE_IMM
;
1731 last_ins
->inst_offset
= ins
->inst_offset
;
1732 MONO_DELETE_INS (bb
, ins
);
1743 case OP_COND_EXC_EQ
:
1744 case OP_COND_EXC_GE
:
1745 case OP_COND_EXC_GT
:
1746 case OP_COND_EXC_LE
:
1747 case OP_COND_EXC_LT
:
1748 case OP_COND_EXC_NE_UN
:
1750 * Convert compare with zero+branch to BRcc
1753 * This only works in 64 bit mode, since it examines all 64
1754 * bits of the register.
1755 * Only do this if the method is small since BPr only has a 16bit
1758 if (v64
&& (mono_method_get_header (cfg
->method
)->code_size
< 10000) && last_ins
&&
1759 (last_ins
->opcode
== OP_COMPARE_IMM
) &&
1760 (last_ins
->inst_imm
== 0)) {
1761 switch (ins
->opcode
) {
1763 ins
->opcode
= OP_SPARC_BRZ
;
1766 ins
->opcode
= OP_SPARC_BRNZ
;
1769 ins
->opcode
= OP_SPARC_BRLZ
;
1772 ins
->opcode
= OP_SPARC_BRGZ
;
1775 ins
->opcode
= OP_SPARC_BRGEZ
;
1778 ins
->opcode
= OP_SPARC_BRLEZ
;
1780 case OP_COND_EXC_EQ
:
1781 ins
->opcode
= OP_SPARC_COND_EXC_EQZ
;
1783 case OP_COND_EXC_GE
:
1784 ins
->opcode
= OP_SPARC_COND_EXC_GEZ
;
1786 case OP_COND_EXC_GT
:
1787 ins
->opcode
= OP_SPARC_COND_EXC_GTZ
;
1789 case OP_COND_EXC_LE
:
1790 ins
->opcode
= OP_SPARC_COND_EXC_LEZ
;
1792 case OP_COND_EXC_LT
:
1793 ins
->opcode
= OP_SPARC_COND_EXC_LTZ
;
1795 case OP_COND_EXC_NE_UN
:
1796 ins
->opcode
= OP_SPARC_COND_EXC_NEZ
;
1799 g_assert_not_reached ();
1801 ins
->sreg1
= last_ins
->sreg1
;
1803 MONO_DELETE_INS (bb
, ins
);
1811 if (ins
->dreg
== ins
->sreg1
) {
1812 MONO_DELETE_INS (bb
, ins
);
1816 * OP_MOVE sreg, dreg
1817 * OP_MOVE dreg, sreg
1819 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
1820 ins
->sreg1
== last_ins
->dreg
&&
1821 ins
->dreg
== last_ins
->sreg1
) {
1822 MONO_DELETE_INS (bb
, ins
);
1830 bb
->last_ins
= last_ins
;
1834 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*ins
)
1836 switch (ins
->opcode
) {
1838 MONO_EMIT_NEW_BIALU (cfg
, OP_SUBCC
, ins
->dreg
+ 1, 0, ins
->sreg1
+ 1);
1839 MONO_EMIT_NEW_BIALU (cfg
, OP_SBB
, ins
->dreg
+ 2, 0, ins
->sreg1
+ 2);
1848 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1852 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
1855 sparc_patch (guint32
*code
, const gpointer target
)
1858 guint32 ins
= *code
;
1859 guint32 op
= ins
>> 30;
1860 guint32 op2
= (ins
>> 22) & 0x7;
1861 guint32 rd
= (ins
>> 25) & 0x1f;
1862 guint8
* target8
= (guint8
*)target
;
1863 gint64 disp
= (target8
- (guint8
*)code
) >> 2;
1866 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1868 if ((op
== 0) && (op2
== 2)) {
1869 if (!sparc_is_imm22 (disp
))
1872 *code
= ((ins
>> 22) << 22) | (disp
& 0x3fffff);
1874 else if ((op
== 0) && (op2
== 1)) {
1875 if (!sparc_is_imm19 (disp
))
1878 *code
= ((ins
>> 19) << 19) | (disp
& 0x7ffff);
1880 else if ((op
== 0) && (op2
== 3)) {
1881 if (!sparc_is_imm16 (disp
))
1884 *code
&= ~(0x180000 | 0x3fff);
1885 *code
|= ((disp
<< 21) & (0x180000)) | (disp
& 0x3fff);
1887 else if ((op
== 0) && (op2
== 6)) {
1888 if (!sparc_is_imm22 (disp
))
1891 *code
= ((ins
>> 22) << 22) | (disp
& 0x3fffff);
1893 else if ((op
== 0) && (op2
== 4)) {
1894 guint32 ins2
= code
[1];
1896 if (((ins2
>> 30) == 2) && (((ins2
>> 19) & 0x3f) == 2)) {
1897 /* sethi followed by or */
1899 sparc_set (p
, target8
, rd
);
1900 while (p
<= (code
+ 1))
1903 else if (ins2
== 0x01000000) {
1904 /* sethi followed by nop */
1906 sparc_set (p
, target8
, rd
);
1907 while (p
<= (code
+ 1))
1910 else if ((sparc_inst_op (ins2
) == 3) && (sparc_inst_imm (ins2
))) {
1911 /* sethi followed by load/store */
1913 guint32 t
= (guint32
)target8
;
1914 *code
&= ~(0x3fffff);
1916 *(code
+ 1) &= ~(0x3ff);
1917 *(code
+ 1) |= (t
& 0x3ff);
1921 (sparc_inst_rd (ins
) == sparc_g1
) &&
1922 (sparc_inst_op (c
[1]) == 0) && (sparc_inst_op2 (c
[1]) == 4) &&
1923 (sparc_inst_op (c
[2]) == 2) && (sparc_inst_op3 (c
[2]) == 2) &&
1924 (sparc_inst_op (c
[3]) == 2) && (sparc_inst_op3 (c
[3]) == 2))
1928 reg
= sparc_inst_rd (c
[1]);
1929 sparc_set (p
, target8
, reg
);
1933 else if ((sparc_inst_op (ins2
) == 2) && (sparc_inst_op3 (ins2
) == 0x38) &&
1934 (sparc_inst_imm (ins2
))) {
1935 /* sethi followed by jmpl */
1937 guint32 t
= (guint32
)target8
;
1938 *code
&= ~(0x3fffff);
1940 *(code
+ 1) &= ~(0x3ff);
1941 *(code
+ 1) |= (t
& 0x3ff);
1947 else if (op
== 01) {
1948 gint64 disp
= (target8
- (guint8
*)code
) >> 2;
1950 if (!sparc_is_imm30 (disp
))
1952 sparc_call_simple (code
, target8
- (guint8
*)code
);
1954 else if ((op
== 2) && (sparc_inst_op3 (ins
) == 0x2) && sparc_inst_imm (ins
)) {
1956 g_assert (sparc_is_imm13 (target8
));
1958 *code
|= (guint32
)target8
;
1960 else if ((sparc_inst_op (ins
) == 2) && (sparc_inst_op3 (ins
) == 0x7)) {
1961 /* sparc_set case 5. */
1965 reg
= sparc_inst_rd (c
[3]);
1966 sparc_set (p
, target
, reg
);
1973 // g_print ("patched with 0x%08x\n", ins);
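/*
 * Illustrative example (not part of the original source): on 32 bit sparc a
 * "sethi %hi(X), %reg; or %reg, %lo(X), %reg" pair materializes an arbitrary
 * address, so sparc_patch () rewrites the 22 bit sethi immediate and the low
 * 10 bits of the following or/load/jmpl to retarget an already emitted
 * sequence; patching it to 0x12345678 splits the constant into 0x48d15
 * (upper 22 bits) and 0x278 (lower 10 bits).
 */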
/*
 * mono_sparc_emit_save_lmf:
 *
 * Emit the code necessary to push a new entry onto the lmf stack. Used by
 * trampolines as well.
 */
1983 mono_sparc_emit_save_lmf (guint32
*code
, guint32 lmf_offset
)
1986 sparc_sti_imm (code
, sparc_o0
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
1987 /* Save previous_lmf */
1988 sparc_ldi (code
, sparc_o0
, sparc_g0
, sparc_o7
);
1989 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
1991 sparc_add_imm (code
, FALSE
, sparc_fp
, lmf_offset
, sparc_o7
);
1992 sparc_sti (code
, sparc_o7
, sparc_o0
, sparc_g0
);
1998 mono_sparc_emit_restore_lmf (guint32
*code
, guint32 lmf_offset
)
2000 /* Load previous_lmf */
2001 sparc_ldi_imm (code
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, previous_lmf
), sparc_l0
);
2003 sparc_ldi_imm (code
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, lmf_addr
), sparc_l1
);
2004 /* *(lmf) = previous_lmf */
2005 sparc_sti (code
, sparc_l0
, sparc_l1
, sparc_g0
);
2010 emit_save_sp_to_lmf (MonoCompile
*cfg
, guint32
*code
)
2013 * Since register windows are saved to the current value of %sp, we need to
2014 * set the sp field in the lmf before the call, not in the prolog.
2016 if (cfg
->method
->save_lmf
) {
2017 gint32 lmf_offset
= MONO_SPARC_STACK_BIAS
- cfg
->arch
.lmf_offset
;
2020 sparc_sti_imm (code
, sparc_sp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, sp
));
2027 emit_vret_token (MonoGenericSharingContext
*gsctx
, MonoInst
*ins
, guint32
*code
)
2029 MonoCallInst
*call
= (MonoCallInst
*)ins
;
2033 * The sparc ABI requires that calls to functions which return a structure
2034 * contain an additional unimpl instruction which is checked by the callee.
2036 if (call
->signature
->pinvoke
&& MONO_TYPE_ISSTRUCT(call
->signature
->ret
)) {
2037 if (call
->signature
->ret
->type
== MONO_TYPE_TYPEDBYREF
)
2038 size
= mini_type_stack_size (gsctx
, call
->signature
->ret
, NULL
);
2040 size
= mono_class_native_size (call
->signature
->ret
->data
.klass
, NULL
);
2041 sparc_unimp (code
, size
& 0xfff);
2048 emit_move_return_value (MonoInst
*ins
, guint32
*code
)
2050 /* Move return value to the target register */
2051 /* FIXME: do more things in the local reg allocator */
2052 switch (ins
->opcode
) {
2054 case OP_VOIDCALL_REG
:
2055 case OP_VOIDCALL_MEMBASE
:
2059 case OP_CALL_MEMBASE
:
2060 g_assert (ins
->dreg
== sparc_o0
);
2064 case OP_LCALL_MEMBASE
:
2066 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2067 * in inssel-long32.brg.
2070 sparc_mov_reg_reg (code
, sparc_o0
, ins
->dreg
);
2072 g_assert (ins
->dreg
== sparc_o1
);
2077 case OP_FCALL_MEMBASE
:
2079 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
) {
2080 sparc_fmovs (code
, sparc_f0
, ins
->dreg
);
2081 sparc_fstod (code
, ins
->dreg
, ins
->dreg
);
2084 sparc_fmovd (code
, sparc_f0
, ins
->dreg
);
2086 sparc_fmovs (code
, sparc_f0
, ins
->dreg
);
2087 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
)
2088 sparc_fstod (code
, ins
->dreg
, ins
->dreg
);
2090 sparc_fmovs (code
, sparc_f1
, ins
->dreg
+ 1);
2095 case OP_VCALL_MEMBASE
:
2098 case OP_VCALL2_MEMBASE
:
2108 * emit_load_volatile_arguments:
2110 * Load volatile arguments from the stack to the original input registers.
2111 * Required before a tail call.
2114 emit_load_volatile_arguments (MonoCompile
*cfg
, guint32
*code
)
2116 MonoMethod
*method
= cfg
->method
;
2117 MonoMethodSignature
*sig
;
2122 /* FIXME: Generate intermediate code instead */
2124 sig
= mono_method_signature (method
);
2126 cinfo
= get_call_info (cfg
, sig
, FALSE
);
2128 /* This is the opposite of the code in emit_prolog */
2130 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2131 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2132 gint32 stack_offset
;
2135 inst
= cfg
->args
[i
];
2137 if (sig
->hasthis
&& (i
== 0))
2138 arg_type
= &mono_defaults
.object_class
->byval_arg
;
2140 arg_type
= sig
->params
[i
- sig
->hasthis
];
2142 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
2143 ireg
= sparc_i0
+ ainfo
->reg
;
2145 if (ainfo
->storage
== ArgInSplitRegStack
) {
2146 g_assert (inst
->opcode
== OP_REGOFFSET
);
2148 if (!sparc_is_imm13 (stack_offset
))
2150 sparc_st_imm (code
, inst
->inst_basereg
, stack_offset
, sparc_i5
);
2153 if (!v64
&& !arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
2154 if (ainfo
->storage
== ArgInIRegPair
) {
2155 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
2157 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, ireg
);
2158 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, ireg
+ 1);
2161 if (ainfo
->storage
== ArgInSplitRegStack
) {
2162 if (stack_offset
!= inst
->inst_offset
) {
2163 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, sparc_i5
);
2164 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, sparc_o7
);
2165 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
+ 4);
2170 if (ainfo
->storage
== ArgOnStackPair
) {
2171 if (stack_offset
!= inst
->inst_offset
) {
2172 /* stack_offset is not dword aligned, so we need to make a copy */
2173 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, sparc_o7
);
2174 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
);
2176 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, sparc_o7
);
2177 sparc_st_imm (code
, sparc_o7
, sparc_fp
, stack_offset
+ 4);
2182 g_assert_not_reached ();
2185 if ((ainfo
->storage
== ArgInIReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2186 /* Argument in register, but need to be saved to stack */
2187 if (!sparc_is_imm13 (stack_offset
))
2189 if ((stack_offset
- ARGS_OFFSET
) & 0x1)
2190 /* FIXME: Is this ldsb or ldub ? */
2191 sparc_ldsb_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2193 if ((stack_offset
- ARGS_OFFSET
) & 0x2)
2194 sparc_ldsh_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2196 if ((stack_offset
- ARGS_OFFSET
) & 0x4)
2197 sparc_ld_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2200 sparc_ldx_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2202 sparc_ld_imm (code
, inst
->inst_basereg
, stack_offset
, ireg
);
2205 else if ((ainfo
->storage
== ArgInIRegPair
) && (inst
->opcode
!= OP_REGVAR
)) {
2206 /* Argument in regpair, but need to be saved to stack */
2207 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
2209 sparc_ld_imm (code
, inst
->inst_basereg
, inst
->inst_offset
, ireg
);
2210 sparc_st_imm (code
, inst
->inst_basereg
, inst
->inst_offset
+ 4, ireg
+ 1);
2212 else if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2215 else if ((ainfo
->storage
== ArgInDoubleReg
) && (inst
->opcode
!= OP_REGVAR
)) {
2219 if ((ainfo
->storage
== ArgInSplitRegStack
) || (ainfo
->storage
== ArgOnStack
))
2220 if (inst
->opcode
== OP_REGVAR
)
2221 /* FIXME: Load the argument into memory */
 * mono_sparc_is_virtual_call:
 *
 * Determine whether the instruction at CODE is a virtual call.
2236 mono_sparc_is_virtual_call (guint32
*code
)
2243 if ((sparc_inst_op (*code
) == 0x2) && (sparc_inst_op3 (*code
) == 0x38)) {
2245 * Register indirect call. If it is a virtual call, then the
2246 * instruction in the delay slot is a special kind of nop.
2249 /* Construct special nop */
2250 sparc_or_imm (p
, FALSE
, sparc_g0
, 0xca, sparc_g0
);
2253 if (code
[1] == p
[0])
2261 * mono_arch_get_vcall_slot:
2263 * Determine the vtable slot used by a virtual call.
2266 mono_arch_get_vcall_slot (guint8
*code8
, mgreg_t
*regs
, int *displacement
)
2268 guint32
*code
= (guint32
*)(gpointer
)code8
;
2269 guint32 ins
= code
[0];
2270 guint32 prev_ins
= code
[-1];
2272 mono_sparc_flushw ();
2276 if (!mono_sparc_is_virtual_call (code
))
2279 if ((sparc_inst_op (ins
) == 0x2) && (sparc_inst_op3 (ins
) == 0x38)) {
2280 if ((sparc_inst_op (prev_ins
) == 0x3) && (sparc_inst_i (prev_ins
) == 1) && (sparc_inst_op3 (prev_ins
) == 0 || sparc_inst_op3 (prev_ins
) == 0xb)) {
2281 /* ld [r1 + CONST ], r2; call r2 */
2282 guint32 base
= sparc_inst_rs1 (prev_ins
);
2283 gint32 disp
= (((gint32
)(sparc_inst_imm13 (prev_ins
))) << 19) >> 19;
2286 g_assert (sparc_inst_rd (prev_ins
) == sparc_inst_rs1 (ins
));
2288 g_assert ((base
>= sparc_o0
) && (base
<= sparc_i7
));
2290 base_val
= regs
[base
];
2292 *displacement
= disp
;
2294 return (gpointer
)base_val
;
2296 else if ((sparc_inst_op (prev_ins
) == 0x3) && (sparc_inst_i (prev_ins
) == 0) && (sparc_inst_op3 (prev_ins
) == 0)) {
2297 /* set r1, ICONST; ld [r1 + r2], r2; call r2 */
2298 /* Decode a sparc_set32 */
2299 guint32 base
= sparc_inst_rs1 (prev_ins
);
2302 guint32 s1
= code
[-3];
2303 guint32 s2
= code
[-2];
2310 g_assert (sparc_inst_op (s1
) == 0);
2311 g_assert (sparc_inst_op2 (s1
) == 4);
2314 g_assert (sparc_inst_op (s2
) == 2);
2315 g_assert (sparc_inst_op3 (s2
) == 2);
2316 g_assert (sparc_inst_i (s2
) == 1);
2317 g_assert (sparc_inst_rs1 (s2
) == sparc_inst_rd (s2
));
2318 g_assert (sparc_inst_rd (s1
) == sparc_inst_rs1 (s2
));
2320 disp
= ((s1
& 0x3fffff) << 10) | sparc_inst_imm13 (s2
);
2322 g_assert ((base
>= sparc_o0
) && (base
<= sparc_i7
));
2324 base_val
= regs
[base
];
2326 *displacement
= disp
;
2328 return (gpointer
)base_val
;
2330 g_assert_not_reached ();
2333 g_assert_not_reached ();
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 2
#define JUMP_IMM_SIZE 5
#define ENABLE_WRONG_METHOD_CHECK 0

/*
 * LOCKING: called with the domain lock held
 */
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
	guint32 *code, *start;

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
				item->chunk_size += 16;
				item->chunk_size += JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		size += item->chunk_size;
		code = mono_method_alloc_generic_virtual_thunk (domain, size * 4);
		code = mono_domain_code_reserve (domain, size * 4);

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->code_target = (guint8*)code;
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
					sparc_set (code, (guint32)item->key, sparc_g5);
					sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
				item->jmp_code = (guint8*)code;
				sparc_branch (code, 0, sparc_bne, 0);
				sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
				sparc_ld (code, sparc_g5, 0, sparc_g5);
				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);

					sparc_patch (item->jmp_code, code);
					sparc_set (code, fail_tramp, sparc_g5);
					sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
					item->jmp_code = NULL;
				/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
				sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
				sparc_ld (code, sparc_g5, 0, sparc_g5);
				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
			sparc_set (code, (guint32)item->key, sparc_g5);
			sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
			item->jmp_code = (guint8*)code;
			sparc_branch (code, 0, sparc_beu, 0);

	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);

	mono_arch_flush_icache ((guint8*)start, (code - start) * 4);

	mono_stats.imt_thunks_size += (code - start) * 4;
	g_assert (code - start <= size);
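
/*
 * Note (illustrative, not part of the original source; delay slots omitted for
 * clarity): for an is_equals entry with a check_target_idx, the code emitted above
 * produces a chunk of roughly this shape, with MONO_ARCH_IMT_REG holding the
 * interface method key and %g5 used as the scratch register chosen above:
 *
 *   set   <item->key>, %g5
 *   cmp   MONO_ARCH_IMT_REG, %g5
 *   bne   <next chunk>                 ! recorded in item->jmp_code, patched later
 *   set   &vtable->vtable [slot], %g5
 *   ld    [%g5], %g5
 *   jmpl  %g5, %g0, %g0                ! tail jump into the target method
 *
 * Non-equals entries emit only the set/cmp/beu sequence and fall through to the
 * chunk named by check_target_idx.
 */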
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
	g_assert_not_reached ();
	return (MonoMethod*)regs [sparc_g1];

mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
	mono_sparc_flushw ();

	return (gpointer)regs [sparc_o0];
/*
 * Some conventions used in the following code.
 * 2) The only scratch registers we have are o7 and g1. We try to
 * stick to o7 when we can, and use g1 when necessary.
 */
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
	guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
	MonoInst *last_ins = NULL;

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	cpos = bb->max_offset;

	if (cfg->prof_options & MONO_PROFILE_COVERAGE) {

	MONO_BB_FOR_EACH_INS (bb, ins) {
		offset = (guint8*)code - cfg->native_code;

		spec = ins_get_spec (ins->opcode);

		max_len = ((guint8 *)spec)[MONO_INST_LEN];

		if (offset > (cfg->code_size - max_len - 16)) {
			cfg->code_size *= 2;
			cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
			code = (guint32*)(cfg->native_code + offset);

		code_start = (guint8*)code;
		//	if (ins->cil_code)
		//		g_print ("cil code\n");
		mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
2509 case OP_STOREI1_MEMBASE_IMM
:
2510 EMIT_STORE_MEMBASE_IMM (ins
, stb
);
2512 case OP_STOREI2_MEMBASE_IMM
:
2513 EMIT_STORE_MEMBASE_IMM (ins
, sth
);
2515 case OP_STORE_MEMBASE_IMM
:
2516 EMIT_STORE_MEMBASE_IMM (ins
, sti
);
2518 case OP_STOREI4_MEMBASE_IMM
:
2519 EMIT_STORE_MEMBASE_IMM (ins
, st
);
2521 case OP_STOREI8_MEMBASE_IMM
:
2523 EMIT_STORE_MEMBASE_IMM (ins
, stx
);
2525 /* Only generated by peephole opts */
2526 g_assert ((ins
->inst_offset
% 8) == 0);
2527 g_assert (ins
->inst_imm
== 0);
2528 EMIT_STORE_MEMBASE_IMM (ins
, stx
);
2531 case OP_STOREI1_MEMBASE_REG
:
2532 EMIT_STORE_MEMBASE_REG (ins
, stb
);
2534 case OP_STOREI2_MEMBASE_REG
:
2535 EMIT_STORE_MEMBASE_REG (ins
, sth
);
2537 case OP_STOREI4_MEMBASE_REG
:
2538 EMIT_STORE_MEMBASE_REG (ins
, st
);
2540 case OP_STOREI8_MEMBASE_REG
:
2542 EMIT_STORE_MEMBASE_REG (ins
, stx
);
2544 /* Only used by OP_MEMSET */
2545 EMIT_STORE_MEMBASE_REG (ins
, std
);
2548 case OP_STORE_MEMBASE_REG
:
2549 EMIT_STORE_MEMBASE_REG (ins
, sti
);
2552 sparc_set (code
, ins
->inst_c0
, ins
->dreg
);
2553 sparc_ld (code
, ins
->dreg
, sparc_g0
, ins
->dreg
);
2555 case OP_LOADI4_MEMBASE
:
2557 EMIT_LOAD_MEMBASE (ins
, ldsw
);
2559 EMIT_LOAD_MEMBASE (ins
, ld
);
2562 case OP_LOADU4_MEMBASE
:
2563 EMIT_LOAD_MEMBASE (ins
, ld
);
2565 case OP_LOADU1_MEMBASE
:
2566 EMIT_LOAD_MEMBASE (ins
, ldub
);
2568 case OP_LOADI1_MEMBASE
:
2569 EMIT_LOAD_MEMBASE (ins
, ldsb
);
2571 case OP_LOADU2_MEMBASE
:
2572 EMIT_LOAD_MEMBASE (ins
, lduh
);
2574 case OP_LOADI2_MEMBASE
:
2575 EMIT_LOAD_MEMBASE (ins
, ldsh
);
2577 case OP_LOAD_MEMBASE
:
2579 EMIT_LOAD_MEMBASE (ins
, ldx
);
2581 EMIT_LOAD_MEMBASE (ins
, ld
);
2585 case OP_LOADI8_MEMBASE
:
2586 EMIT_LOAD_MEMBASE (ins
, ldx
);
2589 case OP_ICONV_TO_I1
:
2590 sparc_sll_imm (code
, ins
->sreg1
, 24, sparc_o7
);
2591 sparc_sra_imm (code
, sparc_o7
, 24, ins
->dreg
);
2593 case OP_ICONV_TO_I2
:
2594 sparc_sll_imm (code
, ins
->sreg1
, 16, sparc_o7
);
2595 sparc_sra_imm (code
, sparc_o7
, 16, ins
->dreg
);
2597 case OP_ICONV_TO_U1
:
2598 sparc_and_imm (code
, FALSE
, ins
->sreg1
, 0xff, ins
->dreg
);
2600 case OP_ICONV_TO_U2
:
2601 sparc_sll_imm (code
, ins
->sreg1
, 16, sparc_o7
);
2602 sparc_srl_imm (code
, sparc_o7
, 16, ins
->dreg
);
2604 case OP_LCONV_TO_OVF_U4
:
2605 case OP_ICONV_TO_OVF_U4
:
2606 /* Only used on V9 */
2607 sparc_cmp_imm (code
, ins
->sreg1
, 0);
2608 mono_add_patch_info (cfg
, (guint8
*)(code
) - (cfg
)->native_code
,
2609 MONO_PATCH_INFO_EXC
, "OverflowException");
2610 sparc_branchp (code
, 0, sparc_bl
, sparc_xcc_short
, 0, 0);
2612 sparc_set (code
, 1, sparc_o7
);
2613 sparc_sllx_imm (code
, sparc_o7
, 32, sparc_o7
);
2614 sparc_cmp (code
, ins
->sreg1
, sparc_o7
);
2615 mono_add_patch_info (cfg
, (guint8
*)(code
) - (cfg
)->native_code
,
2616 MONO_PATCH_INFO_EXC
, "OverflowException");
2617 sparc_branchp (code
, 0, sparc_bge
, sparc_xcc_short
, 0, 0);
2619 sparc_mov_reg_reg (code
, ins
->sreg1
, ins
->dreg
);
2621 case OP_LCONV_TO_OVF_I4_UN
:
2622 case OP_ICONV_TO_OVF_I4_UN
:
2623 /* Only used on V9 */
2629 sparc_cmp (code
, ins
->sreg1
, ins
->sreg2
);
2631 case OP_COMPARE_IMM
:
2632 case OP_ICOMPARE_IMM
:
2633 if (sparc_is_imm13 (ins
->inst_imm
))
2634 sparc_cmp_imm (code
, ins
->sreg1
, ins
->inst_imm
);
2636 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2637 sparc_cmp (code
, ins
->sreg1
, sparc_o7
);
2642 * gdb does not like encountering 'ta 1' in the debugged code. So
2643 * instead of emitting a trap, we emit a call a C function and place a
2646 //sparc_ta (code, 1);
2647 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, mono_break
);
2652 sparc_add (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2655 sparc_add (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2660 /* according to inssel-long32.brg, this should set cc */
2661 EMIT_ALU_IMM (ins
, add
, TRUE
);
2665 /* according to inssel-long32.brg, this should set cc */
2666 sparc_addx (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2670 EMIT_ALU_IMM (ins
, addx
, TRUE
);
2674 sparc_sub (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2677 sparc_sub (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2682 /* according to inssel-long32.brg, this should set cc */
2683 EMIT_ALU_IMM (ins
, sub
, TRUE
);
2687 /* according to inssel-long32.brg, this should set cc */
2688 sparc_subx (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2692 EMIT_ALU_IMM (ins
, subx
, TRUE
);
2695 sparc_and (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2699 EMIT_ALU_IMM (ins
, and, FALSE
);
2702 /* Sign extend sreg1 into %y */
2703 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2704 sparc_wry (code
, sparc_o7
, sparc_g0
);
2705 sparc_sdiv (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2706 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2709 sparc_wry (code
, sparc_g0
, sparc_g0
);
2710 sparc_udiv (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
			/* Transform division into a shift */
			for (i = 1; i < 30; ++i) {
				if (ins->inst_imm == imm)
				sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
				sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
				sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
				/* http://compilers.iecc.com/comparch/article/93-04-079 */
				sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
				sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
				sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
				sparc_sra_imm (code, ins->dreg, i, ins->dreg);
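
			/*
			 * Worked example (illustrative, not part of the original source) for
			 * the signed divide-by-power-of-two sequence above, with divisor 4
			 * (i == 2):
			 *
			 *   sra %sreg1, 31 -> o7    o7 = 0xffffffff for negative input, 0 otherwise
			 *   srl o7, 32 - 2 -> o7    o7 = 3 (2^i - 1) for negative input, 0 otherwise
			 *   add %sreg1, o7 -> dreg  biases negatives so the shift truncates toward 0
			 *   sra dreg, 2    -> dreg
			 *
			 * For -7: -7 + 3 = -4 and -4 >> 2 == -1, matching C's truncated division;
			 * for  7:  7 + 0 =  7 and  7 >> 2 ==  1.
			 */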
2738 /* Sign extend sreg1 into %y */
2739 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2740 sparc_wry (code
, sparc_o7
, sparc_g0
);
2741 EMIT_ALU_IMM (ins
, sdiv
, TRUE
);
2742 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2746 case OP_IDIV_UN_IMM
:
2747 sparc_wry (code
, sparc_g0
, sparc_g0
);
2748 EMIT_ALU_IMM (ins
, udiv
, FALSE
);
2751 /* Sign extend sreg1 into %y */
2752 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2753 sparc_wry (code
, sparc_o7
, sparc_g0
);
2754 sparc_sdiv (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, sparc_o7
);
2755 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2756 sparc_smul (code
, FALSE
, ins
->sreg2
, sparc_o7
, sparc_o7
);
2757 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2760 sparc_wry (code
, sparc_g0
, sparc_g0
);
2761 sparc_udiv (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, sparc_o7
);
2762 sparc_umul (code
, FALSE
, ins
->sreg2
, sparc_o7
, sparc_o7
);
2763 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2767 /* Sign extend sreg1 into %y */
2768 sparc_sra_imm (code
, ins
->sreg1
, 31, sparc_o7
);
2769 sparc_wry (code
, sparc_o7
, sparc_g0
);
2770 if (!sparc_is_imm13 (ins
->inst_imm
)) {
2771 sparc_set (code
, ins
->inst_imm
, GP_SCRATCH_REG
);
2772 sparc_sdiv (code
, TRUE
, ins
->sreg1
, GP_SCRATCH_REG
, sparc_o7
);
2773 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2774 sparc_smul (code
, FALSE
, sparc_o7
, GP_SCRATCH_REG
, sparc_o7
);
2777 sparc_sdiv_imm (code
, TRUE
, ins
->sreg1
, ins
->inst_imm
, sparc_o7
);
2778 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code
, sparc_boverflow
, "ArithmeticException", TRUE
, sparc_icc_short
);
2779 sparc_smul_imm (code
, FALSE
, sparc_o7
, ins
->inst_imm
, sparc_o7
);
2781 sparc_sub (code
, FALSE
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
		case OP_IREM_UN_IMM:
			sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
			sparc_wry (code, sparc_g0, sparc_g0);
			sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
			sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
			sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
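
			/*
			 * Note (illustrative, not part of the original source): there is no
			 * hardware remainder instruction, so the sequence above computes it
			 * as a - (a / b) * b:
			 *
			 *   guint32 a = 10, b = 3;
			 *   guint32 q = a / b;        // udiv        -> 3
			 *   guint32 r = a - q * b;    // umul + sub  -> 1
			 */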
2791 sparc_or (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2795 EMIT_ALU_IMM (ins
, or, FALSE
);
2798 sparc_xor (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2802 EMIT_ALU_IMM (ins
, xor, FALSE
);
2805 sparc_sll (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2809 if (ins
->inst_imm
< (1 << 5))
2810 sparc_sll_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2812 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2813 sparc_sll (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2817 sparc_sra (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2821 if (ins
->inst_imm
< (1 << 5))
2822 sparc_sra_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2824 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2825 sparc_sra (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2829 case OP_ISHR_UN_IMM
:
2830 if (ins
->inst_imm
< (1 << 5))
2831 sparc_srl_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2833 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2834 sparc_srl (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2838 sparc_srl (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2841 sparc_sllx (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2844 if (ins
->inst_imm
< (1 << 6))
2845 sparc_sllx_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2847 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2848 sparc_sllx (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2852 sparc_srax (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2855 if (ins
->inst_imm
< (1 << 6))
2856 sparc_srax_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2858 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2859 sparc_srax (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2863 sparc_srlx (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2865 case OP_LSHR_UN_IMM
:
2866 if (ins
->inst_imm
< (1 << 6))
2867 sparc_srlx_imm (code
, ins
->sreg1
, ins
->inst_imm
, ins
->dreg
);
2869 sparc_set (code
, ins
->inst_imm
, sparc_o7
);
2870 sparc_srlx (code
, ins
->sreg1
, sparc_o7
, ins
->dreg
);
2874 /* can't use sparc_not */
2875 sparc_xnor (code
, FALSE
, ins
->sreg1
, sparc_g0
, ins
->dreg
);
2878 /* can't use sparc_neg */
2879 sparc_sub (code
, FALSE
, sparc_g0
, ins
->sreg1
, ins
->dreg
);
2882 sparc_smul (code
, FALSE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2888 if ((ins
->inst_imm
== 1) && (ins
->sreg1
== ins
->dreg
))
2891 /* Transform multiplication into a shift */
2892 for (i
= 0; i
< 30; ++i
) {
2894 if (ins
->inst_imm
== imm
)
2898 sparc_sll_imm (code
, ins
->sreg1
, i
, ins
->dreg
);
2900 EMIT_ALU_IMM (ins
, smul
, FALSE
);
2904 sparc_smul (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2905 sparc_rdy (code
, sparc_g1
);
2906 sparc_sra_imm (code
, ins
->dreg
, 31, sparc_o7
);
2907 sparc_cmp (code
, sparc_g1
, sparc_o7
);
2908 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins
, sparc_bne
, "OverflowException", TRUE
, sparc_icc_short
);
2910 case OP_IMUL_OVF_UN
:
2911 sparc_umul (code
, TRUE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2912 sparc_rdy (code
, sparc_o7
);
2913 sparc_cmp (code
, sparc_o7
, sparc_g0
);
2914 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins
, sparc_bne
, "OverflowException", TRUE
, sparc_icc_short
);
2917 sparc_set (code
, ins
->inst_c0
, ins
->dreg
);
2920 sparc_set (code
, ins
->inst_l
, ins
->dreg
);
2923 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
2924 sparc_set_template (code
, ins
->dreg
);
2927 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
2928 sparc_set_template (code
, ins
->dreg
);
2930 case OP_ICONV_TO_I4
:
2931 case OP_ICONV_TO_U4
:
2933 if (ins
->sreg1
!= ins
->dreg
)
2934 sparc_mov_reg_reg (code
, ins
->sreg1
, ins
->dreg
);
2938 if (ins
->sreg1
!= ins
->dreg
)
2939 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
2941 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
2942 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
2946 if (cfg
->method
->save_lmf
)
2949 code
= emit_load_volatile_arguments (cfg
, code
);
2950 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
2951 sparc_set_template (code
, sparc_o7
);
2952 sparc_jmpl (code
, sparc_o7
, sparc_g0
, sparc_g0
);
2953 /* Restore parent frame in delay slot */
2954 sparc_restore_imm (code
, sparc_g0
, 0, sparc_g0
);
2957 /* ensure ins->sreg1 is not NULL */
2958 /* Might be misaligned in case of vtypes so use a byte load */
2959 sparc_ldsb_imm (code
, ins
->sreg1
, 0, sparc_g0
);
2962 sparc_add_imm (code
, FALSE
, sparc_fp
, cfg
->sig_cookie
, sparc_o7
);
2963 sparc_sti_imm (code
, sparc_o7
, ins
->sreg1
, 0);
2971 call
= (MonoCallInst
*)ins
;
2972 g_assert (!call
->virtual);
2973 code
= emit_save_sp_to_lmf (cfg
, code
);
2974 if (ins
->flags
& MONO_INST_HAS_METHOD
)
2975 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_METHOD
, call
->method
);
2977 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, call
->fptr
);
2979 code
= emit_vret_token (cfg
->generic_sharing_context
, ins
, code
);
2980 code
= emit_move_return_value (ins
, code
);
2986 case OP_VOIDCALL_REG
:
2988 call
= (MonoCallInst
*)ins
;
2989 code
= emit_save_sp_to_lmf (cfg
, code
);
2990 sparc_jmpl (code
, ins
->sreg1
, sparc_g0
, sparc_callsite
);
2992 * We emit a special kind of nop in the delay slot to tell the
2993 * trampoline code that this is a virtual call, thus an unbox
2994 * trampoline might need to be called.
2997 sparc_or_imm (code
, FALSE
, sparc_g0
, 0xca, sparc_g0
);
3001 code
= emit_vret_token (cfg
->generic_sharing_context
, ins
, code
);
3002 code
= emit_move_return_value (ins
, code
);
3004 case OP_FCALL_MEMBASE
:
3005 case OP_LCALL_MEMBASE
:
3006 case OP_VCALL_MEMBASE
:
3007 case OP_VCALL2_MEMBASE
:
3008 case OP_VOIDCALL_MEMBASE
:
3009 case OP_CALL_MEMBASE
:
3010 call
= (MonoCallInst
*)ins
;
3011 code
= emit_save_sp_to_lmf (cfg
, code
);
3012 if (sparc_is_imm13 (ins
->inst_offset
)) {
3013 sparc_ldi_imm (code
, ins
->inst_basereg
, ins
->inst_offset
, sparc_o7
);
3015 sparc_set (code
, ins
->inst_offset
, sparc_o7
);
3016 sparc_ldi (code
, ins
->inst_basereg
, sparc_o7
, sparc_o7
);
3018 sparc_jmpl (code
, sparc_o7
, sparc_g0
, sparc_callsite
);
3020 sparc_or_imm (code
, FALSE
, sparc_g0
, 0xca, sparc_g0
);
3024 code
= emit_vret_token (cfg
->generic_sharing_context
, ins
, code
);
3025 code
= emit_move_return_value (ins
, code
);
3028 if (mono_method_signature (cfg
->method
)->ret
->type
== MONO_TYPE_R4
)
3029 sparc_fdtos (code
, ins
->sreg1
, sparc_f0
);
3032 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
3034 /* FIXME: Why not use fmovd ? */
3035 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
3036 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
			/* Perform stack touching */
			/* Keep alignment */
			/* Add 4 to compensate for the rounding of localloc_offset */
			sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
			sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
			sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
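
			/*
			 * Note (illustrative, not part of the original source): the add/and
			 * pair above is the usual round-up-to-alignment idiom,
			 * size = (size + align - 1) & ~(align - 1). With an alignment of 8,
			 * a request of 13 bytes becomes (13 + 7) & ~7 == 16. The extra 4
			 * added above compensates for the later rounding of localloc_offset,
			 * as the comment notes.
			 */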
3055 if ((ins
->flags
& MONO_INST_INIT
) && (ins
->sreg1
== ins
->dreg
)) {
3057 size_reg
= sparc_g4
;
3059 size_reg
= sparc_g1
;
3061 sparc_mov_reg_reg (code
, ins
->dreg
, size_reg
);
3064 size_reg
= ins
->sreg1
;
3066 sparc_sub (code
, FALSE
, sparc_sp
, ins
->dreg
, ins
->dreg
);
3067 /* Keep %sp valid at all times */
3068 sparc_mov_reg_reg (code
, ins
->dreg
, sparc_sp
);
3069 /* Round localloc_offset too so the result is at least 8 aligned */
3070 offset2
= ALIGN_TO (cfg
->arch
.localloc_offset
, 8);
3071 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS
+ offset2
));
3072 sparc_add_imm (code
, FALSE
, ins
->dreg
, MONO_SPARC_STACK_BIAS
+ offset2
, ins
->dreg
);
3074 if (ins
->flags
& MONO_INST_INIT
) {
3076 /* Initialize memory region */
3077 sparc_cmp_imm (code
, size_reg
, 0);
3079 sparc_branch (code
, 0, sparc_be
, 0);
3081 sparc_set (code
, 0, sparc_o7
);
3082 sparc_sub_imm (code
, 0, size_reg
, sparcv9
? 8 : 4, size_reg
);
3086 sparc_stx (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3088 sparc_st (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3089 sparc_cmp (code
, sparc_o7
, size_reg
);
3091 sparc_branch (code
, 0, sparc_bl
, 0);
3092 sparc_patch (br
[2], br
[1]);
3094 sparc_add_imm (code
, 0, sparc_o7
, sparcv9
? 8 : 4, sparc_o7
);
3095 sparc_patch (br
[0], code
);
3099 case OP_LOCALLOC_IMM
: {
3100 gint32 offset
= ins
->inst_imm
;
3103 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3104 /* Perform stack touching */
3108 /* To compensate for the rounding of localloc_offset */
3109 offset
+= sizeof (gpointer
);
3110 offset
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
3111 if (sparc_is_imm13 (offset
))
3112 sparc_sub_imm (code
, FALSE
, sparc_sp
, offset
, sparc_sp
);
3114 sparc_set (code
, offset
, sparc_o7
);
3115 sparc_sub (code
, FALSE
, sparc_sp
, sparc_o7
, sparc_sp
);
3117 /* Round localloc_offset too so the result is at least 8 aligned */
3118 offset2
= ALIGN_TO (cfg
->arch
.localloc_offset
, 8);
3119 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS
+ offset2
));
3120 sparc_add_imm (code
, FALSE
, sparc_sp
, MONO_SPARC_STACK_BIAS
+ offset2
, ins
->dreg
);
3121 if ((ins
->flags
& MONO_INST_INIT
) && (offset
> 0)) {
3127 while (i
< offset
) {
3129 sparc_stx_imm (code
, sparc_g0
, ins
->dreg
, i
);
3133 sparc_st_imm (code
, sparc_g0
, ins
->dreg
, i
);
3139 sparc_set (code
, offset
, sparc_o7
);
3140 sparc_sub_imm (code
, 0, sparc_o7
, sparcv9
? 8 : 4, sparc_o7
);
3141 /* beginning of loop */
3144 sparc_stx (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3146 sparc_st (code
, sparc_g0
, ins
->dreg
, sparc_o7
);
3147 sparc_cmp_imm (code
, sparc_o7
, 0);
3149 sparc_branch (code
, 0, sparc_bne
, 0);
3151 sparc_sub_imm (code
, 0, sparc_o7
, sparcv9
? 8 : 4, sparc_o7
);
3152 sparc_patch (br
[1], br
[0]);
3158 sparc_mov_reg_reg (code
, ins
->sreg1
, sparc_o0
);
3159 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3160 (gpointer
)"mono_arch_throw_exception");
3164 sparc_mov_reg_reg (code
, ins
->sreg1
, sparc_o0
);
3165 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3166 (gpointer
)"mono_arch_rethrow_exception");
3169 case OP_START_HANDLER
: {
3171 * The START_HANDLER instruction marks the beginning of a handler
3172 * block. It is called using a call instruction, so %o7 contains
3173 * the return address. Since the handler executes in the same stack
3174 * frame as the method itself, we can't use save/restore to save
3175 * the return address. Instead, we save it into a dedicated
3178 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3179 if (!sparc_is_imm13 (spvar
->inst_offset
)) {
3180 sparc_set (code
, spvar
->inst_offset
, GP_SCRATCH_REG
);
3181 sparc_sti (code
, sparc_o7
, spvar
->inst_basereg
, GP_SCRATCH_REG
);
3184 sparc_sti_imm (code
, sparc_o7
, spvar
->inst_basereg
, spvar
->inst_offset
);
3187 case OP_ENDFILTER
: {
3188 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3189 if (!sparc_is_imm13 (spvar
->inst_offset
)) {
3190 sparc_set (code
, spvar
->inst_offset
, GP_SCRATCH_REG
);
3191 sparc_ldi (code
, spvar
->inst_basereg
, GP_SCRATCH_REG
, sparc_o7
);
3194 sparc_ldi_imm (code
, spvar
->inst_basereg
, spvar
->inst_offset
, sparc_o7
);
3195 sparc_jmpl_imm (code
, sparc_o7
, 8, sparc_g0
);
3197 sparc_mov_reg_reg (code
, ins
->sreg1
, sparc_o0
);
3200 case OP_ENDFINALLY
: {
3201 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3202 if (!sparc_is_imm13 (spvar
->inst_offset
)) {
3203 sparc_set (code
, spvar
->inst_offset
, GP_SCRATCH_REG
);
3204 sparc_ldi (code
, spvar
->inst_basereg
, GP_SCRATCH_REG
, sparc_o7
);
3207 sparc_ldi_imm (code
, spvar
->inst_basereg
, spvar
->inst_offset
, sparc_o7
);
3208 sparc_jmpl_imm (code
, sparc_o7
, 8, sparc_g0
);
3212 case OP_CALL_HANDLER
:
3213 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3214 /* This is a jump inside the method, so call_simple works even on V9 */
3215 sparc_call_simple (code
, 0);
3219 ins
->inst_c0
= (guint8
*)code
- cfg
->native_code
;
3221 case OP_RELAXED_NOP
:
3224 case OP_DUMMY_STORE
:
3225 case OP_NOT_REACHED
:
3229 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3230 if ((ins
->inst_target_bb
== bb
->next_bb
) && ins
== bb
->last_ins
)
3232 if (ins
->inst_target_bb
->native_offset
) {
3233 gint32 disp
= (ins
->inst_target_bb
->native_offset
- ((guint8
*)code
- cfg
->native_code
)) >> 2;
3234 g_assert (sparc_is_imm22 (disp
));
3235 sparc_branch (code
, 1, sparc_ba
, disp
);
3237 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3238 sparc_branch (code
, 1, sparc_ba
, 0);
3243 sparc_jmp (code
, ins
->sreg1
, sparc_g0
);
3251 if (v64
&& (cfg
->opt
& MONO_OPT_CMOV
)) {
3252 sparc_clr_reg (code
, ins
->dreg
);
3253 sparc_movcc_imm (code
, sparc_xcc
, opcode_to_sparc_cond (ins
->opcode
), 1, ins
->dreg
);
3256 sparc_clr_reg (code
, ins
->dreg
);
3258 sparc_branchp (code
, 1, opcode_to_sparc_cond (ins
->opcode
), DEFAULT_ICC
, 0, 2);
3260 sparc_branch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 2);
3263 sparc_set (code
, 1, ins
->dreg
);
3271 if (v64
&& (cfg
->opt
& MONO_OPT_CMOV
)) {
3272 sparc_clr_reg (code
, ins
->dreg
);
3273 sparc_movcc_imm (code
, sparc_icc
, opcode_to_sparc_cond (ins
->opcode
), 1, ins
->dreg
);
3276 sparc_clr_reg (code
, ins
->dreg
);
3277 sparc_branchp (code
, 1, opcode_to_sparc_cond (ins
->opcode
), sparc_icc_short
, 0, 2);
3279 sparc_set (code
, 1, ins
->dreg
);
3282 case OP_COND_EXC_EQ
:
3283 case OP_COND_EXC_NE_UN
:
3284 case OP_COND_EXC_LT
:
3285 case OP_COND_EXC_LT_UN
:
3286 case OP_COND_EXC_GT
:
3287 case OP_COND_EXC_GT_UN
:
3288 case OP_COND_EXC_GE
:
3289 case OP_COND_EXC_GE_UN
:
3290 case OP_COND_EXC_LE
:
3291 case OP_COND_EXC_LE_UN
:
3292 case OP_COND_EXC_OV
:
3293 case OP_COND_EXC_NO
:
3295 case OP_COND_EXC_NC
:
3296 case OP_COND_EXC_IEQ
:
3297 case OP_COND_EXC_INE_UN
:
3298 case OP_COND_EXC_ILT
:
3299 case OP_COND_EXC_ILT_UN
:
3300 case OP_COND_EXC_IGT
:
3301 case OP_COND_EXC_IGT_UN
:
3302 case OP_COND_EXC_IGE
:
3303 case OP_COND_EXC_IGE_UN
:
3304 case OP_COND_EXC_ILE
:
3305 case OP_COND_EXC_ILE_UN
:
3306 case OP_COND_EXC_IOV
:
3307 case OP_COND_EXC_INO
:
3308 case OP_COND_EXC_IC
:
3309 case OP_COND_EXC_INC
:
3313 EMIT_COND_SYSTEM_EXCEPTION (ins
, opcode_to_sparc_cond (ins
->opcode
), ins
->inst_p1
);
3316 case OP_SPARC_COND_EXC_EQZ
:
3317 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brz
, ins
->inst_p1
);
3319 case OP_SPARC_COND_EXC_GEZ
:
3320 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brgez
, ins
->inst_p1
);
3322 case OP_SPARC_COND_EXC_GTZ
:
3323 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brgz
, ins
->inst_p1
);
3325 case OP_SPARC_COND_EXC_LEZ
:
3326 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brlez
, ins
->inst_p1
);
3328 case OP_SPARC_COND_EXC_LTZ
:
3329 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brlz
, ins
->inst_p1
);
3331 case OP_SPARC_COND_EXC_NEZ
:
3332 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins
, brnz
, ins
->inst_p1
);
3346 EMIT_COND_BRANCH_PREDICTED (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3348 EMIT_COND_BRANCH (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3353 EMIT_COND_BRANCH_BPR (ins
, brz
, 1, 1, 1);
3355 case OP_SPARC_BRLEZ
:
3356 EMIT_COND_BRANCH_BPR (ins
, brlez
, 1, 1, 1);
3359 EMIT_COND_BRANCH_BPR (ins
, brlz
, 1, 1, 1);
3362 EMIT_COND_BRANCH_BPR (ins
, brnz
, 1, 1, 1);
3365 EMIT_COND_BRANCH_BPR (ins
, brgz
, 1, 1, 1);
3367 case OP_SPARC_BRGEZ
:
3368 EMIT_COND_BRANCH_BPR (ins
, brgez
, 1, 1, 1);
3371 /* floating point opcodes */
3373 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_R8
, ins
->inst_p0
);
3375 sparc_set_template (code
, sparc_o7
);
3377 sparc_sethi (code
, 0, sparc_o7
);
3379 sparc_lddf_imm (code
, sparc_o7
, 0, ins
->dreg
);
3382 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_R4
, ins
->inst_p0
);
3384 sparc_set_template (code
, sparc_o7
);
3386 sparc_sethi (code
, 0, sparc_o7
);
3388 sparc_ldf_imm (code
, sparc_o7
, 0, FP_SCRATCH_REG
);
3390 /* Extend to double */
3391 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3393 case OP_STORER8_MEMBASE_REG
:
3394 if (!sparc_is_imm13 (ins
->inst_offset
+ 4)) {
3395 sparc_set (code
, ins
->inst_offset
, sparc_o7
);
3396 /* SPARCV9 handles misaligned fp loads/stores */
3397 if (!v64
&& (ins
->inst_offset
% 8)) {
3399 sparc_add (code
, FALSE
, ins
->inst_destbasereg
, sparc_o7
, sparc_o7
);
3400 sparc_stf (code
, ins
->sreg1
, sparc_o7
, sparc_g0
);
3401 sparc_stf_imm (code
, ins
->sreg1
+ 1, sparc_o7
, 4);
3403 sparc_stdf (code
, ins
->sreg1
, ins
->inst_destbasereg
, sparc_o7
);
3406 if (!v64
&& (ins
->inst_offset
% 8)) {
3408 sparc_stf_imm (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3409 sparc_stf_imm (code
, ins
->sreg1
+ 1, ins
->inst_destbasereg
, ins
->inst_offset
+ 4);
3411 sparc_stdf_imm (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3414 case OP_LOADR8_MEMBASE
:
3415 EMIT_LOAD_MEMBASE (ins
, lddf
);
3417 case OP_STORER4_MEMBASE_REG
:
3418 /* This requires a double->single conversion */
3419 sparc_fdtos (code
, ins
->sreg1
, FP_SCRATCH_REG
);
3420 if (!sparc_is_imm13 (ins
->inst_offset
)) {
3421 sparc_set (code
, ins
->inst_offset
, sparc_o7
);
3422 sparc_stf (code
, FP_SCRATCH_REG
, ins
->inst_destbasereg
, sparc_o7
);
3425 sparc_stf_imm (code
, FP_SCRATCH_REG
, ins
->inst_destbasereg
, ins
->inst_offset
);
3427 case OP_LOADR4_MEMBASE
: {
3428 /* ldf needs a single precision register */
3429 int dreg
= ins
->dreg
;
3430 ins
->dreg
= FP_SCRATCH_REG
;
3431 EMIT_LOAD_MEMBASE (ins
, ldf
);
3433 /* Extend to double */
3434 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3437 case OP_ICONV_TO_R4
: {
3438 MonoInst
*spill
= cfg
->arch
.float_spill_slot
;
3439 gint32 reg
= spill
->inst_basereg
;
3440 gint32 offset
= spill
->inst_offset
;
3442 g_assert (spill
->opcode
== OP_REGOFFSET
);
3444 if (!sparc_is_imm13 (offset
)) {
3445 sparc_set (code
, offset
, sparc_o7
);
3446 sparc_stx (code
, ins
->sreg1
, reg
, offset
);
3447 sparc_lddf (code
, reg
, offset
, FP_SCRATCH_REG
);
3449 sparc_stx_imm (code
, ins
->sreg1
, reg
, offset
);
3450 sparc_lddf_imm (code
, reg
, offset
, FP_SCRATCH_REG
);
3452 sparc_fxtos (code
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3454 if (!sparc_is_imm13 (offset
)) {
3455 sparc_set (code
, offset
, sparc_o7
);
3456 sparc_st (code
, ins
->sreg1
, reg
, sparc_o7
);
3457 sparc_ldf (code
, reg
, sparc_o7
, FP_SCRATCH_REG
);
3459 sparc_st_imm (code
, ins
->sreg1
, reg
, offset
);
3460 sparc_ldf_imm (code
, reg
, offset
, FP_SCRATCH_REG
);
3462 sparc_fitos (code
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3464 sparc_fstod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3467 case OP_ICONV_TO_R8
: {
3468 MonoInst
*spill
= cfg
->arch
.float_spill_slot
;
3469 gint32 reg
= spill
->inst_basereg
;
3470 gint32 offset
= spill
->inst_offset
;
3472 g_assert (spill
->opcode
== OP_REGOFFSET
);
3475 if (!sparc_is_imm13 (offset
)) {
3476 sparc_set (code
, offset
, sparc_o7
);
3477 sparc_stx (code
, ins
->sreg1
, reg
, sparc_o7
);
3478 sparc_lddf (code
, reg
, sparc_o7
, FP_SCRATCH_REG
);
3480 sparc_stx_imm (code
, ins
->sreg1
, reg
, offset
);
3481 sparc_lddf_imm (code
, reg
, offset
, FP_SCRATCH_REG
);
3483 sparc_fxtod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3485 if (!sparc_is_imm13 (offset
)) {
3486 sparc_set (code
, offset
, sparc_o7
);
3487 sparc_st (code
, ins
->sreg1
, reg
, sparc_o7
);
3488 sparc_ldf (code
, reg
, sparc_o7
, FP_SCRATCH_REG
);
3490 sparc_st_imm (code
, ins
->sreg1
, reg
, offset
);
3491 sparc_ldf_imm (code
, reg
, offset
, FP_SCRATCH_REG
);
3493 sparc_fitod (code
, FP_SCRATCH_REG
, ins
->dreg
);
3497 case OP_FCONV_TO_I1
:
3498 case OP_FCONV_TO_U1
:
3499 case OP_FCONV_TO_I2
:
3500 case OP_FCONV_TO_U2
:
3505 case OP_FCONV_TO_I4
:
3506 case OP_FCONV_TO_U4
: {
3507 MonoInst
*spill
= cfg
->arch
.float_spill_slot
;
3508 gint32 reg
= spill
->inst_basereg
;
3509 gint32 offset
= spill
->inst_offset
;
3511 g_assert (spill
->opcode
== OP_REGOFFSET
);
3513 sparc_fdtoi (code
, ins
->sreg1
, FP_SCRATCH_REG
);
3514 if (!sparc_is_imm13 (offset
)) {
3515 sparc_set (code
, offset
, sparc_o7
);
3516 sparc_stdf (code
, FP_SCRATCH_REG
, reg
, sparc_o7
);
3517 sparc_ld (code
, reg
, sparc_o7
, ins
->dreg
);
3519 sparc_stdf_imm (code
, FP_SCRATCH_REG
, reg
, offset
);
3520 sparc_ld_imm (code
, reg
, offset
, ins
->dreg
);
3523 switch (ins
->opcode
) {
3524 case OP_FCONV_TO_I1
:
3525 case OP_FCONV_TO_U1
:
3526 sparc_and_imm (code
, 0, ins
->dreg
, 0xff, ins
->dreg
);
3528 case OP_FCONV_TO_I2
:
3529 case OP_FCONV_TO_U2
:
3530 sparc_set (code
, 0xffff, sparc_o7
);
3531 sparc_and (code
, 0, ins
->dreg
, sparc_o7
, ins
->dreg
);
3538 case OP_FCONV_TO_I8
:
3539 case OP_FCONV_TO_U8
:
3541 g_assert_not_reached ();
3543 case OP_FCONV_TO_R4
:
3544 /* FIXME: Change precision ? */
3546 sparc_fmovd (code
, ins
->sreg1
, ins
->dreg
);
3548 sparc_fmovs (code
, ins
->sreg1
, ins
->dreg
);
3549 sparc_fmovs (code
, ins
->sreg1
+ 1, ins
->dreg
+ 1);
		case OP_LCONV_TO_R_UN: {
			g_assert_not_reached ();
		case OP_LCONV_TO_OVF_I:
		case OP_LCONV_TO_OVF_I4_2: {
			guint32 *br [3], *label [1];

			/*
			 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
			 */
			sparc_cmp_imm (code, ins->sreg1, 0);
			sparc_branch (code, 1, sparc_bneg, 0);

			/* ms word must be 0 */
			sparc_cmp_imm (code, ins->sreg2, 0);
			sparc_branch (code, 1, sparc_be, 0);

			EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");

			sparc_patch (br [0], code);

			/* ms word must be 0xffffffff */
			sparc_cmp_imm (code, ins->sreg2, -1);
			sparc_branch (code, 1, sparc_bne, 0);

			sparc_patch (br [2], label [0]);

			sparc_patch (br [1], code);
			if (ins->sreg1 != ins->dreg)
				sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
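
			/*
			 * Note (illustrative, not part of the original source): the checks
			 * above verify that the 64-bit value (sreg2:sreg1) is the sign
			 * extension of its low word, i.e. that it fits in an int32.
			 * 0x00000000:7fffffff (INT32_MAX) and 0xffffffff:80000000 (INT32_MIN)
			 * pass, while 0x00000001:00000000 (2^32) takes the OverflowException
			 * path because its most significant word is neither 0 nor -1.
			 */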
3597 sparc_faddd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3600 sparc_fsubd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3603 sparc_fmuld (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3606 sparc_fdivd (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
3610 sparc_fnegd (code
, ins
->sreg1
, ins
->dreg
);
			/* FIXME: why not use fnegd ? */
			sparc_fnegs (code, ins->sreg1, ins->dreg);
3617 sparc_fdivd (code
, ins
->sreg1
, ins
->sreg2
, FP_SCRATCH_REG
);
3618 sparc_fmuld (code
, ins
->sreg2
, FP_SCRATCH_REG
, FP_SCRATCH_REG
);
3619 sparc_fsubd (code
, ins
->sreg1
, FP_SCRATCH_REG
, ins
->dreg
);
3622 sparc_fcmpd (code
, ins
->sreg1
, ins
->sreg2
);
3629 sparc_fcmpd (code
, ins
->sreg1
, ins
->sreg2
);
3630 sparc_clr_reg (code
, ins
->dreg
);
3631 switch (ins
->opcode
) {
3634 sparc_fbranch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 4);
3636 sparc_set (code
, 1, ins
->dreg
);
3637 sparc_fbranch (code
, 1, sparc_fbu
, 2);
3639 sparc_set (code
, 1, ins
->dreg
);
3642 sparc_fbranch (code
, 1, opcode_to_sparc_cond (ins
->opcode
), 2);
3644 sparc_set (code
, 1, ins
->dreg
);
3650 EMIT_FLOAT_COND_BRANCH (ins
, opcode_to_sparc_cond (ins
->opcode
), 1, 1);
3653 /* clt.un + brfalse */
3655 sparc_fbranch (code
, 1, sparc_fbul
, 0);
3658 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fba
, 1, 1);
3659 sparc_patch (p
, (guint8
*)code
);
3663 /* cgt.un + brfalse */
3665 sparc_fbranch (code
, 1, sparc_fbug
, 0);
3668 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fba
, 1, 1);
3669 sparc_patch (p
, (guint8
*)code
);
3673 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbne
, 1, 1);
3674 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3677 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbl
, 1, 1);
3678 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3681 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbg
, 1, 1);
3682 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3685 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbge
, 1, 1);
3686 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
3689 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fble
, 1, 1);
3690 EMIT_FLOAT_COND_BRANCH (ins
, sparc_fbu
, 1, 1);
			MonoInst *spill = cfg->arch.float_spill_slot;
			gint32 reg = spill->inst_basereg;
			gint32 offset = spill->inst_offset;

			g_assert (spill->opcode == OP_REGOFFSET);

			if (!sparc_is_imm13 (offset)) {
				sparc_set (code, offset, sparc_o7);
				sparc_stdf (code, ins->sreg1, reg, sparc_o7);
				sparc_lduh (code, reg, sparc_o7, sparc_o7);
				sparc_stdf_imm (code, ins->sreg1, reg, offset);
				sparc_lduh_imm (code, reg, offset, sparc_o7);

			sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
			sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
			sparc_cmp_imm (code, sparc_o7, 2047);
			EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");

			sparc_fmovd (code, ins->sreg1, ins->dreg);
			sparc_fmovs (code, ins->sreg1, ins->dreg);
			sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
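
			/*
			 * Note (illustrative, not part of the original source): the
			 * lduh/srl/and sequence above isolates the IEEE-754 exponent of the
			 * double just spilled to memory. On big-endian SPARC the first
			 * halfword of a double holds sign(1) + exponent(11) + top mantissa
			 * bits(4), so shifting right by 4 and masking with 2047 (0x7ff)
			 * leaves just the exponent; 0x7ff means infinity or NaN. For
			 * example 1.0 (0x3ff0000000000000) yields 0x3ff and passes, while
			 * +inf (0x7ff0000000000000) yields 0x7ff and takes the
			 * ArithmeticException path.
			 */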
		case OP_MEMORY_BARRIER:
			sparc_membar (code, sparc_membar_all);
			g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
			g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
			g_assert_not_reached ();

		if ((((guint8*)code) - code_start) > max_len) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
				   mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
			g_assert_not_reached ();

	cfg->code_len = (guint8*)code - cfg->native_code;

mono_arch_register_lowlevel_calls (void)
	mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
	MonoJumpInfo *patch_info;

	/* FIXME: Move part of this to arch independent code */
	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
		unsigned char *ip = patch_info->ip.i + code;

		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);

		switch (patch_info->type) {
		case MONO_PATCH_INFO_NONE:
		case MONO_PATCH_INFO_CLASS_INIT: {
			guint32 *ip2 = (guint32*)ip;
			/* Might already have been changed to a nop */
			sparc_set_template (ip2, sparc_o7);
			sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
			sparc_call_simple (ip2, 0);
		case MONO_PATCH_INFO_METHOD_JUMP: {
			guint32 *ip2 = (guint32*)ip;
			/* Might already have been patched */
			sparc_set_template (ip2, sparc_o7);

		sparc_patch ((guint32*)ip, target);
3793 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
3796 guint32
*code
= (guint32
*)p
;
3797 MonoMethodSignature
*sig
= mono_method_signature (cfg
->method
);
3800 /* Save registers to stack */
3801 for (i
= 0; i
< 6; ++i
)
3802 sparc_sti_imm (code
, sparc_i0
+ i
, sparc_fp
, ARGS_OFFSET
+ (i
* sizeof (gpointer
)));
3804 cinfo
= get_call_info (cfg
, sig
, FALSE
);
3806 /* Save float regs on V9, since they are caller saved */
3807 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3808 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3809 gint32 stack_offset
;
3811 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3813 if (ainfo
->storage
== ArgInFloatReg
) {
3814 if (!sparc_is_imm13 (stack_offset
))
3816 sparc_stf_imm (code
, ainfo
->reg
, sparc_fp
, stack_offset
);
3818 else if (ainfo
->storage
== ArgInDoubleReg
) {
3819 /* The offset is guaranteed to be aligned by the ABI rules */
3820 sparc_stdf_imm (code
, ainfo
->reg
, sparc_fp
, stack_offset
);
3824 sparc_set (code
, cfg
->method
, sparc_o0
);
3825 sparc_add_imm (code
, FALSE
, sparc_fp
, MONO_SPARC_STACK_BIAS
, sparc_o1
);
3827 mono_add_patch_info (cfg
, (guint8
*)code
-cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
3830 /* Restore float regs on V9 */
3831 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3832 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3833 gint32 stack_offset
;
3835 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
3837 if (ainfo
->storage
== ArgInFloatReg
) {
3838 if (!sparc_is_imm13 (stack_offset
))
3840 sparc_ldf_imm (code
, sparc_fp
, stack_offset
, ainfo
->reg
);
3842 else if (ainfo
->storage
== ArgInDoubleReg
) {
3843 /* The offset is guaranteed to be aligned by the ABI rules */
3844 sparc_lddf_imm (code
, sparc_fp
, stack_offset
, ainfo
->reg
);
3862 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
3864 guint32
*code
= (guint32
*)p
;
3865 int save_mode
= SAVE_NONE
;
3866 MonoMethod
*method
= cfg
->method
;
3868 switch (mono_type_get_underlying_type (mono_method_signature (method
)->ret
)->type
) {
3869 case MONO_TYPE_VOID
:
3870 /* special case string .ctor icall */
3871 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
3872 save_mode
= SAVE_ONE
;
3874 save_mode
= SAVE_NONE
;
3879 save_mode
= SAVE_ONE
;
3881 save_mode
= SAVE_TWO
;
3886 save_mode
= SAVE_FP
;
3888 case MONO_TYPE_VALUETYPE
:
3889 save_mode
= SAVE_STRUCT
;
3892 save_mode
= SAVE_ONE
;
3896 /* Save the result to the stack and also put it into the output registers */
3898 switch (save_mode
) {
3901 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 68);
3902 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 72);
3903 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3904 sparc_mov_reg_reg (code
, sparc_i1
, sparc_o2
);
3907 sparc_sti_imm (code
, sparc_i0
, sparc_fp
, ARGS_OFFSET
);
3908 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3912 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, ARGS_OFFSET
);
3914 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, 72);
3915 sparc_ld_imm (code
, sparc_fp
, 72, sparc_o1
);
3916 sparc_ld_imm (code
, sparc_fp
, 72 + 4, sparc_o2
);
3921 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
3923 sparc_ld_imm (code
, sparc_fp
, 64, sparc_o1
);
3931 sparc_set (code
, cfg
->method
, sparc_o0
);
3933 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
3936 /* Restore result */
3938 switch (save_mode
) {
3940 sparc_ld_imm (code
, sparc_fp
, 68, sparc_i0
);
3941 sparc_ld_imm (code
, sparc_fp
, 72, sparc_i0
);
3944 sparc_ldi_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_i0
);
3947 sparc_lddf_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_f0
);
3958 mono_arch_emit_prolog (MonoCompile
*cfg
)
3960 MonoMethod
*method
= cfg
->method
;
3961 MonoMethodSignature
*sig
;
3967 cfg
->code_size
= 256;
3968 cfg
->native_code
= g_malloc (cfg
->code_size
);
3969 code
= (guint32
*)cfg
->native_code
;
3971 /* FIXME: Generate intermediate code instead */
3973 offset
= cfg
->stack_offset
;
3974 offset
+= (16 * sizeof (gpointer
)); /* register save area */
3976 offset
+= 4; /* struct/union return pointer */
3979 /* add parameter area size for called functions */
3980 if (cfg
->param_area
< (6 * sizeof (gpointer
)))
3981 /* Reserve space for the first 6 arguments even if it is unused */
3982 offset
+= 6 * sizeof (gpointer
);
3984 offset
+= cfg
->param_area
;
3986 /* align the stack size */
3987 offset
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
3990 * localloc'd memory is stored between the local variables (whose
3991 * size is given by cfg->stack_offset), and between the space reserved
3994 cfg
->arch
.localloc_offset
= offset
- cfg
->stack_offset
;
3996 cfg
->stack_offset
= offset
;
3998 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3999 /* Perform stack touching */
4003 if (!sparc_is_imm13 (- cfg
->stack_offset
)) {
4004 /* Can't use sparc_o7 here, since we're still in the caller's frame */
4005 sparc_set (code
, (- cfg
->stack_offset
), GP_SCRATCH_REG
);
4006 sparc_save (code
, sparc_sp
, GP_SCRATCH_REG
, sparc_sp
);
4009 sparc_save_imm (code
, sparc_sp
, - cfg
->stack_offset
, sparc_sp
);
4012 if (strstr (cfg->method->name, "foo")) {
4013 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4014 sparc_call_simple (code, 0);
4019 sig
= mono_method_signature (method
);
4021 cinfo
= get_call_info (cfg
, sig
, FALSE
);
4023 /* Keep in sync with emit_load_volatile_arguments */
4024 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4025 ArgInfo
*ainfo
= cinfo
->args
+ i
;
4026 gint32 stack_offset
;
4028 inst
= cfg
->args
[i
];
4030 if (sig
->hasthis
&& (i
== 0))
4031 arg_type
= &mono_defaults
.object_class
->byval_arg
;
4033 arg_type
= sig
->params
[i
- sig
->hasthis
];
4035 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
4037 /* Save the split arguments so they will reside entirely on the stack */
4038 if (ainfo
->storage
== ArgInSplitRegStack
) {
4039 /* Save the register to the stack */
4040 g_assert (inst
->opcode
== OP_REGOFFSET
);
4041 if (!sparc_is_imm13 (stack_offset
))
4043 sparc_st_imm (code
, sparc_i5
, inst
->inst_basereg
, stack_offset
);
4046 if (!v64
&& !arg_type
->byref
&& (arg_type
->type
== MONO_TYPE_R8
)) {
4047 /* Save the argument to a dword aligned stack location */
4049 * stack_offset contains the offset of the argument on the stack.
4050 * inst->inst_offset contains the dword aligned offset where the value
4053 if (ainfo
->storage
== ArgInIRegPair
) {
4054 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
4056 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4057 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4060 if (ainfo
->storage
== ArgInSplitRegStack
) {
4062 g_assert_not_reached ();
4064 if (stack_offset
!= inst
->inst_offset
) {
4065 /* stack_offset is not dword aligned, so we need to make a copy */
4066 sparc_st_imm (code
, sparc_i5
, inst
->inst_basereg
, inst
->inst_offset
);
4067 sparc_ld_imm (code
, sparc_fp
, stack_offset
+ 4, sparc_o7
);
4068 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4072 if (ainfo
->storage
== ArgOnStackPair
) {
4074 g_assert_not_reached ();
4076 if (stack_offset
!= inst
->inst_offset
) {
4077 /* stack_offset is not dword aligned, so we need to make a copy */
4078 sparc_ld_imm (code
, sparc_fp
, stack_offset
, sparc_o7
);
4079 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
);
4080 sparc_ld_imm (code
, sparc_fp
, stack_offset
+ 4, sparc_o7
);
4081 sparc_st_imm (code
, sparc_o7
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4085 g_assert_not_reached ();
4088 if ((ainfo
->storage
== ArgInIReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4089 /* Argument in register, but need to be saved to stack */
4090 if (!sparc_is_imm13 (stack_offset
))
4092 if ((stack_offset
- ARGS_OFFSET
) & 0x1)
4093 sparc_stb_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4095 if ((stack_offset
- ARGS_OFFSET
) & 0x2)
4096 sparc_sth_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4098 if ((stack_offset
- ARGS_OFFSET
) & 0x4)
4099 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4102 sparc_stx_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4104 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, stack_offset
);
4108 if ((ainfo
->storage
== ArgInIRegPair
) && (inst
->opcode
!= OP_REGVAR
)) {
4112 /* Argument in regpair, but need to be saved to stack */
4113 if (!sparc_is_imm13 (inst
->inst_offset
+ 4))
4115 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4116 sparc_st_imm (code
, sparc_i0
+ ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4118 else if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4119 if (!sparc_is_imm13 (stack_offset
))
4121 sparc_stf_imm (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4123 else if ((ainfo
->storage
== ArgInDoubleReg
) && (inst
->opcode
!= OP_REGVAR
)) {
4124 /* The offset is guaranteed to be aligned by the ABI rules */
4125 sparc_stdf_imm (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4128 if ((ainfo
->storage
== ArgInFloatReg
) && (inst
->opcode
== OP_REGVAR
)) {
4129 /* Need to move into the a double precision register */
4130 sparc_fstod (code
, ainfo
->reg
, ainfo
->reg
- 1);
4133 if ((ainfo
->storage
== ArgInSplitRegStack
) || (ainfo
->storage
== ArgOnStack
))
4134 if (inst
->opcode
== OP_REGVAR
)
4135 /* FIXME: Load the argument into memory */
4141 if (cfg
->method
->save_lmf
) {
4142 gint32 lmf_offset
= STACK_BIAS
- cfg
->arch
.lmf_offset
;
4145 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_IP
, NULL
);
4146 sparc_set_template (code
, sparc_o7
);
4147 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, ip
));
4149 sparc_sti_imm (code
, sparc_sp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, sp
));
4151 sparc_sti_imm (code
, sparc_fp
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, ebp
));
4153 /* FIXME: add a relocation for this */
4154 sparc_set (code
, cfg
->method
, sparc_o7
);
4155 sparc_sti_imm (code
, sparc_o7
, sparc_fp
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, method
));
4157 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4158 (gpointer
)"mono_arch_get_lmf_addr");
4161 code
= (guint32
*)mono_sparc_emit_save_lmf (code
, lmf_offset
);
4164 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4165 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
4167 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
4169 g_assert (cfg
->code_len
<= cfg
->code_size
);
4171 return (guint8
*)code
;
4175 mono_arch_emit_epilog (MonoCompile
*cfg
)
4177 MonoMethod
*method
= cfg
->method
;
4180 int max_epilog_size
= 16 + 20 * 4;
4182 if (cfg
->method
->save_lmf
)
4183 max_epilog_size
+= 128;
4185 if (mono_jit_trace_calls
!= NULL
)
4186 max_epilog_size
+= 50;
4188 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
4189 max_epilog_size
+= 50;
4191 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
4192 cfg
->code_size
*= 2;
4193 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4194 mono_jit_stats
.code_reallocs
++;
4197 code
= (guint32
*)(cfg
->native_code
+ cfg
->code_len
);
4199 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4200 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
4202 if (cfg
->method
->save_lmf
) {
4203 gint32 lmf_offset
= STACK_BIAS
- cfg
->arch
.lmf_offset
;
4205 code
= mono_sparc_emit_restore_lmf (code
, lmf_offset
);
4209 * The V8 ABI requires that calls to functions which return a structure
4212 if (!v64
&& mono_method_signature (cfg
->method
)->pinvoke
&& MONO_TYPE_ISSTRUCT(mono_method_signature (cfg
->method
)->ret
))
4213 sparc_jmpl_imm (code
, sparc_i7
, 12, sparc_g0
);
	/* Only fold last instruction into the restore if the exit block has an in count of 1
	   and the previous block hasn't been optimized away since it may have an in count > 1 */
	if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb [0]->native_offset != cfg->bb_exit->native_offset)

	/*
	 * FIXME: The last instruction might have a branch pointing into it like in
	 * int_ceq sparc_i0 <-
	 */

	/* Try folding last instruction into the restore */
	if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
		/* or reg, imm, %i0 */
		int reg = sparc_inst_rs1 (code [-2]);
		int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
		code [-2] = code [-1];
		sparc_restore_imm (code, reg, imm, sparc_o0);
	if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
		/* or reg, reg, %i0 */
		int reg1 = sparc_inst_rs1 (code [-2]);
		int reg2 = sparc_inst_rs2 (code [-2]);
		code [-2] = code [-1];
		sparc_restore (code, reg1, reg2, sparc_o0);
	sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
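
	/*
	 * Note (illustrative, not part of the original source): the folding above relies
	 * on the SPARC register window rotation performed by restore: the callee's %i0 is
	 * the caller's %o0, and restore itself performs an add into the restored window.
	 * So a trailing
	 *
	 *   or      %l0, 3, %i0
	 *   restore %g0, %g0, %g0
	 *
	 * pair can be collapsed into the single instruction
	 *
	 *   restore %l0, 3, %o0
	 *
	 * which both restores the window and materializes the return value.
	 */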
4249 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
4251 g_assert (cfg
->code_len
< cfg
->code_size
);
4256 mono_arch_emit_exceptions (MonoCompile
*cfg
)
4258 MonoJumpInfo
*patch_info
;
4263 MonoClass
*exc_classes
[16];
4264 guint8
*exc_throw_start
[16], *exc_throw_end
[16];
4266 /* Compute needed space */
4267 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4268 if (patch_info
->type
== MONO_PATCH_INFO_EXC
)
4273 * make sure we have enough space for exceptions
4276 code_size
= exc_count
* (20 * 4);
4278 code_size
= exc_count
* 24;
4281 while (cfg
->code_len
+ code_size
> (cfg
->code_size
- 16)) {
4282 cfg
->code_size
*= 2;
4283 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4284 mono_jit_stats
.code_reallocs
++;
4287 code
= (guint32
*)(cfg
->native_code
+ cfg
->code_len
);
4289 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
4290 switch (patch_info
->type
) {
4291 case MONO_PATCH_INFO_EXC
: {
4292 MonoClass
*exc_class
;
4293 guint32
*buf
, *buf2
;
4294 guint32 throw_ip
, type_idx
;
4297 sparc_patch ((guint32
*)(cfg
->native_code
+ patch_info
->ip
.i
), code
);
4299 exc_class
= mono_class_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
4300 g_assert (exc_class
);
4301 type_idx
= exc_class
->type_token
- MONO_TOKEN_TYPE_DEF
;
4302 throw_ip
= patch_info
->ip
.i
;
4304 /* Find a throw sequence for the same exception class */
4305 for (i
= 0; i
< nthrows
; ++i
)
4306 if (exc_classes
[i
] == exc_class
)
4310 guint32 throw_offset
= (((guint8
*)exc_throw_end
[i
] - cfg
->native_code
) - throw_ip
) >> 2;
4311 if (!sparc_is_imm13 (throw_offset
))
4312 sparc_set32 (code
, throw_offset
, sparc_o1
);
4314 disp
= (exc_throw_start
[i
] - (guint8
*)code
) >> 2;
4315 g_assert (sparc_is_imm22 (disp
));
4316 sparc_branch (code
, 0, sparc_ba
, disp
);
4317 if (sparc_is_imm13 (throw_offset
))
4318 sparc_set32 (code
, throw_offset
, sparc_o1
);
4321 patch_info
->type
= MONO_PATCH_INFO_NONE
;
4324 /* Emit the template for setting o1 */
4326 if (sparc_is_imm13 (((((guint8
*)code
- cfg
->native_code
) - throw_ip
) >> 2) - 8))
4327 /* Can use a short form */
4330 sparc_set_template (code
, sparc_o1
);
4334 exc_classes
[nthrows
] = exc_class
;
4335 exc_throw_start
[nthrows
] = (guint8
*)code
;
4339 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4343 /* first arg = type token */
4344 /* Pass the type index to reduce the size of the sparc_set */
4345 if (!sparc_is_imm13 (type_idx
))
4346 sparc_set32 (code
, type_idx
, sparc_o0
);
4348 /* second arg = offset between the throw ip and the current ip */
4349 /* On sparc, the saved ip points to the call instruction */
4350 disp
= (((guint8
*)code
- cfg
->native_code
) - throw_ip
) >> 2;
4351 sparc_set32 (buf
, disp
, sparc_o1
);
4356 exc_throw_end
[nthrows
] = (guint8
*)code
;
4360 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
4361 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
4362 patch_info
->ip
.i
= (guint8
*)code
- cfg
->native_code
;
4366 if (sparc_is_imm13 (type_idx
)) {
4367 /* Put it into the delay slot */
4370 sparc_set32 (code
, type_idx
, sparc_o0
);
4371 g_assert (code
- buf
== 1);
4382 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
4384 g_assert (cfg
->code_len
< cfg
->code_size
);
gboolean lmf_addr_key_inited = FALSE;

#ifdef MONO_SPARC_THR_TLS
thread_key_t lmf_addr_key;
pthread_key_t lmf_addr_key;

mono_arch_get_lmf_addr (void)
	/* This is perf critical so we bypass the IO layer */
	/* The thr_... functions seem to be somewhat faster */
#ifdef MONO_SPARC_THR_TLS
	thr_getspecific (lmf_addr_key, &res);
	return pthread_getspecific (lmf_addr_key);

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
/*
 * There seems to be no way to determine stack boundaries under solaris,
 * so it's not possible to determine whether a SIGSEGV is caused by stack
 */
#error "--with-sigaltstack=yes not supported on solaris"

mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
	if (!lmf_addr_key_inited) {
		lmf_addr_key_inited = TRUE;

#ifdef MONO_SPARC_THR_TLS
		res = thr_keycreate (&lmf_addr_key, NULL);
		res = pthread_key_create (&lmf_addr_key, NULL);
		g_assert (res == 0);

#ifdef MONO_SPARC_THR_TLS
	thr_setspecific (lmf_addr_key, &tls->lmf);
	pthread_setspecific (lmf_addr_key, &tls->lmf);

mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
	cinfo = get_call_info (NULL, csig, FALSE);

	if (csig->hasthis) {
		ainfo = &cinfo->args [0];
		arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;

	for (k = 0; k < param_count; k++) {
		ainfo = &cinfo->args [k + csig->hasthis];

		arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
		arg_info [k + 1].size = mono_type_size (csig->params [k], &align);

mono_arch_print_tree (MonoInst *tree, int arity)

MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)

mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
	/* FIXME: implement */
	g_assert_not_reached ();