mono/mini/mini-sparc.c
1 /*
2 * mini-sparc.c: Sparc backend for the Mono code generator
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * Modified for SPARC:
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
15 #include "mini.h"
16 #include <string.h>
17 #include <pthread.h>
18 #include <unistd.h>
20 #ifndef __linux__
21 #include <thread.h>
22 #endif
24 #include <unistd.h>
25 #include <sys/mman.h>
27 #include <mono/metadata/appdomain.h>
28 #include <mono/metadata/debug-helpers.h>
29 #include <mono/metadata/tokentype.h>
30 #include <mono/utils/mono-math.h>
31 #include <mono/utils/mono-hwcap.h>
33 #include "mini-sparc.h"
34 #include "trace.h"
35 #include "cpu-sparc.h"
36 #include "jit-icalls.h"
37 #include "ir-emit.h"
40 * Sparc V9 means two things:
41 * - the instruction set
42 * - the ABI
44 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
45 * processors in use are 64 bit processors. The V9 ABI is only usable if the
46 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
47 * instructions without using the 64 bit ABI.
51 * Register usage:
52 * - %i0..%i<n> hold the incoming arguments; these are never written by JITted
53 *   code. Unused input registers are used for global register allocation.
54 * - %o0..%o5 and %l7 are used for local register allocation and passing arguments
55 * - %l0..%l6 are used for global register allocation
56 * - %o7 and %g1 are used as scratch registers in opcodes
57 * - all floating point registers are used for local register allocation except %f0.
58 * Only double precision registers are used.
59 * In 64 bit mode:
60 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
61 * used for local allocation.
65 * Alignment:
66 * - doubles and longs must be stored in dword aligned locations
70 * The following things are not implemented or do not work:
71 * - some fp arithmetic corner cases
72 * The following tests in mono/mini are expected to fail:
73 * - test_0_simple_double_casts
74 * This test casts (guint64)-1 to double and then back to guint64 again.
75 * Under x86, it returns 0, while under sparc it returns -1.
77 * In addition to this, the runtime requires the trunc function, or its
78 * solaris counterpart, aintl, to do some double->int conversions. If this
79 * function is not available, it is emulated somewhat, but the results can be
80 * strange.
84 * SPARCV9 FIXME:
85 * - optimize sparc_set according to the memory model
86 * - when non-AOT compiling, compute patch targets immediately so we don't
87 * have to emit the 6 byte template.
88 * - varargs
89 * - struct arguments/returns
93 * SPARCV9 ISSUES:
94 * - sparc_call_simple can't be used in a lot of places since the displacement
95 * might not fit into an imm30.
96 * - g1 can't be used in a lot of places since it is used as a scratch reg in
97 * sparc_set.
98 * - sparc_f0 can't be used as a scratch register on V9
99 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
100 * %d36 = %f5.
101 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
102 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
103 * be a double precision register which has no single precision part.
104 * - passing/returning structs is hard to implement, because:
105 * - the spec is very hard to understand
106 * - it requires knowledge about the fields of the structure, and needs to handle
107 * nested structures etc.
111 * Possible optimizations:
112 * - delay slot scheduling
113 * - allocate large constants to registers
114 * - add more mul/div/rem optimizations
117 #ifndef __linux__
118 #define MONO_SPARC_THR_TLS 1
119 #endif
122 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
123 * causing infinite loops in dominator computation. So glib-2.4 is required.
125 #ifdef SPARCV9
126 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
127 #error "glib 2.4 or later is required for 64 bit mode."
128 #endif
129 #endif
131 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
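/*
 * Editorial illustration: ALIGN_TO rounds 'val' up to the next multiple of
 * 'align', which is assumed to be a power of two, e.g. ALIGN_TO (13, 8) == 16
 * and ALIGN_TO (16, 8) == 16.
 */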
133 #define SIGNAL_STACK_SIZE (64 * 1024)
135 #define STACK_BIAS MONO_SPARC_STACK_BIAS
137 #ifdef SPARCV9
139 /* %g1 is used by sparc_set */
140 #define GP_SCRATCH_REG sparc_g4
141 /* %f0 is used for parameter passing */
142 #define FP_SCRATCH_REG sparc_f30
143 #define ARGS_OFFSET (STACK_BIAS + 128)
145 #else
147 #define FP_SCRATCH_REG sparc_f0
148 #define ARGS_OFFSET 68
149 #define GP_SCRATCH_REG sparc_g1
151 #endif
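/*
 * Note (editorial): these offsets follow the SPARC ABIs. On V8 the first 64
 * bytes of a frame are the 16 word register window save area and %sp + 64 holds
 * the struct return pointer, so outgoing arguments start at %sp + 68. On V9 the
 * save area is 16 extended words (128 bytes) above the stack bias, hence
 * STACK_BIAS + 128.
 */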
153 /* Whether this is a 64 bit executable */
154 #if SPARCV9
155 static gboolean v64 = TRUE;
156 #else
157 static gboolean v64 = FALSE;
158 #endif
160 static gpointer mono_arch_get_lmf_addr (void);
162 const char*
163 mono_arch_regname (int reg) {
164 static const char * rnames[] = {
165 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
166 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
167 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
168 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
169 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
170 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
171 "sparc_fp", "sparc_retadr"
173 if (reg >= 0 && reg < 32)
174 return rnames [reg];
175 return "unknown";
178 const char*
179 mono_arch_fregname (int reg) {
180 static const char *rnames [] = {
181 "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
182 "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
183 "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
184 "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
185 "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
186 "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
187 "sparc_f30", "sparc_f31"
190 if (reg >= 0 && reg < 32)
191 return rnames [reg];
192 else
193 return "unknown";
197 * Initialize the cpu to execute managed code.
199 void
200 mono_arch_cpu_init (void)
205 * Initialize architecture specific code.
207 void
208 mono_arch_init (void)
213 * Cleanup architecture specific code.
215 void
216 mono_arch_cleanup (void)
220 gboolean
221 mono_arch_have_fast_tls (void)
223 return FALSE;
227 * This function returns the optimizations supported on this cpu.
229 guint32
230 mono_arch_cpu_optimizations (guint32 *exclude_mask)
232 guint32 opts = 0;
234 *exclude_mask = 0;
237 * On some processors, the cmov instructions are even slower than the
238 * normal ones...
240 if (mono_hwcap_sparc_is_v9)
241 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
242 else
243 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
245 return opts;
249 * This function tests for all supported SIMD functions.
251 * Returns a bitmask corresponding to all supported versions.
254 guint32
255 mono_arch_cpu_enumerate_simd_versions (void)
257 /* SIMD is currently unimplemented */
258 return 0;
261 #ifdef __GNUC__
262 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
263 #else /* assume Sun's compiler */
264 static void flushi(void *addr)
266 asm("flush %i0");
268 #endif
270 #ifndef __linux__
271 void sync_instruction_memory(caddr_t addr, int len);
272 #endif
274 void
275 mono_arch_flush_icache (guint8 *code, gint size)
277 #ifndef __linux__
278 /* Hopefully this is optimized based on the actual CPU */
279 sync_instruction_memory (code, size);
280 #else
281 gulong start = (gulong) code;
282 gulong end = start + size;
283 gulong align;
285 /* Sparcv9 chips only need flushes on 32 byte
286 * cacheline boundaries.
288 * Sparcv8 needs a flush every 8 bytes.
290 align = (mono_hwcap_sparc_is_v9 ? 32 : 8);
292 start &= ~(align - 1);
293 end = (end + (align - 1)) & ~(align - 1);
295 while (start < end) {
296 #ifdef __GNUC__
297 __asm__ __volatile__ ("iflush %0"::"r"(start));
298 #else
299 flushi (start);
300 #endif
301 start += align;
303 #endif
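/*
 * Editorial worked example for the loop above: on a v9 chip (32 byte lines) a
 * request to flush 0x3e bytes starting at 0x1005 is widened to [0x1000, 0x1060)
 * and one iflush is issued per line, i.e. at 0x1000, 0x1020 and 0x1040.
 */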
307 * mono_sparc_flushw:
309 * Flush all register windows to memory. Every register window is saved to
310 * a 16 word area on the stack pointed to by its %sp register.
312 void
313 mono_sparc_flushw (void)
315 static guint32 start [64];
316 static int inited = 0;
317 guint32 *code;
318 static void (*flushw) (void);
320 if (!inited) {
321 code = start;
323 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
324 sparc_flushw (code);
325 sparc_ret (code);
326 sparc_restore_simple (code);
328 g_assert ((code - start) < 64);
330 mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
332 flushw = (gpointer)start;
334 inited = 1;
337 flushw ();
340 void
341 mono_arch_flush_register_windows (void)
343 mono_sparc_flushw ();
346 gboolean
347 mono_arch_is_inst_imm (gint64 imm)
349 return sparc_is_imm13 (imm);
352 gboolean
353 mono_sparc_is_v9 (void) {
354 return mono_hwcap_sparc_is_v9;
357 gboolean
358 mono_sparc_is_sparc64 (void) {
359 return v64;
362 typedef enum {
363 ArgInIReg,
364 ArgInIRegPair,
365 ArgInSplitRegStack,
366 ArgInFReg,
367 ArgInFRegPair,
368 ArgOnStack,
369 ArgOnStackPair,
370 ArgInFloatReg, /* V9 only */
371 ArgInDoubleReg /* V9 only */
372 } ArgStorage;
374 typedef struct {
375 gint16 offset;
376 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
377 gint8 reg;
378 ArgStorage storage;
379 guint32 vt_offset; /* for valuetypes */
380 } ArgInfo;
382 typedef struct {
383 int nargs;
384 guint32 stack_usage;
385 guint32 reg_usage;
386 ArgInfo ret;
387 ArgInfo sig_cookie;
388 ArgInfo args [1];
389 } CallInfo;
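/*
 * Note (editorial): 'args [1]' is the pre-C99 trailing array idiom;
 * get_call_info () allocates sizeof (CallInfo) + n * sizeof (ArgInfo) so that
 * args [] can hold one ArgInfo per parameter (plus the implicit 'this').
 */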
391 #define DEBUG(a)
393 /* %o0..%o5 */
394 #define PARAM_REGS 6
396 static void inline
397 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
399 ainfo->offset = *stack_size;
401 if (!pair) {
402 if (*gr >= PARAM_REGS) {
403 ainfo->storage = ArgOnStack;
405 else {
406 ainfo->storage = ArgInIReg;
407 ainfo->reg = *gr;
408 (*gr) ++;
411 /* Always reserve stack space for parameters passed in registers */
412 (*stack_size) += sizeof (gpointer);
414 else {
415 if (*gr < PARAM_REGS - 1) {
416 /* A pair of registers */
417 ainfo->storage = ArgInIRegPair;
418 ainfo->reg = *gr;
419 (*gr) += 2;
421 else if (*gr >= PARAM_REGS) {
422 /* A pair of stack locations */
423 ainfo->storage = ArgOnStackPair;
425 else {
426 ainfo->storage = ArgInSplitRegStack;
427 ainfo->reg = *gr;
428 (*gr) ++;
431 (*stack_size) += 2 * sizeof (gpointer);
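/*
 * Editorial example (V8): for a static signature (int, int, int, int, int, long)
 * the five ints take %o0..%o4, leaving *gr == 5 when the long is reached, so the
 * long becomes ArgInSplitRegStack - its most significant word goes in %o5 and
 * its least significant word goes in the first stack slot; stack_size still
 * grows by two words.
 */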
435 #ifdef SPARCV9
437 #define FLOAT_PARAM_REGS 32
439 static void inline
440 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
442 ainfo->offset = *stack_size;
444 if (single) {
445 if (*gr >= FLOAT_PARAM_REGS) {
446 ainfo->storage = ArgOnStack;
448 else {
449 /* A single is passed in an odd numbered fp register */
450 ainfo->storage = ArgInFloatReg;
451 ainfo->reg = *gr + 1;
452 (*gr) += 2;
455 else {
456 if (*gr < FLOAT_PARAM_REGS) {
457 /* A double register */
458 ainfo->storage = ArgInDoubleReg;
459 ainfo->reg = *gr;
460 (*gr) += 2;
462 else {
463 ainfo->storage = ArgOnStack;
467 (*stack_size) += sizeof (gpointer);
470 #endif
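/*
 * Editorial example (V9, per the code above): for (float a, double b), a gets
 * ArgInFloatReg with reg == 1 and b gets ArgInDoubleReg with reg == 2
 * (i.e. %f1 and %d2 in the V9 convention); the fp counter advances by two for
 * every argument, matching the one 8 byte slot per argument layout.
 */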
473 * get_call_info:
475 * Obtain information about a call according to the calling convention.
476 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
477 * document for more information.
478 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
479 * the 'Sparc Compliance Definition 2.4' document.
481 static CallInfo*
482 get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
484 guint32 i, gr, fr;
485 int n = sig->hasthis + sig->param_count;
486 guint32 stack_size = 0;
487 CallInfo *cinfo;
488 MonoType *ret_type;
490 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
492 gr = 0;
493 fr = 0;
495 #ifdef SPARCV9
496 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
497 /* The address of the return value is passed in %o0 */
498 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
499 cinfo->ret.reg += sparc_i0;
500 /* FIXME: Pass this after this as on other platforms */
501 NOT_IMPLEMENTED;
503 #endif
505 /* this */
506 if (sig->hasthis)
507 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
509 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
510 gr = PARAM_REGS;
512 /* Emit the signature cookie just before the implicit arguments */
513 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
516 for (i = 0; i < sig->param_count; ++i) {
517 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
518 MonoType *ptype;
520 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
521 gr = PARAM_REGS;
523 /* Emit the signature cookie just before the implicit arguments */
524 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
527 DEBUG(printf("param %d: ", i));
528 if (sig->params [i]->byref) {
529 DEBUG(printf("byref\n"));
531 add_general (&gr, &stack_size, ainfo, FALSE);
532 continue;
534 ptype = mini_get_underlying_type (sig->params [i]);
535 switch (ptype->type) {
536 case MONO_TYPE_BOOLEAN:
537 case MONO_TYPE_I1:
538 case MONO_TYPE_U1:
539 add_general (&gr, &stack_size, ainfo, FALSE);
540 /* the value is in the ls byte */
541 ainfo->offset += sizeof (gpointer) - 1;
542 break;
543 case MONO_TYPE_I2:
544 case MONO_TYPE_U2:
545 case MONO_TYPE_CHAR:
546 add_general (&gr, &stack_size, ainfo, FALSE);
547 /* the value is in the ls word */
548 ainfo->offset += sizeof (gpointer) - 2;
549 break;
550 case MONO_TYPE_I4:
551 case MONO_TYPE_U4:
552 add_general (&gr, &stack_size, ainfo, FALSE);
553 /* the value is in the ls dword */
554 ainfo->offset += sizeof (gpointer) - 4;
555 break;
556 case MONO_TYPE_I:
557 case MONO_TYPE_U:
558 case MONO_TYPE_PTR:
559 case MONO_TYPE_FNPTR:
560 case MONO_TYPE_CLASS:
561 case MONO_TYPE_OBJECT:
562 case MONO_TYPE_STRING:
563 case MONO_TYPE_SZARRAY:
564 case MONO_TYPE_ARRAY:
565 add_general (&gr, &stack_size, ainfo, FALSE);
566 break;
567 case MONO_TYPE_GENERICINST:
568 if (!mono_type_generic_inst_is_valuetype (ptype)) {
569 add_general (&gr, &stack_size, ainfo, FALSE);
570 break;
572 /* Fall through */
573 case MONO_TYPE_VALUETYPE:
574 #ifdef SPARCV9
575 if (sig->pinvoke)
576 NOT_IMPLEMENTED;
577 #endif
578 add_general (&gr, &stack_size, ainfo, FALSE);
579 break;
580 case MONO_TYPE_TYPEDBYREF:
581 add_general (&gr, &stack_size, ainfo, FALSE);
582 break;
583 case MONO_TYPE_U8:
584 case MONO_TYPE_I8:
585 #ifdef SPARCV9
586 add_general (&gr, &stack_size, ainfo, FALSE);
587 #else
588 add_general (&gr, &stack_size, ainfo, TRUE);
589 #endif
590 break;
591 case MONO_TYPE_R4:
592 #ifdef SPARCV9
593 add_float (&fr, &stack_size, ainfo, TRUE);
594 gr ++;
595 #else
596 /* single precision values are passed in integer registers */
597 add_general (&gr, &stack_size, ainfo, FALSE);
598 #endif
599 break;
600 case MONO_TYPE_R8:
601 #ifdef SPARCV9
602 add_float (&fr, &stack_size, ainfo, FALSE);
603 gr ++;
604 #else
605 /* double precision values are passed in a pair of registers */
606 add_general (&gr, &stack_size, ainfo, TRUE);
607 #endif
608 break;
609 default:
610 g_assert_not_reached ();
614 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
615 gr = PARAM_REGS;
617 /* Emit the signature cookie just before the implicit arguments */
618 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
621 /* return value */
622 ret_type = mini_get_underlying_type (sig->ret);
623 switch (ret_type->type) {
624 case MONO_TYPE_BOOLEAN:
625 case MONO_TYPE_I1:
626 case MONO_TYPE_U1:
627 case MONO_TYPE_I2:
628 case MONO_TYPE_U2:
629 case MONO_TYPE_CHAR:
630 case MONO_TYPE_I4:
631 case MONO_TYPE_U4:
632 case MONO_TYPE_I:
633 case MONO_TYPE_U:
634 case MONO_TYPE_PTR:
635 case MONO_TYPE_FNPTR:
636 case MONO_TYPE_CLASS:
637 case MONO_TYPE_OBJECT:
638 case MONO_TYPE_SZARRAY:
639 case MONO_TYPE_ARRAY:
640 case MONO_TYPE_STRING:
641 cinfo->ret.storage = ArgInIReg;
642 cinfo->ret.reg = sparc_i0;
643 if (gr < 1)
644 gr = 1;
645 break;
646 case MONO_TYPE_U8:
647 case MONO_TYPE_I8:
648 #ifdef SPARCV9
649 cinfo->ret.storage = ArgInIReg;
650 cinfo->ret.reg = sparc_i0;
651 if (gr < 1)
652 gr = 1;
653 #else
654 cinfo->ret.storage = ArgInIRegPair;
655 cinfo->ret.reg = sparc_i0;
656 if (gr < 2)
657 gr = 2;
658 #endif
659 break;
660 case MONO_TYPE_R4:
661 case MONO_TYPE_R8:
662 cinfo->ret.storage = ArgInFReg;
663 cinfo->ret.reg = sparc_f0;
664 break;
665 case MONO_TYPE_GENERICINST:
666 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
667 cinfo->ret.storage = ArgInIReg;
668 cinfo->ret.reg = sparc_i0;
669 if (gr < 1)
670 gr = 1;
671 break;
673 /* Fall through */
674 case MONO_TYPE_VALUETYPE:
675 if (v64) {
676 if (sig->pinvoke)
677 NOT_IMPLEMENTED;
678 else
679 /* Already done */
682 else
683 cinfo->ret.storage = ArgOnStack;
684 break;
685 case MONO_TYPE_TYPEDBYREF:
686 if (v64) {
687 if (sig->pinvoke)
688 /* Same as a valuetype with size 24 */
689 NOT_IMPLEMENTED;
690 else
691 /* Already done */
694 else
695 cinfo->ret.storage = ArgOnStack;
696 break;
697 case MONO_TYPE_VOID:
698 break;
699 default:
700 g_error ("Can't handle as return value 0x%x", sig->ret->type);
703 cinfo->stack_usage = stack_size;
704 cinfo->reg_usage = gr;
705 return cinfo;
708 GList *
709 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
711 GList *vars = NULL;
712 int i;
715 * FIXME: If an argument is allocated to a register, then load it from the
716 * stack in the prolog.
719 for (i = 0; i < cfg->num_varinfo; i++) {
720 MonoInst *ins = cfg->varinfo [i];
721 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
723 /* unused vars */
724 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
725 continue;
727 /* FIXME: Make arguments on stack allocateable to registers */
728 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
729 continue;
731 if (mono_is_regsize_var (ins->inst_vtype)) {
732 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
733 g_assert (i == vmv->idx);
735 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
739 return vars;
742 GList *
743 mono_arch_get_global_int_regs (MonoCompile *cfg)
745 GList *regs = NULL;
746 int i;
747 MonoMethodSignature *sig;
748 CallInfo *cinfo;
750 sig = mono_method_signature (cfg->method);
752 cinfo = get_call_info (cfg, sig, FALSE);
754 /* Use unused input registers */
755 for (i = cinfo->reg_usage; i < 6; ++i)
756 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
758 /* Use %l0..%l6 as global registers */
759 for (i = sparc_l0; i < sparc_l7; ++i)
760 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
762 g_free (cinfo);
764 return regs;
768 * mono_arch_regalloc_cost:
770 * Return the cost, in number of memory references, of the action of
771 * allocating the variable VMV into a register during global register
772 * allocation.
774 guint32
775 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
777 return 0;
781 * Set var information according to the calling convention. sparc version.
782 * The locals var stuff should most likely be split in another method.
785 void
786 mono_arch_allocate_vars (MonoCompile *cfg)
788 MonoMethodSignature *sig;
789 MonoMethodHeader *header;
790 MonoInst *inst;
791 int i, offset, size, align, curinst;
792 CallInfo *cinfo;
794 header = cfg->header;
796 sig = mono_method_signature (cfg->method);
798 cinfo = get_call_info (cfg, sig, FALSE);
800 if (sig->ret->type != MONO_TYPE_VOID) {
801 switch (cinfo->ret.storage) {
802 case ArgInIReg:
803 case ArgInFReg:
804 cfg->ret->opcode = OP_REGVAR;
805 cfg->ret->inst_c0 = cinfo->ret.reg;
806 break;
807 case ArgInIRegPair: {
808 MonoType *t = mini_get_underlying_type (sig->ret);
809 if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
810 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
811 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));
813 low->opcode = OP_REGVAR;
814 low->dreg = cinfo->ret.reg + 1;
815 high->opcode = OP_REGVAR;
816 high->dreg = cinfo->ret.reg;
818 cfg->ret->opcode = OP_REGVAR;
819 cfg->ret->inst_c0 = cinfo->ret.reg;
820 break;
822 case ArgOnStack:
823 #ifdef SPARCV9
824 g_assert_not_reached ();
825 #else
826 /* valuetypes */
827 cfg->vret_addr->opcode = OP_REGOFFSET;
828 cfg->vret_addr->inst_basereg = sparc_fp;
829 cfg->vret_addr->inst_offset = 64;
830 #endif
831 break;
832 default:
833 NOT_IMPLEMENTED;
835 cfg->ret->dreg = cfg->ret->inst_c0;
839 * We use the ABI calling conventions for managed code as well.
840 * Exception: valuetypes are never returned in registers on V9.
841 * FIXME: Use something more optimized.
844 /* Locals are allocated backwards from %fp */
845 cfg->frame_reg = sparc_fp;
846 offset = 0;
849 * Reserve a stack slot for holding information used during exception
850 * handling.
852 if (header->num_clauses)
853 offset += sizeof (gpointer) * 2;
855 if (cfg->method->save_lmf) {
856 offset += sizeof (MonoLMF);
857 cfg->arch.lmf_offset = offset;
860 curinst = cfg->locals_start;
861 for (i = curinst; i < cfg->num_varinfo; ++i) {
862 inst = cfg->varinfo [i];
864 if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
865 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
866 continue;
869 if (inst->flags & MONO_INST_IS_DEAD)
870 continue;
872 /* inst->backend.is_pinvoke indicates native sized value types; this is used by the
873 * pinvoke wrappers when they call functions returning structures */
874 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
875 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
876 else
877 size = mini_type_stack_size (inst->inst_vtype, &align);
880 * This is needed since structures containing doubles must be doubleword
881 * aligned.
882 * FIXME: Do this only if needed.
884 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
885 align = 8;
888 * variables are accessed as negative offsets from %fp, so increase
889 * the offset before assigning it to a variable
891 offset += size;
893 offset += align - 1;
894 offset &= ~(align - 1);
895 inst->opcode = OP_REGOFFSET;
896 inst->inst_basereg = sparc_fp;
897 inst->inst_offset = STACK_BIAS + -offset;
899 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
902 if (sig->call_convention == MONO_CALL_VARARG) {
903 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
906 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
907 inst = cfg->args [i];
908 if (inst->opcode != OP_REGVAR) {
909 ArgInfo *ainfo = &cinfo->args [i];
910 gboolean inreg = TRUE;
911 MonoType *arg_type;
912 ArgStorage storage;
914 if (sig->hasthis && (i == 0))
915 arg_type = &mono_defaults.object_class->byval_arg;
916 else
917 arg_type = sig->params [i - sig->hasthis];
919 #ifndef SPARCV9
920 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
921 || (arg_type->type == MONO_TYPE_R8)))
923 * Since float arguments are passed in integer registers, we need to
924 * save them to the stack in the prolog.
926 inreg = FALSE;
927 #endif
929 /* FIXME: Allocate volatile arguments to registers */
930 /* FIXME: This makes the argument holding a vtype address into volatile */
931 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
932 inreg = FALSE;
934 if (MONO_TYPE_ISSTRUCT (arg_type))
935 /* FIXME: this isn't needed */
936 inreg = FALSE;
938 inst->opcode = OP_REGOFFSET;
940 if (!inreg)
941 storage = ArgOnStack;
942 else
943 storage = ainfo->storage;
945 switch (storage) {
946 case ArgInIReg:
947 inst->opcode = OP_REGVAR;
948 inst->dreg = sparc_i0 + ainfo->reg;
949 break;
950 case ArgInIRegPair:
951 if (inst->type == STACK_I8) {
952 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg));
953 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg));
955 low->opcode = OP_REGVAR;
956 low->dreg = sparc_i0 + ainfo->reg + 1;
957 high->opcode = OP_REGVAR;
958 high->dreg = sparc_i0 + ainfo->reg;
960 inst->opcode = OP_REGVAR;
961 inst->dreg = sparc_i0 + ainfo->reg;
962 break;
963 case ArgInFloatReg:
964 case ArgInDoubleReg:
966 * Since float regs are volatile, we save the arguments to
967 * the stack in the prolog.
968 * FIXME: Avoid this if the method contains no calls.
970 case ArgOnStack:
971 case ArgOnStackPair:
972 case ArgInSplitRegStack:
973 /* Split arguments are saved to the stack in the prolog */
974 inst->opcode = OP_REGOFFSET;
975 /* in parent frame */
976 inst->inst_basereg = sparc_fp;
977 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
979 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
981 * It is very hard to load doubles from non-doubleword aligned
982 * memory locations. So if the offset is misaligned, we copy the
983 * argument to a stack location in the prolog.
985 if ((inst->inst_offset - STACK_BIAS) % 8) {
986 inst->inst_basereg = sparc_fp;
987 offset += 8;
988 align = 8;
989 offset += align - 1;
990 offset &= ~(align - 1);
991 inst->inst_offset = STACK_BIAS + -offset;
995 break;
996 default:
997 NOT_IMPLEMENTED;
1000 if (MONO_TYPE_ISSTRUCT (arg_type)) {
1001 /* Add a level of indirection */
1003 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1004 * are destructively modified in a lot of places in inssel.brg.
1006 MonoInst *indir;
1007 MONO_INST_NEW (cfg, indir, 0);
1008 *indir = *inst;
1009 inst->opcode = OP_VTARG_ADDR;
1010 inst->inst_left = indir;
1016 * spillvars are stored between the normal locals and the storage reserved
1017 * by the ABI.
1020 cfg->stack_offset = offset;
1022 g_free (cinfo);
1025 void
1026 mono_arch_create_vars (MonoCompile *cfg)
1028 MonoMethodSignature *sig;
1030 sig = mono_method_signature (cfg->method);
1032 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
1033 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1034 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1035 printf ("vret_addr = ");
1036 mono_print_ins (cfg->vret_addr);
1040 if (!sig->ret->byref && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
1041 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
1042 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));
1044 low->flags |= MONO_INST_VOLATILE;
1045 high->flags |= MONO_INST_VOLATILE;
1048 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1049 cfg->arch.float_spill_slot = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_ARG);
1050 ((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE;
1053 static void
1054 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
1056 MonoInst *arg;
1058 MONO_INST_NEW (cfg, arg, 0);
1060 arg->sreg1 = sreg;
1062 switch (storage) {
1063 case ArgInIReg:
1064 arg->opcode = OP_MOVE;
1065 arg->dreg = mono_alloc_ireg (cfg);
1067 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
1068 break;
1069 case ArgInFloatReg:
1070 arg->opcode = OP_FMOVE;
1071 arg->dreg = mono_alloc_freg (cfg);
1073 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1074 break;
1075 default:
1076 g_assert_not_reached ();
1079 MONO_ADD_INS (cfg->cbb, arg);
1082 static void
1083 add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
1085 int dreg = mono_alloc_ireg (cfg);
1087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset);
1089 mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
1092 static void
1093 emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1095 int offset = ARGS_OFFSET + ainfo->offset;
1097 switch (ainfo->storage) {
1098 case ArgInIRegPair:
1099 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, MONO_LVREG_LS (in->dreg));
1100 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
1101 break;
1102 case ArgOnStackPair:
1103 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, MONO_LVREG_MS (in->dreg));
1104 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
1105 break;
1106 case ArgInSplitRegStack:
1107 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
1108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
1109 break;
1110 default:
1111 g_assert_not_reached ();
1115 static void
1116 emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1118 int offset = ARGS_OFFSET + ainfo->offset;
1120 switch (ainfo->storage) {
1121 case ArgInIRegPair:
1122 /* floating-point <-> integer transfer must go through memory */
1123 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1125 /* Load into a register pair */
1126 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1127 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
1128 break;
1129 case ArgOnStackPair:
1130 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1131 break;
1132 case ArgInSplitRegStack:
1133 /* floating-point <-> integer transfer must go through memory */
1134 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1135 /* Load most significant word into register */
1136 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1137 break;
1138 default:
1139 g_assert_not_reached ();
1143 static void
1144 emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1146 int offset = ARGS_OFFSET + ainfo->offset;
1148 switch (ainfo->storage) {
1149 case ArgInIReg:
1150 /* floating-point <-> integer transfer must go through memory */
1151 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1152 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1153 break;
1154 case ArgOnStack:
1155 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1156 break;
1157 default:
1158 g_assert_not_reached ();
1162 static void
1163 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
1165 static void
1166 emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
1168 MonoInst *arg;
1169 guint32 align, offset, pad, size;
1171 if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
1172 size = sizeof (MonoTypedRef);
1173 align = sizeof (gpointer);
1175 else if (pinvoke)
1176 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1177 else {
1179 * Other backends use mono_type_stack_size (), but that
1180 * aligns the size to 8, which is larger than the size of
1181 * the source, leading to reads of invalid memory if the
1182 * source is at the end of address space.
1184 size = mono_class_value_size (in->klass, &align);
1187 /* The first 6 argument locations are reserved */
1188 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1189 cinfo->stack_usage = 6 * sizeof (gpointer);
1191 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1192 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1194 cinfo->stack_usage += size;
1195 cinfo->stack_usage += pad;
1198 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1199 * use the normal OUTARG opcodes to pass the address of the location to
1200 * the callee.
1202 if (size > 0) {
1203 MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
1204 arg->sreg1 = in->dreg;
1205 arg->klass = in->klass;
1206 arg->backend.size = size;
1207 arg->inst_p0 = call;
1208 arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1209 memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
1210 ((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
1211 MONO_ADD_INS (cfg->cbb, arg);
1213 MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
1214 arg->dreg = mono_alloc_preg (cfg);
1215 arg->sreg1 = sparc_sp;
1216 arg->inst_imm = STACK_BIAS + offset;
1217 MONO_ADD_INS (cfg->cbb, arg);
1219 emit_pass_other (cfg, call, ainfo, NULL, arg);
1223 static void
1224 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
1226 int offset = ARGS_OFFSET + ainfo->offset;
1227 int opcode;
1229 switch (ainfo->storage) {
1230 case ArgInIReg:
1231 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
1232 break;
1233 case ArgOnStack:
1234 #ifdef SPARCV9
1235 NOT_IMPLEMENTED;
1236 #else
1237 if (offset & 0x1)
1238 opcode = OP_STOREI1_MEMBASE_REG;
1239 else if (offset & 0x2)
1240 opcode = OP_STOREI2_MEMBASE_REG;
1241 else
1242 opcode = OP_STOREI4_MEMBASE_REG;
1243 MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
1244 #endif
1245 break;
1246 default:
1247 g_assert_not_reached ();
1251 static void
1252 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1254 MonoMethodSignature *tmp_sig;
1257 * mono_ArgIterator_Setup assumes the signature cookie is
1258 * passed first and all the arguments which were before it are
1259 * passed on the stack after the signature. So compensate by
1260 * passing a different signature.
1262 tmp_sig = mono_metadata_signature_dup (call->signature);
1263 tmp_sig->param_count -= call->signature->sentinelpos;
1264 tmp_sig->sentinelpos = 0;
1265 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1267 /* FIXME: Add support for signature tokens to AOT */
1268 cfg->disable_aot = TRUE;
1269 /* We always pass the signature on the stack for simplicity */
1270 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
1273 void
1274 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1276 MonoInst *in;
1277 MonoMethodSignature *sig;
1278 int i, n;
1279 CallInfo *cinfo;
1280 ArgInfo *ainfo;
1281 guint32 extra_space = 0;
1283 sig = call->signature;
1284 n = sig->param_count + sig->hasthis;
1286 cinfo = get_call_info (cfg, sig, sig->pinvoke);
1288 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1289 /* Set the 'struct/union return pointer' location on the stack */
1290 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
1293 for (i = 0; i < n; ++i) {
1294 MonoType *arg_type;
1296 ainfo = cinfo->args + i;
1298 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1299 /* Emit the signature cookie just before the first implicit argument */
1300 emit_sig_cookie (cfg, call, cinfo);
1303 in = call->args [i];
1305 if (sig->hasthis && (i == 0))
1306 arg_type = &mono_defaults.object_class->byval_arg;
1307 else
1308 arg_type = sig->params [i - sig->hasthis];
1310 arg_type = mini_get_underlying_type (arg_type);
1311 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
1312 emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
1313 else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
1314 emit_pass_long (cfg, call, ainfo, in);
1315 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
1316 emit_pass_double (cfg, call, ainfo, in);
1317 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
1318 emit_pass_float (cfg, call, ainfo, in);
1319 else
1320 emit_pass_other (cfg, call, ainfo, arg_type, in);
1323 /* Handle the case where there are no implicit arguments */
1324 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1325 emit_sig_cookie (cfg, call, cinfo);
1328 call->stack_usage = cinfo->stack_usage + extra_space;
1330 g_free (cinfo);
1333 void
1334 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1336 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
1337 int size = ins->backend.size;
1339 mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, 0);
1342 void
1343 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1345 CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
1346 MonoType *ret = mini_get_underlying_type (mono_method_signature (method)->ret);
1348 switch (cinfo->ret.storage) {
1349 case ArgInIReg:
1350 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1351 break;
1352 case ArgInIRegPair:
1353 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1354 MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
1355 } else {
1356 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (cfg->ret->dreg), MONO_LVREG_MS (val->dreg));
1357 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (cfg->ret->dreg), MONO_LVREG_LS (val->dreg));
1359 break;
1360 case ArgInFReg:
1361 if (ret->type == MONO_TYPE_R4)
1362 MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
1363 else
1364 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1365 break;
1366 default:
1367 g_assert_not_reached ();
1370 g_assert (cinfo);
1373 int cond_to_sparc_cond [][3] = {
1374 {sparc_be, sparc_be, sparc_fbe},
1375 {sparc_bne, sparc_bne, 0},
1376 {sparc_ble, sparc_ble, sparc_fble},
1377 {sparc_bge, sparc_bge, sparc_fbge},
1378 {sparc_bl, sparc_bl, sparc_fbl},
1379 {sparc_bg, sparc_bg, sparc_fbg},
1380 {sparc_bleu, sparc_bleu, 0},
1381 {sparc_beu, sparc_beu, 0},
1382 {sparc_blu, sparc_blu, sparc_fbl},
1383 {sparc_bgu, sparc_bgu, sparc_fbg}
1386 /* Map opcode to the sparc condition codes */
1387 static inline SparcCond
1388 opcode_to_sparc_cond (int opcode)
1390 CompRelation rel;
1391 CompType t;
1393 switch (opcode) {
1394 case OP_COND_EXC_OV:
1395 case OP_COND_EXC_IOV:
1396 return sparc_bvs;
1397 case OP_COND_EXC_C:
1398 case OP_COND_EXC_IC:
1399 return sparc_bcs;
1400 case OP_COND_EXC_NO:
1401 case OP_COND_EXC_NC:
1402 NOT_IMPLEMENTED;
1403 default:
1404 rel = mono_opcode_to_cond (opcode);
1405 t = mono_opcode_to_type (opcode, -1);
1407 return cond_to_sparc_cond [rel][t];
1408 break;
1411 return -1;
1414 #define COMPUTE_DISP(ins) \
1415 if (ins->inst_true_bb->native_offset) \
1416 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1417 else { \
1418 disp = 0; \
1419 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1422 #ifdef SPARCV9
1423 #define DEFAULT_ICC sparc_xcc_short
1424 #else
1425 #define DEFAULT_ICC sparc_icc_short
1426 #endif
1428 #ifdef SPARCV9
1429 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1430 do { \
1431 gint32 disp; \
1432 guint32 predict; \
1433 COMPUTE_DISP(ins); \
1434 predict = (disp != 0) ? 1 : 0; \
1435 g_assert (sparc_is_imm19 (disp)); \
1436 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1437 if (filldelay) sparc_nop (code); \
1438 } while (0)
1439 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1440 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1441 do { \
1442 gint32 disp; \
1443 guint32 predict; \
1444 COMPUTE_DISP(ins); \
1445 predict = (disp != 0) ? 1 : 0; \
1446 g_assert (sparc_is_imm19 (disp)); \
1447 sparc_fbranch (code, (annul), cond, disp); \
1448 if (filldelay) sparc_nop (code); \
1449 } while (0)
1450 #else
1451 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1452 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1453 do { \
1454 gint32 disp; \
1455 COMPUTE_DISP(ins); \
1456 g_assert (sparc_is_imm22 (disp)); \
1457 sparc_ ## bop (code, (annul), cond, disp); \
1458 if (filldelay) sparc_nop (code); \
1459 } while (0)
1460 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1461 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1462 #endif
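/*
 * Note (editorial): COMPUTE_DISP produces a word displacement (SPARC instructions
 * are 4 bytes, hence the >> 2). If the target basic block has no native offset
 * yet, the displacement is left as 0 and a MONO_PATCH_INFO_BB patch is recorded,
 * to be fixed up later by sparc_patch (). On V9 the predict bit is only set for
 * targets that already have a native offset, i.e. backward branches.
 */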
1464 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1465 do { \
1466 gint32 disp; \
1467 guint32 predict; \
1468 COMPUTE_DISP(ins); \
1469 predict = (disp != 0) ? 1 : 0; \
1470 g_assert (sparc_is_imm19 (disp)); \
1471 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1472 if (filldelay) sparc_nop (code); \
1473 } while (0)
1475 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1476 do { \
1477 gint32 disp; \
1478 COMPUTE_DISP(ins); \
1479 g_assert (sparc_is_imm22 (disp)); \
1480 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1481 if (filldelay) sparc_nop (code); \
1482 } while (0)
1484 /* emit an exception if the condition fails */
1486 * We put the exception throwing code out-of-line, at the end of the method
1488 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1489 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1490 MONO_PATCH_INFO_EXC, sexc_name); \
1491 if (mono_hwcap_sparc_is_v9 && ((icc) != sparc_icc_short)) { \
1492 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1494 else { \
1495 sparc_branch (code, 0, cond, 0); \
1497 if (filldelay) sparc_nop (code); \
1498 } while (0);
1500 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1502 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1503 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1504 MONO_PATCH_INFO_EXC, sexc_name); \
1505 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1506 sparc_nop (code); \
1507 } while (0);
1509 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1510 if (sparc_is_imm13 ((ins)->inst_imm)) \
1511 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1512 else { \
1513 sparc_set (code, ins->inst_imm, sparc_o7); \
1514 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1516 } while (0);
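/*
 * Note (editorial): sparc_is_imm13 () checks the signed 13 bit immediate range
 * of SPARC ALU and memory instructions (-4096..4095); e.g. inst_imm == 4095 uses
 * the _imm form above, while 4096 is first materialized into %o7 with sparc_set.
 */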
1518 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1519 if (sparc_is_imm13 (ins->inst_offset)) \
1520 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1521 else { \
1522 sparc_set (code, ins->inst_offset, sparc_o7); \
1523 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1525 } while (0);
1527 /* max len = 5 */
1528 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1529 guint32 sreg; \
1530 if (ins->inst_imm == 0) \
1531 sreg = sparc_g0; \
1532 else { \
1533 sparc_set (code, ins->inst_imm, sparc_o7); \
1534 sreg = sparc_o7; \
1536 if (!sparc_is_imm13 (ins->inst_offset)) { \
1537 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1538 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1540 else \
1541 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1542 } while (0);
1544 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1545 if (!sparc_is_imm13 (ins->inst_offset)) { \
1546 sparc_set (code, ins->inst_offset, sparc_o7); \
1547 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1549 else \
1550 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1551 } while (0);
1553 #define EMIT_CALL() do { \
1554 if (v64) { \
1555 sparc_set_template (code, sparc_o7); \
1556 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1558 else { \
1559 sparc_call_simple (code, 0); \
1561 sparc_nop (code); \
1562 } while (0);
1565 * A call template is 7 instructions long, so we want to avoid it if possible.
1567 static guint32*
1568 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1570 MonoError error;
1571 gpointer target;
1573 /* FIXME: This only works if the target method is already compiled */
1574 if (0 && v64 && !cfg->compile_aot) {
1575 MonoJumpInfo patch_info;
1577 patch_info.type = patch_type;
1578 patch_info.data.target = data;
1580 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE, &error);
1581 mono_error_raise_exception (&error); /* FIXME: don't raise here */
1583 /* FIXME: Add optimizations if the target is close enough */
1584 sparc_set (code, target, sparc_o7);
1585 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1586 sparc_nop (code);
1588 else {
1589 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
1590 EMIT_CALL ();
1593 return code;
1596 void
1597 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
1601 void
1602 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1604 MonoInst *ins, *n, *last_ins = NULL;
1605 ins = bb->code;
1607 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1608 switch (ins->opcode) {
1609 case OP_MUL_IMM:
1610 /* remove unnecessary multiplication with 1 */
1611 if (ins->inst_imm == 1) {
1612 if (ins->dreg != ins->sreg1) {
1613 ins->opcode = OP_MOVE;
1614 } else {
1615 MONO_DELETE_INS (bb, ins);
1616 continue;
1619 break;
1620 #ifndef SPARCV9
1621 case OP_LOAD_MEMBASE:
1622 case OP_LOADI4_MEMBASE:
1624 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1625 * OP_LOAD_MEMBASE offset(basereg), reg
1627 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1628 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1629 ins->inst_basereg == last_ins->inst_destbasereg &&
1630 ins->inst_offset == last_ins->inst_offset) {
1631 if (ins->dreg == last_ins->sreg1) {
1632 MONO_DELETE_INS (bb, ins);
1633 continue;
1634 } else {
1635 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1636 ins->opcode = OP_MOVE;
1637 ins->sreg1 = last_ins->sreg1;
1641 * Note: reg1 must be different from the basereg in the second load
1642 * OP_LOAD_MEMBASE offset(basereg), reg1
1643 * OP_LOAD_MEMBASE offset(basereg), reg2
1644 * -->
1645 * OP_LOAD_MEMBASE offset(basereg), reg1
1646 * OP_MOVE reg1, reg2
1648 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1649 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1650 ins->inst_basereg != last_ins->dreg &&
1651 ins->inst_basereg == last_ins->inst_basereg &&
1652 ins->inst_offset == last_ins->inst_offset) {
1654 if (ins->dreg == last_ins->dreg) {
1655 MONO_DELETE_INS (bb, ins);
1656 continue;
1657 } else {
1658 ins->opcode = OP_MOVE;
1659 ins->sreg1 = last_ins->dreg;
1662 //g_assert_not_reached ();
1664 #if 0
1666 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1667 * OP_LOAD_MEMBASE offset(basereg), reg
1668 * -->
1669 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1670 * OP_ICONST reg, imm
1672 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1673 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1674 ins->inst_basereg == last_ins->inst_destbasereg &&
1675 ins->inst_offset == last_ins->inst_offset) {
1676 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1677 ins->opcode = OP_ICONST;
1678 ins->inst_c0 = last_ins->inst_imm;
1679 g_assert_not_reached (); // check this rule
1680 #endif
1682 break;
1683 #endif
1684 case OP_LOADI1_MEMBASE:
1685 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1686 ins->inst_basereg == last_ins->inst_destbasereg &&
1687 ins->inst_offset == last_ins->inst_offset) {
1688 if (ins->dreg == last_ins->sreg1) {
1689 MONO_DELETE_INS (bb, ins);
1690 continue;
1691 } else {
1692 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1693 ins->opcode = OP_MOVE;
1694 ins->sreg1 = last_ins->sreg1;
1697 break;
1698 case OP_LOADI2_MEMBASE:
1699 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1700 ins->inst_basereg == last_ins->inst_destbasereg &&
1701 ins->inst_offset == last_ins->inst_offset) {
1702 if (ins->dreg == last_ins->sreg1) {
1703 MONO_DELETE_INS (bb, ins);
1704 continue;
1705 } else {
1706 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1707 ins->opcode = OP_MOVE;
1708 ins->sreg1 = last_ins->sreg1;
1711 break;
1712 case OP_STOREI4_MEMBASE_IMM:
1713 /* Convert pairs of 0 stores to a dword 0 store */
1714 /* Used when initializing temporaries */
1715 /* We know sparc_fp is dword aligned */
1716 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1717 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1718 (ins->inst_destbasereg == sparc_fp) &&
1719 (ins->inst_offset < 0) &&
1720 ((ins->inst_offset % 8) == 0) &&
1721 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1722 (ins->inst_imm == 0) &&
1723 (last_ins->inst_imm == 0)) {
1724 if (mono_hwcap_sparc_is_v9) {
1725 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1726 last_ins->inst_offset = ins->inst_offset;
1727 MONO_DELETE_INS (bb, ins);
1728 continue;
1731 break;
1732 case OP_IBEQ:
1733 case OP_IBNE_UN:
1734 case OP_IBLT:
1735 case OP_IBGT:
1736 case OP_IBGE:
1737 case OP_IBLE:
1738 case OP_COND_EXC_EQ:
1739 case OP_COND_EXC_GE:
1740 case OP_COND_EXC_GT:
1741 case OP_COND_EXC_LE:
1742 case OP_COND_EXC_LT:
1743 case OP_COND_EXC_NE_UN:
1745 * Convert compare with zero+branch to BRcc
1748 * This only works in 64 bit mode, since it examines all 64
1749 * bits of the register.
1750 * Only do this if the method is small since BPr only has a 16bit
1751 * displacement.
1753 if (v64 && (cfg->header->code_size < 10000) && last_ins &&
1754 (last_ins->opcode == OP_COMPARE_IMM) &&
1755 (last_ins->inst_imm == 0)) {
1756 switch (ins->opcode) {
1757 case OP_IBEQ:
1758 ins->opcode = OP_SPARC_BRZ;
1759 break;
1760 case OP_IBNE_UN:
1761 ins->opcode = OP_SPARC_BRNZ;
1762 break;
1763 case OP_IBLT:
1764 ins->opcode = OP_SPARC_BRLZ;
1765 break;
1766 case OP_IBGT:
1767 ins->opcode = OP_SPARC_BRGZ;
1768 break;
1769 case OP_IBGE:
1770 ins->opcode = OP_SPARC_BRGEZ;
1771 break;
1772 case OP_IBLE:
1773 ins->opcode = OP_SPARC_BRLEZ;
1774 break;
1775 case OP_COND_EXC_EQ:
1776 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1777 break;
1778 case OP_COND_EXC_GE:
1779 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1780 break;
1781 case OP_COND_EXC_GT:
1782 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1783 break;
1784 case OP_COND_EXC_LE:
1785 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1786 break;
1787 case OP_COND_EXC_LT:
1788 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1789 break;
1790 case OP_COND_EXC_NE_UN:
1791 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1792 break;
1793 default:
1794 g_assert_not_reached ();
1796 ins->sreg1 = last_ins->sreg1;
1797 *last_ins = *ins;
1798 MONO_DELETE_INS (bb, ins);
1799 continue;
1801 break;
1802 case OP_MOVE:
1804 * OP_MOVE reg, reg
1806 if (ins->dreg == ins->sreg1) {
1807 MONO_DELETE_INS (bb, ins);
1808 continue;
1811 * OP_MOVE sreg, dreg
1812 * OP_MOVE dreg, sreg
1814 if (last_ins && last_ins->opcode == OP_MOVE &&
1815 ins->sreg1 == last_ins->dreg &&
1816 ins->dreg == last_ins->sreg1) {
1817 MONO_DELETE_INS (bb, ins);
1818 continue;
1820 break;
1822 last_ins = ins;
1823 ins = ins->next;
1825 bb->last_ins = last_ins;
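/*
 * Editorial example of the store/load rule above (V8):
 *     OP_STOREI4_MEMBASE_REG  %o1 -> [%fp - 16]
 *     OP_LOADI4_MEMBASE       [%fp - 16] -> %o1
 * keeps the store and deletes the load since the value is already in %o1; when
 * the load targets a different register it is rewritten into an OP_MOVE instead.
 */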
1828 void
1829 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
1831 switch (ins->opcode) {
1832 case OP_LNEG:
1833 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), 0, MONO_LVREG_LS (ins->sreg1));
1834 MONO_EMIT_NEW_BIALU (cfg, OP_SBB, MONO_LVREG_MS (ins->dreg), 0, MONO_LVREG_MS (ins->sreg1));
1835 NULLIFY_INS (ins);
1836 break;
1837 default:
1838 break;
1842 void
1843 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1847 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
1849 static void
1850 sparc_patch (guint32 *code, const gpointer target)
1852 guint32 *c = code;
1853 guint32 ins = *code;
1854 guint32 op = ins >> 30;
1855 guint32 op2 = (ins >> 22) & 0x7;
1856 guint32 rd = (ins >> 25) & 0x1f;
1857 guint8* target8 = (guint8*)target;
1858 gint64 disp = (target8 - (guint8*)code) >> 2;
1859 int reg;
1861 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1863 if ((op == 0) && (op2 == 2)) {
1864 if (!sparc_is_imm22 (disp))
1865 NOT_IMPLEMENTED;
1866 /* Bicc */
1867 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
1869 else if ((op == 0) && (op2 == 1)) {
1870 if (!sparc_is_imm19 (disp))
1871 NOT_IMPLEMENTED;
1872 /* BPcc */
1873 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
1875 else if ((op == 0) && (op2 == 3)) {
1876 if (!sparc_is_imm16 (disp))
1877 NOT_IMPLEMENTED;
1878 /* BPr */
1879 *code &= ~(0x180000 | 0x3fff);
1880 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
1882 else if ((op == 0) && (op2 == 6)) {
1883 if (!sparc_is_imm22 (disp))
1884 NOT_IMPLEMENTED;
1885 /* FBicc */
1886 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
1888 else if ((op == 0) && (op2 == 4)) {
1889 guint32 ins2 = code [1];
1891 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
1892 /* sethi followed by or */
1893 guint32 *p = code;
1894 sparc_set (p, target8, rd);
1895 while (p <= (code + 1))
1896 sparc_nop (p);
1898 else if (ins2 == 0x01000000) {
1899 /* sethi followed by nop */
1900 guint32 *p = code;
1901 sparc_set (p, target8, rd);
1902 while (p <= (code + 1))
1903 sparc_nop (p);
1905 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
1906 /* sethi followed by load/store */
1907 #ifndef SPARCV9
1908 guint32 t = (guint32)target8;
1909 *code &= ~(0x3fffff);
1910 *code |= (t >> 10);
1911 *(code + 1) &= ~(0x3ff);
1912 *(code + 1) |= (t & 0x3ff);
1913 #endif
1915 else if (v64 &&
1916 (sparc_inst_rd (ins) == sparc_g1) &&
1917 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
1918 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
1919 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
1921 /* sparc_set */
1922 guint32 *p = c;
1923 reg = sparc_inst_rd (c [1]);
1924 sparc_set (p, target8, reg);
1925 while (p < (c + 6))
1926 sparc_nop (p);
1928 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
1929 (sparc_inst_imm (ins2))) {
1930 /* sethi followed by jmpl */
1931 #ifndef SPARCV9
1932 guint32 t = (guint32)target8;
1933 *code &= ~(0x3fffff);
1934 *code |= (t >> 10);
1935 *(code + 1) &= ~(0x3ff);
1936 *(code + 1) |= (t & 0x3ff);
1937 #endif
1939 else
1940 NOT_IMPLEMENTED;
1942 else if (op == 01) {
1943 gint64 disp = (target8 - (guint8*)code) >> 2;
1945 if (!sparc_is_imm30 (disp))
1946 NOT_IMPLEMENTED;
1947 sparc_call_simple (code, target8 - (guint8*)code);
1949 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
1950 /* mov imm, reg */
1951 g_assert (sparc_is_imm13 (target8));
1952 *code &= ~(0x1fff);
1953 *code |= (guint32)target8;
1955 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
1956 /* sparc_set case 5. */
1957 guint32 *p = c;
1959 g_assert (v64);
1960 reg = sparc_inst_rd (c [3]);
1961 sparc_set (p, target, reg);
1962 while (p < (c + 6))
1963 sparc_nop (p);
1965 else
1966 NOT_IMPLEMENTED;
1968 // g_print ("patched with 0x%08x\n", ins);
1972 * mono_sparc_emit_save_lmf:
1974 * Emit the code necessary to push a new entry onto the lmf stack. Used by
1975 * trampolines as well.
1977 guint32*
1978 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
1980 /* Save lmf_addr */
1981 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
1982 /* Save previous_lmf */
1983 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
1984 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1985 /* Set new lmf */
1986 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
1987 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
1989 return code;
1992 guint32*
1993 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
1995 /* Load previous_lmf */
1996 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
1997 /* Load lmf_addr */
1998 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
1999 /* *(lmf) = previous_lmf */
2000 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
2001 return code;
2004 static guint32*
2005 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2008 * Since register windows are saved to the current value of %sp, we need to
2009 * set the sp field in the lmf before the call, not in the prolog.
2011 if (cfg->method->save_lmf) {
2012 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2014 /* Save sp */
2015 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
2018 return code;
2021 static guint32*
2022 emit_vret_token (MonoInst *ins, guint32 *code)
2024 MonoCallInst *call = (MonoCallInst*)ins;
2025 guint32 size;
2028 * The sparc ABI requires that calls to functions which return a structure
2029 * contain an additional unimpl instruction which is checked by the callee.
2031 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2032 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2033 size = mini_type_stack_size (call->signature->ret, NULL);
2034 else
2035 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
2036 sparc_unimp (code, size & 0xfff);
2039 return code;
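/*
 * For reference, a sketch of the resulting call site (the call itself and
 * the delay slot nop are emitted by the callers of emit_vret_token, the
 * unimp word by the code above). A V8 callee that returns a structure
 * inspects this word to learn the size of the caller-provided return area:
 *
 *     call  <native function>
 *     nop                          ! delay slot
 *     unimp <struct size & 0xfff>
 */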
2042 static guint32*
2043 emit_move_return_value (MonoInst *ins, guint32 *code)
2045 /* Move return value to the target register */
2046 /* FIXME: do more things in the local reg allocator */
2047 switch (ins->opcode) {
2048 case OP_VOIDCALL:
2049 case OP_VOIDCALL_REG:
2050 case OP_VOIDCALL_MEMBASE:
2051 break;
2052 case OP_CALL:
2053 case OP_CALL_REG:
2054 case OP_CALL_MEMBASE:
2055 g_assert (ins->dreg == sparc_o0);
2056 break;
2057 case OP_LCALL:
2058 case OP_LCALL_REG:
2059 case OP_LCALL_MEMBASE:
2061 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2062 * in inssel-long32.brg.
2064 #ifdef SPARCV9
2065 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2066 #else
2067 g_assert (ins->dreg == sparc_o1);
2068 #endif
2069 break;
2070 case OP_FCALL:
2071 case OP_FCALL_REG:
2072 case OP_FCALL_MEMBASE:
2073 #ifdef SPARCV9
2074 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2075 sparc_fmovs (code, sparc_f0, ins->dreg);
2076 sparc_fstod (code, ins->dreg, ins->dreg);
2078 else
2079 sparc_fmovd (code, sparc_f0, ins->dreg);
2080 #else
2081 sparc_fmovs (code, sparc_f0, ins->dreg);
2082 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2083 sparc_fstod (code, ins->dreg, ins->dreg);
2084 else
2085 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2086 #endif
2087 break;
2088 case OP_VCALL:
2089 case OP_VCALL_REG:
2090 case OP_VCALL_MEMBASE:
2091 case OP_VCALL2:
2092 case OP_VCALL2_REG:
2093 case OP_VCALL2_MEMBASE:
2094 break;
2095 default:
2096 NOT_IMPLEMENTED;
2099 return code;
2103 * emit_load_volatile_arguments:
2105 * Load volatile arguments from the stack to the original input registers.
2106 * Required before a tail call.
2108 static guint32*
2109 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2111 MonoMethod *method = cfg->method;
2112 MonoMethodSignature *sig;
2113 MonoInst *inst;
2114 CallInfo *cinfo;
2115 guint32 i, ireg;
2117 /* FIXME: Generate intermediate code instead */
2119 sig = mono_method_signature (method);
2121 cinfo = get_call_info (cfg, sig, FALSE);
2123 /* This is the opposite of the code in emit_prolog */
2125 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2126 ArgInfo *ainfo = cinfo->args + i;
2127 gint32 stack_offset;
2128 MonoType *arg_type;
2130 inst = cfg->args [i];
2132 if (sig->hasthis && (i == 0))
2133 arg_type = &mono_defaults.object_class->byval_arg;
2134 else
2135 arg_type = sig->params [i - sig->hasthis];
2137 stack_offset = ainfo->offset + ARGS_OFFSET;
2138 ireg = sparc_i0 + ainfo->reg;
2140 if (ainfo->storage == ArgInSplitRegStack) {
2141 g_assert (inst->opcode == OP_REGOFFSET);
2143 if (!sparc_is_imm13 (stack_offset))
2144 NOT_IMPLEMENTED;
2145 sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
2148 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2149 if (ainfo->storage == ArgInIRegPair) {
2150 if (!sparc_is_imm13 (inst->inst_offset + 4))
2151 NOT_IMPLEMENTED;
2152 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2153 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2155 else
2156 if (ainfo->storage == ArgInSplitRegStack) {
2157 if (stack_offset != inst->inst_offset) {
2158 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2159 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2160 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2164 else
2165 if (ainfo->storage == ArgOnStackPair) {
2166 if (stack_offset != inst->inst_offset) {
2167 /* stack_offset is not dword aligned, so we need to make a copy */
2168 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2169 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2171 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2172 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2176 else
2177 g_assert_not_reached ();
2179 else
2180 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2181 /* Argument in register, but need to be saved to stack */
2182 if (!sparc_is_imm13 (stack_offset))
2183 NOT_IMPLEMENTED;
2184 if ((stack_offset - ARGS_OFFSET) & 0x1)
2185 /* FIXME: Is this ldsb or ldub ? */
2186 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2187 else
2188 if ((stack_offset - ARGS_OFFSET) & 0x2)
2189 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2190 else
2191 if ((stack_offset - ARGS_OFFSET) & 0x4)
2192 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2193 else {
2194 if (v64)
2195 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2196 else
2197 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2200 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2201 /* Argument in regpair, but need to be saved to stack */
2202 if (!sparc_is_imm13 (inst->inst_offset + 4))
2203 NOT_IMPLEMENTED;
2204 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2205 sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2207 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2208 NOT_IMPLEMENTED;
2210 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2211 NOT_IMPLEMENTED;
2214 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2215 if (inst->opcode == OP_REGVAR)
2216 /* FIXME: Load the argument into memory */
2217 NOT_IMPLEMENTED;
2220 g_free (cinfo);
2222 return code;
2226 * mono_sparc_is_virtual_call:
2228 * Determine whether the instruction at CODE is a virtual call.
2230 gboolean
2231 mono_sparc_is_virtual_call (guint32 *code)
2233 guint32 buf[1];
2234 guint32 *p;
2236 p = buf;
2238 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2240 * Register indirect call. If it is a virtual call, then the
2241 * instruction in the delay slot is a special kind of nop.
2244 /* Construct special nop */
2245 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
2246 p --;
2248 if (code [1] == p [0])
2249 return TRUE;
2252 return FALSE;
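/*
 * A register-indirect virtual call site therefore looks like this
 * (sketch; it is the pattern emitted for the *_CALL_REG/*_CALL_MEMBASE
 * opcodes further down when call->virtual is set; the link register used
 * by the emitter is sparc_callsite):
 *
 *     jmpl  %reg, %o7           ! register indirect call
 *     or    %g0, 0xca, %g0      ! "magic" nop in the delay slot
 *
 * The function above simply reconstructs the magic nop and compares it
 * with the word following the jmpl.
 */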
2255 #define CMP_SIZE 3
2256 #define BR_SMALL_SIZE 2
2257 #define BR_LARGE_SIZE 2
2258 #define JUMP_IMM_SIZE 5
2259 #define ENABLE_WRONG_METHOD_CHECK 0
2262 * LOCKING: called with the domain lock held
2264 gpointer
2265 mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
2266 gpointer fail_tramp)
2268 int i;
2269 int size = 0;
2270 guint32 *code, *start;
2272 for (i = 0; i < count; ++i) {
2273 MonoIMTCheckItem *item = imt_entries [i];
2274 if (item->is_equals) {
2275 if (item->check_target_idx) {
2276 if (!item->compare_done)
2277 item->chunk_size += CMP_SIZE;
2278 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
2279 } else {
2280 if (fail_tramp)
2281 item->chunk_size += 16;
2282 item->chunk_size += JUMP_IMM_SIZE;
2283 #if ENABLE_WRONG_METHOD_CHECK
2284 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
2285 #endif
2287 } else {
2288 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
2289 imt_entries [item->check_target_idx]->compare_done = TRUE;
2291 size += item->chunk_size;
2293 if (fail_tramp)
2294 code = mono_method_alloc_generic_virtual_trampoline (domain, size * 4);
2295 else
2296 code = mono_domain_code_reserve (domain, size * 4);
2297 start = code;
2298 for (i = 0; i < count; ++i) {
2299 MonoIMTCheckItem *item = imt_entries [i];
2300 item->code_target = (guint8*)code;
2301 if (item->is_equals) {
2302 gboolean fail_case = !item->check_target_idx && fail_tramp;
2304 if (item->check_target_idx || fail_case) {
2305 if (!item->compare_done || fail_case) {
2306 sparc_set (code, (guint32)item->key, sparc_g5);
2307 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2309 item->jmp_code = (guint8*)code;
2310 sparc_branch (code, 0, sparc_bne, 0);
2311 sparc_nop (code);
2312 if (item->has_target_code) {
2313 sparc_set (code, item->value.target_code, sparc_g5);
2314 } else {
2315 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2316 sparc_ld (code, sparc_g5, 0, sparc_g5);
2318 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2319 sparc_nop (code);
2321 if (fail_case) {
2322 sparc_patch (item->jmp_code, code);
2323 sparc_set (code, fail_tramp, sparc_g5);
2324 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2325 sparc_nop (code);
2326 item->jmp_code = NULL;
2328 } else {
2329 /* enable the commented code to assert on wrong method */
2330 #if ENABLE_WRONG_METHOD_CHECK
2331 g_assert_not_reached ();
2332 #endif
2333 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2334 sparc_ld (code, sparc_g5, 0, sparc_g5);
2335 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2336 sparc_nop (code);
2337 #if ENABLE_WRONG_METHOD_CHECK
2338 g_assert_not_reached ();
2339 #endif
2341 } else {
2342 sparc_set (code, (guint32)item->key, sparc_g5);
2343 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2344 item->jmp_code = (guint8*)code;
2345 sparc_branch (code, 0, sparc_beu, 0);
2346 sparc_nop (code);
2349 /* patch the branches to get to the target items */
2350 for (i = 0; i < count; ++i) {
2351 MonoIMTCheckItem *item = imt_entries [i];
2352 if (item->jmp_code) {
2353 if (item->check_target_idx) {
2354 sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
2359 mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
2361 mono_stats.imt_trampolines_size += (code - start) * 4;
2362 g_assert (code - start <= size);
2364 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);
2366 return start;
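/*
 * Sketch of what the loop above generates for a single non-terminal IMT
 * entry in the 32-bit case (register names taken from the emitter calls,
 * layout illustrative):
 *
 *     set   item->key, %g5
 *     cmp   MONO_ARCH_IMT_REG, %g5
 *     bne   <next entry>
 *     nop
 *     set   &vtable->vtable [slot], %g5
 *     ld    [%g5], %g5
 *     jmpl  %g5, %g0
 *     nop
 */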
2369 MonoMethod*
2370 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
2372 #ifdef SPARCV9
2373 g_assert_not_reached ();
2374 #endif
2376 return (MonoMethod*)regs [sparc_g1];
2379 gpointer
2380 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
2382 mono_sparc_flushw ();
2384 return (gpointer)regs [sparc_o0];
2388 * Some conventions used in the following code.
2389 * The only scratch registers we have are o7 and g1. We try to
2390 * stick to o7 when we can, and use g1 when necessary.
2393 void
2394 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2396 MonoInst *ins;
2397 MonoCallInst *call;
2398 guint offset;
2399 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2400 MonoInst *last_ins = NULL;
2401 int max_len, cpos;
2402 const char *spec;
2404 if (cfg->verbose_level > 2)
2405 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2407 cpos = bb->max_offset;
2409 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2410 NOT_IMPLEMENTED;
2413 MONO_BB_FOR_EACH_INS (bb, ins) {
2414 guint8* code_start;
2416 offset = (guint8*)code - cfg->native_code;
2418 spec = ins_get_spec (ins->opcode);
2420 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2422 if (offset > (cfg->code_size - max_len - 16)) {
2423 cfg->code_size *= 2;
2424 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2425 code = (guint32*)(cfg->native_code + offset);
2427 code_start = (guint8*)code;
2428 // if (ins->cil_code)
2429 // g_print ("cil code\n");
2430 mono_debug_record_line_number (cfg, ins, offset);
2432 switch (ins->opcode) {
2433 case OP_STOREI1_MEMBASE_IMM:
2434 EMIT_STORE_MEMBASE_IMM (ins, stb);
2435 break;
2436 case OP_STOREI2_MEMBASE_IMM:
2437 EMIT_STORE_MEMBASE_IMM (ins, sth);
2438 break;
2439 case OP_STORE_MEMBASE_IMM:
2440 EMIT_STORE_MEMBASE_IMM (ins, sti);
2441 break;
2442 case OP_STOREI4_MEMBASE_IMM:
2443 EMIT_STORE_MEMBASE_IMM (ins, st);
2444 break;
2445 case OP_STOREI8_MEMBASE_IMM:
2446 #ifdef SPARCV9
2447 EMIT_STORE_MEMBASE_IMM (ins, stx);
2448 #else
2449 /* Only generated by peephole opts */
2450 g_assert ((ins->inst_offset % 8) == 0);
2451 g_assert (ins->inst_imm == 0);
2452 EMIT_STORE_MEMBASE_IMM (ins, stx);
2453 #endif
2454 break;
2455 case OP_STOREI1_MEMBASE_REG:
2456 EMIT_STORE_MEMBASE_REG (ins, stb);
2457 break;
2458 case OP_STOREI2_MEMBASE_REG:
2459 EMIT_STORE_MEMBASE_REG (ins, sth);
2460 break;
2461 case OP_STOREI4_MEMBASE_REG:
2462 EMIT_STORE_MEMBASE_REG (ins, st);
2463 break;
2464 case OP_STOREI8_MEMBASE_REG:
2465 #ifdef SPARCV9
2466 EMIT_STORE_MEMBASE_REG (ins, stx);
2467 #else
2468 /* Only used by OP_MEMSET */
2469 EMIT_STORE_MEMBASE_REG (ins, std);
2470 #endif
2471 break;
2472 case OP_STORE_MEMBASE_REG:
2473 EMIT_STORE_MEMBASE_REG (ins, sti);
2474 break;
2475 case OP_LOADU4_MEM:
2476 sparc_set (code, ins->inst_c0, ins->dreg);
2477 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2478 break;
2479 case OP_LOADI4_MEMBASE:
2480 #ifdef SPARCV9
2481 EMIT_LOAD_MEMBASE (ins, ldsw);
2482 #else
2483 EMIT_LOAD_MEMBASE (ins, ld);
2484 #endif
2485 break;
2486 case OP_LOADU4_MEMBASE:
2487 EMIT_LOAD_MEMBASE (ins, ld);
2488 break;
2489 case OP_LOADU1_MEMBASE:
2490 EMIT_LOAD_MEMBASE (ins, ldub);
2491 break;
2492 case OP_LOADI1_MEMBASE:
2493 EMIT_LOAD_MEMBASE (ins, ldsb);
2494 break;
2495 case OP_LOADU2_MEMBASE:
2496 EMIT_LOAD_MEMBASE (ins, lduh);
2497 break;
2498 case OP_LOADI2_MEMBASE:
2499 EMIT_LOAD_MEMBASE (ins, ldsh);
2500 break;
2501 case OP_LOAD_MEMBASE:
2502 #ifdef SPARCV9
2503 EMIT_LOAD_MEMBASE (ins, ldx);
2504 #else
2505 EMIT_LOAD_MEMBASE (ins, ld);
2506 #endif
2507 break;
2508 #ifdef SPARCV9
2509 case OP_LOADI8_MEMBASE:
2510 EMIT_LOAD_MEMBASE (ins, ldx);
2511 break;
2512 #endif
2513 case OP_ICONV_TO_I1:
2514 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2515 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2516 break;
2517 case OP_ICONV_TO_I2:
2518 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2519 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2520 break;
2521 case OP_ICONV_TO_U1:
2522 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2523 break;
2524 case OP_ICONV_TO_U2:
2525 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2526 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2527 break;
2528 case OP_LCONV_TO_OVF_U4:
2529 case OP_ICONV_TO_OVF_U4:
2530 /* Only used on V9 */
2531 sparc_cmp_imm (code, ins->sreg1, 0);
2532 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2533 MONO_PATCH_INFO_EXC, "OverflowException");
2534 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2535 /* Delay slot */
2536 sparc_set (code, 1, sparc_o7);
2537 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2538 sparc_cmp (code, ins->sreg1, sparc_o7);
2539 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2540 MONO_PATCH_INFO_EXC, "OverflowException");
2541 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2542 sparc_nop (code);
2543 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2544 break;
2545 case OP_LCONV_TO_OVF_I4_UN:
2546 case OP_ICONV_TO_OVF_I4_UN:
2547 /* Only used on V9 */
2548 NOT_IMPLEMENTED;
2549 break;
2550 case OP_COMPARE:
2551 case OP_LCOMPARE:
2552 case OP_ICOMPARE:
2553 sparc_cmp (code, ins->sreg1, ins->sreg2);
2554 break;
2555 case OP_COMPARE_IMM:
2556 case OP_ICOMPARE_IMM:
2557 if (sparc_is_imm13 (ins->inst_imm))
2558 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2559 else {
2560 sparc_set (code, ins->inst_imm, sparc_o7);
2561 sparc_cmp (code, ins->sreg1, sparc_o7);
2563 break;
2564 case OP_BREAK:
2566 * gdb does not like encountering 'ta 1' in the debugged code. So
2567 * instead of emitting a trap, we emit a call to a C function and place a
2568 * breakpoint there.
2570 //sparc_ta (code, 1);
2571 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
2572 EMIT_CALL();
2573 break;
2574 case OP_ADDCC:
2575 case OP_IADDCC:
2576 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2577 break;
2578 case OP_IADD:
2579 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2580 break;
2581 case OP_ADDCC_IMM:
2582 case OP_ADD_IMM:
2583 case OP_IADD_IMM:
2584 /* according to inssel-long32.brg, this should set cc */
2585 EMIT_ALU_IMM (ins, add, TRUE);
2586 break;
2587 case OP_ADC:
2588 case OP_IADC:
2589 /* according to inssel-long32.brg, this should set cc */
2590 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2591 break;
2592 case OP_ADC_IMM:
2593 case OP_IADC_IMM:
2594 EMIT_ALU_IMM (ins, addx, TRUE);
2595 break;
2596 case OP_SUBCC:
2597 case OP_ISUBCC:
2598 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2599 break;
2600 case OP_ISUB:
2601 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2602 break;
2603 case OP_SUBCC_IMM:
2604 case OP_SUB_IMM:
2605 case OP_ISUB_IMM:
2606 /* according to inssel-long32.brg, this should set cc */
2607 EMIT_ALU_IMM (ins, sub, TRUE);
2608 break;
2609 case OP_SBB:
2610 case OP_ISBB:
2611 /* according to inssel-long32.brg, this should set cc */
2612 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2613 break;
2614 case OP_SBB_IMM:
2615 case OP_ISBB_IMM:
2616 EMIT_ALU_IMM (ins, subx, TRUE);
2617 break;
2618 case OP_IAND:
2619 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2620 break;
2621 case OP_AND_IMM:
2622 case OP_IAND_IMM:
2623 EMIT_ALU_IMM (ins, and, FALSE);
2624 break;
2625 case OP_IDIV:
2626 /* Sign extend sreg1 into %y */
2627 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2628 sparc_wry (code, sparc_o7, sparc_g0);
2629 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2630 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2631 break;
2632 case OP_IDIV_UN:
2633 sparc_wry (code, sparc_g0, sparc_g0);
2634 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2635 break;
2636 case OP_DIV_IMM:
2637 case OP_IDIV_IMM: {
2638 int i, imm;
2640 /* Transform division into a shift */
2641 for (i = 1; i < 30; ++i) {
2642 imm = (1 << i);
2643 if (ins->inst_imm == imm)
2644 break;
2646 if (i < 30) {
2647 if (i == 1) {
2648 /* gcc 2.95.3 */
2649 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2650 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2651 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2653 else {
2654 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2655 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2656 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2657 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2658 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2661 else {
2662 /* Sign extend sreg1 into %y */
2663 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2664 sparc_wry (code, sparc_o7, sparc_g0);
2665 EMIT_ALU_IMM (ins, sdiv, TRUE);
2666 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2668 break;
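/*
 * Illustrative only, never compiled: C equivalent of the power-of-two
 * path above for a divisor d == (1 << i), assuming 32-bit two's
 * complement gint32 with an arithmetic >> on negative values.
 * Example: n = -7, i = 2 gives bias = 3 and (-7 + 3) >> 2 = -1,
 * which is -7 / 4 truncated toward zero.
 */
#if 0
static gint32
sdiv_pow2_sketch (gint32 n, int i)
{
	guint32 bias = ((guint32)(n >> 31)) >> (32 - i); /* (1 << i) - 1 if n < 0, else 0 */
	return (gint32)(n + (gint32)bias) >> i;          /* arithmetic shift truncates toward zero */
}
#endif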
2670 case OP_IDIV_UN_IMM:
2671 sparc_wry (code, sparc_g0, sparc_g0);
2672 EMIT_ALU_IMM (ins, udiv, FALSE);
2673 break;
2674 case OP_IREM:
2675 /* Sign extend sreg1 into %y */
2676 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2677 sparc_wry (code, sparc_o7, sparc_g0);
2678 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2679 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2680 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2681 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2682 break;
2683 case OP_IREM_UN:
2684 sparc_wry (code, sparc_g0, sparc_g0);
2685 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2686 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2687 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2688 break;
2689 case OP_REM_IMM:
2690 case OP_IREM_IMM:
2691 /* Sign extend sreg1 into %y */
2692 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2693 sparc_wry (code, sparc_o7, sparc_g0);
2694 if (!sparc_is_imm13 (ins->inst_imm)) {
2695 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2696 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2697 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2698 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2700 else {
2701 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2702 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2703 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2705 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2706 break;
2707 case OP_IREM_UN_IMM:
2708 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2709 sparc_wry (code, sparc_g0, sparc_g0);
2710 sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2711 sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
2712 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2713 break;
2714 case OP_IOR:
2715 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2716 break;
2717 case OP_OR_IMM:
2718 case OP_IOR_IMM:
2719 EMIT_ALU_IMM (ins, or, FALSE);
2720 break;
2721 case OP_IXOR:
2722 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2723 break;
2724 case OP_XOR_IMM:
2725 case OP_IXOR_IMM:
2726 EMIT_ALU_IMM (ins, xor, FALSE);
2727 break;
2728 case OP_ISHL:
2729 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2730 break;
2731 case OP_SHL_IMM:
2732 case OP_ISHL_IMM:
2733 if (ins->inst_imm < (1 << 5))
2734 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2735 else {
2736 sparc_set (code, ins->inst_imm, sparc_o7);
2737 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2739 break;
2740 case OP_ISHR:
2741 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2742 break;
2743 case OP_ISHR_IMM:
2744 case OP_SHR_IMM:
2745 if (ins->inst_imm < (1 << 5))
2746 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2747 else {
2748 sparc_set (code, ins->inst_imm, sparc_o7);
2749 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2751 break;
2752 case OP_SHR_UN_IMM:
2753 case OP_ISHR_UN_IMM:
2754 if (ins->inst_imm < (1 << 5))
2755 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2756 else {
2757 sparc_set (code, ins->inst_imm, sparc_o7);
2758 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2760 break;
2761 case OP_ISHR_UN:
2762 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2763 break;
2764 case OP_LSHL:
2765 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2766 break;
2767 case OP_LSHL_IMM:
2768 if (ins->inst_imm < (1 << 6))
2769 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2770 else {
2771 sparc_set (code, ins->inst_imm, sparc_o7);
2772 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2774 break;
2775 case OP_LSHR:
2776 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2777 break;
2778 case OP_LSHR_IMM:
2779 if (ins->inst_imm < (1 << 6))
2780 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2781 else {
2782 sparc_set (code, ins->inst_imm, sparc_o7);
2783 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2785 break;
2786 case OP_LSHR_UN:
2787 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2788 break;
2789 case OP_LSHR_UN_IMM:
2790 if (ins->inst_imm < (1 << 6))
2791 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2792 else {
2793 sparc_set (code, ins->inst_imm, sparc_o7);
2794 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2796 break;
2797 case OP_INOT:
2798 /* can't use sparc_not */
2799 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2800 break;
2801 case OP_INEG:
2802 /* can't use sparc_neg */
2803 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2804 break;
2805 case OP_IMUL:
2806 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2807 break;
2808 case OP_IMUL_IMM:
2809 case OP_MUL_IMM: {
2810 int i, imm;
2812 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2813 break;
2815 /* Transform multiplication into a shift */
2816 for (i = 0; i < 30; ++i) {
2817 imm = (1 << i);
2818 if (ins->inst_imm == imm)
2819 break;
2821 if (i < 30)
2822 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2823 else
2824 EMIT_ALU_IMM (ins, smul, FALSE);
2825 break;
2827 case OP_IMUL_OVF:
2828 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2829 sparc_rdy (code, sparc_g1);
2830 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2831 sparc_cmp (code, sparc_g1, sparc_o7);
2832 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2833 break;
2834 case OP_IMUL_OVF_UN:
2835 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2836 sparc_rdy (code, sparc_o7);
2837 sparc_cmp (code, sparc_o7, sparc_g0);
2838 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2839 break;
2840 case OP_ICONST:
2841 sparc_set (code, ins->inst_c0, ins->dreg);
2842 break;
2843 case OP_I8CONST:
2844 sparc_set (code, ins->inst_l, ins->dreg);
2845 break;
2846 case OP_AOTCONST:
2847 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2848 sparc_set_template (code, ins->dreg);
2849 break;
2850 case OP_JUMP_TABLE:
2851 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2852 sparc_set_template (code, ins->dreg);
2853 break;
2854 case OP_ICONV_TO_I4:
2855 case OP_ICONV_TO_U4:
2856 case OP_MOVE:
2857 if (ins->sreg1 != ins->dreg)
2858 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2859 break;
2860 case OP_FMOVE:
2861 #ifdef SPARCV9
2862 if (ins->sreg1 != ins->dreg)
2863 sparc_fmovd (code, ins->sreg1, ins->dreg);
2864 #else
2865 sparc_fmovs (code, ins->sreg1, ins->dreg);
2866 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2867 #endif
2868 break;
2869 case OP_JMP:
2870 if (cfg->method->save_lmf)
2871 NOT_IMPLEMENTED;
2873 code = emit_load_volatile_arguments (cfg, code);
2874 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2875 sparc_set_template (code, sparc_o7);
2876 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2877 /* Restore parent frame in delay slot */
2878 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2879 break;
2880 case OP_CHECK_THIS:
2881 /* ensure ins->sreg1 is not NULL */
2882 /* Might be misaligned in case of vtypes so use a byte load */
2883 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
2884 break;
2885 case OP_ARGLIST:
2886 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2887 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2888 break;
2889 case OP_FCALL:
2890 case OP_LCALL:
2891 case OP_VCALL:
2892 case OP_VCALL2:
2893 case OP_VOIDCALL:
2894 case OP_CALL:
2895 call = (MonoCallInst*)ins;
2896 g_assert (!call->virtual);
2897 code = emit_save_sp_to_lmf (cfg, code);
2898 if (ins->flags & MONO_INST_HAS_METHOD)
2899 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2900 else
2901 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2903 code = emit_vret_token (ins, code);
2904 code = emit_move_return_value (ins, code);
2905 break;
2906 case OP_FCALL_REG:
2907 case OP_LCALL_REG:
2908 case OP_VCALL_REG:
2909 case OP_VCALL2_REG:
2910 case OP_VOIDCALL_REG:
2911 case OP_CALL_REG:
2912 call = (MonoCallInst*)ins;
2913 code = emit_save_sp_to_lmf (cfg, code);
2914 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2916 * We emit a special kind of nop in the delay slot to tell the
2917 * trampoline code that this is a virtual call, thus an unbox
2918 * trampoline might need to be called.
2920 if (call->virtual)
2921 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2922 else
2923 sparc_nop (code);
2925 code = emit_vret_token (ins, code);
2926 code = emit_move_return_value (ins, code);
2927 break;
2928 case OP_FCALL_MEMBASE:
2929 case OP_LCALL_MEMBASE:
2930 case OP_VCALL_MEMBASE:
2931 case OP_VCALL2_MEMBASE:
2932 case OP_VOIDCALL_MEMBASE:
2933 case OP_CALL_MEMBASE:
2934 call = (MonoCallInst*)ins;
2935 code = emit_save_sp_to_lmf (cfg, code);
2936 if (sparc_is_imm13 (ins->inst_offset)) {
2937 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2938 } else {
2939 sparc_set (code, ins->inst_offset, sparc_o7);
2940 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
2942 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2943 if (call->virtual)
2944 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2945 else
2946 sparc_nop (code);
2948 code = emit_vret_token (ins, code);
2949 code = emit_move_return_value (ins, code);
2950 break;
2951 case OP_SETFRET:
2952 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
2953 sparc_fdtos (code, ins->sreg1, sparc_f0);
2954 else {
2955 #ifdef SPARCV9
2956 sparc_fmovd (code, ins->sreg1, ins->dreg);
2957 #else
2958 /* FIXME: Why not use fmovd ? */
2959 sparc_fmovs (code, ins->sreg1, ins->dreg);
2960 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2961 #endif
2963 break;
2964 case OP_LOCALLOC: {
2965 guint32 size_reg;
2966 gint32 offset2;
2968 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2969 /* Perform stack touching */
2970 NOT_IMPLEMENTED;
2971 #endif
2973 /* Keep alignment */
2974 /* Add 4 to compensate for the rounding of localloc_offset */
2975 sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
2976 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
2977 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2979 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
2980 #ifdef SPARCV9
2981 size_reg = sparc_g4;
2982 #else
2983 size_reg = sparc_g1;
2984 #endif
2985 sparc_mov_reg_reg (code, ins->dreg, size_reg);
2987 else
2988 size_reg = ins->sreg1;
2990 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
2991 /* Keep %sp valid at all times */
2992 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
2993 /* Round localloc_offset too so the result is at least 8-byte aligned */
2994 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
2995 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
2996 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
2998 if (ins->flags & MONO_INST_INIT) {
2999 guint32 *br [3];
3000 /* Initialize memory region */
3001 sparc_cmp_imm (code, size_reg, 0);
3002 br [0] = code;
3003 sparc_branch (code, 0, sparc_be, 0);
3004 /* delay slot */
3005 sparc_set (code, 0, sparc_o7);
3006 sparc_sub_imm (code, 0, size_reg, mono_hwcap_sparc_is_v9 ? 8 : 4, size_reg);
3007 /* start of loop */
3008 br [1] = code;
3009 if (mono_hwcap_sparc_is_v9)
3010 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3011 else
3012 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3013 sparc_cmp (code, sparc_o7, size_reg);
3014 br [2] = code;
3015 sparc_branch (code, 0, sparc_bl, 0);
3016 sparc_patch (br [2], br [1]);
3017 /* delay slot */
3018 sparc_add_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3019 sparc_patch (br [0], code);
3021 break;
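/*
 * Illustrative only, never compiled: the size rounding performed above,
 * where align is MONO_ARCH_LOCALLOC_ALIGNMENT (a power of two) and the
 * extra 4 bytes compensate for the rounding of localloc_offset.
 */
#if 0
static guint32
localloc_size_sketch (guint32 size, guint32 align)
{
	return (size + 4 + align - 1) & ~(align - 1);
}
#endif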
3023 case OP_LOCALLOC_IMM: {
3024 gint32 offset = ins->inst_imm;
3025 gint32 offset2;
3027 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3028 /* Perform stack touching */
3029 NOT_IMPLEMENTED;
3030 #endif
3032 /* To compensate for the rounding of localloc_offset */
3033 offset += sizeof (gpointer);
3034 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3035 if (sparc_is_imm13 (offset))
3036 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3037 else {
3038 sparc_set (code, offset, sparc_o7);
3039 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3041 /* Round localloc_offset too so the result is at least 8-byte aligned */
3042 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3043 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3044 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3045 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
3046 guint32 *br [2];
3047 int i;
3049 if (offset <= 16) {
3050 i = 0;
3051 while (i < offset) {
3052 if (mono_hwcap_sparc_is_v9) {
3053 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3054 i += 8;
3056 else {
3057 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3058 i += 4;
3062 else {
3063 sparc_set (code, offset, sparc_o7);
3064 sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3065 /* beginning of loop */
3066 br [0] = code;
3067 if (mono_hwcap_sparc_is_v9)
3068 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3069 else
3070 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3071 sparc_cmp_imm (code, sparc_o7, 0);
3072 br [1] = code;
3073 sparc_branch (code, 0, sparc_bne, 0);
3074 /* delay slot */
3075 sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3076 sparc_patch (br [1], br [0]);
3079 break;
3081 case OP_THROW:
3082 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3083 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3084 (gpointer)"mono_arch_throw_exception");
3085 EMIT_CALL ();
3086 break;
3087 case OP_RETHROW:
3088 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3089 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3090 (gpointer)"mono_arch_rethrow_exception");
3091 EMIT_CALL ();
3092 break;
3093 case OP_START_HANDLER: {
3095 * The START_HANDLER instruction marks the beginning of a handler
3096 * block. It is called using a call instruction, so %o7 contains
3097 * the return address. Since the handler executes in the same stack
3098 * frame as the method itself, we can't use save/restore to save
3099 * the return address. Instead, we save it into a dedicated
3100 * variable.
3102 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3103 if (!sparc_is_imm13 (spvar->inst_offset)) {
3104 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3105 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3107 else
3108 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3109 break;
3111 case OP_ENDFILTER: {
3112 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3113 if (!sparc_is_imm13 (spvar->inst_offset)) {
3114 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3115 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3117 else
3118 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3119 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3120 /* Delay slot */
3121 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3122 break;
3124 case OP_ENDFINALLY: {
3125 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3126 if (!sparc_is_imm13 (spvar->inst_offset)) {
3127 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3128 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3130 else
3131 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3132 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3133 sparc_nop (code);
3134 break;
3136 case OP_CALL_HANDLER:
3137 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3138 /* This is a jump inside the method, so call_simple works even on V9 */
3139 sparc_call_simple (code, 0);
3140 sparc_nop (code);
3141 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3142 break;
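/*
 * Sketch of the handler protocol implemented by OP_START_HANDLER,
 * OP_ENDFILTER/OP_ENDFINALLY and OP_CALL_HANDLER above. Because the
 * handler runs in the frame of the method itself, the return address is
 * kept in the dedicated spvar instead of being saved by a save/restore
 * pair:
 *
 *     call  <handler block>       ! OP_CALL_HANDLER, sets %o7
 *     nop
 *     ...
 *   handler:
 *     st    %o7, [spvar]          ! OP_START_HANDLER
 *     ...
 *     ld    [spvar], %o7          ! OP_ENDFINALLY / OP_ENDFILTER
 *     jmpl  %o7 + 8, %g0          ! return past the call and its delay slot
 *     nop                         ! (ENDFILTER moves the filter result to %o0 here)
 */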
3143 case OP_LABEL:
3144 ins->inst_c0 = (guint8*)code - cfg->native_code;
3145 break;
3146 case OP_RELAXED_NOP:
3147 case OP_NOP:
3148 case OP_DUMMY_USE:
3149 case OP_DUMMY_STORE:
3150 case OP_NOT_REACHED:
3151 case OP_NOT_NULL:
3152 break;
3153 case OP_BR:
3154 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3155 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3156 break;
3157 if (ins->inst_target_bb->native_offset) {
3158 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3159 g_assert (sparc_is_imm22 (disp));
3160 sparc_branch (code, 1, sparc_ba, disp);
3161 } else {
3162 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3163 sparc_branch (code, 1, sparc_ba, 0);
3165 sparc_nop (code);
3166 break;
3167 case OP_BR_REG:
3168 sparc_jmp (code, ins->sreg1, sparc_g0);
3169 sparc_nop (code);
3170 break;
3171 case OP_CEQ:
3172 case OP_CLT:
3173 case OP_CLT_UN:
3174 case OP_CGT:
3175 case OP_CGT_UN:
3176 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3177 sparc_clr_reg (code, ins->dreg);
3178 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3180 else {
3181 sparc_clr_reg (code, ins->dreg);
3182 #ifdef SPARCV9
3183 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3184 #else
3185 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3186 #endif
3187 /* delay slot */
3188 sparc_set (code, 1, ins->dreg);
3190 break;
3191 case OP_ICEQ:
3192 case OP_ICLT:
3193 case OP_ICLT_UN:
3194 case OP_ICGT:
3195 case OP_ICGT_UN:
3196 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3197 sparc_clr_reg (code, ins->dreg);
3198 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3200 else {
3201 sparc_clr_reg (code, ins->dreg);
3202 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3203 /* delay slot */
3204 sparc_set (code, 1, ins->dreg);
3206 break;
3207 case OP_COND_EXC_EQ:
3208 case OP_COND_EXC_NE_UN:
3209 case OP_COND_EXC_LT:
3210 case OP_COND_EXC_LT_UN:
3211 case OP_COND_EXC_GT:
3212 case OP_COND_EXC_GT_UN:
3213 case OP_COND_EXC_GE:
3214 case OP_COND_EXC_GE_UN:
3215 case OP_COND_EXC_LE:
3216 case OP_COND_EXC_LE_UN:
3217 case OP_COND_EXC_OV:
3218 case OP_COND_EXC_NO:
3219 case OP_COND_EXC_C:
3220 case OP_COND_EXC_NC:
3221 case OP_COND_EXC_IEQ:
3222 case OP_COND_EXC_INE_UN:
3223 case OP_COND_EXC_ILT:
3224 case OP_COND_EXC_ILT_UN:
3225 case OP_COND_EXC_IGT:
3226 case OP_COND_EXC_IGT_UN:
3227 case OP_COND_EXC_IGE:
3228 case OP_COND_EXC_IGE_UN:
3229 case OP_COND_EXC_ILE:
3230 case OP_COND_EXC_ILE_UN:
3231 case OP_COND_EXC_IOV:
3232 case OP_COND_EXC_INO:
3233 case OP_COND_EXC_IC:
3234 case OP_COND_EXC_INC:
3235 #ifdef SPARCV9
3236 NOT_IMPLEMENTED;
3237 #else
3238 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3239 #endif
3240 break;
3241 case OP_SPARC_COND_EXC_EQZ:
3242 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3243 break;
3244 case OP_SPARC_COND_EXC_GEZ:
3245 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3246 break;
3247 case OP_SPARC_COND_EXC_GTZ:
3248 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3249 break;
3250 case OP_SPARC_COND_EXC_LEZ:
3251 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3252 break;
3253 case OP_SPARC_COND_EXC_LTZ:
3254 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3255 break;
3256 case OP_SPARC_COND_EXC_NEZ:
3257 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3258 break;
3260 case OP_IBEQ:
3261 case OP_IBNE_UN:
3262 case OP_IBLT:
3263 case OP_IBLT_UN:
3264 case OP_IBGT:
3265 case OP_IBGT_UN:
3266 case OP_IBGE:
3267 case OP_IBGE_UN:
3268 case OP_IBLE:
3269 case OP_IBLE_UN: {
3270 if (mono_hwcap_sparc_is_v9)
3271 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3272 else
3273 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3274 break;
3277 case OP_SPARC_BRZ:
3278 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3279 break;
3280 case OP_SPARC_BRLEZ:
3281 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3282 break;
3283 case OP_SPARC_BRLZ:
3284 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3285 break;
3286 case OP_SPARC_BRNZ:
3287 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3288 break;
3289 case OP_SPARC_BRGZ:
3290 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3291 break;
3292 case OP_SPARC_BRGEZ:
3293 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3294 break;
3296 /* floating point opcodes */
3297 case OP_R8CONST:
3298 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3299 #ifdef SPARCV9
3300 sparc_set_template (code, sparc_o7);
3301 #else
3302 sparc_sethi (code, 0, sparc_o7);
3303 #endif
3304 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3305 break;
3306 case OP_R4CONST:
3307 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3308 #ifdef SPARCV9
3309 sparc_set_template (code, sparc_o7);
3310 #else
3311 sparc_sethi (code, 0, sparc_o7);
3312 #endif
3313 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3315 /* Extend to double */
3316 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3317 break;
3318 case OP_STORER8_MEMBASE_REG:
3319 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3320 sparc_set (code, ins->inst_offset, sparc_o7);
3321 /* SPARCV9 handles misaligned fp loads/stores */
3322 if (!v64 && (ins->inst_offset % 8)) {
3323 /* Misaligned */
3324 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3325 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3326 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3327 } else
3328 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3330 else {
3331 if (!v64 && (ins->inst_offset % 8)) {
3332 /* Misaligned */
3333 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3334 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3335 } else
3336 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3338 break;
3339 case OP_LOADR8_MEMBASE:
3340 EMIT_LOAD_MEMBASE (ins, lddf);
3341 break;
3342 case OP_STORER4_MEMBASE_REG:
3343 /* This requires a double->single conversion */
3344 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3345 if (!sparc_is_imm13 (ins->inst_offset)) {
3346 sparc_set (code, ins->inst_offset, sparc_o7);
3347 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3349 else
3350 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3351 break;
3352 case OP_LOADR4_MEMBASE: {
3353 /* ldf needs a single precision register */
3354 int dreg = ins->dreg;
3355 ins->dreg = FP_SCRATCH_REG;
3356 EMIT_LOAD_MEMBASE (ins, ldf);
3357 ins->dreg = dreg;
3358 /* Extend to double */
3359 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3360 break;
3362 case OP_ICONV_TO_R4: {
3363 MonoInst *spill = cfg->arch.float_spill_slot;
3364 gint32 reg = spill->inst_basereg;
3365 gint32 offset = spill->inst_offset;
3367 g_assert (spill->opcode == OP_REGOFFSET);
3368 #ifdef SPARCV9
3369 if (!sparc_is_imm13 (offset)) {
3370 sparc_set (code, offset, sparc_o7);
3371 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3372 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3373 } else {
3374 sparc_stx_imm (code, ins->sreg1, reg, offset);
3375 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3377 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3378 #else
3379 if (!sparc_is_imm13 (offset)) {
3380 sparc_set (code, offset, sparc_o7);
3381 sparc_st (code, ins->sreg1, reg, sparc_o7);
3382 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3383 } else {
3384 sparc_st_imm (code, ins->sreg1, reg, offset);
3385 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3387 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3388 #endif
3389 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3390 break;
3392 case OP_ICONV_TO_R8: {
3393 MonoInst *spill = cfg->arch.float_spill_slot;
3394 gint32 reg = spill->inst_basereg;
3395 gint32 offset = spill->inst_offset;
3397 g_assert (spill->opcode == OP_REGOFFSET);
3399 #ifdef SPARCV9
3400 if (!sparc_is_imm13 (offset)) {
3401 sparc_set (code, offset, sparc_o7);
3402 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3403 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3404 } else {
3405 sparc_stx_imm (code, ins->sreg1, reg, offset);
3406 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3408 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3409 #else
3410 if (!sparc_is_imm13 (offset)) {
3411 sparc_set (code, offset, sparc_o7);
3412 sparc_st (code, ins->sreg1, reg, sparc_o7);
3413 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3414 } else {
3415 sparc_st_imm (code, ins->sreg1, reg, offset);
3416 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3418 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3419 #endif
3420 break;
3422 case OP_FCONV_TO_I1:
3423 case OP_FCONV_TO_U1:
3424 case OP_FCONV_TO_I2:
3425 case OP_FCONV_TO_U2:
3426 #ifndef SPARCV9
3427 case OP_FCONV_TO_I:
3428 case OP_FCONV_TO_U:
3429 #endif
3430 case OP_FCONV_TO_I4:
3431 case OP_FCONV_TO_U4: {
3432 MonoInst *spill = cfg->arch.float_spill_slot;
3433 gint32 reg = spill->inst_basereg;
3434 gint32 offset = spill->inst_offset;
3436 g_assert (spill->opcode == OP_REGOFFSET);
3438 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3439 if (!sparc_is_imm13 (offset)) {
3440 sparc_set (code, offset, sparc_o7);
3441 sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7);
3442 sparc_ld (code, reg, sparc_o7, ins->dreg);
3443 } else {
3444 sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset);
3445 sparc_ld_imm (code, reg, offset, ins->dreg);
3448 switch (ins->opcode) {
3449 case OP_FCONV_TO_I1:
3450 case OP_FCONV_TO_U1:
3451 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3452 break;
3453 case OP_FCONV_TO_I2:
3454 case OP_FCONV_TO_U2:
3455 sparc_set (code, 0xffff, sparc_o7);
3456 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3457 break;
3458 default:
3459 break;
3461 break;
3463 case OP_FCONV_TO_I8:
3464 case OP_FCONV_TO_U8:
3465 /* Emulated */
3466 g_assert_not_reached ();
3467 break;
3468 case OP_FCONV_TO_R4:
3469 /* FIXME: Change precision ? */
3470 #ifdef SPARCV9
3471 sparc_fmovd (code, ins->sreg1, ins->dreg);
3472 #else
3473 sparc_fmovs (code, ins->sreg1, ins->dreg);
3474 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3475 #endif
3476 break;
3477 case OP_LCONV_TO_R_UN: {
3478 /* Emulated */
3479 g_assert_not_reached ();
3480 break;
3482 case OP_LCONV_TO_OVF_I:
3483 case OP_LCONV_TO_OVF_I4_2: {
3484 guint32 *br [3], *label [1];
3487 * Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff, i.e. the ms word must be the sign extension of the ls word
3489 sparc_cmp_imm (code, ins->sreg1, 0);
3490 br [0] = code;
3491 sparc_branch (code, 1, sparc_bneg, 0);
3492 sparc_nop (code);
3494 /* positive */
3495 /* ms word must be 0 */
3496 sparc_cmp_imm (code, ins->sreg2, 0);
3497 br [1] = code;
3498 sparc_branch (code, 1, sparc_be, 0);
3499 sparc_nop (code);
3501 label [0] = code;
3503 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3505 /* negative */
3506 sparc_patch (br [0], code);
3508 /* ms word must be 0xffffffff */
3509 sparc_cmp_imm (code, ins->sreg2, -1);
3510 br [2] = code;
3511 sparc_branch (code, 1, sparc_bne, 0);
3512 sparc_nop (code);
3513 sparc_patch (br [2], label [0]);
3515 /* Ok */
3516 sparc_patch (br [1], code);
3517 if (ins->sreg1 != ins->dreg)
3518 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3519 break;
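/*
 * Illustrative only, never compiled: the check implemented by the
 * branches above. A 64-bit value (ms:ls) fits in an int32 exactly when
 * the ms word equals the sign extension of the ls word.
 */
#if 0
static gint32
lconv_to_ovf_i4_sketch (gint32 ms, guint32 ls)
{
	if (ms != ((gint32)ls >> 31))
		; /* throw OverflowException */
	return (gint32)ls;
}
#endif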
3521 case OP_FADD:
3522 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3523 break;
3524 case OP_FSUB:
3525 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3526 break;
3527 case OP_FMUL:
3528 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3529 break;
3530 case OP_FDIV:
3531 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3532 break;
3533 case OP_FNEG:
3534 #ifdef SPARCV9
3535 sparc_fnegd (code, ins->sreg1, ins->dreg);
3536 #else
3537 /* FIXME: why not use fnegd ? */
3538 sparc_fnegs (code, ins->sreg1, ins->dreg);
3539 #endif
3540 break;
3541 case OP_FREM:
3542 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3543 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3544 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3545 break;
3546 case OP_FCOMPARE:
3547 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3548 break;
3549 case OP_FCEQ:
3550 case OP_FCLT:
3551 case OP_FCLT_UN:
3552 case OP_FCGT:
3553 case OP_FCGT_UN:
3554 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3555 sparc_clr_reg (code, ins->dreg);
3556 switch (ins->opcode) {
3557 case OP_FCLT_UN:
3558 case OP_FCGT_UN:
3559 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3560 /* delay slot */
3561 sparc_set (code, 1, ins->dreg);
3562 sparc_fbranch (code, 1, sparc_fbu, 2);
3563 /* delay slot */
3564 sparc_set (code, 1, ins->dreg);
3565 break;
3566 default:
3567 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3568 /* delay slot */
3569 sparc_set (code, 1, ins->dreg);
3571 break;
3572 case OP_FBEQ:
3573 case OP_FBLT:
3574 case OP_FBGT:
3575 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3576 break;
3577 case OP_FBGE: {
3578 /* clt.un + brfalse */
3579 guint32 *p = code;
3580 sparc_fbranch (code, 1, sparc_fbul, 0);
3581 /* delay slot */
3582 sparc_nop (code);
3583 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3584 sparc_patch (p, (guint8*)code);
3585 break;
3587 case OP_FBLE: {
3588 /* cgt.un + brfalse */
3589 guint32 *p = code;
3590 sparc_fbranch (code, 1, sparc_fbug, 0);
3591 /* delay slot */
3592 sparc_nop (code);
3593 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3594 sparc_patch (p, (guint8*)code);
3595 break;
3597 case OP_FBNE_UN:
3598 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3599 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3600 break;
3601 case OP_FBLT_UN:
3602 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3603 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3604 break;
3605 case OP_FBGT_UN:
3606 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3607 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3608 break;
3609 case OP_FBGE_UN:
3610 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3611 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3612 break;
3613 case OP_FBLE_UN:
3614 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3615 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3616 break;
3617 case OP_CKFINITE: {
3618 MonoInst *spill = cfg->arch.float_spill_slot;
3619 gint32 reg = spill->inst_basereg;
3620 gint32 offset = spill->inst_offset;
3622 g_assert (spill->opcode == OP_REGOFFSET);
3624 if (!sparc_is_imm13 (offset)) {
3625 sparc_set (code, offset, sparc_o7);
3626 sparc_stdf (code, ins->sreg1, reg, sparc_o7);
3627 sparc_lduh (code, reg, sparc_o7, sparc_o7);
3628 } else {
3629 sparc_stdf_imm (code, ins->sreg1, reg, offset);
3630 sparc_lduh_imm (code, reg, offset, sparc_o7);
3632 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3633 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3634 sparc_cmp_imm (code, sparc_o7, 2047);
3635 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "OverflowException");
3636 #ifdef SPARCV9
3637 sparc_fmovd (code, ins->sreg1, ins->dreg);
3638 #else
3639 sparc_fmovs (code, ins->sreg1, ins->dreg);
3640 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3641 #endif
3642 break;
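/*
 * Illustrative only, never compiled: the bit test performed above. The
 * lduh picks up the most significant halfword of the spilled double
 * (SPARC is big endian); a double is NaN or infinite exactly when its
 * 11 exponent bits are all ones (0x7ff).
 */
#if 0
static gboolean
is_nan_or_inf_sketch (double d)
{
	guint16 msh;
	memcpy (&msh, &d, sizeof (msh)); /* most significant 16 bits on a big endian machine */
	return ((msh >> 4) & 0x7ff) == 0x7ff;
}
#endif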
3645 case OP_MEMORY_BARRIER:
3646 sparc_membar (code, sparc_membar_all);
3647 break;
3648 case OP_GC_SAFE_POINT:
3649 break;
3651 default:
3652 #ifdef __GNUC__
3653 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3654 #else
3655 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3656 #endif
3657 g_assert_not_reached ();
3660 if ((((guint8*)code) - code_start) > max_len) {
3661 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3662 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3663 g_assert_not_reached ();
3666 cpos += max_len;
3668 last_ins = ins;
3671 cfg->code_len = (guint8*)code - cfg->native_code;
3674 void
3675 mono_arch_register_lowlevel_calls (void)
3677 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
3680 void
3681 mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors, MonoError *error)
3683 MonoJumpInfo *patch_info;
3685 mono_error_init (error);
3687 /* FIXME: Move part of this to arch independent code */
3688 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3689 unsigned char *ip = patch_info->ip.i + code;
3690 gpointer target;
3692 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors, error);
3693 return_if_nok (error);
3695 switch (patch_info->type) {
3696 case MONO_PATCH_INFO_NONE:
3697 continue;
3698 case MONO_PATCH_INFO_METHOD_JUMP: {
3699 guint32 *ip2 = (guint32*)ip;
3700 /* Might already have been patched */
3701 sparc_set_template (ip2, sparc_o7);
3702 break;
3704 default:
3705 break;
3707 sparc_patch ((guint32*)ip, target);
3711 void*
3712 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3714 int i;
3715 guint32 *code = (guint32*)p;
3716 MonoMethodSignature *sig = mono_method_signature (cfg->method);
3717 CallInfo *cinfo;
3719 /* Save registers to stack */
3720 for (i = 0; i < 6; ++i)
3721 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
3723 cinfo = get_call_info (cfg, sig, FALSE);
3725 /* Save float regs on V9, since they are caller saved */
3726 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3727 ArgInfo *ainfo = cinfo->args + i;
3728 gint32 stack_offset;
3730 stack_offset = ainfo->offset + ARGS_OFFSET;
3732 if (ainfo->storage == ArgInFloatReg) {
3733 if (!sparc_is_imm13 (stack_offset))
3734 NOT_IMPLEMENTED;
3735 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3737 else if (ainfo->storage == ArgInDoubleReg) {
3738 /* The offset is guaranteed to be aligned by the ABI rules */
3739 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3743 sparc_set (code, cfg->method, sparc_o0);
3744 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3746 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3747 EMIT_CALL ();
3749 /* Restore float regs on V9 */
3750 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3751 ArgInfo *ainfo = cinfo->args + i;
3752 gint32 stack_offset;
3754 stack_offset = ainfo->offset + ARGS_OFFSET;
3756 if (ainfo->storage == ArgInFloatReg) {
3757 if (!sparc_is_imm13 (stack_offset))
3758 NOT_IMPLEMENTED;
3759 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3761 else if (ainfo->storage == ArgInDoubleReg) {
3762 /* The offset is guaranteed to be aligned by the ABI rules */
3763 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3767 g_free (cinfo);
3769 return code;
3772 enum {
3773 SAVE_NONE,
3774 SAVE_STRUCT,
3775 SAVE_ONE,
3776 SAVE_TWO,
3777 SAVE_FP
3780 void*
3781 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
3783 guint32 *code = (guint32*)p;
3784 int save_mode = SAVE_NONE;
3785 MonoMethod *method = cfg->method;
3787 switch (mini_get_underlying_type (mono_method_signature (method)->ret)->type) {
3788 case MONO_TYPE_VOID:
3789 /* special case string .ctor icall */
3790 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3791 save_mode = SAVE_ONE;
3792 else
3793 save_mode = SAVE_NONE;
3794 break;
3795 case MONO_TYPE_I8:
3796 case MONO_TYPE_U8:
3797 #ifdef SPARCV9
3798 save_mode = SAVE_ONE;
3799 #else
3800 save_mode = SAVE_TWO;
3801 #endif
3802 break;
3803 case MONO_TYPE_R4:
3804 case MONO_TYPE_R8:
3805 save_mode = SAVE_FP;
3806 break;
3807 case MONO_TYPE_VALUETYPE:
3808 save_mode = SAVE_STRUCT;
3809 break;
3810 default:
3811 save_mode = SAVE_ONE;
3812 break;
3815 /* Save the result to the stack and also put it into the output registers */
3817 switch (save_mode) {
3818 case SAVE_TWO:
3819 /* V8 only */
3820 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3821 sparc_st_imm (code, sparc_i1, sparc_fp, 72);
3822 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3823 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3824 break;
3825 case SAVE_ONE:
3826 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3827 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3828 break;
3829 case SAVE_FP:
3830 #ifdef SPARCV9
3831 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
3832 #else
3833 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3834 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3835 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3836 #endif
3837 break;
3838 case SAVE_STRUCT:
3839 #ifdef SPARCV9
3840 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3841 #else
3842 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
3843 #endif
3844 break;
3845 case SAVE_NONE:
3846 default:
3847 break;
3850 sparc_set (code, cfg->method, sparc_o0);
3852 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
3853 EMIT_CALL ();
3855 /* Restore result */
3857 switch (save_mode) {
3858 case SAVE_TWO:
3859 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3860 sparc_ld_imm (code, sparc_fp, 72, sparc_i1);
3861 break;
3862 case SAVE_ONE:
3863 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3864 break;
3865 case SAVE_FP:
3866 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
3867 break;
3868 case SAVE_NONE:
3869 default:
3870 break;
3873 return code;
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *inst;
	guint32 *code;
	CallInfo *cinfo;
	guint32 i, offset;

	cfg->code_size = 256;
	cfg->native_code = g_malloc (cfg->code_size);
	code = (guint32*)cfg->native_code;

	/* FIXME: Generate intermediate code instead */

	offset = cfg->stack_offset;
	offset += (16 * sizeof (gpointer)); /* register save area */
#ifndef SPARCV9
	offset += 4; /* struct/union return pointer */
#endif

	/* add parameter area size for called functions */
	if (cfg->param_area < (6 * sizeof (gpointer)))
		/* Reserve space for the first 6 arguments even if it is unused */
		offset += 6 * sizeof (gpointer);
	else
		offset += cfg->param_area;

	/* align the stack size */
	offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
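	/*
	 * Worked example (32 bit, assuming MONO_ARCH_FRAME_ALIGNMENT is 8): with
	 * 40 bytes of locals and no parameter area, offset = 40 + 64 + 4 + 24 = 132,
	 * which is then rounded up to 136.
	 */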
	/*
	 * localloc'd memory is stored between the local variables (whose
	 * size is given by cfg->stack_offset) and the space reserved by
	 * the ABI.
	 */
	cfg->arch.localloc_offset = offset - cfg->stack_offset;

	cfg->stack_offset = offset;

#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
	/* Perform stack touching */
	NOT_IMPLEMENTED;
#endif

	if (!sparc_is_imm13 (- cfg->stack_offset)) {
		/* Can't use sparc_o7 here, since we're still in the caller's frame */
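		/*
		 * The save instruction below still executes in the caller's register
		 * window, where %o7 holds the return address, so the frame size has
		 * to be built in a global scratch register.
		 */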
		sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
		sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
	}
	else
		sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);

/*
	if (strstr (cfg->method->name, "foo")) {
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
		sparc_call_simple (code, 0);
		sparc_nop (code);
	}
*/

	sig = mono_method_signature (method);

	cinfo = get_call_info (cfg, sig, FALSE);

	/* Keep in sync with emit_load_volatile_arguments */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		gint32 stack_offset;
		MonoType *arg_type;
		inst = cfg->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		stack_offset = ainfo->offset + ARGS_OFFSET;

		/* Save the split arguments so they will reside entirely on the stack */
		if (ainfo->storage == ArgInSplitRegStack) {
			/* Save the register to the stack */
			g_assert (inst->opcode == OP_REGOFFSET);
			if (!sparc_is_imm13 (stack_offset))
				NOT_IMPLEMENTED;
			sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
		}

		if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
			/* Save the argument to a dword aligned stack location */
			/*
			 * stack_offset contains the offset of the argument on the stack.
			 * inst->inst_offset contains the dword aligned offset where the value
			 * should be stored.
			 */
			if (ainfo->storage == ArgInIRegPair) {
				if (!sparc_is_imm13 (inst->inst_offset + 4))
					NOT_IMPLEMENTED;
				sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
				sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
			}
			else
				if (ainfo->storage == ArgInSplitRegStack) {
#ifdef SPARCV9
					g_assert_not_reached ();
#endif
					if (stack_offset != inst->inst_offset) {
						/* stack_offset is not dword aligned, so we need to make a copy */
						sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
						sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
						sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
					}
				}
			else
				if (ainfo->storage == ArgOnStackPair) {
#ifdef SPARCV9
					g_assert_not_reached ();
#endif
					if (stack_offset != inst->inst_offset) {
						/* stack_offset is not dword aligned, so we need to make a copy */
						sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
						sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
						sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
						sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
					}
				}
			else
				g_assert_not_reached ();
		}
		else
			if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
				/* Argument in register, but need to be saved to stack */
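				/*
				 * The alignment of the offset encodes the width of the argument
				 * (sub-word arguments are placed at the end of their big-endian
				 * stack slot), so it selects the store size used below.
				 */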
				if (!sparc_is_imm13 (stack_offset))
					NOT_IMPLEMENTED;
				if ((stack_offset - ARGS_OFFSET) & 0x1)
					sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
				else
					if ((stack_offset - ARGS_OFFSET) & 0x2)
						sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
				else
					if ((stack_offset - ARGS_OFFSET) & 0x4)
						sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
				else {
					if (v64)
						sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
					else
						sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
				}
			}
			else
				if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
#ifdef SPARCV9
					NOT_IMPLEMENTED;
#endif
					/* Argument in regpair, but need to be saved to stack */
					if (!sparc_is_imm13 (inst->inst_offset + 4))
						NOT_IMPLEMENTED;
					sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
					sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
				}
				else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
					if (!sparc_is_imm13 (stack_offset))
						NOT_IMPLEMENTED;
					sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
				}
				else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
					/* The offset is guaranteed to be aligned by the ABI rules */
					sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
				}
		if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
			/* Need to move into a double precision register */
			sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
		}

		if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
			if (inst->opcode == OP_REGVAR)
				/* FIXME: Load the argument into memory */
				NOT_IMPLEMENTED;
	}

	g_free (cinfo);
	if (cfg->method->save_lmf) {
		gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;

		/* Save ip */
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
		sparc_set_template (code, sparc_o7);
		sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
		/* Save sp */
		sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
		/* Save fp */
		sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
		/* Save method */
		/* FIXME: add a relocation for this */
		sparc_set (code, cfg->method, sparc_o7);
		sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));

		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
							 (gpointer)"mono_arch_get_lmf_addr");
		EMIT_CALL ();

		code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
	}

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);

	cfg->code_len = (guint8*)code - cfg->native_code;

	g_assert (cfg->code_len <= cfg->code_size);

	return (guint8*)code;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	guint32 *code;
	int can_fold = 0;
	int max_epilog_size = 16 + 20 * 4;

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 50;

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		max_epilog_size += 50;

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;
	}

	code = (guint32*)(cfg->native_code + cfg->code_len);

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);

	if (cfg->method->save_lmf) {
		gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;

		code = mono_sparc_emit_restore_lmf (code, lmf_offset);
	}

	/*
	 * The V8 ABI requires that calls to functions which return a structure
	 * return to %i7+12.
	 */
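	/*
	 * A V8 caller follows such a call with an "unimp <size>" word giving the
	 * expected struct size; skipping it means returning to %i7+12 instead of
	 * the usual %i7+8, which is what the jmpl below does.
	 */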
	if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
		sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
	else
		sparc_ret (code);

	/* Only fold the last instruction into the restore if the exit block has an in count of 1
	   and the previous block hasn't been optimized away, since it may have an in count > 1 */
	if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
		can_fold = 1;

	/*
	 * FIXME: The last instruction might have a branch pointing into it like in
	 * int_ceq sparc_i0 <-
	 */
	can_fold = 0;

	/* Try folding the last instruction into the restore */
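	/*
	 * The encoding checks below match an "or" instruction (op == 0x2, op3 == 0x2)
	 * whose destination is %i0, i.e. a move of the return value; for example
	 * "or %l0, 10, %i0; ret; restore" can be folded into "ret; restore %l0, 10, %o0".
	 */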
	if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
		/* or reg, imm, %i0 */
		int reg = sparc_inst_rs1 (code [-2]);
		int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
		code [-2] = code [-1];
		code --;
		sparc_restore_imm (code, reg, imm, sparc_o0);
	}
	else
		if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
			/* or reg, reg, %i0 */
			int reg1 = sparc_inst_rs1 (code [-2]);
			int reg2 = sparc_inst_rs2 (code [-2]);
			code [-2] = code [-1];
			code --;
			sparc_restore (code, reg1, reg2, sparc_o0);
		}
	else
		sparc_restore_imm (code, sparc_g0, 0, sparc_g0);

	cfg->code_len = (guint8*)code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	guint32 *code;
	int nthrows = 0, i;
	int exc_count = 0;
	guint32 code_size;
	MonoClass *exc_classes [16];
	guint8 *exc_throw_start [16], *exc_throw_end [16];

	/* Compute needed space */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC)
			exc_count++;
	}

	/*
	 * Make sure we have enough space for exceptions.
	 */
#ifdef SPARCV9
	code_size = exc_count * (20 * 4);
#else
	code_size = exc_count * 24;
#endif
	while (cfg->code_len + code_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;
	}

	code = (guint32*)(cfg->native_code + cfg->code_len);

	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			guint32 *buf, *buf2;
			guint32 throw_ip, type_idx;
			gint32 disp;

			sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);

			exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
			throw_ip = patch_info->ip.i;
			/* Find a throw sequence for the same exception class */
			for (i = 0; i < nthrows; ++i)
				if (exc_classes [i] == exc_class)
					break;

			if (i < nthrows) {
				guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
				if (!sparc_is_imm13 (throw_offset))
					sparc_set32 (code, throw_offset, sparc_o1);

				disp = (exc_throw_start [i] - (guint8*)code) >> 2;
				g_assert (sparc_is_imm22 (disp));
				sparc_branch (code, 0, sparc_ba, disp);
				if (sparc_is_imm13 (throw_offset))
					sparc_set32 (code, throw_offset, sparc_o1);
				else
					sparc_nop (code);
				patch_info->type = MONO_PATCH_INFO_NONE;
			}
			else {
				/* Emit the template for setting o1 */
				buf = code;
				if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
					/* Can use a short form */
					sparc_nop (code);
				else
					sparc_set_template (code, sparc_o1);
				buf2 = code;
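				/*
				 * Everything between buf and buf2 is placeholder space: once the
				 * final offset is known it is backpatched via sparc_set32 (buf, disp,
				 * sparc_o1) below and any leftover slots are filled with nops.
				 */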
				if (nthrows < 16) {
					exc_classes [nthrows] = exc_class;
					exc_throw_start [nthrows] = (guint8*)code;
				}

				/*
				mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
				EMIT_CALL();
				*/

				/* first arg = type token */
				/* Pass the type index to reduce the size of the sparc_set */
				if (!sparc_is_imm13 (type_idx))
					sparc_set32 (code, type_idx, sparc_o0);

				/* second arg = offset between the throw ip and the current ip */
				/* On sparc, the saved ip points to the call instruction */
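				/* The offset is counted in 4-byte instruction words, hence the >> 2 */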
				disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
				sparc_set32 (buf, disp, sparc_o1);
				while (buf < buf2)
					sparc_nop (buf);

				if (nthrows < 16) {
					exc_throw_end [nthrows] = (guint8*)code;
					nthrows ++;
				}

				patch_info->data.name = "mono_arch_throw_corlib_exception";
				patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
				patch_info->ip.i = (guint8*)code - cfg->native_code;

				EMIT_CALL ();

				if (sparc_is_imm13 (type_idx)) {
					/* Put it into the delay slot */
					code --;
					buf = code;
					sparc_set32 (code, type_idx, sparc_o0);
					g_assert (code - buf == 1);
				}
			}
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}

	cfg->code_len = (guint8*)code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
gboolean lmf_addr_key_inited = FALSE;

#ifdef MONO_SPARC_THR_TLS
thread_key_t lmf_addr_key;
#else
pthread_key_t lmf_addr_key;
#endif
gpointer
mono_arch_get_lmf_addr (void)
{
	/* This is perf critical so we bypass the IO layer */
	/* The thr_... functions seem to be somewhat faster */
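	/* lmf_addr_key holds a pointer to the lmf field of the thread's MonoJitTlsData, set up in mono_arch_tls_init () below */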
#ifdef MONO_SPARC_THR_TLS
	gpointer res;
	thr_getspecific (lmf_addr_key, &res);
	return res;
#else
	return pthread_getspecific (lmf_addr_key);
#endif
}
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK

/*
 * There seems to be no way to determine stack boundaries under solaris,
 * so it's not possible to determine whether a SIGSEGV is caused by stack
 * overflow or not.
 */
#error "--with-sigaltstack=yes not supported on solaris"

#endif
void
mono_arch_tls_init (void)
{
	MonoJitTlsData *jit_tls;

	if (!lmf_addr_key_inited) {
		int res;

		lmf_addr_key_inited = TRUE;

#ifdef MONO_SPARC_THR_TLS
		res = thr_keycreate (&lmf_addr_key, NULL);
#else
		res = pthread_key_create (&lmf_addr_key, NULL);
#endif
		g_assert (res == 0);
	}

	jit_tls = mono_get_jit_tls ();

#ifdef MONO_SPARC_THR_TLS
	thr_setspecific (lmf_addr_key, &jit_tls->lmf);
#else
	pthread_setspecific (lmf_addr_key, &jit_tls->lmf);
#endif
}
void
mono_arch_finish_init (void)
{
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	return ins;
}
/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
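/*
 * For illustration only: a typical caller allocates param_count + 1 entries
 * (entry 0 receives the this/first slot, entries 1..param_count the formal
 * parameters), along the lines of:
 *
 *   MonoJitArgumentInfo *ai = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
 *   mono_arch_get_argument_info (sig, sig->param_count, ai);
 *
 * where 'sig' stands for any MonoMethodSignature*.
 */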
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k, align;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	cinfo = get_call_info (NULL, csig, FALSE);

	if (csig->hasthis) {
		ainfo = &cinfo->args [0];
		arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
	}

	for (k = 0; k < param_count; k++) {
		ainfo = &cinfo->args [k + csig->hasthis];

		arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
		arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
	}

	g_free (cinfo);

	return 0;
}
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}
mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	/* FIXME: implement */
	g_assert_not_reached ();
}
gboolean
mono_arch_opcode_supported (int opcode)
{
	return FALSE;
}