1 /*
2 * mini-sparc.c: Sparc backend for the Mono code generator
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * Modified for SPARC:
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
15 #include "mini.h"
16 #include <string.h>
17 #include <pthread.h>
18 #include <unistd.h>
20 #ifndef __linux__
21 #include <thread.h>
22 #endif
24 #include <unistd.h>
25 #include <sys/mman.h>
27 #include <mono/metadata/appdomain.h>
28 #include <mono/metadata/debug-helpers.h>
29 #include <mono/metadata/tokentype.h>
30 #include <mono/utils/mono-math.h>
31 #include <mono/utils/mono-hwcap.h>
33 #include "mini-sparc.h"
34 #include "trace.h"
35 #include "cpu-sparc.h"
36 #include "jit-icalls.h"
37 #include "ir-emit.h"
40 * Sparc V9 means two things:
41 * - the instruction set
42 * - the ABI
44 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
45 * processors in use are 64 bit processors. The V9 ABI is only usable if the
46 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
47 * instructions without using the 64 bit ABI.
51 * Register usage:
52 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
53 * code. Unused input registers are used for global register allocation.
54 * - %o0..%o5 and %l7 are used for local register allocation and passing arguments
55 * - %l0..%l6 are used for global register allocation
56 * - %o7 and %g1 are used as scratch registers in opcodes
57 * - all floating point registers are used for local register allocation except %f0.
58 * Only double precision registers are used.
59 * In 64 bit mode:
60 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
61 * used for local allocation.
65 * Alignment:
66 * - doubles and longs must be stored in dword aligned locations
70 * The following things are not implemented or do not work:
71 * - some fp arithmetic corner cases
72 * The following tests in mono/mini are expected to fail:
73 * - test_0_simple_double_casts
74 * This test casts (guint64)-1 to double and then back to guint64 again.
75 * Under x86, it returns 0, while under sparc it returns -1.
77 * In addition to this, the runtime requires the trunc function, or its
78 * solaris counterpart, aintl, to do some double->int conversions. If this
79 * function is not available, it is emulated somewhat, but the results can be
80 * strange.
84 * SPARCV9 FIXME:
85 * - optimize sparc_set according to the memory model
86 * - when non-AOT compiling, compute patch targets immediately so we don't
87 * have to emit the 6 byte template.
88 * - varargs
89 * - struct arguments/returns
93 * SPARCV9 ISSUES:
94 * - sparc_call_simple can't be used in a lot of places since the displacement
95 * might not fit into an imm30.
96 * - g1 can't be used in a lot of places since it is used as a scratch reg in
97 * sparc_set.
98 * - sparc_f0 can't be used as a scratch register on V9
99 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
100 * %d36 = %f5.
101 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
102 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
103 * be a double precision register which has no single precision part.
104 * - passing/returning structs is hard to implement, because:
105 * - the spec is very hard to understand
106 * - it requires knowledge about the fields of structure, needs to handle
107 * nested structures etc.
111 * Possible optimizations:
112 * - delay slot scheduling
113 * - allocate large constants to registers
114 * - add more mul/div/rem optimizations
117 #ifndef __linux__
118 #define MONO_SPARC_THR_TLS 1
119 #endif
122 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
123 * causing infinite loops in dominator computation. So glib-2.4 is required.
125 #ifdef SPARCV9
126 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
127 #error "glib 2.4 or later is required for 64 bit mode."
128 #endif
129 #endif
131 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
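/* e.g. ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16; align must be a power of two */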
133 #define SIGNAL_STACK_SIZE (64 * 1024)
135 #define STACK_BIAS MONO_SPARC_STACK_BIAS
137 #ifdef SPARCV9
139 /* %g1 is used by sparc_set */
140 #define GP_SCRATCH_REG sparc_g4
141 /* %f0 is used for parameter passing */
142 #define FP_SCRATCH_REG sparc_f30
143 #define ARGS_OFFSET (STACK_BIAS + 128)
145 #else
147 #define FP_SCRATCH_REG sparc_f0
148 #define ARGS_OFFSET 68
149 #define GP_SCRATCH_REG sparc_g1
151 #endif
153 /* Whether this is a 64 bit executable */
154 #if SPARCV9
155 static gboolean v64 = TRUE;
156 #else
157 static gboolean v64 = FALSE;
158 #endif
160 static gpointer mono_arch_get_lmf_addr (void);
162 const char*
163 mono_arch_regname (int reg) {
164 static const char * rnames[] = {
165 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
166 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
167 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
168 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
169 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
170 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
171 "sparc_fp", "sparc_retadr"
173 if (reg >= 0 && reg < 32)
174 return rnames [reg];
175 return "unknown";
178 const char*
179 mono_arch_fregname (int reg) {
180 static const char *rnames [] = {
181 "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
182 "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
183 "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
184 "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
185 "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
186 "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
187 "sparc_f30", "sparc_f31"
190 if (reg >= 0 && reg < 32)
191 return rnames [reg];
192 else
193 return "unknown";
197 * Initialize the cpu to execute managed code.
199 void
200 mono_arch_cpu_init (void)
205 * Initialize architecture specific code.
207 void
208 mono_arch_init (void)
213 * Cleanup architecture specific code.
215 void
216 mono_arch_cleanup (void)
221 * This function returns the optimizations supported on this cpu.
223 guint32
224 mono_arch_cpu_optimizations (guint32 *exclude_mask)
226 guint32 opts = 0;
228 *exclude_mask = 0;
231 * On some processors, the cmov instructions are even slower than the
232 * normal ones...
234 if (mono_hwcap_sparc_is_v9)
235 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
236 else
237 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
239 return opts;
243 * This function tests for all SIMD functions supported.
245 * Returns a bitmask corresponding to all supported versions.
248 guint32
249 mono_arch_cpu_enumerate_simd_versions (void)
251 /* SIMD is currently unimplemented */
252 return 0;
255 #ifdef __GNUC__
256 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
257 #else /* assume Sun's compiler */
258 static void flushi(void *addr)
260 asm("flush %i0");
262 #endif
264 #ifndef __linux__
265 void sync_instruction_memory(caddr_t addr, int len);
266 #endif
268 void
269 mono_arch_flush_icache (guint8 *code, gint size)
271 #ifndef __linux__
272 /* Hopefully this is optimized based on the actual CPU */
273 sync_instruction_memory (code, size);
274 #else
275 gulong start = (gulong) code;
276 gulong end = start + size;
277 gulong align;
279 /* Sparcv9 chips only need flushes on 32 byte
280 * cacheline boundaries.
282 * Sparcv8 needs a flush every 8 bytes.
284 align = (mono_hwcap_sparc_is_v9 ? 32 : 8);
286 start &= ~(align - 1);
287 end = (end + (align - 1)) & ~(align - 1);
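/* e.g. on a v9 chip (align == 32), the range [0x1005, 0x1043) is rounded out and flushed as [0x1000, 0x1060) */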
289 while (start < end) {
290 #ifdef __GNUC__
291 __asm__ __volatile__ ("iflush %0"::"r"(start));
292 #else
293 flushi (start);
294 #endif
295 start += align;
297 #endif
301 * mono_sparc_flushw:
303 * Flush all register windows to memory. Every register window is saved to
304 * a 16 word area on the stack pointed to by its %sp register.
306 void
307 mono_sparc_flushw (void)
309 static guint32 start [64];
310 static int inited = 0;
311 guint32 *code;
312 static void (*flushw) (void);
314 if (!inited) {
315 code = start;
317 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
318 sparc_flushw (code);
319 sparc_ret (code);
320 sparc_restore_simple (code);
322 g_assert ((code - start) < 64);
324 mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
326 flushw = (gpointer)start;
328 inited = 1;
331 flushw ();
334 void
335 mono_arch_flush_register_windows (void)
337 mono_sparc_flushw ();
340 gboolean
341 mono_arch_is_inst_imm (gint64 imm)
343 return sparc_is_imm13 (imm);
346 gboolean
347 mono_sparc_is_v9 (void) {
348 return mono_hwcap_sparc_is_v9;
351 gboolean
352 mono_sparc_is_sparc64 (void) {
353 return v64;
356 typedef enum {
357 ArgInIReg,
358 ArgInIRegPair,
359 ArgInSplitRegStack,
360 ArgInFReg,
361 ArgInFRegPair,
362 ArgOnStack,
363 ArgOnStackPair,
364 ArgInFloatReg, /* V9 only */
365 ArgInDoubleReg /* V9 only */
366 } ArgStorage;
368 typedef struct {
369 gint16 offset;
370 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
371 gint8 reg;
372 ArgStorage storage;
373 guint32 vt_offset; /* for valuetypes */
374 } ArgInfo;
376 typedef struct {
377 int nargs;
378 guint32 stack_usage;
379 guint32 reg_usage;
380 ArgInfo ret;
381 ArgInfo sig_cookie;
382 ArgInfo args [1];
383 } CallInfo;
385 #define DEBUG(a)
387 /* %o0..%o5 */
388 #define PARAM_REGS 6
390 static void inline
391 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
393 ainfo->offset = *stack_size;
395 if (!pair) {
396 if (*gr >= PARAM_REGS) {
397 ainfo->storage = ArgOnStack;
399 else {
400 ainfo->storage = ArgInIReg;
401 ainfo->reg = *gr;
402 (*gr) ++;
405 /* Always reserve stack space for parameters passed in registers */
406 (*stack_size) += sizeof (gpointer);
408 else {
409 if (*gr < PARAM_REGS - 1) {
410 /* A pair of registers */
411 ainfo->storage = ArgInIRegPair;
412 ainfo->reg = *gr;
413 (*gr) += 2;
415 else if (*gr >= PARAM_REGS) {
416 /* A pair of stack locations */
417 ainfo->storage = ArgOnStackPair;
419 else {
420 ainfo->storage = ArgInSplitRegStack;
421 ainfo->reg = *gr;
422 (*gr) ++;
425 (*stack_size) += 2 * sizeof (gpointer);
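/* e.g. a 64 bit argument arriving with *gr == 5 is split: the most significant word goes in %o5 (ArgInSplitRegStack) and the least significant word goes on the stack */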
429 #ifdef SPARCV9
431 #define FLOAT_PARAM_REGS 32
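/* counted in single precision register units (%f0..%f31); each register argument advances the counter by two */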
433 static void inline
434 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
436 ainfo->offset = *stack_size;
438 if (single) {
439 if (*gr >= FLOAT_PARAM_REGS) {
440 ainfo->storage = ArgOnStack;
442 else {
443 /* A single is passed in an odd numbered fp register */
444 ainfo->storage = ArgInFloatReg;
445 ainfo->reg = *gr + 1;
446 (*gr) += 2;
449 else {
450 if (*gr < FLOAT_PARAM_REGS) {
451 /* A double register */
452 ainfo->storage = ArgInDoubleReg;
453 ainfo->reg = *gr;
454 (*gr) += 2;
456 else {
457 ainfo->storage = ArgOnStack;
461 (*stack_size) += sizeof (gpointer);
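/* e.g. with *gr == 0 a double is assigned %d0 (reg 0) while a single is assigned %f1 (reg 1); either way *gr advances to 2 */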
464 #endif
467 * get_call_info:
469 * Obtain information about a call according to the calling convention.
470 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
471 * document for more information.
472 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
473 * the 'Sparc Compliance Definition 2.4' document.
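* For example, under V8 a managed signature (int32, int64, double) ends up as:
* int32 -> %o0 (ArgInIReg), int64 -> %o1/%o2 (ArgInIRegPair), double -> %o3/%o4 (ArgInIRegPair),
* with stack space reserved for every slot.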
475 static CallInfo*
476 get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
478 guint32 i, gr, fr;
479 int n = sig->hasthis + sig->param_count;
480 guint32 stack_size = 0;
481 CallInfo *cinfo;
482 MonoType *ret_type;
484 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
486 gr = 0;
487 fr = 0;
489 #ifdef SPARCV9
490 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
491 /* The address of the return value is passed in %o0 */
492 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
493 cinfo->ret.reg += sparc_i0;
494 /* FIXME: Pass the return value address after 'this', as on other platforms */
495 NOT_IMPLEMENTED;
497 #endif
499 /* this */
500 if (sig->hasthis)
501 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
503 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
504 gr = PARAM_REGS;
506 /* Emit the signature cookie just before the implicit arguments */
507 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
510 for (i = 0; i < sig->param_count; ++i) {
511 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
512 MonoType *ptype;
514 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
515 gr = PARAM_REGS;
517 /* Emit the signature cookie just before the implicit arguments */
518 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
521 DEBUG(printf("param %d: ", i));
522 if (sig->params [i]->byref) {
523 DEBUG(printf("byref\n"));
525 add_general (&gr, &stack_size, ainfo, FALSE);
526 continue;
528 ptype = mini_get_underlying_type (sig->params [i]);
529 switch (ptype->type) {
530 case MONO_TYPE_BOOLEAN:
531 case MONO_TYPE_I1:
532 case MONO_TYPE_U1:
533 add_general (&gr, &stack_size, ainfo, FALSE);
534 /* the value is in the ls byte */
535 ainfo->offset += sizeof (gpointer) - 1;
536 break;
537 case MONO_TYPE_I2:
538 case MONO_TYPE_U2:
539 case MONO_TYPE_CHAR:
540 add_general (&gr, &stack_size, ainfo, FALSE);
541 /* the value is in the ls word */
542 ainfo->offset += sizeof (gpointer) - 2;
543 break;
544 case MONO_TYPE_I4:
545 case MONO_TYPE_U4:
546 add_general (&gr, &stack_size, ainfo, FALSE);
547 /* the value is in the ls dword */
548 ainfo->offset += sizeof (gpointer) - 4;
549 break;
550 case MONO_TYPE_I:
551 case MONO_TYPE_U:
552 case MONO_TYPE_PTR:
553 case MONO_TYPE_FNPTR:
554 case MONO_TYPE_CLASS:
555 case MONO_TYPE_OBJECT:
556 case MONO_TYPE_STRING:
557 case MONO_TYPE_SZARRAY:
558 case MONO_TYPE_ARRAY:
559 add_general (&gr, &stack_size, ainfo, FALSE);
560 break;
561 case MONO_TYPE_GENERICINST:
562 if (!mono_type_generic_inst_is_valuetype (ptype)) {
563 add_general (&gr, &stack_size, ainfo, FALSE);
564 break;
566 /* Fall through */
567 case MONO_TYPE_VALUETYPE:
568 #ifdef SPARCV9
569 if (sig->pinvoke)
570 NOT_IMPLEMENTED;
571 #endif
572 add_general (&gr, &stack_size, ainfo, FALSE);
573 break;
574 case MONO_TYPE_TYPEDBYREF:
575 add_general (&gr, &stack_size, ainfo, FALSE);
576 break;
577 case MONO_TYPE_U8:
578 case MONO_TYPE_I8:
579 #ifdef SPARCV9
580 add_general (&gr, &stack_size, ainfo, FALSE);
581 #else
582 add_general (&gr, &stack_size, ainfo, TRUE);
583 #endif
584 break;
585 case MONO_TYPE_R4:
586 #ifdef SPARCV9
587 add_float (&fr, &stack_size, ainfo, TRUE);
588 gr ++;
589 #else
590 /* single precision values are passed in integer registers */
591 add_general (&gr, &stack_size, ainfo, FALSE);
592 #endif
593 break;
594 case MONO_TYPE_R8:
595 #ifdef SPARCV9
596 add_float (&fr, &stack_size, ainfo, FALSE);
597 gr ++;
598 #else
599 /* double precision values are passed in a pair of registers */
600 add_general (&gr, &stack_size, ainfo, TRUE);
601 #endif
602 break;
603 default:
604 g_assert_not_reached ();
608 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
609 gr = PARAM_REGS;
611 /* Emit the signature cookie just before the implicit arguments */
612 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
615 /* return value */
616 ret_type = mini_get_underlying_type (sig->ret);
617 switch (ret_type->type) {
618 case MONO_TYPE_BOOLEAN:
619 case MONO_TYPE_I1:
620 case MONO_TYPE_U1:
621 case MONO_TYPE_I2:
622 case MONO_TYPE_U2:
623 case MONO_TYPE_CHAR:
624 case MONO_TYPE_I4:
625 case MONO_TYPE_U4:
626 case MONO_TYPE_I:
627 case MONO_TYPE_U:
628 case MONO_TYPE_PTR:
629 case MONO_TYPE_FNPTR:
630 case MONO_TYPE_CLASS:
631 case MONO_TYPE_OBJECT:
632 case MONO_TYPE_SZARRAY:
633 case MONO_TYPE_ARRAY:
634 case MONO_TYPE_STRING:
635 cinfo->ret.storage = ArgInIReg;
636 cinfo->ret.reg = sparc_i0;
637 if (gr < 1)
638 gr = 1;
639 break;
640 case MONO_TYPE_U8:
641 case MONO_TYPE_I8:
642 #ifdef SPARCV9
643 cinfo->ret.storage = ArgInIReg;
644 cinfo->ret.reg = sparc_i0;
645 if (gr < 1)
646 gr = 1;
647 #else
648 cinfo->ret.storage = ArgInIRegPair;
649 cinfo->ret.reg = sparc_i0;
650 if (gr < 2)
651 gr = 2;
652 #endif
653 break;
654 case MONO_TYPE_R4:
655 case MONO_TYPE_R8:
656 cinfo->ret.storage = ArgInFReg;
657 cinfo->ret.reg = sparc_f0;
658 break;
659 case MONO_TYPE_GENERICINST:
660 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
661 cinfo->ret.storage = ArgInIReg;
662 cinfo->ret.reg = sparc_i0;
663 if (gr < 1)
664 gr = 1;
665 break;
667 /* Fall through */
668 case MONO_TYPE_VALUETYPE:
669 if (v64) {
670 if (sig->pinvoke)
671 NOT_IMPLEMENTED;
672 else
673 /* Already done */
676 else
677 cinfo->ret.storage = ArgOnStack;
678 break;
679 case MONO_TYPE_TYPEDBYREF:
680 if (v64) {
681 if (sig->pinvoke)
682 /* Same as a valuetype with size 24 */
683 NOT_IMPLEMENTED;
684 else
685 /* Already done */
688 else
689 cinfo->ret.storage = ArgOnStack;
690 break;
691 case MONO_TYPE_VOID:
692 break;
693 default:
694 g_error ("Can't handle as return value 0x%x", sig->ret->type);
697 cinfo->stack_usage = stack_size;
698 cinfo->reg_usage = gr;
699 return cinfo;
702 GList *
703 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
705 GList *vars = NULL;
706 int i;
709 * FIXME: If an argument is allocated to a register, then load it from the
710 * stack in the prolog.
713 for (i = 0; i < cfg->num_varinfo; i++) {
714 MonoInst *ins = cfg->varinfo [i];
715 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
717 /* unused vars */
718 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
719 continue;
721 /* FIXME: Make arguments on stack allocateable to registers */
722 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
723 continue;
725 if (mono_is_regsize_var (ins->inst_vtype)) {
726 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
727 g_assert (i == vmv->idx);
729 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
733 return vars;
736 GList *
737 mono_arch_get_global_int_regs (MonoCompile *cfg)
739 GList *regs = NULL;
740 int i;
741 MonoMethodSignature *sig;
742 CallInfo *cinfo;
744 sig = mono_method_signature (cfg->method);
746 cinfo = get_call_info (cfg, sig, FALSE);
748 /* Use unused input registers */
749 for (i = cinfo->reg_usage; i < 6; ++i)
750 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
752 /* Use %l0..%l6 as global registers */
753 for (i = sparc_l0; i < sparc_l7; ++i)
754 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
756 g_free (cinfo);
758 return regs;
762 * mono_arch_regalloc_cost:
764 * Return the cost, in number of memory references, of the action of
765 * allocating the variable VMV into a register during global register
766 * allocation.
768 guint32
769 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
771 return 0;
775 * Set var information according to the calling convention. sparc version.
776 * The locals var stuff should most likely be split in another method.
779 void
780 mono_arch_allocate_vars (MonoCompile *cfg)
782 MonoMethodSignature *sig;
783 MonoMethodHeader *header;
784 MonoInst *inst;
785 int i, offset, size, align, curinst;
786 CallInfo *cinfo;
788 header = cfg->header;
790 sig = mono_method_signature (cfg->method);
792 cinfo = get_call_info (cfg, sig, FALSE);
794 if (sig->ret->type != MONO_TYPE_VOID) {
795 switch (cinfo->ret.storage) {
796 case ArgInIReg:
797 case ArgInFReg:
798 cfg->ret->opcode = OP_REGVAR;
799 cfg->ret->inst_c0 = cinfo->ret.reg;
800 break;
801 case ArgInIRegPair: {
802 MonoType *t = mini_get_underlying_type (sig->ret);
803 if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
804 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
805 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));
807 low->opcode = OP_REGVAR;
808 low->dreg = cinfo->ret.reg + 1;
809 high->opcode = OP_REGVAR;
810 high->dreg = cinfo->ret.reg;
812 cfg->ret->opcode = OP_REGVAR;
813 cfg->ret->inst_c0 = cinfo->ret.reg;
814 break;
816 case ArgOnStack:
817 #ifdef SPARCV9
818 g_assert_not_reached ();
819 #else
820 /* valuetypes */
821 cfg->vret_addr->opcode = OP_REGOFFSET;
822 cfg->vret_addr->inst_basereg = sparc_fp;
823 cfg->vret_addr->inst_offset = 64;
824 #endif
825 break;
826 default:
827 NOT_IMPLEMENTED;
829 cfg->ret->dreg = cfg->ret->inst_c0;
833 * We use the ABI calling conventions for managed code as well.
834 * Exception: valuetypes are never returned in registers on V9.
835 * FIXME: Use something more optimized.
838 /* Locals are allocated backwards from %fp */
839 cfg->frame_reg = sparc_fp;
840 offset = 0;
843 * Reserve a stack slot for holding information used during exception
844 * handling.
846 if (header->num_clauses)
847 offset += sizeof (gpointer) * 2;
849 if (cfg->method->save_lmf) {
850 offset += sizeof (MonoLMF);
851 cfg->arch.lmf_offset = offset;
854 curinst = cfg->locals_start;
855 for (i = curinst; i < cfg->num_varinfo; ++i) {
856 inst = cfg->varinfo [i];
858 if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
859 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
860 continue;
863 if (inst->flags & MONO_INST_IS_DEAD)
864 continue;
866 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
867 * pinvoke wrappers when they call functions returning structures */
868 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
869 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
870 else
871 size = mini_type_stack_size (inst->inst_vtype, &align);
874 * This is needed since structures containing doubles must be doubleword
875 * aligned.
876 * FIXME: Do this only if needed.
878 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
879 align = 8;
882 * variables are accessed as negative offsets from %fp, so increase
883 * the offset before assigning it to a variable
885 offset += size;
887 offset += align - 1;
888 offset &= ~(align - 1);
889 inst->opcode = OP_REGOFFSET;
890 inst->inst_basereg = sparc_fp;
891 inst->inst_offset = STACK_BIAS + -offset;
893 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
896 if (sig->call_convention == MONO_CALL_VARARG) {
897 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
900 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
901 inst = cfg->args [i];
902 if (inst->opcode != OP_REGVAR) {
903 ArgInfo *ainfo = &cinfo->args [i];
904 gboolean inreg = TRUE;
905 MonoType *arg_type;
906 ArgStorage storage;
908 if (sig->hasthis && (i == 0))
909 arg_type = &mono_defaults.object_class->byval_arg;
910 else
911 arg_type = sig->params [i - sig->hasthis];
913 #ifndef SPARCV9
914 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
915 || (arg_type->type == MONO_TYPE_R8)))
917 * Since float arguments are passed in integer registers, we need to
918 * save them to the stack in the prolog.
920 inreg = FALSE;
921 #endif
923 /* FIXME: Allocate volatile arguments to registers */
924 /* FIXME: This makes the argument holding a vtype address into volatile */
925 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
926 inreg = FALSE;
928 if (MONO_TYPE_ISSTRUCT (arg_type))
929 /* FIXME: this isn't needed */
930 inreg = FALSE;
932 inst->opcode = OP_REGOFFSET;
934 if (!inreg)
935 storage = ArgOnStack;
936 else
937 storage = ainfo->storage;
939 switch (storage) {
940 case ArgInIReg:
941 inst->opcode = OP_REGVAR;
942 inst->dreg = sparc_i0 + ainfo->reg;
943 break;
944 case ArgInIRegPair:
945 if (inst->type == STACK_I8) {
946 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg));
947 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg));
949 low->opcode = OP_REGVAR;
950 low->dreg = sparc_i0 + ainfo->reg + 1;
951 high->opcode = OP_REGVAR;
952 high->dreg = sparc_i0 + ainfo->reg;
954 inst->opcode = OP_REGVAR;
955 inst->dreg = sparc_i0 + ainfo->reg;
956 break;
957 case ArgInFloatReg:
958 case ArgInDoubleReg:
960 * Since float regs are volatile, we save the arguments to
961 * the stack in the prolog.
962 * FIXME: Avoid this if the method contains no calls.
964 case ArgOnStack:
965 case ArgOnStackPair:
966 case ArgInSplitRegStack:
967 /* Split arguments are saved to the stack in the prolog */
968 inst->opcode = OP_REGOFFSET;
969 /* in parent frame */
970 inst->inst_basereg = sparc_fp;
971 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
973 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
975 * It is very hard to load doubles from non-doubleword aligned
976 * memory locations. So if the offset is misaligned, we copy the
977 * argument to a stack location in the prolog.
979 if ((inst->inst_offset - STACK_BIAS) % 8) {
980 inst->inst_basereg = sparc_fp;
981 offset += 8;
982 align = 8;
983 offset += align - 1;
984 offset &= ~(align - 1);
985 inst->inst_offset = STACK_BIAS + -offset;
989 break;
990 default:
991 NOT_IMPLEMENTED;
994 if (MONO_TYPE_ISSTRUCT (arg_type)) {
995 /* Add a level of indirection */
997 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
998 * are destructively modified in a lot of places in inssel.brg.
1000 MonoInst *indir;
1001 MONO_INST_NEW (cfg, indir, 0);
1002 *indir = *inst;
1003 inst->opcode = OP_VTARG_ADDR;
1004 inst->inst_left = indir;
1010 * spillvars are stored between the normal locals and the storage reserved
1011 * by the ABI.
1014 cfg->stack_offset = offset;
1016 g_free (cinfo);
1019 void
1020 mono_arch_create_vars (MonoCompile *cfg)
1022 MonoMethodSignature *sig;
1024 sig = mono_method_signature (cfg->method);
1026 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
1027 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1028 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1029 printf ("vret_addr = ");
1030 mono_print_ins (cfg->vret_addr);
1034 if (!sig->ret->byref && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
1035 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
1036 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));
1038 low->flags |= MONO_INST_VOLATILE;
1039 high->flags |= MONO_INST_VOLATILE;
1042 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1043 cfg->arch.float_spill_slot = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_ARG);
1044 ((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE;
1047 static void
1048 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
1050 MonoInst *arg;
1052 MONO_INST_NEW (cfg, arg, 0);
1054 arg->sreg1 = sreg;
1056 switch (storage) {
1057 case ArgInIReg:
1058 arg->opcode = OP_MOVE;
1059 arg->dreg = mono_alloc_ireg (cfg);
1061 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
1062 break;
1063 case ArgInFloatReg:
1064 arg->opcode = OP_FMOVE;
1065 arg->dreg = mono_alloc_freg (cfg);
1067 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1068 break;
1069 default:
1070 g_assert_not_reached ();
1073 MONO_ADD_INS (cfg->cbb, arg);
1076 static void
1077 add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
1079 int dreg = mono_alloc_ireg (cfg);
1081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset);
1083 mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
1086 static void
1087 emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1089 int offset = ARGS_OFFSET + ainfo->offset;
1091 switch (ainfo->storage) {
1092 case ArgInIRegPair:
1093 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, MONO_LVREG_LS (in->dreg));
1094 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
1095 break;
1096 case ArgOnStackPair:
1097 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, MONO_LVREG_MS (in->dreg));
1098 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
1099 break;
1100 case ArgInSplitRegStack:
1101 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
1102 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
1103 break;
1104 default:
1105 g_assert_not_reached ();
1109 static void
1110 emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1112 int offset = ARGS_OFFSET + ainfo->offset;
1114 switch (ainfo->storage) {
1115 case ArgInIRegPair:
1116 /* floating-point <-> integer transfer must go through memory */
1117 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1119 /* Load into a register pair */
1120 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1121 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
1122 break;
1123 case ArgOnStackPair:
1124 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1125 break;
1126 case ArgInSplitRegStack:
1127 /* floating-point <-> integer transfer must go through memory */
1128 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1129 /* Load most significant word into register */
1130 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1131 break;
1132 default:
1133 g_assert_not_reached ();
1137 static void
1138 emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1140 int offset = ARGS_OFFSET + ainfo->offset;
1142 switch (ainfo->storage) {
1143 case ArgInIReg:
1144 /* floating-point <-> integer transfer must go through memory */
1145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1146 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1147 break;
1148 case ArgOnStack:
1149 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1150 break;
1151 default:
1152 g_assert_not_reached ();
1156 static void
1157 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
1159 static void
1160 emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
1162 MonoInst *arg;
1163 guint32 align, offset, pad, size;
1165 if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
1166 size = sizeof (MonoTypedRef);
1167 align = sizeof (gpointer);
1169 else if (pinvoke)
1170 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1171 else {
1173 * Other backends use mono_type_stack_size (), but that
1174 * aligns the size to 8, which is larger than the size of
1175 * the source, leading to reads of invalid memory if the
1176 * source is at the end of address space.
1178 size = mono_class_value_size (in->klass, &align);
1181 /* The first 6 argument locations are reserved */
1182 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1183 cinfo->stack_usage = 6 * sizeof (gpointer);
1185 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1186 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1188 cinfo->stack_usage += size;
1189 cinfo->stack_usage += pad;
1192 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1193 * use the normal OUTARG opcodes to pass the address of the location to
1194 * the callee.
1196 if (size > 0) {
1197 MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
1198 arg->sreg1 = in->dreg;
1199 arg->klass = in->klass;
1200 arg->backend.size = size;
1201 arg->inst_p0 = call;
1202 arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1203 memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
1204 ((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
1205 MONO_ADD_INS (cfg->cbb, arg);
1207 MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
1208 arg->dreg = mono_alloc_preg (cfg);
1209 arg->sreg1 = sparc_sp;
1210 arg->inst_imm = STACK_BIAS + offset;
1211 MONO_ADD_INS (cfg->cbb, arg);
1213 emit_pass_other (cfg, call, ainfo, NULL, arg);
1217 static void
1218 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
1220 int offset = ARGS_OFFSET + ainfo->offset;
1221 int opcode;
1223 switch (ainfo->storage) {
1224 case ArgInIReg:
1225 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
1226 break;
1227 case ArgOnStack:
1228 #ifdef SPARCV9
1229 NOT_IMPLEMENTED;
1230 #else
1231 if (offset & 0x1)
1232 opcode = OP_STOREI1_MEMBASE_REG;
1233 else if (offset & 0x2)
1234 opcode = OP_STOREI2_MEMBASE_REG;
1235 else
1236 opcode = OP_STOREI4_MEMBASE_REG;
1237 MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
1238 #endif
1239 break;
1240 default:
1241 g_assert_not_reached ();
1245 static void
1246 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1248 MonoMethodSignature *tmp_sig;
1251 * mono_ArgIterator_Setup assumes the signature cookie is
1252 * passed first and all the arguments which were before it are
1253 * passed on the stack after the signature. So compensate by
1254 * passing a different signature.
1256 tmp_sig = mono_metadata_signature_dup (call->signature);
1257 tmp_sig->param_count -= call->signature->sentinelpos;
1258 tmp_sig->sentinelpos = 0;
1259 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1261 /* FIXME: Add support for signature tokens to AOT */
1262 cfg->disable_aot = TRUE;
1263 /* We always pass the signature on the stack for simplicity */
1264 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
1267 void
1268 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1270 MonoInst *in;
1271 MonoMethodSignature *sig;
1272 int i, n;
1273 CallInfo *cinfo;
1274 ArgInfo *ainfo;
1275 guint32 extra_space = 0;
1277 sig = call->signature;
1278 n = sig->param_count + sig->hasthis;
1280 cinfo = get_call_info (cfg, sig, sig->pinvoke);
1282 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1283 /* Set the 'struct/union return pointer' location on the stack */
1284 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
1287 for (i = 0; i < n; ++i) {
1288 MonoType *arg_type;
1290 ainfo = cinfo->args + i;
1292 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1293 /* Emit the signature cookie just before the first implicit argument */
1294 emit_sig_cookie (cfg, call, cinfo);
1297 in = call->args [i];
1299 if (sig->hasthis && (i == 0))
1300 arg_type = &mono_defaults.object_class->byval_arg;
1301 else
1302 arg_type = sig->params [i - sig->hasthis];
1304 arg_type = mini_get_underlying_type (arg_type);
1305 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
1306 emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
1307 else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
1308 emit_pass_long (cfg, call, ainfo, in);
1309 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
1310 emit_pass_double (cfg, call, ainfo, in);
1311 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
1312 emit_pass_float (cfg, call, ainfo, in);
1313 else
1314 emit_pass_other (cfg, call, ainfo, arg_type, in);
1317 /* Handle the case where there are no implicit arguments */
1318 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1319 emit_sig_cookie (cfg, call, cinfo);
1322 call->stack_usage = cinfo->stack_usage + extra_space;
1324 g_free (cinfo);
1327 void
1328 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1330 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
1331 int size = ins->backend.size;
1333 mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, 0);
1336 void
1337 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1339 CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
1340 MonoType *ret = mini_get_underlying_type (mono_method_signature (method)->ret);
1342 switch (cinfo->ret.storage) {
1343 case ArgInIReg:
1344 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1345 break;
1346 case ArgInIRegPair:
1347 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1348 MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
1349 } else {
1350 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (cfg->ret->dreg), MONO_LVREG_MS (val->dreg));
1351 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (cfg->ret->dreg), MONO_LVREG_LS (val->dreg));
1353 break;
1354 case ArgInFReg:
1355 if (ret->type == MONO_TYPE_R4)
1356 MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
1357 else
1358 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1359 break;
1360 default:
1361 g_assert_not_reached ();
1364 g_assert (cinfo);
1367 int cond_to_sparc_cond [][3] = {
1368 {sparc_be, sparc_be, sparc_fbe},
1369 {sparc_bne, sparc_bne, 0},
1370 {sparc_ble, sparc_ble, sparc_fble},
1371 {sparc_bge, sparc_bge, sparc_fbge},
1372 {sparc_bl, sparc_bl, sparc_fbl},
1373 {sparc_bg, sparc_bg, sparc_fbg},
1374 {sparc_bleu, sparc_bleu, 0},
1375 {sparc_beu, sparc_beu, 0},
1376 {sparc_blu, sparc_blu, sparc_fbl},
1377 {sparc_bgu, sparc_bgu, sparc_fbg}
1380 /* Map opcode to the sparc condition codes */
1381 static inline SparcCond
1382 opcode_to_sparc_cond (int opcode)
1384 CompRelation rel;
1385 CompType t;
1387 switch (opcode) {
1388 case OP_COND_EXC_OV:
1389 case OP_COND_EXC_IOV:
1390 return sparc_bvs;
1391 case OP_COND_EXC_C:
1392 case OP_COND_EXC_IC:
1393 return sparc_bcs;
1394 case OP_COND_EXC_NO:
1395 case OP_COND_EXC_NC:
1396 NOT_IMPLEMENTED;
1397 default:
1398 rel = mono_opcode_to_cond (opcode);
1399 t = mono_opcode_to_type (opcode, -1);
1401 return cond_to_sparc_cond [rel][t];
1402 break;
1405 return -1;
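/* COMPUTE_DISP below yields a displacement in 4 byte instruction words from the current code pointer; it is 0 (and patched later) when the target basic block has not been emitted yet */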
1408 #define COMPUTE_DISP(ins) \
1409 if (ins->inst_true_bb->native_offset) \
1410 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1411 else { \
1412 disp = 0; \
1413 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1416 #ifdef SPARCV9
1417 #define DEFAULT_ICC sparc_xcc_short
1418 #else
1419 #define DEFAULT_ICC sparc_icc_short
1420 #endif
1422 #ifdef SPARCV9
1423 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1424 do { \
1425 gint32 disp; \
1426 guint32 predict; \
1427 COMPUTE_DISP(ins); \
1428 predict = (disp != 0) ? 1 : 0; \
1429 g_assert (sparc_is_imm19 (disp)); \
1430 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1431 if (filldelay) sparc_nop (code); \
1432 } while (0)
1433 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1434 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1435 do { \
1436 gint32 disp; \
1437 guint32 predict; \
1438 COMPUTE_DISP(ins); \
1439 predict = (disp != 0) ? 1 : 0; \
1440 g_assert (sparc_is_imm19 (disp)); \
1441 sparc_fbranch (code, (annul), cond, disp); \
1442 if (filldelay) sparc_nop (code); \
1443 } while (0)
1444 #else
1445 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1446 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1447 do { \
1448 gint32 disp; \
1449 COMPUTE_DISP(ins); \
1450 g_assert (sparc_is_imm22 (disp)); \
1451 sparc_ ## bop (code, (annul), cond, disp); \
1452 if (filldelay) sparc_nop (code); \
1453 } while (0)
1454 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1455 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1456 #endif
1458 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1459 do { \
1460 gint32 disp; \
1461 guint32 predict; \
1462 COMPUTE_DISP(ins); \
1463 predict = (disp != 0) ? 1 : 0; \
1464 g_assert (sparc_is_imm19 (disp)); \
1465 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1466 if (filldelay) sparc_nop (code); \
1467 } while (0)
1469 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1470 do { \
1471 gint32 disp; \
1472 COMPUTE_DISP(ins); \
1473 g_assert (sparc_is_imm22 (disp)); \
1474 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1475 if (filldelay) sparc_nop (code); \
1476 } while (0)
1478 /* emit an exception if the condition fails */
1480 * We put the exception throwing code out-of-line, at the end of the method
1482 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1483 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1484 MONO_PATCH_INFO_EXC, sexc_name); \
1485 if (mono_hwcap_sparc_is_v9 && ((icc) != sparc_icc_short)) { \
1486 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1488 else { \
1489 sparc_branch (code, 0, cond, 0); \
1491 if (filldelay) sparc_nop (code); \
1492 } while (0);
1494 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1496 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1497 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1498 MONO_PATCH_INFO_EXC, sexc_name); \
1499 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1500 sparc_nop (code); \
1501 } while (0);
1503 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1504 if (sparc_is_imm13 ((ins)->inst_imm)) \
1505 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1506 else { \
1507 sparc_set (code, ins->inst_imm, sparc_o7); \
1508 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1510 } while (0);
1512 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1513 if (sparc_is_imm13 (ins->inst_offset)) \
1514 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1515 else { \
1516 sparc_set (code, ins->inst_offset, sparc_o7); \
1517 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1519 } while (0);
1521 /* max len = 5 */
1522 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1523 guint32 sreg; \
1524 if (ins->inst_imm == 0) \
1525 sreg = sparc_g0; \
1526 else { \
1527 sparc_set (code, ins->inst_imm, sparc_o7); \
1528 sreg = sparc_o7; \
1530 if (!sparc_is_imm13 (ins->inst_offset)) { \
1531 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1532 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1534 else \
1535 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1536 } while (0);
1538 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1539 if (!sparc_is_imm13 (ins->inst_offset)) { \
1540 sparc_set (code, ins->inst_offset, sparc_o7); \
1541 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1543 else \
1544 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1545 } while (0);
1547 #define EMIT_CALL() do { \
1548 if (v64) { \
1549 sparc_set_template (code, sparc_o7); \
1550 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1552 else { \
1553 sparc_call_simple (code, 0); \
1555 sparc_nop (code); \
1556 } while (0);
1559 * A call template is 7 instructions long, so we want to avoid it if possible.
1561 static guint32*
1562 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1564 MonoError error;
1565 gpointer target;
1567 /* FIXME: This only works if the target method is already compiled */
1568 if (0 && v64 && !cfg->compile_aot) {
1569 MonoJumpInfo patch_info;
1571 patch_info.type = patch_type;
1572 patch_info.data.target = data;
1574 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE, &error);
1575 mono_error_raise_exception (&error); /* FIXME: don't raise here */
1577 /* FIXME: Add optimizations if the target is close enough */
1578 sparc_set (code, target, sparc_o7);
1579 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1580 sparc_nop (code);
1582 else {
1583 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
1584 EMIT_CALL ();
1587 return code;
1590 void
1591 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
1595 void
1596 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1598 MonoInst *ins, *n, *last_ins = NULL;
1599 ins = bb->code;
1601 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1602 switch (ins->opcode) {
1603 case OP_MUL_IMM:
1604 /* remove unnecessary multiplication with 1 */
1605 if (ins->inst_imm == 1) {
1606 if (ins->dreg != ins->sreg1) {
1607 ins->opcode = OP_MOVE;
1608 } else {
1609 MONO_DELETE_INS (bb, ins);
1610 continue;
1613 break;
1614 #ifndef SPARCV9
1615 case OP_LOAD_MEMBASE:
1616 case OP_LOADI4_MEMBASE:
1618 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1619 * OP_LOAD_MEMBASE offset(basereg), reg
1621 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1622 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1623 ins->inst_basereg == last_ins->inst_destbasereg &&
1624 ins->inst_offset == last_ins->inst_offset) {
1625 if (ins->dreg == last_ins->sreg1) {
1626 MONO_DELETE_INS (bb, ins);
1627 continue;
1628 } else {
1629 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1630 ins->opcode = OP_MOVE;
1631 ins->sreg1 = last_ins->sreg1;
1635 * Note: reg1 must be different from the basereg in the second load
1636 * OP_LOAD_MEMBASE offset(basereg), reg1
1637 * OP_LOAD_MEMBASE offset(basereg), reg2
1638 * -->
1639 * OP_LOAD_MEMBASE offset(basereg), reg1
1640 * OP_MOVE reg1, reg2
1642 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1643 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1644 ins->inst_basereg != last_ins->dreg &&
1645 ins->inst_basereg == last_ins->inst_basereg &&
1646 ins->inst_offset == last_ins->inst_offset) {
1648 if (ins->dreg == last_ins->dreg) {
1649 MONO_DELETE_INS (bb, ins);
1650 continue;
1651 } else {
1652 ins->opcode = OP_MOVE;
1653 ins->sreg1 = last_ins->dreg;
1656 //g_assert_not_reached ();
1658 #if 0
1660 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1661 * OP_LOAD_MEMBASE offset(basereg), reg
1662 * -->
1663 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1664 * OP_ICONST reg, imm
1666 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1667 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1668 ins->inst_basereg == last_ins->inst_destbasereg &&
1669 ins->inst_offset == last_ins->inst_offset) {
1670 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1671 ins->opcode = OP_ICONST;
1672 ins->inst_c0 = last_ins->inst_imm;
1673 g_assert_not_reached (); // check this rule
1674 #endif
1676 break;
1677 #endif
1678 case OP_LOADI1_MEMBASE:
1679 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1680 ins->inst_basereg == last_ins->inst_destbasereg &&
1681 ins->inst_offset == last_ins->inst_offset) {
1682 if (ins->dreg == last_ins->sreg1) {
1683 MONO_DELETE_INS (bb, ins);
1684 continue;
1685 } else {
1686 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1687 ins->opcode = OP_MOVE;
1688 ins->sreg1 = last_ins->sreg1;
1691 break;
1692 case OP_LOADI2_MEMBASE:
1693 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1694 ins->inst_basereg == last_ins->inst_destbasereg &&
1695 ins->inst_offset == last_ins->inst_offset) {
1696 if (ins->dreg == last_ins->sreg1) {
1697 MONO_DELETE_INS (bb, ins);
1698 continue;
1699 } else {
1700 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1701 ins->opcode = OP_MOVE;
1702 ins->sreg1 = last_ins->sreg1;
1705 break;
1706 case OP_STOREI4_MEMBASE_IMM:
1707 /* Convert pairs of 0 stores to a dword 0 store */
1708 /* Used when initializing temporaries */
1709 /* We know sparc_fp is dword aligned */
1710 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1711 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1712 (ins->inst_destbasereg == sparc_fp) &&
1713 (ins->inst_offset < 0) &&
1714 ((ins->inst_offset % 8) == 0) &&
1715 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1716 (ins->inst_imm == 0) &&
1717 (last_ins->inst_imm == 0)) {
1718 if (mono_hwcap_sparc_is_v9) {
1719 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1720 last_ins->inst_offset = ins->inst_offset;
1721 MONO_DELETE_INS (bb, ins);
1722 continue;
1725 break;
1726 case OP_IBEQ:
1727 case OP_IBNE_UN:
1728 case OP_IBLT:
1729 case OP_IBGT:
1730 case OP_IBGE:
1731 case OP_IBLE:
1732 case OP_COND_EXC_EQ:
1733 case OP_COND_EXC_GE:
1734 case OP_COND_EXC_GT:
1735 case OP_COND_EXC_LE:
1736 case OP_COND_EXC_LT:
1737 case OP_COND_EXC_NE_UN:
1739 * Convert compare with zero+branch to BRcc
1742 * This only works in 64 bit mode, since it examines all 64
1743 * bits of the register.
1744 * Only do this if the method is small since BPr only has a 16bit
1745 * displacement.
1747 if (v64 && (cfg->header->code_size < 10000) && last_ins &&
1748 (last_ins->opcode == OP_COMPARE_IMM) &&
1749 (last_ins->inst_imm == 0)) {
1750 switch (ins->opcode) {
1751 case OP_IBEQ:
1752 ins->opcode = OP_SPARC_BRZ;
1753 break;
1754 case OP_IBNE_UN:
1755 ins->opcode = OP_SPARC_BRNZ;
1756 break;
1757 case OP_IBLT:
1758 ins->opcode = OP_SPARC_BRLZ;
1759 break;
1760 case OP_IBGT:
1761 ins->opcode = OP_SPARC_BRGZ;
1762 break;
1763 case OP_IBGE:
1764 ins->opcode = OP_SPARC_BRGEZ;
1765 break;
1766 case OP_IBLE:
1767 ins->opcode = OP_SPARC_BRLEZ;
1768 break;
1769 case OP_COND_EXC_EQ:
1770 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1771 break;
1772 case OP_COND_EXC_GE:
1773 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1774 break;
1775 case OP_COND_EXC_GT:
1776 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1777 break;
1778 case OP_COND_EXC_LE:
1779 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1780 break;
1781 case OP_COND_EXC_LT:
1782 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1783 break;
1784 case OP_COND_EXC_NE_UN:
1785 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1786 break;
1787 default:
1788 g_assert_not_reached ();
1790 ins->sreg1 = last_ins->sreg1;
1791 *last_ins = *ins;
1792 MONO_DELETE_INS (bb, ins);
1793 continue;
1795 break;
1796 case OP_MOVE:
1798 * OP_MOVE reg, reg
1800 if (ins->dreg == ins->sreg1) {
1801 MONO_DELETE_INS (bb, ins);
1802 continue;
1805 * OP_MOVE sreg, dreg
1806 * OP_MOVE dreg, sreg
1808 if (last_ins && last_ins->opcode == OP_MOVE &&
1809 ins->sreg1 == last_ins->dreg &&
1810 ins->dreg == last_ins->sreg1) {
1811 MONO_DELETE_INS (bb, ins);
1812 continue;
1814 break;
1816 last_ins = ins;
1817 ins = ins->next;
1819 bb->last_ins = last_ins;
1822 void
1823 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
1825 switch (ins->opcode) {
1826 case OP_LNEG:
1827 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), 0, MONO_LVREG_LS (ins->sreg1));
1828 MONO_EMIT_NEW_BIALU (cfg, OP_SBB, MONO_LVREG_MS (ins->dreg), 0, MONO_LVREG_MS (ins->sreg1));
1829 NULLIFY_INS (ins);
1830 break;
1831 default:
1832 break;
1836 void
1837 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1841 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
1843 static void
1844 sparc_patch (guint32 *code, const gpointer target)
1846 guint32 *c = code;
1847 guint32 ins = *code;
1848 guint32 op = ins >> 30;
1849 guint32 op2 = (ins >> 22) & 0x7;
1850 guint32 rd = (ins >> 25) & 0x1f;
1851 guint8* target8 = (guint8*)target;
1852 gint64 disp = (target8 - (guint8*)code) >> 2;
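/* branch displacements are measured in 4 byte instruction words relative to the instruction being patched */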
1853 int reg;
1855 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1857 if ((op == 0) && (op2 == 2)) {
1858 if (!sparc_is_imm22 (disp))
1859 NOT_IMPLEMENTED;
1860 /* Bicc */
1861 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
1863 else if ((op == 0) && (op2 == 1)) {
1864 if (!sparc_is_imm19 (disp))
1865 NOT_IMPLEMENTED;
1866 /* BPcc */
1867 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
1869 else if ((op == 0) && (op2 == 3)) {
1870 if (!sparc_is_imm16 (disp))
1871 NOT_IMPLEMENTED;
1872 /* BPr */
1873 *code &= ~(0x180000 | 0x3fff);
1874 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
1876 else if ((op == 0) && (op2 == 6)) {
1877 if (!sparc_is_imm22 (disp))
1878 NOT_IMPLEMENTED;
1879 /* FBicc */
1880 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
1882 else if ((op == 0) && (op2 == 4)) {
1883 guint32 ins2 = code [1];
1885 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
1886 /* sethi followed by or */
1887 guint32 *p = code;
1888 sparc_set (p, target8, rd);
1889 while (p <= (code + 1))
1890 sparc_nop (p);
1892 else if (ins2 == 0x01000000) {
1893 /* sethi followed by nop */
1894 guint32 *p = code;
1895 sparc_set (p, target8, rd);
1896 while (p <= (code + 1))
1897 sparc_nop (p);
1899 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
1900 /* sethi followed by load/store */
1901 #ifndef SPARCV9
1902 guint32 t = (guint32)target8;
1903 *code &= ~(0x3fffff);
1904 *code |= (t >> 10);
1905 *(code + 1) &= ~(0x3ff);
1906 *(code + 1) |= (t & 0x3ff);
1907 #endif
1909 else if (v64 &&
1910 (sparc_inst_rd (ins) == sparc_g1) &&
1911 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
1912 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
1913 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
1915 /* sparc_set */
1916 guint32 *p = c;
1917 reg = sparc_inst_rd (c [1]);
1918 sparc_set (p, target8, reg);
1919 while (p < (c + 6))
1920 sparc_nop (p);
1922 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
1923 (sparc_inst_imm (ins2))) {
1924 /* sethi followed by jmpl */
1925 #ifndef SPARCV9
1926 guint32 t = (guint32)target8;
1927 *code &= ~(0x3fffff);
1928 *code |= (t >> 10);
1929 *(code + 1) &= ~(0x3ff);
1930 *(code + 1) |= (t & 0x3ff);
1931 #endif
1933 else
1934 NOT_IMPLEMENTED;
1936 else if (op == 01) {
1937 gint64 disp = (target8 - (guint8*)code) >> 2;
1939 if (!sparc_is_imm30 (disp))
1940 NOT_IMPLEMENTED;
1941 sparc_call_simple (code, target8 - (guint8*)code);
1943 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
1944 /* mov imm, reg */
1945 g_assert (sparc_is_imm13 (target8));
1946 *code &= ~(0x1fff);
1947 *code |= (guint32)target8;
1949 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
1950 /* sparc_set case 5. */
1951 guint32 *p = c;
1953 g_assert (v64);
1954 reg = sparc_inst_rd (c [3]);
1955 sparc_set (p, target, reg);
1956 while (p < (c + 6))
1957 sparc_nop (p);
1959 else
1960 NOT_IMPLEMENTED;
1962 // g_print ("patched with 0x%08x\n", ins);
1966 * mono_sparc_emit_save_lmf:
1968 * Emit the code necessary to push a new entry onto the lmf stack. Used by
1969 * trampolines as well.
1971 guint32*
1972 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
1974 /* Save lmf_addr */
1975 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
1976 /* Save previous_lmf */
1977 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
1978 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1979 /* Set new lmf */
1980 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
1981 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
1983 return code;
1986 guint32*
1987 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
1989 /* Load previous_lmf */
1990 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
1991 /* Load lmf_addr */
1992 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
1993 /* *(lmf) = previous_lmf */
1994 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
1995 return code;
1998 static guint32*
1999 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2002 * Since register windows are saved to the current value of %sp, we need to
2003 * set the sp field in the lmf before the call, not in the prolog.
2005 if (cfg->method->save_lmf) {
2006 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2008 /* Save sp */
2009 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
2012 return code;
2015 static guint32*
2016 emit_vret_token (MonoInst *ins, guint32 *code)
2018 MonoCallInst *call = (MonoCallInst*)ins;
2019 guint32 size;
2022 * The sparc ABI requires that calls to functions which return a structure
2023 * contain an additional unimpl instruction which is checked by the callee.
2025 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2026 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2027 size = mini_type_stack_size (call->signature->ret, NULL);
2028 else
2029 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
2030 sparc_unimp (code, size & 0xfff);
2033 return code;
2036 static guint32*
2037 emit_move_return_value (MonoInst *ins, guint32 *code)
2039 /* Move return value to the target register */
2040 /* FIXME: do more things in the local reg allocator */
2041 switch (ins->opcode) {
2042 case OP_VOIDCALL:
2043 case OP_VOIDCALL_REG:
2044 case OP_VOIDCALL_MEMBASE:
2045 break;
2046 case OP_CALL:
2047 case OP_CALL_REG:
2048 case OP_CALL_MEMBASE:
2049 g_assert (ins->dreg == sparc_o0);
2050 break;
2051 case OP_LCALL:
2052 case OP_LCALL_REG:
2053 case OP_LCALL_MEMBASE:
2055 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2056 * in inssel-long32.brg.
2058 #ifdef SPARCV9
2059 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2060 #else
2061 g_assert (ins->dreg == sparc_o1);
2062 #endif
2063 break;
2064 case OP_FCALL:
2065 case OP_FCALL_REG:
2066 case OP_FCALL_MEMBASE:
2067 #ifdef SPARCV9
2068 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2069 sparc_fmovs (code, sparc_f0, ins->dreg);
2070 sparc_fstod (code, ins->dreg, ins->dreg);
2072 else
2073 sparc_fmovd (code, sparc_f0, ins->dreg);
2074 #else
2075 sparc_fmovs (code, sparc_f0, ins->dreg);
2076 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2077 sparc_fstod (code, ins->dreg, ins->dreg);
2078 else
2079 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2080 #endif
2081 break;
2082 case OP_VCALL:
2083 case OP_VCALL_REG:
2084 case OP_VCALL_MEMBASE:
2085 case OP_VCALL2:
2086 case OP_VCALL2_REG:
2087 case OP_VCALL2_MEMBASE:
2088 break;
2089 default:
2090 NOT_IMPLEMENTED;
2093 return code;
2097 * emit_load_volatile_arguments:
2099 * Load volatile arguments from the stack to the original input registers.
2100 * Required before a tail call.
2102 static guint32*
2103 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2105 MonoMethod *method = cfg->method;
2106 MonoMethodSignature *sig;
2107 MonoInst *inst;
2108 CallInfo *cinfo;
2109 guint32 i, ireg;
2111 /* FIXME: Generate intermediate code instead */
2113 sig = mono_method_signature (method);
2115 cinfo = get_call_info (cfg, sig, FALSE);
2117 /* This is the opposite of the code in emit_prolog */
2119 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2120 ArgInfo *ainfo = cinfo->args + i;
2121 gint32 stack_offset;
2122 MonoType *arg_type;
2124 inst = cfg->args [i];
2126 if (sig->hasthis && (i == 0))
2127 arg_type = &mono_defaults.object_class->byval_arg;
2128 else
2129 arg_type = sig->params [i - sig->hasthis];
2131 stack_offset = ainfo->offset + ARGS_OFFSET;
2132 ireg = sparc_i0 + ainfo->reg;
2134 if (ainfo->storage == ArgInSplitRegStack) {
2135 g_assert (inst->opcode == OP_REGOFFSET);
2137 if (!sparc_is_imm13 (stack_offset))
2138 NOT_IMPLEMENTED;
2139                         sparc_ld_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
2142 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2143 if (ainfo->storage == ArgInIRegPair) {
2144 if (!sparc_is_imm13 (inst->inst_offset + 4))
2145 NOT_IMPLEMENTED;
2146 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2147 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2149 else
2150 if (ainfo->storage == ArgInSplitRegStack) {
2151 if (stack_offset != inst->inst_offset) {
2152 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2153 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2154 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2158 else
2159 if (ainfo->storage == ArgOnStackPair) {
2160 if (stack_offset != inst->inst_offset) {
2161 /* stack_offset is not dword aligned, so we need to make a copy */
2162 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2163 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2165 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2166 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2170 else
2171 g_assert_not_reached ();
2173 else
2174 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2175                         /* Argument normally in a register, reload it from its stack slot */
2176 if (!sparc_is_imm13 (stack_offset))
2177 NOT_IMPLEMENTED;
2178 if ((stack_offset - ARGS_OFFSET) & 0x1)
2179 /* FIXME: Is this ldsb or ldub ? */
2180 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2181 else
2182 if ((stack_offset - ARGS_OFFSET) & 0x2)
2183 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2184 else
2185 if ((stack_offset - ARGS_OFFSET) & 0x4)
2186 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2187 else {
2188 if (v64)
2189 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2190 else
2191 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2194 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2195                         /* Argument normally in a register pair, reload it from the stack */
2196 if (!sparc_is_imm13 (inst->inst_offset + 4))
2197 NOT_IMPLEMENTED;
2198 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2199                         sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2201 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2202 NOT_IMPLEMENTED;
2204 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2205 NOT_IMPLEMENTED;
2208 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2209 if (inst->opcode == OP_REGVAR)
2210 /* FIXME: Load the argument into memory */
2211 NOT_IMPLEMENTED;
2214 g_free (cinfo);
2216 return code;
2220 * mono_sparc_is_virtual_call:
2222  * Determine whether the instruction at CODE is a virtual call.
2224 gboolean
2225 mono_sparc_is_virtual_call (guint32 *code)
2227 guint32 buf[1];
2228 guint32 *p;
2230 p = buf;
2232 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2234 * Register indirect call. If it is a virtual call, then the
2235 * instruction in the delay slot is a special kind of nop.
2238 /* Construct special nop */
2239 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
2240 p --;
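/* sparc_or_imm advanced p past the word it emitted, so step back to compare that word against the delay slot instruction */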
2242 if (code [1] == p [0])
2243 return TRUE;
2246 return FALSE;
2249 #define CMP_SIZE 3
2250 #define BR_SMALL_SIZE 2
2251 #define BR_LARGE_SIZE 2
2252 #define JUMP_IMM_SIZE 5
2253 #define ENABLE_WRONG_METHOD_CHECK 0
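/* The *_SIZE constants above count 32 bit instruction words; the allocation below multiplies the total by 4 to get bytes */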
2256 * LOCKING: called with the domain lock held
2258 gpointer
2259 mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
2260 gpointer fail_tramp)
2262 int i;
2263 int size = 0;
2264 guint32 *code, *start;
2266 for (i = 0; i < count; ++i) {
2267 MonoIMTCheckItem *item = imt_entries [i];
2268 if (item->is_equals) {
2269 if (item->check_target_idx) {
2270 if (!item->compare_done)
2271 item->chunk_size += CMP_SIZE;
2272 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
2273 } else {
2274 if (fail_tramp)
2275 item->chunk_size += 16;
2276 item->chunk_size += JUMP_IMM_SIZE;
2277 #if ENABLE_WRONG_METHOD_CHECK
2278 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
2279 #endif
2281 } else {
2282 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
2283 imt_entries [item->check_target_idx]->compare_done = TRUE;
2285 size += item->chunk_size;
2287 if (fail_tramp)
2288 code = mono_method_alloc_generic_virtual_trampoline (domain, size * 4);
2289 else
2290 code = mono_domain_code_reserve (domain, size * 4);
2291 start = code;
2292 for (i = 0; i < count; ++i) {
2293 MonoIMTCheckItem *item = imt_entries [i];
2294 item->code_target = (guint8*)code;
2295 if (item->is_equals) {
2296 gboolean fail_case = !item->check_target_idx && fail_tramp;
2298 if (item->check_target_idx || fail_case) {
2299 if (!item->compare_done || fail_case) {
2300 sparc_set (code, (guint32)item->key, sparc_g5);
2301 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2303 item->jmp_code = (guint8*)code;
2304 sparc_branch (code, 0, sparc_bne, 0);
2305 sparc_nop (code);
2306 if (item->has_target_code) {
2307                                 sparc_set (code, item->value.target_code, sparc_g5);
2308 } else {
2309 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2310 sparc_ld (code, sparc_g5, 0, sparc_g5);
2312 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2313 sparc_nop (code);
2315 if (fail_case) {
2316 sparc_patch (item->jmp_code, code);
2317 sparc_set (code, fail_tramp, sparc_g5);
2318 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2319 sparc_nop (code);
2320 item->jmp_code = NULL;
2322 } else {
2323 /* enable the commented code to assert on wrong method */
2324 #if ENABLE_WRONG_METHOD_CHECK
2325 g_assert_not_reached ();
2326 #endif
2327 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2328 sparc_ld (code, sparc_g5, 0, sparc_g5);
2329 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2330 sparc_nop (code);
2331 #if ENABLE_WRONG_METHOD_CHECK
2332 g_assert_not_reached ();
2333 #endif
2335 } else {
2336 sparc_set (code, (guint32)item->key, sparc_g5);
2337 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2338 item->jmp_code = (guint8*)code;
2339 sparc_branch (code, 0, sparc_beu, 0);
2340 sparc_nop (code);
2343 /* patch the branches to get to the target items */
2344 for (i = 0; i < count; ++i) {
2345 MonoIMTCheckItem *item = imt_entries [i];
2346 if (item->jmp_code) {
2347 if (item->check_target_idx) {
2348 sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
2353 mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
2355 mono_stats.imt_trampolines_size += (code - start) * 4;
2356 g_assert (code - start <= size);
2358 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);
2360 return start;
2363 MonoMethod*
2364 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
2366 #ifdef SPARCV9
2367 g_assert_not_reached ();
2368 #endif
2370 return (MonoMethod*)regs [sparc_g1];
2373 gpointer
2374 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
2376 mono_sparc_flushw ();
2378 return (gpointer)regs [sparc_o0];
2382 * Some conventions used in the following code.
2383  * - The only scratch registers we have are o7 and g1. We try to
2384 * stick to o7 when we can, and use g1 when necessary.
2387 void
2388 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2390 MonoInst *ins;
2391 MonoCallInst *call;
2392 guint offset;
2393 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2394 MonoInst *last_ins = NULL;
2395 int max_len, cpos;
2396 const char *spec;
2398 if (cfg->verbose_level > 2)
2399 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2401 cpos = bb->max_offset;
2403 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2404 NOT_IMPLEMENTED;
2407 MONO_BB_FOR_EACH_INS (bb, ins) {
2408 guint8* code_start;
2410 offset = (guint8*)code - cfg->native_code;
2412 spec = ins_get_spec (ins->opcode);
2414 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2416 if (offset > (cfg->code_size - max_len - 16)) {
2417 cfg->code_size *= 2;
2418 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2419 code = (guint32*)(cfg->native_code + offset);
2421 code_start = (guint8*)code;
2422 // if (ins->cil_code)
2423 // g_print ("cil code\n");
2424 mono_debug_record_line_number (cfg, ins, offset);
2426 switch (ins->opcode) {
2427 case OP_STOREI1_MEMBASE_IMM:
2428 EMIT_STORE_MEMBASE_IMM (ins, stb);
2429 break;
2430 case OP_STOREI2_MEMBASE_IMM:
2431 EMIT_STORE_MEMBASE_IMM (ins, sth);
2432 break;
2433 case OP_STORE_MEMBASE_IMM:
2434 EMIT_STORE_MEMBASE_IMM (ins, sti);
2435 break;
2436 case OP_STOREI4_MEMBASE_IMM:
2437 EMIT_STORE_MEMBASE_IMM (ins, st);
2438 break;
2439 case OP_STOREI8_MEMBASE_IMM:
2440 #ifdef SPARCV9
2441 EMIT_STORE_MEMBASE_IMM (ins, stx);
2442 #else
2443 /* Only generated by peephole opts */
2444 g_assert ((ins->inst_offset % 8) == 0);
2445 g_assert (ins->inst_imm == 0);
2446 EMIT_STORE_MEMBASE_IMM (ins, stx);
2447 #endif
2448 break;
2449 case OP_STOREI1_MEMBASE_REG:
2450 EMIT_STORE_MEMBASE_REG (ins, stb);
2451 break;
2452 case OP_STOREI2_MEMBASE_REG:
2453 EMIT_STORE_MEMBASE_REG (ins, sth);
2454 break;
2455 case OP_STOREI4_MEMBASE_REG:
2456 EMIT_STORE_MEMBASE_REG (ins, st);
2457 break;
2458 case OP_STOREI8_MEMBASE_REG:
2459 #ifdef SPARCV9
2460 EMIT_STORE_MEMBASE_REG (ins, stx);
2461 #else
2462 /* Only used by OP_MEMSET */
2463 EMIT_STORE_MEMBASE_REG (ins, std);
2464 #endif
2465 break;
2466 case OP_STORE_MEMBASE_REG:
2467 EMIT_STORE_MEMBASE_REG (ins, sti);
2468 break;
2469 case OP_LOADU4_MEM:
2470 sparc_set (code, ins->inst_c0, ins->dreg);
2471 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2472 break;
2473 case OP_LOADI4_MEMBASE:
2474 #ifdef SPARCV9
2475 EMIT_LOAD_MEMBASE (ins, ldsw);
2476 #else
2477 EMIT_LOAD_MEMBASE (ins, ld);
2478 #endif
2479 break;
2480 case OP_LOADU4_MEMBASE:
2481 EMIT_LOAD_MEMBASE (ins, ld);
2482 break;
2483 case OP_LOADU1_MEMBASE:
2484 EMIT_LOAD_MEMBASE (ins, ldub);
2485 break;
2486 case OP_LOADI1_MEMBASE:
2487 EMIT_LOAD_MEMBASE (ins, ldsb);
2488 break;
2489 case OP_LOADU2_MEMBASE:
2490 EMIT_LOAD_MEMBASE (ins, lduh);
2491 break;
2492 case OP_LOADI2_MEMBASE:
2493 EMIT_LOAD_MEMBASE (ins, ldsh);
2494 break;
2495 case OP_LOAD_MEMBASE:
2496 #ifdef SPARCV9
2497 EMIT_LOAD_MEMBASE (ins, ldx);
2498 #else
2499 EMIT_LOAD_MEMBASE (ins, ld);
2500 #endif
2501 break;
2502 #ifdef SPARCV9
2503 case OP_LOADI8_MEMBASE:
2504 EMIT_LOAD_MEMBASE (ins, ldx);
2505 break;
2506 #endif
2507 case OP_ICONV_TO_I1:
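/* Sign extend by shifting the low byte into the top of the register and arithmetic shifting it back down */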
2508 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2509 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2510 break;
2511 case OP_ICONV_TO_I2:
2512 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2513 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2514 break;
2515 case OP_ICONV_TO_U1:
2516 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2517 break;
2518 case OP_ICONV_TO_U2:
2519 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2520 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2521 break;
2522 case OP_LCONV_TO_OVF_U4:
2523 case OP_ICONV_TO_OVF_U4:
2524 /* Only used on V9 */
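/* Throw OverflowException unless 0 <= sreg1 < (1 << 32) */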
2525 sparc_cmp_imm (code, ins->sreg1, 0);
2526 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2527 MONO_PATCH_INFO_EXC, "OverflowException");
2528 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2529 /* Delay slot */
2530 sparc_set (code, 1, sparc_o7);
2531 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2532 sparc_cmp (code, ins->sreg1, sparc_o7);
2533 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2534 MONO_PATCH_INFO_EXC, "OverflowException");
2535 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2536 sparc_nop (code);
2537 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2538 break;
2539 case OP_LCONV_TO_OVF_I4_UN:
2540 case OP_ICONV_TO_OVF_I4_UN:
2541 /* Only used on V9 */
2542 NOT_IMPLEMENTED;
2543 break;
2544 case OP_COMPARE:
2545 case OP_LCOMPARE:
2546 case OP_ICOMPARE:
2547 sparc_cmp (code, ins->sreg1, ins->sreg2);
2548 break;
2549 case OP_COMPARE_IMM:
2550 case OP_ICOMPARE_IMM:
2551 if (sparc_is_imm13 (ins->inst_imm))
2552 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2553 else {
2554 sparc_set (code, ins->inst_imm, sparc_o7);
2555 sparc_cmp (code, ins->sreg1, sparc_o7);
2557 break;
2558 case OP_BREAK:
2560 * gdb does not like encountering 'ta 1' in the debugged code. So
2561                          * instead of emitting a trap, we emit a call to a C function and place a
2562 * breakpoint there.
2564 //sparc_ta (code, 1);
2565 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
2566 EMIT_CALL();
2567 break;
2568 case OP_ADDCC:
2569 case OP_IADDCC:
2570 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2571 break;
2572 case OP_IADD:
2573 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2574 break;
2575 case OP_ADDCC_IMM:
2576 case OP_ADD_IMM:
2577 case OP_IADD_IMM:
2578 /* according to inssel-long32.brg, this should set cc */
2579 EMIT_ALU_IMM (ins, add, TRUE);
2580 break;
2581 case OP_ADC:
2582 case OP_IADC:
2583 /* according to inssel-long32.brg, this should set cc */
2584 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2585 break;
2586 case OP_ADC_IMM:
2587 case OP_IADC_IMM:
2588 EMIT_ALU_IMM (ins, addx, TRUE);
2589 break;
2590 case OP_SUBCC:
2591 case OP_ISUBCC:
2592 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2593 break;
2594 case OP_ISUB:
2595 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2596 break;
2597 case OP_SUBCC_IMM:
2598 case OP_SUB_IMM:
2599 case OP_ISUB_IMM:
2600 /* according to inssel-long32.brg, this should set cc */
2601 EMIT_ALU_IMM (ins, sub, TRUE);
2602 break;
2603 case OP_SBB:
2604 case OP_ISBB:
2605 /* according to inssel-long32.brg, this should set cc */
2606 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2607 break;
2608 case OP_SBB_IMM:
2609 case OP_ISBB_IMM:
2610 EMIT_ALU_IMM (ins, subx, TRUE);
2611 break;
2612 case OP_IAND:
2613 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2614 break;
2615 case OP_AND_IMM:
2616 case OP_IAND_IMM:
2617 EMIT_ALU_IMM (ins, and, FALSE);
2618 break;
2619 case OP_IDIV:
2620 /* Sign extend sreg1 into %y */
2621 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2622 sparc_wry (code, sparc_o7, sparc_g0);
2623 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2624 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2625 break;
2626 case OP_IDIV_UN:
2627 sparc_wry (code, sparc_g0, sparc_g0);
2628 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2629 break;
2630 case OP_DIV_IMM:
2631 case OP_IDIV_IMM: {
2632 int i, imm;
2634 /* Transform division into a shift */
2635 for (i = 1; i < 30; ++i) {
2636 imm = (1 << i);
2637 if (ins->inst_imm == imm)
2638 break;
2640 if (i < 30) {
2641 if (i == 1) {
2642 /* gcc 2.95.3 */
2643 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2644 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2645 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2647 else {
2648 /* http://compilers.iecc.com/comparch/article/93-04-079 */
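/* o7 = (sreg1 < 0) ? (1 << i) - 1 : 0; adding it before the arithmetic shift makes the shift round towards zero like signed division */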
2649 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2650 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2651 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2652 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2655 else {
2656 /* Sign extend sreg1 into %y */
2657 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2658 sparc_wry (code, sparc_o7, sparc_g0);
2659 EMIT_ALU_IMM (ins, sdiv, TRUE);
2660 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2662 break;
2664 case OP_IDIV_UN_IMM:
2665 sparc_wry (code, sparc_g0, sparc_g0);
2666 EMIT_ALU_IMM (ins, udiv, FALSE);
2667 break;
2668 case OP_IREM:
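/* remainder = sreg1 - (sreg1 / sreg2) * sreg2, with the quotient computed into %o7 */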
2669 /* Sign extend sreg1 into %y */
2670 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2671 sparc_wry (code, sparc_o7, sparc_g0);
2672 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2673 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2674 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2675 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2676 break;
2677 case OP_IREM_UN:
2678 sparc_wry (code, sparc_g0, sparc_g0);
2679 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2680 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2681 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2682 break;
2683 case OP_REM_IMM:
2684 case OP_IREM_IMM:
2685 /* Sign extend sreg1 into %y */
2686 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2687 sparc_wry (code, sparc_o7, sparc_g0);
2688 if (!sparc_is_imm13 (ins->inst_imm)) {
2689 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2690 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2691 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2692 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2694 else {
2695 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2696 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2697 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2699 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2700 break;
2701 case OP_IREM_UN_IMM:
2702 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2703 sparc_wry (code, sparc_g0, sparc_g0);
2704 sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2705 sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
2706 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2707 break;
2708 case OP_IOR:
2709 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2710 break;
2711 case OP_OR_IMM:
2712 case OP_IOR_IMM:
2713 EMIT_ALU_IMM (ins, or, FALSE);
2714 break;
2715 case OP_IXOR:
2716 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2717 break;
2718 case OP_XOR_IMM:
2719 case OP_IXOR_IMM:
2720 EMIT_ALU_IMM (ins, xor, FALSE);
2721 break;
2722 case OP_ISHL:
2723 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2724 break;
2725 case OP_SHL_IMM:
2726 case OP_ISHL_IMM:
2727 if (ins->inst_imm < (1 << 5))
2728 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2729 else {
2730 sparc_set (code, ins->inst_imm, sparc_o7);
2731 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2733 break;
2734 case OP_ISHR:
2735 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2736 break;
2737 case OP_ISHR_IMM:
2738 case OP_SHR_IMM:
2739 if (ins->inst_imm < (1 << 5))
2740 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2741 else {
2742 sparc_set (code, ins->inst_imm, sparc_o7);
2743 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2745 break;
2746 case OP_SHR_UN_IMM:
2747 case OP_ISHR_UN_IMM:
2748 if (ins->inst_imm < (1 << 5))
2749 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2750 else {
2751 sparc_set (code, ins->inst_imm, sparc_o7);
2752 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2754 break;
2755 case OP_ISHR_UN:
2756 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2757 break;
2758 case OP_LSHL:
2759 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2760 break;
2761 case OP_LSHL_IMM:
2762 if (ins->inst_imm < (1 << 6))
2763 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2764 else {
2765 sparc_set (code, ins->inst_imm, sparc_o7);
2766 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2768 break;
2769 case OP_LSHR:
2770 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2771 break;
2772 case OP_LSHR_IMM:
2773 if (ins->inst_imm < (1 << 6))
2774 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2775 else {
2776 sparc_set (code, ins->inst_imm, sparc_o7);
2777 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2779 break;
2780 case OP_LSHR_UN:
2781 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2782 break;
2783 case OP_LSHR_UN_IMM:
2784 if (ins->inst_imm < (1 << 6))
2785 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2786 else {
2787 sparc_set (code, ins->inst_imm, sparc_o7);
2788 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2790 break;
2791 case OP_INOT:
2792 /* can't use sparc_not */
2793 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2794 break;
2795 case OP_INEG:
2796 /* can't use sparc_neg */
2797 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2798 break;
2799 case OP_IMUL:
2800 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2801 break;
2802 case OP_IMUL_IMM:
2803 case OP_MUL_IMM: {
2804 int i, imm;
2806 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2807 break;
2809 /* Transform multiplication into a shift */
2810 for (i = 0; i < 30; ++i) {
2811 imm = (1 << i);
2812 if (ins->inst_imm == imm)
2813 break;
2815 if (i < 30)
2816 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2817 else
2818 EMIT_ALU_IMM (ins, smul, FALSE);
2819 break;
2821 case OP_IMUL_OVF:
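/* %y receives the high 32 bits of the 64 bit product; overflow occurred iff it differs from the sign extension of the low word */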
2822 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2823 sparc_rdy (code, sparc_g1);
2824 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2825 sparc_cmp (code, sparc_g1, sparc_o7);
2826 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2827 break;
2828 case OP_IMUL_OVF_UN:
2829 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2830 sparc_rdy (code, sparc_o7);
2831 sparc_cmp (code, sparc_o7, sparc_g0);
2832 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2833 break;
2834 case OP_ICONST:
2835 sparc_set (code, ins->inst_c0, ins->dreg);
2836 break;
2837 case OP_I8CONST:
2838 sparc_set (code, ins->inst_l, ins->dreg);
2839 break;
2840 case OP_AOTCONST:
2841 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2842 sparc_set_template (code, ins->dreg);
2843 break;
2844 case OP_JUMP_TABLE:
2845 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2846 sparc_set_template (code, ins->dreg);
2847 break;
2848 case OP_ICONV_TO_I4:
2849 case OP_ICONV_TO_U4:
2850 case OP_MOVE:
2851 if (ins->sreg1 != ins->dreg)
2852 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2853 break;
2854 case OP_FMOVE:
2855 #ifdef SPARCV9
2856 if (ins->sreg1 != ins->dreg)
2857 sparc_fmovd (code, ins->sreg1, ins->dreg);
2858 #else
2859 sparc_fmovs (code, ins->sreg1, ins->dreg);
2860 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2861 #endif
2862 break;
2863 case OP_JMP:
2864 if (cfg->method->save_lmf)
2865 NOT_IMPLEMENTED;
2867 code = emit_load_volatile_arguments (cfg, code);
2868 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2869 sparc_set_template (code, sparc_o7);
2870 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2871 /* Restore parent frame in delay slot */
2872 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2873 break;
2874 case OP_CHECK_THIS:
2875 /* ensure ins->sreg1 is not NULL */
2876 /* Might be misaligned in case of vtypes so use a byte load */
2877 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
2878 break;
2879 case OP_ARGLIST:
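/* Store the address of the signature cookie area (%fp + sig_cookie) into the location pointed to by sreg1 */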
2880 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2881 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2882 break;
2883 case OP_FCALL:
2884 case OP_LCALL:
2885 case OP_VCALL:
2886 case OP_VCALL2:
2887 case OP_VOIDCALL:
2888 case OP_CALL:
2889 call = (MonoCallInst*)ins;
2890 g_assert (!call->virtual);
2891 code = emit_save_sp_to_lmf (cfg, code);
2892 if (ins->flags & MONO_INST_HAS_METHOD)
2893 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2894 else
2895 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2897 code = emit_vret_token (ins, code);
2898 code = emit_move_return_value (ins, code);
2899 break;
2900 case OP_FCALL_REG:
2901 case OP_LCALL_REG:
2902 case OP_VCALL_REG:
2903 case OP_VCALL2_REG:
2904 case OP_VOIDCALL_REG:
2905 case OP_CALL_REG:
2906 call = (MonoCallInst*)ins;
2907 code = emit_save_sp_to_lmf (cfg, code);
2908 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2910 * We emit a special kind of nop in the delay slot to tell the
2911 * trampoline code that this is a virtual call, thus an unbox
2912 * trampoline might need to be called.
2914 if (call->virtual)
2915 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2916 else
2917 sparc_nop (code);
2919 code = emit_vret_token (ins, code);
2920 code = emit_move_return_value (ins, code);
2921 break;
2922 case OP_FCALL_MEMBASE:
2923 case OP_LCALL_MEMBASE:
2924 case OP_VCALL_MEMBASE:
2925 case OP_VCALL2_MEMBASE:
2926 case OP_VOIDCALL_MEMBASE:
2927 case OP_CALL_MEMBASE:
2928 call = (MonoCallInst*)ins;
2929 code = emit_save_sp_to_lmf (cfg, code);
2930 if (sparc_is_imm13 (ins->inst_offset)) {
2931 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2932 } else {
2933 sparc_set (code, ins->inst_offset, sparc_o7);
2934 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
2936 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2937 if (call->virtual)
2938 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2939 else
2940 sparc_nop (code);
2942 code = emit_vret_token (ins, code);
2943 code = emit_move_return_value (ins, code);
2944 break;
2945 case OP_SETFRET:
2946 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
2947 sparc_fdtos (code, ins->sreg1, sparc_f0);
2948 else {
2949 #ifdef SPARCV9
2950 sparc_fmovd (code, ins->sreg1, ins->dreg);
2951 #else
2952 /* FIXME: Why not use fmovd ? */
2953 sparc_fmovs (code, ins->sreg1, ins->dreg);
2954 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2955 #endif
2957 break;
2958 case OP_LOCALLOC: {
2959 guint32 size_reg;
2960 gint32 offset2;
2962 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2963 /* Perform stack touching */
2964 NOT_IMPLEMENTED;
2965 #endif
2967 /* Keep alignment */
2968 /* Add 4 to compensate for the rounding of localloc_offset */
2969 sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
2970 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
2971 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2973 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
2974 #ifdef SPARCV9
2975 size_reg = sparc_g4;
2976 #else
2977 size_reg = sparc_g1;
2978 #endif
2979 sparc_mov_reg_reg (code, ins->dreg, size_reg);
2981 else
2982 size_reg = ins->sreg1;
2984 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
2985 /* Keep %sp valid at all times */
2986 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
2987                         /* Round localloc_offset too so the result is at least 8-byte aligned */
2988 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
2989 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
2990 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
2992 if (ins->flags & MONO_INST_INIT) {
2993 guint32 *br [3];
2994 /* Initialize memory region */
2995 sparc_cmp_imm (code, size_reg, 0);
2996 br [0] = code;
2997 sparc_branch (code, 0, sparc_be, 0);
2998 /* delay slot */
2999 sparc_set (code, 0, sparc_o7);
3000 sparc_sub_imm (code, 0, size_reg, mono_hwcap_sparc_is_v9 ? 8 : 4, size_reg);
3001 /* start of loop */
3002 br [1] = code;
3003 if (mono_hwcap_sparc_is_v9)
3004 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3005 else
3006 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3007 sparc_cmp (code, sparc_o7, size_reg);
3008 br [2] = code;
3009 sparc_branch (code, 0, sparc_bl, 0);
3010 sparc_patch (br [2], br [1]);
3011 /* delay slot */
3012 sparc_add_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3013 sparc_patch (br [0], code);
3015 break;
3017 case OP_LOCALLOC_IMM: {
3018 gint32 offset = ins->inst_imm;
3019 gint32 offset2;
3021 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3022 /* Perform stack touching */
3023 NOT_IMPLEMENTED;
3024 #endif
3026 /* To compensate for the rounding of localloc_offset */
3027 offset += sizeof (gpointer);
3028 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3029 if (sparc_is_imm13 (offset))
3030 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3031 else {
3032 sparc_set (code, offset, sparc_o7);
3033 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3035                         /* Round localloc_offset too so the result is at least 8-byte aligned */
3036 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3037 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3038 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3039 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
3040 guint32 *br [2];
3041 int i;
3043 if (offset <= 16) {
3044 i = 0;
3045 while (i < offset) {
3046 if (mono_hwcap_sparc_is_v9) {
3047 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3048 i += 8;
3050 else {
3051 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3052 i += 4;
3056 else {
3057 sparc_set (code, offset, sparc_o7);
3058 sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3059 /* beginning of loop */
3060 br [0] = code;
3061 if (mono_hwcap_sparc_is_v9)
3062 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3063 else
3064 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3065 sparc_cmp_imm (code, sparc_o7, 0);
3066 br [1] = code;
3067 sparc_branch (code, 0, sparc_bne, 0);
3068 /* delay slot */
3069 sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3070 sparc_patch (br [1], br [0]);
3073 break;
3075 case OP_THROW:
3076 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3077 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3078 (gpointer)"mono_arch_throw_exception");
3079 EMIT_CALL ();
3080 break;
3081 case OP_RETHROW:
3082 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3083 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3084 (gpointer)"mono_arch_rethrow_exception");
3085 EMIT_CALL ();
3086 break;
3087 case OP_START_HANDLER: {
3089 * The START_HANDLER instruction marks the beginning of a handler
3090 * block. It is called using a call instruction, so %o7 contains
3091 * the return address. Since the handler executes in the same stack
3092 * frame as the method itself, we can't use save/restore to save
3093 * the return address. Instead, we save it into a dedicated
3094 * variable.
3096 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3097 if (!sparc_is_imm13 (spvar->inst_offset)) {
3098 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3099 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3101 else
3102 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3103 break;
3105 case OP_ENDFILTER: {
3106 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3107 if (!sparc_is_imm13 (spvar->inst_offset)) {
3108 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3109 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3111 else
3112 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3113 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3114 /* Delay slot */
3115 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3116 break;
3118 case OP_ENDFINALLY: {
3119 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3120 if (!sparc_is_imm13 (spvar->inst_offset)) {
3121 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3122 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3124 else
3125 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3126 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3127 sparc_nop (code);
3128 break;
3130 case OP_CALL_HANDLER:
3131 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3132 /* This is a jump inside the method, so call_simple works even on V9 */
3133 sparc_call_simple (code, 0);
3134 sparc_nop (code);
3135 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3136 break;
3137 case OP_LABEL:
3138 ins->inst_c0 = (guint8*)code - cfg->native_code;
3139 break;
3140 case OP_RELAXED_NOP:
3141 case OP_NOP:
3142 case OP_DUMMY_USE:
3143 case OP_DUMMY_STORE:
3144 case OP_NOT_REACHED:
3145 case OP_NOT_NULL:
3146 break;
3147 case OP_BR:
3148 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3149 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3150 break;
3151 if (ins->inst_target_bb->native_offset) {
3152 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3153 g_assert (sparc_is_imm22 (disp));
3154 sparc_branch (code, 1, sparc_ba, disp);
3155 } else {
3156 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3157 sparc_branch (code, 1, sparc_ba, 0);
3159 sparc_nop (code);
3160 break;
3161 case OP_BR_REG:
3162 sparc_jmp (code, ins->sreg1, sparc_g0);
3163 sparc_nop (code);
3164 break;
3165 case OP_CEQ:
3166 case OP_CLT:
3167 case OP_CLT_UN:
3168 case OP_CGT:
3169 case OP_CGT_UN:
3170 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3171 sparc_clr_reg (code, ins->dreg);
3172 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3174 else {
3175 sparc_clr_reg (code, ins->dreg);
3176 #ifdef SPARCV9
3177 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3178 #else
3179 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3180 #endif
3181 /* delay slot */
3182 sparc_set (code, 1, ins->dreg);
3184 break;
3185 case OP_ICEQ:
3186 case OP_ICLT:
3187 case OP_ICLT_UN:
3188 case OP_ICGT:
3189 case OP_ICGT_UN:
3190 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3191 sparc_clr_reg (code, ins->dreg);
3192 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3194 else {
3195 sparc_clr_reg (code, ins->dreg);
3196 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3197 /* delay slot */
3198 sparc_set (code, 1, ins->dreg);
3200 break;
3201 case OP_COND_EXC_EQ:
3202 case OP_COND_EXC_NE_UN:
3203 case OP_COND_EXC_LT:
3204 case OP_COND_EXC_LT_UN:
3205 case OP_COND_EXC_GT:
3206 case OP_COND_EXC_GT_UN:
3207 case OP_COND_EXC_GE:
3208 case OP_COND_EXC_GE_UN:
3209 case OP_COND_EXC_LE:
3210 case OP_COND_EXC_LE_UN:
3211 case OP_COND_EXC_OV:
3212 case OP_COND_EXC_NO:
3213 case OP_COND_EXC_C:
3214 case OP_COND_EXC_NC:
3215 case OP_COND_EXC_IEQ:
3216 case OP_COND_EXC_INE_UN:
3217 case OP_COND_EXC_ILT:
3218 case OP_COND_EXC_ILT_UN:
3219 case OP_COND_EXC_IGT:
3220 case OP_COND_EXC_IGT_UN:
3221 case OP_COND_EXC_IGE:
3222 case OP_COND_EXC_IGE_UN:
3223 case OP_COND_EXC_ILE:
3224 case OP_COND_EXC_ILE_UN:
3225 case OP_COND_EXC_IOV:
3226 case OP_COND_EXC_INO:
3227 case OP_COND_EXC_IC:
3228 case OP_COND_EXC_INC:
3229 #ifdef SPARCV9
3230 NOT_IMPLEMENTED;
3231 #else
3232 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3233 #endif
3234 break;
3235 case OP_SPARC_COND_EXC_EQZ:
3236 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3237 break;
3238 case OP_SPARC_COND_EXC_GEZ:
3239 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3240 break;
3241 case OP_SPARC_COND_EXC_GTZ:
3242 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3243 break;
3244 case OP_SPARC_COND_EXC_LEZ:
3245 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3246 break;
3247 case OP_SPARC_COND_EXC_LTZ:
3248 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3249 break;
3250 case OP_SPARC_COND_EXC_NEZ:
3251 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3252 break;
3254 case OP_IBEQ:
3255 case OP_IBNE_UN:
3256 case OP_IBLT:
3257 case OP_IBLT_UN:
3258 case OP_IBGT:
3259 case OP_IBGT_UN:
3260 case OP_IBGE:
3261 case OP_IBGE_UN:
3262 case OP_IBLE:
3263 case OP_IBLE_UN: {
3264 if (mono_hwcap_sparc_is_v9)
3265 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3266 else
3267 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3268 break;
3271 case OP_SPARC_BRZ:
3272 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3273 break;
3274 case OP_SPARC_BRLEZ:
3275 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3276 break;
3277 case OP_SPARC_BRLZ:
3278 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3279 break;
3280 case OP_SPARC_BRNZ:
3281 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3282 break;
3283 case OP_SPARC_BRGZ:
3284 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3285 break;
3286 case OP_SPARC_BRGEZ:
3287 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3288 break;
3290 /* floating point opcodes */
3291 case OP_R8CONST:
3292 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3293 #ifdef SPARCV9
3294 sparc_set_template (code, sparc_o7);
3295 #else
3296 sparc_sethi (code, 0, sparc_o7);
3297 #endif
3298 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3299 break;
3300 case OP_R4CONST:
3301 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3302 #ifdef SPARCV9
3303 sparc_set_template (code, sparc_o7);
3304 #else
3305 sparc_sethi (code, 0, sparc_o7);
3306 #endif
3307 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3309 /* Extend to double */
3310 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3311 break;
3312 case OP_STORER8_MEMBASE_REG:
3313 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3314 sparc_set (code, ins->inst_offset, sparc_o7);
3315 /* SPARCV9 handles misaligned fp loads/stores */
3316 if (!v64 && (ins->inst_offset % 8)) {
3317 /* Misaligned */
3318 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3319 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3320 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3321 } else
3322 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3324 else {
3325 if (!v64 && (ins->inst_offset % 8)) {
3326 /* Misaligned */
3327 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3328 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3329 } else
3330 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3332 break;
3333 case OP_LOADR8_MEMBASE:
3334 EMIT_LOAD_MEMBASE (ins, lddf);
3335 break;
3336 case OP_STORER4_MEMBASE_REG:
3337 /* This requires a double->single conversion */
3338 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3339 if (!sparc_is_imm13 (ins->inst_offset)) {
3340 sparc_set (code, ins->inst_offset, sparc_o7);
3341 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3343 else
3344 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3345 break;
3346 case OP_LOADR4_MEMBASE: {
3347 /* ldf needs a single precision register */
3348 int dreg = ins->dreg;
3349 ins->dreg = FP_SCRATCH_REG;
3350 EMIT_LOAD_MEMBASE (ins, ldf);
3351 ins->dreg = dreg;
3352 /* Extend to double */
3353 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3354 break;
3356 case OP_ICONV_TO_R4: {
3357 MonoInst *spill = cfg->arch.float_spill_slot;
3358 gint32 reg = spill->inst_basereg;
3359 gint32 offset = spill->inst_offset;
3361 g_assert (spill->opcode == OP_REGOFFSET);
3362 #ifdef SPARCV9
3363 if (!sparc_is_imm13 (offset)) {
3364 sparc_set (code, offset, sparc_o7);
3365                                 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3366                                 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3367 } else {
3368 sparc_stx_imm (code, ins->sreg1, reg, offset);
3369 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3371 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3372 #else
3373 if (!sparc_is_imm13 (offset)) {
3374 sparc_set (code, offset, sparc_o7);
3375 sparc_st (code, ins->sreg1, reg, sparc_o7);
3376 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3377 } else {
3378 sparc_st_imm (code, ins->sreg1, reg, offset);
3379 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3381 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3382 #endif
3383 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3384 break;
3386 case OP_ICONV_TO_R8: {
3387 MonoInst *spill = cfg->arch.float_spill_slot;
3388 gint32 reg = spill->inst_basereg;
3389 gint32 offset = spill->inst_offset;
3391 g_assert (spill->opcode == OP_REGOFFSET);
3393 #ifdef SPARCV9
3394 if (!sparc_is_imm13 (offset)) {
3395 sparc_set (code, offset, sparc_o7);
3396 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3397 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3398 } else {
3399 sparc_stx_imm (code, ins->sreg1, reg, offset);
3400 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3402 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3403 #else
3404 if (!sparc_is_imm13 (offset)) {
3405 sparc_set (code, offset, sparc_o7);
3406 sparc_st (code, ins->sreg1, reg, sparc_o7);
3407 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3408 } else {
3409 sparc_st_imm (code, ins->sreg1, reg, offset);
3410 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3412 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3413 #endif
3414 break;
3416 case OP_FCONV_TO_I1:
3417 case OP_FCONV_TO_U1:
3418 case OP_FCONV_TO_I2:
3419 case OP_FCONV_TO_U2:
3420 #ifndef SPARCV9
3421 case OP_FCONV_TO_I:
3422 case OP_FCONV_TO_U:
3423 #endif
3424 case OP_FCONV_TO_I4:
3425 case OP_FCONV_TO_U4: {
3426 MonoInst *spill = cfg->arch.float_spill_slot;
3427 gint32 reg = spill->inst_basereg;
3428 gint32 offset = spill->inst_offset;
3430 g_assert (spill->opcode == OP_REGOFFSET);
3432 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3433 if (!sparc_is_imm13 (offset)) {
3434 sparc_set (code, offset, sparc_o7);
3435 sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7);
3436 sparc_ld (code, reg, sparc_o7, ins->dreg);
3437 } else {
3438 sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset);
3439 sparc_ld_imm (code, reg, offset, ins->dreg);
3442 switch (ins->opcode) {
3443 case OP_FCONV_TO_I1:
3444 case OP_FCONV_TO_U1:
3445 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3446 break;
3447 case OP_FCONV_TO_I2:
3448 case OP_FCONV_TO_U2:
3449 sparc_set (code, 0xffff, sparc_o7);
3450 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3451 break;
3452 default:
3453 break;
3455 break;
3457 case OP_FCONV_TO_I8:
3458 case OP_FCONV_TO_U8:
3459 /* Emulated */
3460 g_assert_not_reached ();
3461 break;
3462 case OP_FCONV_TO_R4:
3463 /* FIXME: Change precision ? */
3464 #ifdef SPARCV9
3465 sparc_fmovd (code, ins->sreg1, ins->dreg);
3466 #else
3467 sparc_fmovs (code, ins->sreg1, ins->dreg);
3468 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3469 #endif
3470 break;
3471 case OP_LCONV_TO_R_UN: {
3472 /* Emulated */
3473 g_assert_not_reached ();
3474 break;
3476 case OP_LCONV_TO_OVF_I:
3477 case OP_LCONV_TO_OVF_I4_2: {
3478 guint32 *br [3], *label [1];
3481                          * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
3483 sparc_cmp_imm (code, ins->sreg1, 0);
3484 br [0] = code;
3485 sparc_branch (code, 1, sparc_bneg, 0);
3486 sparc_nop (code);
3488 /* positive */
3489 /* ms word must be 0 */
3490 sparc_cmp_imm (code, ins->sreg2, 0);
3491 br [1] = code;
3492 sparc_branch (code, 1, sparc_be, 0);
3493 sparc_nop (code);
3495 label [0] = code;
3497 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3499 /* negative */
3500 sparc_patch (br [0], code);
3502                         /* ms word must be 0xffffffff */
3503 sparc_cmp_imm (code, ins->sreg2, -1);
3504 br [2] = code;
3505 sparc_branch (code, 1, sparc_bne, 0);
3506 sparc_nop (code);
3507 sparc_patch (br [2], label [0]);
3509 /* Ok */
3510 sparc_patch (br [1], code);
3511 if (ins->sreg1 != ins->dreg)
3512 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3513 break;
3515 case OP_FADD:
3516 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3517 break;
3518 case OP_FSUB:
3519 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3520 break;
3521 case OP_FMUL:
3522 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3523 break;
3524 case OP_FDIV:
3525 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3526 break;
3527 case OP_FNEG:
3528 #ifdef SPARCV9
3529 sparc_fnegd (code, ins->sreg1, ins->dreg);
3530 #else
3531                         /* FIXME: why not use fnegd ? */
3532 sparc_fnegs (code, ins->sreg1, ins->dreg);
3533 #endif
3534 break;
3535 case OP_FREM:
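/* dreg = sreg1 - (sreg1 / sreg2) * sreg2, computed entirely in double precision */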
3536 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3537 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3538 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3539 break;
3540 case OP_FCOMPARE:
3541 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3542 break;
3543 case OP_FCEQ:
3544 case OP_FCLT:
3545 case OP_FCLT_UN:
3546 case OP_FCGT:
3547 case OP_FCGT_UN:
3548 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3549 sparc_clr_reg (code, ins->dreg);
3550 switch (ins->opcode) {
3551 case OP_FCLT_UN:
3552 case OP_FCGT_UN:
3553 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3554 /* delay slot */
3555 sparc_set (code, 1, ins->dreg);
3556 sparc_fbranch (code, 1, sparc_fbu, 2);
3557 /* delay slot */
3558 sparc_set (code, 1, ins->dreg);
3559 break;
3560 default:
3561 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3562 /* delay slot */
3563 sparc_set (code, 1, ins->dreg);
3565 break;
3566 case OP_FBEQ:
3567 case OP_FBLT:
3568 case OP_FBGT:
3569 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3570 break;
3571 case OP_FBGE: {
3572 /* clt.un + brfalse */
3573 guint32 *p = code;
3574 sparc_fbranch (code, 1, sparc_fbul, 0);
3575 /* delay slot */
3576 sparc_nop (code);
3577 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3578 sparc_patch (p, (guint8*)code);
3579 break;
3581 case OP_FBLE: {
3582 /* cgt.un + brfalse */
3583 guint32 *p = code;
3584 sparc_fbranch (code, 1, sparc_fbug, 0);
3585 /* delay slot */
3586 sparc_nop (code);
3587 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3588 sparc_patch (p, (guint8*)code);
3589 break;
3591 case OP_FBNE_UN:
3592 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3593 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3594 break;
3595 case OP_FBLT_UN:
3596 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3597 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3598 break;
3599 case OP_FBGT_UN:
3600 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3601 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3602 break;
3603 case OP_FBGE_UN:
3604 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3605 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3606 break;
3607 case OP_FBLE_UN:
3608 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3609 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3610 break;
3611 case OP_CKFINITE: {
3612 MonoInst *spill = cfg->arch.float_spill_slot;
3613 gint32 reg = spill->inst_basereg;
3614 gint32 offset = spill->inst_offset;
3616 g_assert (spill->opcode == OP_REGOFFSET);
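/* Spill the double to memory, load its upper 16 bits, extract the 11 bit exponent and throw if it is all ones (infinity or NaN) */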
3618 if (!sparc_is_imm13 (offset)) {
3619 sparc_set (code, offset, sparc_o7);
3620 sparc_stdf (code, ins->sreg1, reg, sparc_o7);
3621 sparc_lduh (code, reg, sparc_o7, sparc_o7);
3622 } else {
3623 sparc_stdf_imm (code, ins->sreg1, reg, offset);
3624 sparc_lduh_imm (code, reg, offset, sparc_o7);
3626 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3627 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3628 sparc_cmp_imm (code, sparc_o7, 2047);
3629 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "OverflowException");
3630 #ifdef SPARCV9
3631 sparc_fmovd (code, ins->sreg1, ins->dreg);
3632 #else
3633 sparc_fmovs (code, ins->sreg1, ins->dreg);
3634 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3635 #endif
3636 break;
3639 case OP_MEMORY_BARRIER:
3640 sparc_membar (code, sparc_membar_all);
3641 break;
3642 case OP_GC_SAFE_POINT:
3643 break;
3645 default:
3646 #ifdef __GNUC__
3647 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3648 #else
3649 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3650 #endif
3651 g_assert_not_reached ();
3654 if ((((guint8*)code) - code_start) > max_len) {
3655 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3656 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3657 g_assert_not_reached ();
3660 cpos += max_len;
3662 last_ins = ins;
3665 cfg->code_len = (guint8*)code - cfg->native_code;
3668 void
3669 mono_arch_register_lowlevel_calls (void)
3671 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
3674 void
3675 mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors, MonoError *error)
3677 MonoJumpInfo *patch_info;
3679 mono_error_init (error);
3681 /* FIXME: Move part of this to arch independent code */
3682 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3683 unsigned char *ip = patch_info->ip.i + code;
3684 gpointer target;
3686 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors, error);
3687 return_if_nok (error);
3689 switch (patch_info->type) {
3690 case MONO_PATCH_INFO_NONE:
3691 continue;
3692 case MONO_PATCH_INFO_METHOD_JUMP: {
3693 guint32 *ip2 = (guint32*)ip;
3694                         /* Might already have been patched */
3695 sparc_set_template (ip2, sparc_o7);
3696 break;
3698 default:
3699 break;
3701 sparc_patch ((guint32*)ip, target);
3705 void*
3706 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3708 int i;
3709 guint32 *code = (guint32*)p;
3710 MonoMethodSignature *sig = mono_method_signature (cfg->method);
3711 CallInfo *cinfo;
3713 /* Save registers to stack */
3714 for (i = 0; i < 6; ++i)
3715 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
3717 cinfo = get_call_info (cfg, sig, FALSE);
3719 /* Save float regs on V9, since they are caller saved */
3720 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3721 ArgInfo *ainfo = cinfo->args + i;
3722 gint32 stack_offset;
3724 stack_offset = ainfo->offset + ARGS_OFFSET;
3726 if (ainfo->storage == ArgInFloatReg) {
3727 if (!sparc_is_imm13 (stack_offset))
3728 NOT_IMPLEMENTED;
3729 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3731 else if (ainfo->storage == ArgInDoubleReg) {
3732 /* The offset is guaranteed to be aligned by the ABI rules */
3733 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3737 sparc_set (code, cfg->method, sparc_o0);
3738 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3740 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3741 EMIT_CALL ();
3743 /* Restore float regs on V9 */
3744 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3745 ArgInfo *ainfo = cinfo->args + i;
3746 gint32 stack_offset;
3748 stack_offset = ainfo->offset + ARGS_OFFSET;
3750 if (ainfo->storage == ArgInFloatReg) {
3751 if (!sparc_is_imm13 (stack_offset))
3752 NOT_IMPLEMENTED;
3753 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3755 else if (ainfo->storage == ArgInDoubleReg) {
3756 /* The offset is guaranteed to be aligned by the ABI rules */
3757 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3761 g_free (cinfo);
3763 return code;
3766 enum {
3767 SAVE_NONE,
3768 SAVE_STRUCT,
3769 SAVE_ONE,
3770 SAVE_TWO,
3771 SAVE_FP
3774 void*
3775 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
3777 guint32 *code = (guint32*)p;
3778 int save_mode = SAVE_NONE;
3779 MonoMethod *method = cfg->method;
3781 switch (mini_get_underlying_type (mono_method_signature (method)->ret)->type) {
3782 case MONO_TYPE_VOID:
3783 /* special case string .ctor icall */
3784 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3785 save_mode = SAVE_ONE;
3786 else
3787 save_mode = SAVE_NONE;
3788 break;
3789 case MONO_TYPE_I8:
3790 case MONO_TYPE_U8:
3791 #ifdef SPARCV9
3792 save_mode = SAVE_ONE;
3793 #else
3794 save_mode = SAVE_TWO;
3795 #endif
3796 break;
3797 case MONO_TYPE_R4:
3798 case MONO_TYPE_R8:
3799 save_mode = SAVE_FP;
3800 break;
3801 case MONO_TYPE_VALUETYPE:
3802 save_mode = SAVE_STRUCT;
3803 break;
3804 default:
3805 save_mode = SAVE_ONE;
3806 break;
3809 /* Save the result to the stack and also put it into the output registers */
3811 switch (save_mode) {
3812 case SAVE_TWO:
3813 /* V8 only */
3814 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3815                 sparc_st_imm (code, sparc_i1, sparc_fp, 72);
3816 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3817 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3818 break;
3819 case SAVE_ONE:
3820 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3821 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3822 break;
3823 case SAVE_FP:
3824 #ifdef SPARCV9
3825 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
3826 #else
3827 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3828 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3829 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3830 #endif
3831 break;
3832 case SAVE_STRUCT:
3833 #ifdef SPARCV9
3834 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3835 #else
3836 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
3837 #endif
3838 break;
3839 case SAVE_NONE:
3840 default:
3841 break;
3844 sparc_set (code, cfg->method, sparc_o0);
3846 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
3847 EMIT_CALL ();
3849 /* Restore result */
3851 switch (save_mode) {
3852 case SAVE_TWO:
3853 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3854                 sparc_ld_imm (code, sparc_fp, 72, sparc_i1);
3855 break;
3856 case SAVE_ONE:
3857 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3858 break;
3859 case SAVE_FP:
3860 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
3861 break;
3862 case SAVE_NONE:
3863 default:
3864 break;
3867 return code;
3870 guint8 *
3871 mono_arch_emit_prolog (MonoCompile *cfg)
3873 MonoMethod *method = cfg->method;
3874 MonoMethodSignature *sig;
3875 MonoInst *inst;
3876 guint32 *code;
3877 CallInfo *cinfo;
3878 guint32 i, offset;
3880 cfg->code_size = 256;
3881 cfg->native_code = g_malloc (cfg->code_size);
3882 code = (guint32*)cfg->native_code;
3884 /* FIXME: Generate intermediate code instead */
3886 offset = cfg->stack_offset;
3887 offset += (16 * sizeof (gpointer)); /* register save area */
3888 #ifndef SPARCV9
3889 offset += 4; /* struct/union return pointer */
3890 #endif
3892 /* add parameter area size for called functions */
3893 if (cfg->param_area < (6 * sizeof (gpointer)))
3894 /* Reserve space for the first 6 arguments even if they are unused */
3895 offset += 6 * sizeof (gpointer);
3896 else
3897 offset += cfg->param_area;
3899 /* align the stack size */
3900 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
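/*
 * Rough V8 frame layout produced by the code above (per the SPARC ABI,
 * shown for illustration only; offsets are from %sp):
 *
 *   %sp + 0  : 16 word register window save area
 *   %sp + 64 : struct/union return pointer (V8 only)
 *   %sp + 68 : outgoing parameter area (at least 6 slots)
 *   ...      : localloc area, spill slots and locals (cfg->stack_offset)
 *
 * On V9 every stack access additionally goes through the STACK_BIAS
 * adjusted %sp/%fp.
 */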
3903 * localloc'd memory is stored between the local variables (whose
3904 * size is given by cfg->stack_offset) and the space reserved by
3905 * the ABI.
3907 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3909 cfg->stack_offset = offset;
3911 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3912 /* Perform stack touching */
3913 NOT_IMPLEMENTED;
3914 #endif
3916 if (!sparc_is_imm13 (- cfg->stack_offset)) {
3917 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3918 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
3919 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
3921 else
3922 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
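/*
 * 'save' both allocates the new stack frame and rotates the register
 * window, so the caller's %o registers become this frame's %i registers.
 */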
3925 if (strstr (cfg->method->name, "foo")) {
3926 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
3927 sparc_call_simple (code, 0);
3928 sparc_nop (code);
3932 sig = mono_method_signature (method);
3934 cinfo = get_call_info (cfg, sig, FALSE);
3936 /* Keep in sync with emit_load_volatile_arguments */
3937 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3938 ArgInfo *ainfo = cinfo->args + i;
3939 gint32 stack_offset;
3940 MonoType *arg_type;
3941 inst = cfg->args [i];
3943 if (sig->hasthis && (i == 0))
3944 arg_type = &mono_defaults.object_class->byval_arg;
3945 else
3946 arg_type = sig->params [i - sig->hasthis];
3948 stack_offset = ainfo->offset + ARGS_OFFSET;
3950 /* Save the split arguments so they will reside entirely on the stack */
3951 if (ainfo->storage == ArgInSplitRegStack) {
3952 /* Save the register to the stack */
3953 g_assert (inst->opcode == OP_REGOFFSET);
3954 if (!sparc_is_imm13 (stack_offset))
3955 NOT_IMPLEMENTED;
3956 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
3959 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
3960 /* Save the argument to a dword aligned stack location */
3962 * stack_offset contains the offset of the argument on the stack.
3963 * inst->inst_offset contains the dword aligned offset where the value
3964 * should be stored.
3966 if (ainfo->storage == ArgInIRegPair) {
3967 if (!sparc_is_imm13 (inst->inst_offset + 4))
3968 NOT_IMPLEMENTED;
3969 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3970 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3972 else
3973 if (ainfo->storage == ArgInSplitRegStack) {
3974 #ifdef SPARCV9
3975 g_assert_not_reached ();
3976 #endif
3977 if (stack_offset != inst->inst_offset) {
3978 /* stack_offset is not dword aligned, so we need to make a copy */
3979 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
3980 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3981 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3984 else
3985 if (ainfo->storage == ArgOnStackPair) {
3986 #ifdef SPARCV9
3987 g_assert_not_reached ();
3988 #endif
3989 if (stack_offset != inst->inst_offset) {
3990 /* stack_offset is not dword aligned, so we need to make a copy */
3991 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
3992 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
3993 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3994 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3997 else
3998 g_assert_not_reached ();
4000 else
4001 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4002 /* Argument is in a register, but needs to be saved to the stack */
4003 if (!sparc_is_imm13 (stack_offset))
4004 NOT_IMPLEMENTED;
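/*
 * Pick the store width from the low bits of the slot offset: values
 * narrower than a register are presumably right justified in their stack
 * slot, so a byte lands at an odd offset, a half word at an offset with
 * bit 1 set, and so on (inferred from the checks below).
 */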
4005 if ((stack_offset - ARGS_OFFSET) & 0x1)
4006 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4007 else
4008 if ((stack_offset - ARGS_OFFSET) & 0x2)
4009 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4010 else
4011 if ((stack_offset - ARGS_OFFSET) & 0x4)
4012 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4013 else {
4014 if (v64)
4015 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4016 else
4017 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4020 else
4021 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4022 #ifdef SPARCV9
4023 NOT_IMPLEMENTED;
4024 #endif
4025 /* Argument is in a regpair, but needs to be saved to the stack */
4026 if (!sparc_is_imm13 (inst->inst_offset + 4))
4027 NOT_IMPLEMENTED;
4028 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4029 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4031 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4032 if (!sparc_is_imm13 (inst->inst_offset))
4033 NOT_IMPLEMENTED;
4034 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4036 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4037 /* The offset is guaranteed to be aligned by the ABI rules */
4038 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4041 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4042 /* Need to move it into a double precision register */
4043 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4046 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4047 if (inst->opcode == OP_REGVAR)
4048 /* FIXME: Load the argument from the stack into the register */
4049 NOT_IMPLEMENTED;
4052 g_free (cinfo);
4054 if (cfg->method->save_lmf) {
4055 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4057 /* Save ip */
4058 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4059 sparc_set_template (code, sparc_o7);
4060 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4061 /* Save sp */
4062 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4063 /* Save fp */
4064 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4065 /* Save method */
4066 /* FIXME: add a relocation for this */
4067 sparc_set (code, cfg->method, sparc_o7);
4068 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4070 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4071 (gpointer)"mono_arch_get_lmf_addr");
4072 EMIT_CALL ();
4074 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
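/*
 * mono_arch_get_lmf_addr (defined later in this file) returns the address
 * of the per thread LMF (last managed frame) pointer; the helper emitted
 * above presumably links the freshly initialized MonoLMF into that list.
 */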
4077 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4078 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4080 cfg->code_len = (guint8*)code - cfg->native_code;
4082 g_assert (cfg->code_len <= cfg->code_size);
4084 return (guint8*)code;
4087 void
4088 mono_arch_emit_epilog (MonoCompile *cfg)
4090 MonoMethod *method = cfg->method;
4091 guint32 *code;
4092 int can_fold = 0;
4093 int max_epilog_size = 16 + 20 * 4;
4095 if (cfg->method->save_lmf)
4096 max_epilog_size += 128;
4098 if (mono_jit_trace_calls != NULL)
4099 max_epilog_size += 50;
4101 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4102 max_epilog_size += 50;
4104 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4105 cfg->code_size *= 2;
4106 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4107 cfg->stat_code_reallocs++;
4110 code = (guint32*)(cfg->native_code + cfg->code_len);
4112 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4113 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4115 if (cfg->method->save_lmf) {
4116 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4118 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4122 * The V8 ABI requires that calls to functions which return a structure
4123 * return to %i7+12
4125 if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
4126 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4127 else
4128 sparc_ret (code);
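/*
 * Background on the %i7 + 12 return: in the V8 ABI the caller of a function
 * returning a struct places an "unimp <size>" word right after the call's
 * delay slot, so the callee must skip it by returning to %i7 + 12 instead
 * of the usual %i7 + 8.
 */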
4130 /* Only fold the last instruction into the restore if the exit block has an in count of 1,
4131 and the previous block hasn't been optimized away, since it may have an in count > 1 */
4132 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4133 can_fold = 1;
4136 * FIXME: The last instruction might have a branch pointing into it like in
4137 * int_ceq sparc_i0 <-
4139 can_fold = 0;
4141 /* Try folding last instruction into the restore */
4142 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4143 /* or reg, imm, %i0 */
4144 int reg = sparc_inst_rs1 (code [-2]);
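/* sign extend the 13 bit immediate field to a full 32 bit value */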
4145 int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
4146 code [-2] = code [-1];
4147 code --;
4148 sparc_restore_imm (code, reg, imm, sparc_o0);
4150 else
4151 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4152 /* or reg, reg, %i0 */
4153 int reg1 = sparc_inst_rs1 (code [-2]);
4154 int reg2 = sparc_inst_rs2 (code [-2]);
4155 code [-2] = code [-1];
4156 code --;
4157 sparc_restore (code, reg1, reg2, sparc_o0);
4159 else
4160 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
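/*
 * Illustration of the folding above (sketch only, register numbers and
 * immediate invented):
 *
 *     or      %l0, 10, %i0      ! compute the return value
 *     ret
 *     restore %g0, %g0, %g0     ! empty delay slot
 *
 * becomes
 *
 *     ret
 *     restore %l0, 10, %o0      ! restore's destination is written in the
 *                               ! caller's window, so %o0 here is the same
 *                               ! register as this frame's %i0
 */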
4162 cfg->code_len = (guint8*)code - cfg->native_code;
4164 g_assert (cfg->code_len < cfg->code_size);
4168 void
4169 mono_arch_emit_exceptions (MonoCompile *cfg)
4171 MonoJumpInfo *patch_info;
4172 guint32 *code;
4173 int nthrows = 0, i;
4174 int exc_count = 0;
4175 guint32 code_size;
4176 MonoClass *exc_classes [16];
4177 guint8 *exc_throw_start [16], *exc_throw_end [16];
4179 /* Compute needed space */
4180 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4181 if (patch_info->type == MONO_PATCH_INFO_EXC)
4182 exc_count++;
4186 * make sure we have enough space for exceptions
4188 #ifdef SPARCV9
4189 code_size = exc_count * (20 * 4);
4190 #else
4191 code_size = exc_count * 24;
4192 #endif
4194 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4195 cfg->code_size *= 2;
4196 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4197 cfg->stat_code_reallocs++;
4200 code = (guint32*)(cfg->native_code + cfg->code_len);
4202 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4203 switch (patch_info->type) {
4204 case MONO_PATCH_INFO_EXC: {
4205 MonoClass *exc_class;
4206 guint32 *buf, *buf2;
4207 guint32 throw_ip, type_idx;
4208 gint32 disp;
4210 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4212 exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4213 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4214 throw_ip = patch_info->ip.i;
4216 /* Find a throw sequence for the same exception class */
4217 for (i = 0; i < nthrows; ++i)
4218 if (exc_classes [i] == exc_class)
4219 break;
4221 if (i < nthrows) {
4222 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4223 if (!sparc_is_imm13 (throw_offset))
4224 sparc_set32 (code, throw_offset, sparc_o1);
4226 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4227 g_assert (sparc_is_imm22 (disp));
4228 sparc_branch (code, 0, sparc_ba, disp);
4229 if (sparc_is_imm13 (throw_offset))
4230 sparc_set32 (code, throw_offset, sparc_o1);
4231 else
4232 sparc_nop (code);
4233 patch_info->type = MONO_PATCH_INFO_NONE;
4235 else {
4236 /* Emit the template for setting o1 */
4237 buf = code;
4238 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4239 /* Can use a short form */
4240 sparc_nop (code);
4241 else
4242 sparc_set_template (code, sparc_o1);
4243 buf2 = code;
4245 if (nthrows < 16) {
4246 exc_classes [nthrows] = exc_class;
4247 exc_throw_start [nthrows] = (guint8*)code;
4251 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4252 EMIT_CALL();
4255 /* first arg = type token */
4256 /* Pass the type index to reduce the size of the sparc_set */
4257 if (!sparc_is_imm13 (type_idx))
4258 sparc_set32 (code, type_idx, sparc_o0);
4260 /* second arg = offset between the throw ip and the current ip */
4261 /* On sparc, the saved ip points to the call instruction */
4262 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4263 sparc_set32 (buf, disp, sparc_o1);
4264 while (buf < buf2)
4265 sparc_nop (buf);
4267 if (nthrows < 16) {
4268 exc_throw_end [nthrows] = (guint8*)code;
4269 nthrows ++;
4272 patch_info->data.name = "mono_arch_throw_corlib_exception";
4273 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4274 patch_info->ip.i = (guint8*)code - cfg->native_code;
4276 EMIT_CALL ();
4278 if (sparc_is_imm13 (type_idx)) {
4279 /* Put it into the delay slot */
4280 code --;
4281 buf = code;
4282 sparc_set32 (code, type_idx, sparc_o0);
4283 g_assert (code - buf == 1);
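/*
 * Sketch of a finished throw sequence (illustrative; the exact encoding
 * depends on whether the immediates fit into imm13):
 *
 *     mov  <distance from the throw site, in instructions>, %o1
 *     call mono_arch_throw_corlib_exception
 *     mov  <type token index>, %o0      ! in the delay slot when possible
 */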
4286 break;
4288 default:
4289 /* do nothing */
4290 break;
4294 cfg->code_len = (guint8*)code - cfg->native_code;
4296 g_assert (cfg->code_len < cfg->code_size);
4300 gboolean lmf_addr_key_inited = FALSE;
4302 #ifdef MONO_SPARC_THR_TLS
4303 thread_key_t lmf_addr_key;
4304 #else
4305 pthread_key_t lmf_addr_key;
4306 #endif
4308 gpointer
4309 mono_arch_get_lmf_addr (void)
4311 /* This is perf critical so we bypass the IO layer */
4312 /* The thr_... functions seem to be somewhat faster */
4313 #ifdef MONO_SPARC_THR_TLS
4314 gpointer res;
4315 thr_getspecific (lmf_addr_key, &res);
4316 return res;
4317 #else
4318 return pthread_getspecific (lmf_addr_key);
4319 #endif
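/*
 * lmf_addr_key is created and filled in by mono_arch_tls_init () below;
 * each thread stores the address of the lmf field of its MonoJitTlsData
 * there.
 */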
4322 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4325 * There seems to be no way to determine stack boundaries under solaris,
4326 * so it's not possible to determine whether a SIGSEGV is caused by stack
4327 * overflow or not.
4329 #error "--with-sigaltstack=yes not supported on solaris"
4331 #endif
4333 void
4334 mono_arch_tls_init (void)
4336 MonoJitTlsData *jit_tls;
4338 if (!lmf_addr_key_inited) {
4339 int res;
4341 lmf_addr_key_inited = TRUE;
4343 #ifdef MONO_SPARC_THR_TLS
4344 res = thr_keycreate (&lmf_addr_key, NULL);
4345 #else
4346 res = pthread_key_create (&lmf_addr_key, NULL);
4347 #endif
4348 g_assert (res == 0);
4352 jit_tls = mono_get_jit_tls ();
4354 #ifdef MONO_SPARC_THR_TLS
4355 thr_setspecific (lmf_addr_key, &jit_tls->lmf);
4356 #else
4357 pthread_setspecific (lmf_addr_key, &jit_tls->lmf);
4358 #endif
4361 void
4362 mono_arch_finish_init (void)
4366 void
4367 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4371 MonoInst*
4372 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4374 MonoInst *ins = NULL;
4376 return ins;
4380 * mono_arch_get_argument_info:
4381 * @csig: a method signature
4382 * @param_count: the number of parameters to consider
4383 * @arg_info: an array to store the result infos
4385 * Gathers information on parameters such as size, alignment and
4386 * padding. arg_info should be large enough to hold param_count + 1 entries.
4388 * Returns the size of the activation frame.
4391 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4393 int k, align;
4394 CallInfo *cinfo;
4395 ArgInfo *ainfo;
4397 cinfo = get_call_info (NULL, csig, FALSE);
4399 if (csig->hasthis) {
4400 ainfo = &cinfo->args [0];
4401 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4404 for (k = 0; k < param_count; k++) {
4405 ainfo = &cinfo->args [k + csig->hasthis];
4407 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4408 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
4411 g_free (cinfo);
4413 return 0;
4416 gboolean
4417 mono_arch_print_tree (MonoInst *tree, int arity)
4419 return 0;
4422 mgreg_t
4423 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4425 /* FIXME: implement */
4426 g_assert_not_reached ();
4429 gboolean
4430 mono_arch_opcode_supported (int opcode)
4432 return FALSE;