1 /**
2 * \file
3 * Sparc backend for the Mono code generator
5 * Authors:
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * Modified for SPARC:
10 * Christopher Taylor (ct@gentoo.org)
11 * Mark Crichton (crichton@gimp.org)
12 * Zoltan Varga (vargaz@freemail.hu)
14 * (C) 2003 Ximian, Inc.
16 #include "mini.h"
17 #include <string.h>
18 #include <pthread.h>
19 #include <unistd.h>
21 #ifndef __linux__
22 #include <thread.h>
23 #endif
25 #include <unistd.h>
26 #include <sys/mman.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/debug-helpers.h>
30 #include <mono/metadata/tokentype.h>
31 #include <mono/utils/mono-math.h>
32 #include <mono/utils/mono-hwcap.h>
33 #include <mono/utils/unlocked.h>
35 #include "mini-sparc.h"
36 #include "trace.h"
37 #include "cpu-sparc.h"
38 #include "jit-icalls.h"
39 #include "ir-emit.h"
40 #include "mono/utils/mono-tls-inline.h"
43 * Sparc V9 means two things:
44 * - the instruction set
45 * - the ABI
47 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
48 * processors in use are 64 bit processors. The V9 ABI is only usable if the
49 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
50 * instructions without using the 64 bit ABI.
54 * Register usage:
55 * - %i0..%i<n> hold the incoming arguments; these are never written by JITted
56 * code. Unused input registers are used for global register allocation.
57 * - %o0..%o5 and %l7 are used for local register allocation and passing arguments
58 * - %l0..%l6 are used for global register allocation
59 * - %o7 and %g1 are used as scratch registers in opcodes
60 * - all floating point registers are used for local register allocation except %f0.
61 * Only double precision registers are used.
62 * In 64 bit mode:
63 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
64 * used for local allocation.
68 * Alignment:
69 * - doubles and longs must be stored in dword aligned locations
73 * The following things are not implemented or do not work:
74 * - some fp arithmetic corner cases
75 * The following tests in mono/mini are expected to fail:
76 * - test_0_simple_double_casts
77 * This test casts (guint64)-1 to double and then back to guint64 again.
78 * Under x86, it returns 0, while under sparc it returns -1.
80 * In addition to this, the runtime requires the trunc function, or its
81 * solaris counterpart, aintl, to do some double->int conversions. If this
82 * function is not available, it is emulated somewhat, but the results can be
83 * strange.
87 * SPARCV9 FIXME:
88 * - optimize sparc_set according to the memory model
89 * - when non-AOT compiling, compute patch targets immediately so we don't
90 * have to emit the 6 byte template.
91 * - varargs
92 * - struct arguments/returns
96 * SPARCV9 ISSUES:
97 * - sparc_call_simple can't be used in a lot of places since the displacement
98 * might not fit into an imm30.
99 * - g1 can't be used in a lot of places since it is used as a scratch reg in
100 * sparc_set.
101 * - sparc_f0 can't be used as a scratch register on V9
102 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
103 * %d36 = %f5.
104 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
105 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
106 * be a double precision register which has no single precision part.
107 * - passing/returning structs is hard to implement, because:
108 * - the spec is very hard to understand
109 * - it requires knowledge about the fields of the structure and needs to handle
110 * nested structures etc.
114 * Possible optimizations:
115 * - delay slot scheduling
116 * - allocate large constants to registers
117 * - add more mul/div/rem optimizations
120 #ifndef __linux__
121 #define MONO_SPARC_THR_TLS 1
122 #endif
125 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
126 * causing infinite loops in dominator computation. So glib-2.4 is required.
128 #ifdef SPARCV9
129 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
130 #error "glib 2.4 or later is required for 64 bit mode."
131 #endif
132 #endif
134 #define SIGNAL_STACK_SIZE (64 * 1024)
136 #define STACK_BIAS MONO_SPARC_STACK_BIAS
138 #ifdef SPARCV9
140 /* %g1 is used by sparc_set */
141 #define GP_SCRATCH_REG sparc_g4
142 /* %f0 is used for parameter passing */
143 #define FP_SCRATCH_REG sparc_f30
144 #define ARGS_OFFSET (STACK_BIAS + 128)
146 #else
148 #define FP_SCRATCH_REG sparc_f0
149 #define ARGS_OFFSET 68
150 #define GP_SCRATCH_REG sparc_g1
152 #endif
154 /* Whether this is a 64 bit executable */
155 #if SPARCV9
156 static gboolean v64 = TRUE;
157 #else
158 static gboolean v64 = FALSE;
159 #endif
161 static gpointer mono_arch_get_lmf_addr (void);
163 const char*
164 mono_arch_regname (int reg) {
165 static const char * rnames[] = {
166 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
167 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
168 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
169 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
170 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
171 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
172 "sparc_fp", "sparc_retadr"
174 if (reg >= 0 && reg < 32)
175 return rnames [reg];
176 return "unknown";
179 const char*
180 mono_arch_fregname (int reg) {
181 static const char *rnames [] = {
182 "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
183 "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
184 "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
185 "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
186 "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
187 "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
188 "sparc_f30", "sparc_f31"
191 if (reg >= 0 && reg < 32)
192 return rnames [reg];
193 else
194 return "unknown";
198 * Initialize the cpu to execute managed code.
200 void
201 mono_arch_cpu_init (void)
206 * Initialize architecture specific code.
208 void
209 mono_arch_init (void)
214 * Cleanup architecture specific code.
216 void
217 mono_arch_cleanup (void)
221 gboolean
222 mono_arch_have_fast_tls (void)
224 return FALSE;
228 * This function returns the optimizations supported on this cpu.
230 guint32
231 mono_arch_cpu_optimizations (guint32 *exclude_mask)
233 guint32 opts = 0;
235 *exclude_mask = 0;
238 * On some processors, the cmov instructions are even slower than the
239 * normal ones...
241 if (mono_hwcap_sparc_is_v9)
242 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
243 else
244 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
246 return opts;
250 * This function tests for all SIMD functions supported.
252 * Returns a bitmask corresponding to all supported versions.
255 guint32
256 mono_arch_cpu_enumerate_simd_versions (void)
258 /* SIMD is currently unimplemented */
259 return 0;
262 #ifdef __GNUC__
263 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
264 #else /* assume Sun's compiler */
265 static void flushi(void *addr)
267 asm("flush %i0");
269 #endif
271 #ifndef __linux__
272 void sync_instruction_memory(caddr_t addr, int len);
273 #endif
275 void
276 mono_arch_flush_icache (guint8 *code, gint size)
278 #ifndef __linux__
279 /* Hopefully this is optimized based on the actual CPU */
280 sync_instruction_memory (code, size);
281 #else
282 gulong start = (gulong) code;
283 gulong end = start + size;
284 gulong align;
286 /* Sparcv9 chips only need flushes on 32 byte
287 * cacheline boundaries.
289 * Sparcv8 needs a flush every 8 bytes.
291 align = (mono_hwcap_sparc_is_v9 ? 32 : 8);
293 start &= ~(align - 1);
294 end = (end + (align - 1)) & ~(align - 1);
296 while (start < end) {
297 #ifdef __GNUC__
298 __asm__ __volatile__ ("iflush %0"::"r"(start));
299 #else
300 flushi (start);
301 #endif
302 start += align;
304 #endif
308 * mono_sparc_flushw:
310 * Flush all register windows to memory. Every register window is saved to
311 * a 16 word area on the stack pointed to by its %sp register.
313 void
314 mono_sparc_flushw (void)
316 static guint32 start [64];
317 static int inited = 0;
318 guint32 *code;
319 static void (*flushw) (void);
321 if (!inited) {
322 code = start;
324 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
325 sparc_flushw (code);
326 sparc_ret (code);
327 sparc_restore_simple (code);
329 g_assert ((code - start) < 64);
331 mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
333 flushw = (gpointer)start;
335 inited = 1;
338 flushw ();
341 void
342 mono_arch_flush_register_windows (void)
344 mono_sparc_flushw ();
347 gboolean
348 mono_arch_is_inst_imm (int opcode, int imm_opcode, gint64 imm)
350 return sparc_is_imm13 (imm);
353 gboolean
354 mono_sparc_is_v9 (void) {
355 return mono_hwcap_sparc_is_v9;
358 gboolean
359 mono_sparc_is_sparc64 (void) {
360 return v64;
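/* Where an argument is passed: a register, a register pair, the stack, or split between a register and the stack */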
363 typedef enum {
364 ArgInIReg,
365 ArgInIRegPair,
366 ArgInSplitRegStack,
367 ArgInFReg,
368 ArgInFRegPair,
369 ArgOnStack,
370 ArgOnStackPair,
371 ArgInFloatReg, /* V9 only */
372 ArgInDoubleReg /* V9 only */
373 } ArgStorage;
375 typedef struct {
376 gint16 offset;
377 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
378 gint8 reg;
379 ArgStorage storage;
380 guint32 vt_offset; /* for valuetypes */
381 } ArgInfo;
383 struct CallInfo {
384 int nargs;
385 guint32 stack_usage;
386 guint32 reg_usage;
387 ArgInfo ret;
388 ArgInfo sig_cookie;
389 ArgInfo args [1];
392 #define DEBUG(a)
394 /* %o0..%o5 */
395 #define PARAM_REGS 6
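/* Assign the next integer argument to an %o register, the stack, or (for V8 pairs) a register/stack split, updating GR and STACK_SIZE */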
397 static void inline
398 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
400 ainfo->offset = *stack_size;
402 if (!pair) {
403 if (*gr >= PARAM_REGS) {
404 ainfo->storage = ArgOnStack;
406 else {
407 ainfo->storage = ArgInIReg;
408 ainfo->reg = *gr;
409 (*gr) ++;
412 /* Always reserve stack space for parameters passed in registers */
413 (*stack_size) += sizeof (target_mgreg_t);
415 else {
416 if (*gr < PARAM_REGS - 1) {
417 /* A pair of registers */
418 ainfo->storage = ArgInIRegPair;
419 ainfo->reg = *gr;
420 (*gr) += 2;
422 else if (*gr >= PARAM_REGS) {
423 /* A pair of stack locations */
424 ainfo->storage = ArgOnStackPair;
426 else {
427 ainfo->storage = ArgInSplitRegStack;
428 ainfo->reg = *gr;
429 (*gr) ++;
432 (*stack_size) += 2 * sizeof (target_mgreg_t);
436 #ifdef SPARCV9
438 #define FLOAT_PARAM_REGS 32
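/* Assign the next floating point argument to a V9 fp register or the stack; a single uses the odd half of a register pair, a double a full pair */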
440 static void inline
441 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
443 ainfo->offset = *stack_size;
445 if (single) {
446 if (*gr >= FLOAT_PARAM_REGS) {
447 ainfo->storage = ArgOnStack;
449 else {
450 /* A single is passed in an odd numbered fp register */
451 ainfo->storage = ArgInFloatReg;
452 ainfo->reg = *gr + 1;
453 (*gr) += 2;
456 else {
457 if (*gr < FLOAT_PARAM_REGS) {
458 /* A double register */
459 ainfo->storage = ArgInDoubleReg;
460 ainfo->reg = *gr;
461 (*gr) += 2;
463 else {
464 ainfo->storage = ArgOnStack;
468 (*stack_size) += sizeof (target_mgreg_t);
471 #endif
474 * get_call_info:
476 * Obtain information about a call according to the calling convention.
477 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
478 * document for more information.
479 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
480 * the 'Sparc Compliance Definition 2.4' document.
482 static CallInfo*
483 get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
485 guint32 i, gr, fr;
486 int n = sig->hasthis + sig->param_count;
487 guint32 stack_size = 0;
488 CallInfo *cinfo;
489 MonoType *ret_type;
491 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
493 gr = 0;
494 fr = 0;
496 #ifdef SPARCV9
497 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
498 /* The address of the return value is passed in %o0 */
499 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
500 cinfo->ret.reg += sparc_i0;
501 /* FIXME: Pass the return value address after 'this', as on other platforms */
502 NOT_IMPLEMENTED;
504 #endif
506 /* this */
507 if (sig->hasthis)
508 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
510 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
511 gr = PARAM_REGS;
513 /* Emit the signature cookie just before the implicit arguments */
514 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
517 for (i = 0; i < sig->param_count; ++i) {
518 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
519 MonoType *ptype;
521 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
522 gr = PARAM_REGS;
524 /* Emit the signature cookie just before the implicit arguments */
525 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
528 DEBUG(printf("param %d: ", i));
529 if (sig->params [i]->byref) {
530 DEBUG(printf("byref\n"));
532 add_general (&gr, &stack_size, ainfo, FALSE);
533 continue;
535 ptype = mini_get_underlying_type (sig->params [i]);
536 switch (ptype->type) {
537 case MONO_TYPE_BOOLEAN:
538 case MONO_TYPE_I1:
539 case MONO_TYPE_U1:
540 add_general (&gr, &stack_size, ainfo, FALSE);
541 /* the value is in the ls byte */
542 ainfo->offset += sizeof (target_mgreg_t) - 1;
543 break;
544 case MONO_TYPE_I2:
545 case MONO_TYPE_U2:
546 case MONO_TYPE_CHAR:
547 add_general (&gr, &stack_size, ainfo, FALSE);
548 /* the value is in the ls word */
549 ainfo->offset += sizeof (target_mgreg_t) - 2;
550 break;
551 case MONO_TYPE_I4:
552 case MONO_TYPE_U4:
553 add_general (&gr, &stack_size, ainfo, FALSE);
554 /* the value is in the ls dword */
555 ainfo->offset += sizeof (target_mgreg_t) - 4;
556 break;
557 case MONO_TYPE_I:
558 case MONO_TYPE_U:
559 case MONO_TYPE_PTR:
560 case MONO_TYPE_FNPTR:
561 case MONO_TYPE_CLASS:
562 case MONO_TYPE_OBJECT:
563 case MONO_TYPE_STRING:
564 case MONO_TYPE_SZARRAY:
565 case MONO_TYPE_ARRAY:
566 add_general (&gr, &stack_size, ainfo, FALSE);
567 break;
568 case MONO_TYPE_GENERICINST:
569 if (!mono_type_generic_inst_is_valuetype (ptype)) {
570 add_general (&gr, &stack_size, ainfo, FALSE);
571 break;
573 /* Fall through */
574 case MONO_TYPE_VALUETYPE:
575 #ifdef SPARCV9
576 if (sig->pinvoke)
577 NOT_IMPLEMENTED;
578 #endif
579 add_general (&gr, &stack_size, ainfo, FALSE);
580 break;
581 case MONO_TYPE_TYPEDBYREF:
582 add_general (&gr, &stack_size, ainfo, FALSE);
583 break;
584 case MONO_TYPE_U8:
585 case MONO_TYPE_I8:
586 #ifdef SPARCV9
587 add_general (&gr, &stack_size, ainfo, FALSE);
588 #else
589 add_general (&gr, &stack_size, ainfo, TRUE);
590 #endif
591 break;
592 case MONO_TYPE_R4:
593 #ifdef SPARCV9
594 add_float (&fr, &stack_size, ainfo, TRUE);
595 gr ++;
596 #else
597 /* single precision values are passed in integer registers */
598 add_general (&gr, &stack_size, ainfo, FALSE);
599 #endif
600 break;
601 case MONO_TYPE_R8:
602 #ifdef SPARCV9
603 add_float (&fr, &stack_size, ainfo, FALSE);
604 gr ++;
605 #else
606 /* double precision values are passed in a pair of registers */
607 add_general (&gr, &stack_size, ainfo, TRUE);
608 #endif
609 break;
610 default:
611 g_assert_not_reached ();
615 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
616 gr = PARAM_REGS;
618 /* Emit the signature cookie just before the implicit arguments */
619 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
622 /* return value */
623 ret_type = mini_get_underlying_type (sig->ret);
624 switch (ret_type->type) {
625 case MONO_TYPE_BOOLEAN:
626 case MONO_TYPE_I1:
627 case MONO_TYPE_U1:
628 case MONO_TYPE_I2:
629 case MONO_TYPE_U2:
630 case MONO_TYPE_CHAR:
631 case MONO_TYPE_I4:
632 case MONO_TYPE_U4:
633 case MONO_TYPE_I:
634 case MONO_TYPE_U:
635 case MONO_TYPE_PTR:
636 case MONO_TYPE_FNPTR:
637 case MONO_TYPE_CLASS:
638 case MONO_TYPE_OBJECT:
639 case MONO_TYPE_SZARRAY:
640 case MONO_TYPE_ARRAY:
641 case MONO_TYPE_STRING:
642 cinfo->ret.storage = ArgInIReg;
643 cinfo->ret.reg = sparc_i0;
644 if (gr < 1)
645 gr = 1;
646 break;
647 case MONO_TYPE_U8:
648 case MONO_TYPE_I8:
649 #ifdef SPARCV9
650 cinfo->ret.storage = ArgInIReg;
651 cinfo->ret.reg = sparc_i0;
652 if (gr < 1)
653 gr = 1;
654 #else
655 cinfo->ret.storage = ArgInIRegPair;
656 cinfo->ret.reg = sparc_i0;
657 if (gr < 2)
658 gr = 2;
659 #endif
660 break;
661 case MONO_TYPE_R4:
662 case MONO_TYPE_R8:
663 cinfo->ret.storage = ArgInFReg;
664 cinfo->ret.reg = sparc_f0;
665 break;
666 case MONO_TYPE_GENERICINST:
667 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
668 cinfo->ret.storage = ArgInIReg;
669 cinfo->ret.reg = sparc_i0;
670 if (gr < 1)
671 gr = 1;
672 break;
674 /* Fall through */
675 case MONO_TYPE_VALUETYPE:
676 if (v64) {
677 if (sig->pinvoke)
678 NOT_IMPLEMENTED;
679 else
680 /* Already done */
683 else
684 cinfo->ret.storage = ArgOnStack;
685 break;
686 case MONO_TYPE_TYPEDBYREF:
687 if (v64) {
688 if (sig->pinvoke)
689 /* Same as a valuetype with size 24 */
690 NOT_IMPLEMENTED;
691 else
692 /* Already done */
695 else
696 cinfo->ret.storage = ArgOnStack;
697 break;
698 case MONO_TYPE_VOID:
699 break;
700 default:
701 g_error ("Can't handle as return value 0x%x", sig->ret->type);
704 cinfo->stack_usage = stack_size;
705 cinfo->reg_usage = gr;
706 return cinfo;
709 GList *
710 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
712 GList *vars = NULL;
713 int i;
716 * FIXME: If an argument is allocated to a register, then load it from the
717 * stack in the prolog.
720 for (i = 0; i < cfg->num_varinfo; i++) {
721 MonoInst *ins = cfg->varinfo [i];
722 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
724 /* unused vars */
725 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
726 continue;
728 /* FIXME: Make arguments on the stack allocatable to registers */
729 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
730 continue;
732 if (mono_is_regsize_var (ins->inst_vtype)) {
733 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
734 g_assert (i == vmv->idx);
736 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
740 return vars;
743 GList *
744 mono_arch_get_global_int_regs (MonoCompile *cfg)
746 GList *regs = NULL;
747 int i;
748 MonoMethodSignature *sig;
749 CallInfo *cinfo;
751 sig = mono_method_signature_internal (cfg->method);
753 cinfo = get_call_info (cfg, sig, FALSE);
755 /* Use unused input registers */
756 for (i = cinfo->reg_usage; i < 6; ++i)
757 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
759 /* Use %l0..%l6 as global registers */
760 for (i = sparc_l0; i < sparc_l7; ++i)
761 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
763 g_free (cinfo);
765 return regs;
769 * mono_arch_regalloc_cost:
771 * Return the cost, in number of memory references, of the action of
772 * allocating the variable VMV into a register during global register
773 * allocation.
775 guint32
776 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
778 return 0;
782 * Set var information according to the calling convention. Sparc version.
783 * The local variable handling should most likely be split into another method.
786 void
787 mono_arch_allocate_vars (MonoCompile *cfg)
789 MonoMethodSignature *sig;
790 MonoMethodHeader *header;
791 MonoInst *inst;
792 int i, offset, size, align, curinst;
793 CallInfo *cinfo;
795 header = cfg->header;
797 sig = mono_method_signature_internal (cfg->method);
799 cinfo = get_call_info (cfg, sig, FALSE);
801 if (sig->ret->type != MONO_TYPE_VOID) {
802 switch (cinfo->ret.storage) {
803 case ArgInIReg:
804 case ArgInFReg:
805 cfg->ret->opcode = OP_REGVAR;
806 cfg->ret->inst_c0 = cinfo->ret.reg;
807 break;
808 case ArgInIRegPair: {
809 MonoType *t = mini_get_underlying_type (sig->ret);
810 if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
811 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
812 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));
814 low->opcode = OP_REGVAR;
815 low->dreg = cinfo->ret.reg + 1;
816 high->opcode = OP_REGVAR;
817 high->dreg = cinfo->ret.reg;
819 cfg->ret->opcode = OP_REGVAR;
820 cfg->ret->inst_c0 = cinfo->ret.reg;
821 break;
823 case ArgOnStack:
824 #ifdef SPARCV9
825 g_assert_not_reached ();
826 #else
827 /* valuetypes */
828 cfg->vret_addr->opcode = OP_REGOFFSET;
829 cfg->vret_addr->inst_basereg = sparc_fp;
830 cfg->vret_addr->inst_offset = 64;
831 #endif
832 break;
833 default:
834 NOT_IMPLEMENTED;
836 cfg->ret->dreg = cfg->ret->inst_c0;
840 * We use the ABI calling conventions for managed code as well.
841 * Exception: valuetypes are never returned in registers on V9.
842 * FIXME: Use something more optimized.
845 /* Locals are allocated backwards from %fp */
846 cfg->frame_reg = sparc_fp;
847 offset = 0;
850 * Reserve a stack slot for holding information used during exception
851 * handling.
853 if (header->num_clauses)
854 offset += sizeof (target_mgreg_t) * 2;
856 if (cfg->method->save_lmf) {
857 offset += sizeof (MonoLMF);
858 cfg->arch.lmf_offset = offset;
861 curinst = cfg->locals_start;
862 for (i = curinst; i < cfg->num_varinfo; ++i) {
863 inst = cfg->varinfo [i];
865 if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
866 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
867 continue;
870 if (inst->flags & MONO_INST_IS_DEAD)
871 continue;
873 /* inst->backend.is_pinvoke indicates native sized value types; this is used by the
874 * pinvoke wrappers when they call functions returning structures */
875 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
876 size = mono_class_native_size (mono_class_from_mono_type_internal (inst->inst_vtype), &align);
877 else
878 size = mini_type_stack_size (inst->inst_vtype, &align);
881 * This is needed since structures containing doubles must be doubleword
882 * aligned.
883 * FIXME: Do this only if needed.
885 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
886 align = 8;
889 * variables are accessed as negative offsets from %fp, so increase
890 * the offset before assigning it to a variable
892 offset += size;
894 offset += align - 1;
895 offset &= ~(align - 1);
896 inst->opcode = OP_REGOFFSET;
897 inst->inst_basereg = sparc_fp;
898 inst->inst_offset = STACK_BIAS + -offset;
900 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
903 if (sig->call_convention == MONO_CALL_VARARG) {
904 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
907 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
908 inst = cfg->args [i];
909 if (inst->opcode != OP_REGVAR) {
910 ArgInfo *ainfo = &cinfo->args [i];
911 gboolean inreg = TRUE;
912 MonoType *arg_type;
913 ArgStorage storage;
915 if (sig->hasthis && (i == 0))
916 arg_type = mono_get_object_type ();
917 else
918 arg_type = sig->params [i - sig->hasthis];
920 #ifndef SPARCV9
921 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
922 || (arg_type->type == MONO_TYPE_R8)))
924 * Since float arguments are passed in integer registers, we need to
925 * save them to the stack in the prolog.
927 inreg = FALSE;
928 #endif
930 /* FIXME: Allocate volatile arguments to registers */
931 /* FIXME: This makes the argument holding a vtype address into volatile */
932 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
933 inreg = FALSE;
935 if (MONO_TYPE_ISSTRUCT (arg_type))
936 /* FIXME: this isn't needed */
937 inreg = FALSE;
939 inst->opcode = OP_REGOFFSET;
941 if (!inreg)
942 storage = ArgOnStack;
943 else
944 storage = ainfo->storage;
946 switch (storage) {
947 case ArgInIReg:
948 inst->opcode = OP_REGVAR;
949 inst->dreg = sparc_i0 + ainfo->reg;
950 break;
951 case ArgInIRegPair:
952 if (inst->type == STACK_I8) {
953 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg));
954 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg));
956 low->opcode = OP_REGVAR;
957 low->dreg = sparc_i0 + ainfo->reg + 1;
958 high->opcode = OP_REGVAR;
959 high->dreg = sparc_i0 + ainfo->reg;
961 inst->opcode = OP_REGVAR;
962 inst->dreg = sparc_i0 + ainfo->reg;
963 break;
964 case ArgInFloatReg:
965 case ArgInDoubleReg:
967 * Since float regs are volatile, we save the arguments to
968 * the stack in the prolog.
969 * FIXME: Avoid this if the method contains no calls.
971 case ArgOnStack:
972 case ArgOnStackPair:
973 case ArgInSplitRegStack:
974 /* Split arguments are saved to the stack in the prolog */
975 inst->opcode = OP_REGOFFSET;
976 /* in parent frame */
977 inst->inst_basereg = sparc_fp;
978 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
980 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
982 * It is very hard to load doubles from non-doubleword aligned
983 * memory locations. So if the offset is misaligned, we copy the
984 * argument to a stack location in the prolog.
986 if ((inst->inst_offset - STACK_BIAS) % 8) {
987 inst->inst_basereg = sparc_fp;
988 offset += 8;
989 align = 8;
990 offset += align - 1;
991 offset &= ~(align - 1);
992 inst->inst_offset = STACK_BIAS + -offset;
996 break;
997 default:
998 NOT_IMPLEMENTED;
1001 if (MONO_TYPE_ISSTRUCT (arg_type)) {
1002 /* Add a level of indirection */
1004 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1005 * are destructively modified in a lot of places in inssel.brg.
1007 MonoInst *indir;
1008 MONO_INST_NEW (cfg, indir, 0);
1009 *indir = *inst;
1010 inst->opcode = OP_VTARG_ADDR;
1011 inst->inst_left = indir;
1017 * spillvars are stored between the normal locals and the storage reserved
1018 * by the ABI.
1021 cfg->stack_offset = offset;
1023 g_free (cinfo);
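/* Create backend specific variables: the vret address for struct returns, volatile halves of a 64 bit return value, and a spill slot for int<->float conversions */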
1026 void
1027 mono_arch_create_vars (MonoCompile *cfg)
1029 MonoMethodSignature *sig;
1031 sig = mono_method_signature_internal (cfg->method);
1033 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
1034 cfg->vret_addr = mono_compile_create_var (cfg, mono_get_int_type (), OP_ARG);
1035 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1036 printf ("vret_addr = ");
1037 mono_print_ins (cfg->vret_addr);
1041 if (!sig->ret->byref && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
1042 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
1043 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));
1045 low->flags |= MONO_INST_VOLATILE;
1046 high->flags |= MONO_INST_VOLATILE;
1049 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1050 cfg->arch.float_spill_slot = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_ARG);
1051 ((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE;
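/* Emit a move of SREG into a new vreg and register it as an outgoing call argument in hard register REG */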
1054 static void
1055 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
1057 MonoInst *arg;
1059 MONO_INST_NEW (cfg, arg, 0);
1061 arg->sreg1 = sreg;
1063 switch (storage) {
1064 case ArgInIReg:
1065 arg->opcode = OP_MOVE;
1066 arg->dreg = mono_alloc_ireg (cfg);
1068 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
1069 break;
1070 case ArgInFloatReg:
1071 arg->opcode = OP_FMOVE;
1072 arg->dreg = mono_alloc_freg (cfg);
1074 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1075 break;
1076 default:
1077 g_assert_not_reached ();
1080 MONO_ADD_INS (cfg->cbb, arg);
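/* Load a word from OFFSET(%sp) into a new vreg and register it as an outgoing call argument in REG */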
1083 static void
1084 add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
1086 int dreg = mono_alloc_ireg (cfg);
1088 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset);
1090 mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
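/* Pass a 64 bit integer argument in an %o register pair, a pair of stack slots, or split between the last register and the stack */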
1093 static void
1094 emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1096 int offset = ARGS_OFFSET + ainfo->offset;
1098 switch (ainfo->storage) {
1099 case ArgInIRegPair:
1100 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, MONO_LVREG_LS (in->dreg));
1101 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
1102 break;
1103 case ArgOnStackPair:
1104 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, MONO_LVREG_MS (in->dreg));
1105 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
1106 break;
1107 case ArgInSplitRegStack:
1108 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
1109 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
1110 break;
1111 default:
1112 g_assert_not_reached ();
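/* Pass an R8 argument: values destined for integer registers are first stored to the stack and reloaded, since fp<->int transfers must go through memory */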
1116 static void
1117 emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1119 int offset = ARGS_OFFSET + ainfo->offset;
1121 switch (ainfo->storage) {
1122 case ArgInIRegPair:
1123 /* floating-point <-> integer transfer must go through memory */
1124 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1126 /* Load into a register pair */
1127 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1128 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
1129 break;
1130 case ArgOnStackPair:
1131 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1132 break;
1133 case ArgInSplitRegStack:
1134 /* floating-point <-> integer transfer must go through memory */
1135 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1136 /* Load most significant word into register */
1137 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1138 break;
1139 default:
1140 g_assert_not_reached ();
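/* Pass an R4 argument, going through memory when it ends up in an integer register */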
1144 static void
1145 emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1147 int offset = ARGS_OFFSET + ainfo->offset;
1149 switch (ainfo->storage) {
1150 case ArgInIReg:
1151 /* floating-point <-> integer transfer must go through memory */
1152 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1153 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1154 break;
1155 case ArgOnStack:
1156 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1157 break;
1158 default:
1159 g_assert_not_reached ();
1163 static void
1164 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
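/* Pass a valuetype argument: copy it to a reserved stack area with OP_OUTARG_VT, then pass its address like a normal pointer argument */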
1166 static void
1167 emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
1169 MonoInst *arg;
1170 guint32 align, offset, pad, size;
1172 if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
1173 size = MONO_ABI_SIZEOF (MonoTypedRef);
1174 align = sizeof (target_mgreg_t);
1176 else if (pinvoke)
1177 size = mono_type_native_stack_size (m_class_get_byval_arg (in->klass), &align);
1178 else {
1180 * Other backends use mono_type_stack_size (), but that
1181 * aligns the size to 8, which is larger than the size of
1182 * the source, leading to reads of invalid memory if the
1183 * source is at the end of address space.
1185 size = mono_class_value_size (in->klass, &align);
1188 /* The first 6 argument locations are reserved */
1189 if (cinfo->stack_usage < 6 * sizeof (target_mgreg_t))
1190 cinfo->stack_usage = 6 * sizeof (target_mgreg_t);
1192 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1193 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1195 cinfo->stack_usage += size;
1196 cinfo->stack_usage += pad;
1199 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1200 * use the normal OUTARG opcodes to pass the address of the location to
1201 * the callee.
1203 if (size > 0) {
1204 MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
1205 arg->sreg1 = in->dreg;
1206 arg->klass = in->klass;
1207 arg->backend.size = size;
1208 arg->inst_p0 = call;
1209 arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1210 memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
1211 ((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
1212 MONO_ADD_INS (cfg->cbb, arg);
1214 MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
1215 arg->dreg = mono_alloc_preg (cfg);
1216 arg->sreg1 = sparc_sp;
1217 arg->inst_imm = STACK_BIAS + offset;
1218 MONO_ADD_INS (cfg->cbb, arg);
1220 emit_pass_other (cfg, call, ainfo, NULL, arg);
1224 static void
1225 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
1227 int offset = ARGS_OFFSET + ainfo->offset;
1228 int opcode;
1230 switch (ainfo->storage) {
1231 case ArgInIReg:
1232 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
1233 break;
1234 case ArgOnStack:
1235 #ifdef SPARCV9
1236 NOT_IMPLEMENTED;
1237 #else
1238 if (offset & 0x1)
1239 opcode = OP_STOREI1_MEMBASE_REG;
1240 else if (offset & 0x2)
1241 opcode = OP_STOREI2_MEMBASE_REG;
1242 else
1243 opcode = OP_STOREI4_MEMBASE_REG;
1244 MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
1245 #endif
1246 break;
1247 default:
1248 g_assert_not_reached ();
1252 static void
1253 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1255 MonoMethodSignature *tmp_sig;
1258 * mono_ArgIterator_Setup assumes the signature cookie is
1259 * passed first and all the arguments which were before it are
1260 * passed on the stack after the signature. So compensate by
1261 * passing a different signature.
1263 tmp_sig = mono_metadata_signature_dup (call->signature);
1264 tmp_sig->param_count -= call->signature->sentinelpos;
1265 tmp_sig->sentinelpos = 0;
1266 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1268 /* FIXME: Add support for signature tokens to AOT */
1269 cfg->disable_aot = TRUE;
1270 /* We always pass the signature on the stack for simplicity */
1271 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
1274 void
1275 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1277 MonoInst *in;
1278 MonoMethodSignature *sig;
1279 int i, n;
1280 CallInfo *cinfo;
1281 ArgInfo *ainfo;
1282 guint32 extra_space = 0;
1284 sig = call->signature;
1285 n = sig->param_count + sig->hasthis;
1287 cinfo = get_call_info (cfg, sig, sig->pinvoke);
1289 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1290 /* Set the 'struct/union return pointer' location on the stack */
1291 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
1294 for (i = 0; i < n; ++i) {
1295 MonoType *arg_type;
1297 ainfo = cinfo->args + i;
1299 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1300 /* Emit the signature cookie just before the first implicit argument */
1301 emit_sig_cookie (cfg, call, cinfo);
1304 in = call->args [i];
1306 if (sig->hasthis && (i == 0))
1307 arg_type = mono_get_object_type ();
1308 else
1309 arg_type = sig->params [i - sig->hasthis];
1311 arg_type = mini_get_underlying_type (arg_type);
1312 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
1313 emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
1314 else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
1315 emit_pass_long (cfg, call, ainfo, in);
1316 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
1317 emit_pass_double (cfg, call, ainfo, in);
1318 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
1319 emit_pass_float (cfg, call, ainfo, in);
1320 else
1321 emit_pass_other (cfg, call, ainfo, arg_type, in);
1324 /* Handle the case where there are no implicit arguments */
1325 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1326 emit_sig_cookie (cfg, call, cinfo);
1329 call->stack_usage = cinfo->stack_usage + extra_space;
1331 g_free (cinfo);
1334 void
1335 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1337 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
1338 int size = ins->backend.size;
1340 mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, TARGET_SIZEOF_VOID_P);
1343 void
1344 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1346 CallInfo *cinfo = get_call_info (cfg, mono_method_signature_internal (method), FALSE);
1347 MonoType *ret = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
1349 switch (cinfo->ret.storage) {
1350 case ArgInIReg:
1351 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1352 break;
1353 case ArgInIRegPair:
1354 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1355 MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
1356 } else {
1357 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (cfg->ret->dreg), MONO_LVREG_MS (val->dreg));
1358 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (cfg->ret->dreg), MONO_LVREG_LS (val->dreg));
1360 break;
1361 case ArgInFReg:
1362 if (ret->type == MONO_TYPE_R4)
1363 MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
1364 else
1365 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1366 break;
1367 default:
1368 g_assert_not_reached ();
1371 g_assert (cinfo);
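/* Sparc branch conditions indexed by [CompRelation][CompType]; the last column holds the fp branch opcodes */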
1374 int cond_to_sparc_cond [][3] = {
1375 {sparc_be, sparc_be, sparc_fbe},
1376 {sparc_bne, sparc_bne, 0},
1377 {sparc_ble, sparc_ble, sparc_fble},
1378 {sparc_bge, sparc_bge, sparc_fbge},
1379 {sparc_bl, sparc_bl, sparc_fbl},
1380 {sparc_bg, sparc_bg, sparc_fbg},
1381 {sparc_bleu, sparc_bleu, 0},
1382 {sparc_beu, sparc_beu, 0},
1383 {sparc_blu, sparc_blu, sparc_fbl},
1384 {sparc_bgu, sparc_bgu, sparc_fbg}
1387 /* Map opcode to the sparc condition codes */
1388 static SparcCond
1389 opcode_to_sparc_cond (int opcode)
1391 CompRelation rel;
1392 CompType t;
1394 switch (opcode) {
1395 case OP_COND_EXC_OV:
1396 case OP_COND_EXC_IOV:
1397 return sparc_bvs;
1398 case OP_COND_EXC_C:
1399 case OP_COND_EXC_IC:
1400 return sparc_bcs;
1401 case OP_COND_EXC_NO:
1402 case OP_COND_EXC_NC:
1403 NOT_IMPLEMENTED;
1404 default:
1405 rel = mono_opcode_to_cond (opcode);
1406 t = mono_opcode_to_type (opcode, -1);
1408 return cond_to_sparc_cond [rel][t];
1409 break;
1412 return -1;
1415 #define COMPUTE_DISP(ins) \
1416 if (ins->inst_true_bb->native_offset) \
1417 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1418 else { \
1419 disp = 0; \
1420 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1423 #ifdef SPARCV9
1424 #define DEFAULT_ICC sparc_xcc_short
1425 #else
1426 #define DEFAULT_ICC sparc_icc_short
1427 #endif
1429 #ifdef SPARCV9
1430 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1431 do { \
1432 gint32 disp; \
1433 guint32 predict; \
1434 COMPUTE_DISP(ins); \
1435 predict = (disp != 0) ? 1 : 0; \
1436 g_assert (sparc_is_imm19 (disp)); \
1437 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1438 if (filldelay) sparc_nop (code); \
1439 } while (0)
1440 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1441 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1442 do { \
1443 gint32 disp; \
1444 guint32 predict; \
1445 COMPUTE_DISP(ins); \
1446 predict = (disp != 0) ? 1 : 0; \
1447 g_assert (sparc_is_imm19 (disp)); \
1448 sparc_fbranch (code, (annul), cond, disp); \
1449 if (filldelay) sparc_nop (code); \
1450 } while (0)
1451 #else
1452 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1453 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1454 do { \
1455 gint32 disp; \
1456 COMPUTE_DISP(ins); \
1457 g_assert (sparc_is_imm22 (disp)); \
1458 sparc_ ## bop (code, (annul), cond, disp); \
1459 if (filldelay) sparc_nop (code); \
1460 } while (0)
1461 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1462 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1463 #endif
1465 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1466 do { \
1467 gint32 disp; \
1468 guint32 predict; \
1469 COMPUTE_DISP(ins); \
1470 predict = (disp != 0) ? 1 : 0; \
1471 g_assert (sparc_is_imm19 (disp)); \
1472 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1473 if (filldelay) sparc_nop (code); \
1474 } while (0)
1476 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1477 do { \
1478 gint32 disp; \
1479 COMPUTE_DISP(ins); \
1480 g_assert (sparc_is_imm22 (disp)); \
1481 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1482 if (filldelay) sparc_nop (code); \
1483 } while (0)
1485 /* emit an exception if the condition fails */
1487 * We put the exception throwing code out-of-line, at the end of the method
1489 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1490 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1491 MONO_PATCH_INFO_EXC, sexc_name); \
1492 if (mono_hwcap_sparc_is_v9 && ((icc) != sparc_icc_short)) { \
1493 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1495 else { \
1496 sparc_branch (code, 0, cond, 0); \
1498 if (filldelay) sparc_nop (code); \
1499 } while (0);
1501 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1503 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1504 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1505 MONO_PATCH_INFO_EXC, sexc_name); \
1506 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1507 sparc_nop (code); \
1508 } while (0);
1510 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1511 if (sparc_is_imm13 ((ins)->inst_imm)) \
1512 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1513 else { \
1514 sparc_set (code, ins->inst_imm, sparc_o7); \
1515 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1517 } while (0);
1519 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1520 if (sparc_is_imm13 (ins->inst_offset)) \
1521 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1522 else { \
1523 sparc_set (code, ins->inst_offset, sparc_o7); \
1524 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1526 } while (0);
1528 /* max len = 5 */
1529 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1530 guint32 sreg; \
1531 if (ins->inst_imm == 0) \
1532 sreg = sparc_g0; \
1533 else { \
1534 sparc_set (code, ins->inst_imm, sparc_o7); \
1535 sreg = sparc_o7; \
1537 if (!sparc_is_imm13 (ins->inst_offset)) { \
1538 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1539 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1541 else \
1542 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1543 } while (0);
1545 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1546 if (!sparc_is_imm13 (ins->inst_offset)) { \
1547 sparc_set (code, ins->inst_offset, sparc_o7); \
1548 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1550 else \
1551 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1552 } while (0);
1554 #define EMIT_CALL() do { \
1555 if (v64) { \
1556 sparc_set_template (code, sparc_o7); \
1557 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1559 else { \
1560 sparc_call_simple (code, 0); \
1562 sparc_nop (code); \
1563 } while (0);
1566 * A call template is 7 instructions long, so we want to avoid it if possible.
1568 static guint32*
1569 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1571 ERROR_DECL (error);
1572 gpointer target;
1574 /* FIXME: This only works if the target method is already compiled */
1575 if (0 && v64 && !cfg->compile_aot) {
1576 MonoJumpInfo patch_info;
1578 patch_info.type = patch_type;
1579 patch_info.data.target = data;
1581 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE, error);
1582 mono_error_raise_exception_deprecated (error); /* FIXME: don't raise here */
1584 /* FIXME: Add optimizations if the target is close enough */
1585 sparc_set (code, target, sparc_o7);
1586 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1587 sparc_nop (code);
1589 else {
1590 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
1591 EMIT_CALL ();
1594 return code;
1597 void
1598 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
1602 void
1603 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1605 MonoInst *ins, *n, *last_ins = NULL;
1606 ins = bb->code;
1608 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1609 switch (ins->opcode) {
1610 case OP_MUL_IMM:
1611 /* remove unnecessary multiplication with 1 */
1612 if (ins->inst_imm == 1) {
1613 if (ins->dreg != ins->sreg1) {
1614 ins->opcode = OP_MOVE;
1615 } else {
1616 MONO_DELETE_INS (bb, ins);
1617 continue;
1620 break;
1621 #ifndef SPARCV9
1622 case OP_LOAD_MEMBASE:
1623 case OP_LOADI4_MEMBASE:
1625 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1626 * OP_LOAD_MEMBASE offset(basereg), reg
1628 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1629 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1630 ins->inst_basereg == last_ins->inst_destbasereg &&
1631 ins->inst_offset == last_ins->inst_offset) {
1632 if (ins->dreg == last_ins->sreg1) {
1633 MONO_DELETE_INS (bb, ins);
1634 continue;
1635 } else {
1636 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1637 ins->opcode = OP_MOVE;
1638 ins->sreg1 = last_ins->sreg1;
1642 * Note: reg1 must be different from the basereg in the second load
1643 * OP_LOAD_MEMBASE offset(basereg), reg1
1644 * OP_LOAD_MEMBASE offset(basereg), reg2
1645 * -->
1646 * OP_LOAD_MEMBASE offset(basereg), reg1
1647 * OP_MOVE reg1, reg2
1649 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1650 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1651 ins->inst_basereg != last_ins->dreg &&
1652 ins->inst_basereg == last_ins->inst_basereg &&
1653 ins->inst_offset == last_ins->inst_offset) {
1655 if (ins->dreg == last_ins->dreg) {
1656 MONO_DELETE_INS (bb, ins);
1657 continue;
1658 } else {
1659 ins->opcode = OP_MOVE;
1660 ins->sreg1 = last_ins->dreg;
1663 //g_assert_not_reached ();
1665 #if 0
1667 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1668 * OP_LOAD_MEMBASE offset(basereg), reg
1669 * -->
1670 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1671 * OP_ICONST reg, imm
1673 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1674 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1675 ins->inst_basereg == last_ins->inst_destbasereg &&
1676 ins->inst_offset == last_ins->inst_offset) {
1677 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1678 ins->opcode = OP_ICONST;
1679 ins->inst_c0 = last_ins->inst_imm;
1680 g_assert_not_reached (); // check this rule
1681 #endif
1683 break;
1684 #endif
1685 case OP_LOADI1_MEMBASE:
1686 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1687 ins->inst_basereg == last_ins->inst_destbasereg &&
1688 ins->inst_offset == last_ins->inst_offset) {
1689 if (ins->dreg == last_ins->sreg1) {
1690 MONO_DELETE_INS (bb, ins);
1691 continue;
1692 } else {
1693 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1694 ins->opcode = OP_MOVE;
1695 ins->sreg1 = last_ins->sreg1;
1698 break;
1699 case OP_LOADI2_MEMBASE:
1700 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1701 ins->inst_basereg == last_ins->inst_destbasereg &&
1702 ins->inst_offset == last_ins->inst_offset) {
1703 if (ins->dreg == last_ins->sreg1) {
1704 MONO_DELETE_INS (bb, ins);
1705 continue;
1706 } else {
1707 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1708 ins->opcode = OP_MOVE;
1709 ins->sreg1 = last_ins->sreg1;
1712 break;
1713 case OP_STOREI4_MEMBASE_IMM:
1714 /* Convert pairs of 0 stores to a dword 0 store */
1715 /* Used when initializing temporaries */
1716 /* We know sparc_fp is dword aligned */
1717 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1718 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1719 (ins->inst_destbasereg == sparc_fp) &&
1720 (ins->inst_offset < 0) &&
1721 ((ins->inst_offset % 8) == 0) &&
1722 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1723 (ins->inst_imm == 0) &&
1724 (last_ins->inst_imm == 0)) {
1725 if (mono_hwcap_sparc_is_v9) {
1726 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1727 last_ins->inst_offset = ins->inst_offset;
1728 MONO_DELETE_INS (bb, ins);
1729 continue;
1732 break;
1733 case OP_IBEQ:
1734 case OP_IBNE_UN:
1735 case OP_IBLT:
1736 case OP_IBGT:
1737 case OP_IBGE:
1738 case OP_IBLE:
1739 case OP_COND_EXC_EQ:
1740 case OP_COND_EXC_GE:
1741 case OP_COND_EXC_GT:
1742 case OP_COND_EXC_LE:
1743 case OP_COND_EXC_LT:
1744 case OP_COND_EXC_NE_UN:
1746 * Convert compare with zero+branch to BRcc
1749 * This only works in 64 bit mode, since it examines all 64
1750 * bits of the register.
1751 * Only do this if the method is small since BPr only has a 16bit
1752 * displacement.
1754 if (v64 && (cfg->header->code_size < 10000) && last_ins &&
1755 (last_ins->opcode == OP_COMPARE_IMM) &&
1756 (last_ins->inst_imm == 0)) {
1757 switch (ins->opcode) {
1758 case OP_IBEQ:
1759 ins->opcode = OP_SPARC_BRZ;
1760 break;
1761 case OP_IBNE_UN:
1762 ins->opcode = OP_SPARC_BRNZ;
1763 break;
1764 case OP_IBLT:
1765 ins->opcode = OP_SPARC_BRLZ;
1766 break;
1767 case OP_IBGT:
1768 ins->opcode = OP_SPARC_BRGZ;
1769 break;
1770 case OP_IBGE:
1771 ins->opcode = OP_SPARC_BRGEZ;
1772 break;
1773 case OP_IBLE:
1774 ins->opcode = OP_SPARC_BRLEZ;
1775 break;
1776 case OP_COND_EXC_EQ:
1777 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1778 break;
1779 case OP_COND_EXC_GE:
1780 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1781 break;
1782 case OP_COND_EXC_GT:
1783 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1784 break;
1785 case OP_COND_EXC_LE:
1786 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1787 break;
1788 case OP_COND_EXC_LT:
1789 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1790 break;
1791 case OP_COND_EXC_NE_UN:
1792 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1793 break;
1794 default:
1795 g_assert_not_reached ();
1797 ins->sreg1 = last_ins->sreg1;
1798 *last_ins = *ins;
1799 MONO_DELETE_INS (bb, ins);
1800 continue;
1802 break;
1803 case OP_MOVE:
1805 * OP_MOVE reg, reg
1807 if (ins->dreg == ins->sreg1) {
1808 MONO_DELETE_INS (bb, ins);
1809 continue;
1812 * OP_MOVE sreg, dreg
1813 * OP_MOVE dreg, sreg
1815 if (last_ins && last_ins->opcode == OP_MOVE &&
1816 ins->sreg1 == last_ins->dreg &&
1817 ins->dreg == last_ins->sreg1) {
1818 MONO_DELETE_INS (bb, ins);
1819 continue;
1821 break;
1823 last_ins = ins;
1824 ins = ins->next;
1826 bb->last_ins = last_ins;
1829 void
1830 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
1832 switch (ins->opcode) {
1833 case OP_LNEG:
1834 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), 0, MONO_LVREG_LS (ins->sreg1));
1835 MONO_EMIT_NEW_BIALU (cfg, OP_SBB, MONO_LVREG_MS (ins->dreg), 0, MONO_LVREG_MS (ins->sreg1));
1836 NULLIFY_INS (ins);
1837 break;
1838 default:
1839 break;
1843 void
1844 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1848 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
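/* Patch the branch, call, sethi or sparc_set sequence at CODE so that it refers to TARGET */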
1850 static void
1851 sparc_patch (guint32 *code, const gpointer target)
1853 guint32 *c = code;
1854 guint32 ins = *code;
1855 guint32 op = ins >> 30;
1856 guint32 op2 = (ins >> 22) & 0x7;
1857 guint32 rd = (ins >> 25) & 0x1f;
1858 guint8* target8 = (guint8*)target;
1859 gint64 disp = (target8 - (guint8*)code) >> 2;
1860 int reg;
1862 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1864 if ((op == 0) && (op2 == 2)) {
1865 if (!sparc_is_imm22 (disp))
1866 NOT_IMPLEMENTED;
1867 /* Bicc */
1868 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
1870 else if ((op == 0) && (op2 == 1)) {
1871 if (!sparc_is_imm19 (disp))
1872 NOT_IMPLEMENTED;
1873 /* BPcc */
1874 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
1876 else if ((op == 0) && (op2 == 3)) {
1877 if (!sparc_is_imm16 (disp))
1878 NOT_IMPLEMENTED;
1879 /* BPr */
1880 *code &= ~(0x180000 | 0x3fff);
1881 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
1883 else if ((op == 0) && (op2 == 6)) {
1884 if (!sparc_is_imm22 (disp))
1885 NOT_IMPLEMENTED;
1886 /* FBicc */
1887 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
1889 else if ((op == 0) && (op2 == 4)) {
1890 guint32 ins2 = code [1];
1892 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
1893 /* sethi followed by or */
1894 guint32 *p = code;
1895 sparc_set (p, target8, rd);
1896 while (p <= (code + 1))
1897 sparc_nop (p);
1899 else if (ins2 == 0x01000000) {
1900 /* sethi followed by nop */
1901 guint32 *p = code;
1902 sparc_set (p, target8, rd);
1903 while (p <= (code + 1))
1904 sparc_nop (p);
1906 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
1907 /* sethi followed by load/store */
1908 #ifndef SPARCV9
1909 guint32 t = (guint32)target8;
1910 *code &= ~(0x3fffff);
1911 *code |= (t >> 10);
1912 *(code + 1) &= ~(0x3ff);
1913 *(code + 1) |= (t & 0x3ff);
1914 #endif
1916 else if (v64 &&
1917 (sparc_inst_rd (ins) == sparc_g1) &&
1918 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
1919 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
1920 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
1922 /* sparc_set */
1923 guint32 *p = c;
1924 reg = sparc_inst_rd (c [1]);
1925 sparc_set (p, target8, reg);
1926 while (p < (c + 6))
1927 sparc_nop (p);
1929 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
1930 (sparc_inst_imm (ins2))) {
1931 /* sethi followed by jmpl */
1932 #ifndef SPARCV9
1933 guint32 t = (guint32)target8;
1934 *code &= ~(0x3fffff);
1935 *code |= (t >> 10);
1936 *(code + 1) &= ~(0x3ff);
1937 *(code + 1) |= (t & 0x3ff);
1938 #endif
1940 else
1941 NOT_IMPLEMENTED;
1943 else if (op == 01) {
1944 gint64 disp = (target8 - (guint8*)code) >> 2;
1946 if (!sparc_is_imm30 (disp))
1947 NOT_IMPLEMENTED;
1948 sparc_call_simple (code, target8 - (guint8*)code);
1950 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
1951 /* mov imm, reg */
1952 g_assert (sparc_is_imm13 (target8));
1953 *code &= ~(0x1fff);
1954 *code |= (guint32)target8;
1956 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
1957 /* sparc_set case 5. */
1958 guint32 *p = c;
1960 g_assert (v64);
1961 reg = sparc_inst_rd (c [3]);
1962 sparc_set (p, target, reg);
1963 while (p < (c + 6))
1964 sparc_nop (p);
1966 else
1967 NOT_IMPLEMENTED;
1969 // g_print ("patched with 0x%08x\n", ins);
1973 * mono_sparc_emit_save_lmf:
1975 * Emit the code necessary to push a new entry onto the lmf stack. Used by
1976 * trampolines as well.
1978 guint32*
1979 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
1981 /* Save lmf_addr */
1982 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
1983 /* Save previous_lmf */
1984 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
1985 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1986 /* Set new lmf */
1987 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
1988 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
1990 return code;
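/* Emit the code to pop the current entry off the lmf stack by restoring the previous_lmf pointer */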
1993 guint32*
1994 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
1996 /* Load previous_lmf */
1997 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
1998 /* Load lmf_addr */
1999 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2000 /* *(lmf) = previous_lmf */
2001 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
2002 return code;
2005 static guint32*
2006 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2009 * Since register windows are saved to the current value of %sp, we need to
2010 * set the sp field in the lmf before the call, not in the prolog.
2012 if (cfg->method->save_lmf) {
2013 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2015 /* Save sp */
2016 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
2019 return code;
2022 static guint32*
2023 emit_vret_token (MonoInst *ins, guint32 *code)
2025 MonoCallInst *call = (MonoCallInst*)ins;
2026 guint32 size;
2029 * The sparc ABI requires that calls to functions which return a structure
2030 * contain an additional unimpl instruction which is checked by the callee.
2032 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2033 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2034 size = mini_type_stack_size (call->signature->ret, NULL);
2035 else
2036 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
2037 sparc_unimp (code, size & 0xfff);
2040 return code;
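/*
 * For illustration, a pinvoke call site produced with the unimp marker
 * above looks roughly like:
 *
 *   call  <native function>
 *    nop                   ! delay slot
 *   unimp <struct size>    ! vret token checked by the callee
 *
 * a struct-returning V8 callee returns with "jmp %i7 + 12" instead of the
 * usual "+ 8", skipping over the marker.
 */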
2043 static guint32*
2044 emit_move_return_value (MonoInst *ins, guint32 *code)
2046 /* Move return value to the target register */
2047 /* FIXME: do more things in the local reg allocator */
2048 switch (ins->opcode) {
2049 case OP_VOIDCALL:
2050 case OP_VOIDCALL_REG:
2051 case OP_VOIDCALL_MEMBASE:
2052 break;
2053 case OP_CALL:
2054 case OP_CALL_REG:
2055 case OP_CALL_MEMBASE:
2056 g_assert (ins->dreg == sparc_o0);
2057 break;
2058 case OP_LCALL:
2059 case OP_LCALL_REG:
2060 case OP_LCALL_MEMBASE:
2062 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2063 * in inssel-long32.brg.
2065 #ifdef SPARCV9
2066 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2067 #else
2068 g_assert (ins->dreg == sparc_o1);
2069 #endif
2070 break;
2071 case OP_FCALL:
2072 case OP_FCALL_REG:
2073 case OP_FCALL_MEMBASE:
2074 #ifdef SPARCV9
2075 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2076 sparc_fmovs (code, sparc_f0, ins->dreg);
2077 sparc_fstod (code, ins->dreg, ins->dreg);
2079 else
2080 sparc_fmovd (code, sparc_f0, ins->dreg);
2081 #else
2082 sparc_fmovs (code, sparc_f0, ins->dreg);
2083 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2084 sparc_fstod (code, ins->dreg, ins->dreg);
2085 else
2086 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2087 #endif
2088 break;
2089 case OP_VCALL:
2090 case OP_VCALL_REG:
2091 case OP_VCALL_MEMBASE:
2092 case OP_VCALL2:
2093 case OP_VCALL2_REG:
2094 case OP_VCALL2_MEMBASE:
2095 break;
2096 default:
2097 NOT_IMPLEMENTED;
2100 return code;
2104 * emit_load_volatile_arguments:
2106 * Load volatile arguments from the stack to the original input registers.
2107 * Required before a tailcall.
2109 static guint32*
2110 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2112 MonoMethod *method = cfg->method;
2113 MonoMethodSignature *sig;
2114 MonoInst *inst;
2115 CallInfo *cinfo;
2116 guint32 i, ireg;
2118 /* FIXME: Generate intermediate code instead */
2120 sig = mono_method_signature_internal (method);
2122 cinfo = get_call_info (cfg, sig, FALSE);
2124 /* This is the opposite of the code in emit_prolog */
2126 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2127 ArgInfo *ainfo = cinfo->args + i;
2128 gint32 stack_offset;
2129 MonoType *arg_type;
2131 inst = cfg->args [i];
2133 if (sig->hasthis && (i == 0))
2134 arg_type = mono_get_object_type ();
2135 else
2136 arg_type = sig->params [i - sig->hasthis];
2138 stack_offset = ainfo->offset + ARGS_OFFSET;
2139 ireg = sparc_i0 + ainfo->reg;
2141 if (ainfo->storage == ArgInSplitRegStack) {
2142 g_assert (inst->opcode == OP_REGOFFSET);
2144 if (!sparc_is_imm13 (stack_offset))
2145 NOT_IMPLEMENTED;
2146 sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
2149 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2150 if (ainfo->storage == ArgInIRegPair) {
2151 if (!sparc_is_imm13 (inst->inst_offset + 4))
2152 NOT_IMPLEMENTED;
2153 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2154 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2156 else
2157 if (ainfo->storage == ArgInSplitRegStack) {
2158 if (stack_offset != inst->inst_offset) {
2159 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2160 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2161 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2165 else
2166 if (ainfo->storage == ArgOnStackPair) {
2167 if (stack_offset != inst->inst_offset) {
2168 /* stack_offset is not dword aligned, so we need to make a copy */
2169 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2170 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2172 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2173 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2177 else
2178 g_assert_not_reached ();
2180 else
2181 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2182 /* Argument in register, but needs to be saved to the stack */
2183 if (!sparc_is_imm13 (stack_offset))
2184 NOT_IMPLEMENTED;
2185 if ((stack_offset - ARGS_OFFSET) & 0x1)
2186 /* FIXME: Is this ldsb or ldub ? */
2187 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2188 else
2189 if ((stack_offset - ARGS_OFFSET) & 0x2)
2190 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2191 else
2192 if ((stack_offset - ARGS_OFFSET) & 0x4)
2193 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2194 else {
2195 if (v64)
2196 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2197 else
2198 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2201 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2202 /* Argument in regpair, but needs to be saved to the stack */
2203 if (!sparc_is_imm13 (inst->inst_offset + 4))
2204 NOT_IMPLEMENTED;
2205 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2206 sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2208 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2209 NOT_IMPLEMENTED;
2211 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2212 NOT_IMPLEMENTED;
2215 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2216 if (inst->opcode == OP_REGVAR)
2217 /* FIXME: Load the argument into memory */
2218 NOT_IMPLEMENTED;
2221 g_free (cinfo);
2223 return code;
2227 * mono_sparc_is_virtual_call:
2229 * Determine whether the instruction at CODE is a virtual call.
2231 gboolean
2232 mono_sparc_is_virtual_call (guint32 *code)
2234 guint32 buf[1];
2235 guint32 *p;
2237 p = buf;
2239 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2241 * Register indirect call. If it is a virtual call, then the
2242 * instruction in the delay slot is a special kind of nop.
2245 /* Construct special nop */
2246 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
2247 p --;
2249 if (code [1] == p [0])
2250 return TRUE;
2253 return FALSE;
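/*
 * For illustration, a virtual call site (see mono_arch_output_basic_block)
 * therefore looks roughly like:
 *
 *   jmpl  [%reg], %o7          ! register indirect call
 *    or   %g0, 0xca, %g0       ! "special nop" marking a virtual call
 *
 * while a non-virtual indirect call carries a plain nop in the delay slot.
 */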
2256 #define CMP_SIZE 3
2257 #define BR_SMALL_SIZE 2
2258 #define BR_LARGE_SIZE 2
2259 #define JUMP_IMM_SIZE 5
2260 #define ENABLE_WRONG_METHOD_CHECK 0
2263 * LOCKING: called with the domain lock held
2265 gpointer
2266 mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
2267 gpointer fail_tramp)
2269 int i;
2270 int size = 0;
2271 guint32 *code, *start;
2273 for (i = 0; i < count; ++i) {
2274 MonoIMTCheckItem *item = imt_entries [i];
2275 if (item->is_equals) {
2276 if (item->check_target_idx) {
2277 if (!item->compare_done)
2278 item->chunk_size += CMP_SIZE;
2279 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
2280 } else {
2281 if (fail_tramp)
2282 item->chunk_size += 16;
2283 item->chunk_size += JUMP_IMM_SIZE;
2284 #if ENABLE_WRONG_METHOD_CHECK
2285 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
2286 #endif
2288 } else {
2289 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
2290 imt_entries [item->check_target_idx]->compare_done = TRUE;
2292 size += item->chunk_size;
2294 if (fail_tramp)
2295 code = mono_method_alloc_generic_virtual_trampoline (domain, size * 4);
2296 else
2297 code = mono_domain_code_reserve (domain, size * 4);
2298 start = code;
2299 for (i = 0; i < count; ++i) {
2300 MonoIMTCheckItem *item = imt_entries [i];
2301 item->code_target = (guint8*)code;
2302 if (item->is_equals) {
2303 gboolean fail_case = !item->check_target_idx && fail_tramp;
2305 if (item->check_target_idx || fail_case) {
2306 if (!item->compare_done || fail_case) {
2307 sparc_set (code, (guint32)item->key, sparc_g5);
2308 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2310 item->jmp_code = (guint8*)code;
2311 sparc_branch (code, 0, sparc_bne, 0);
2312 sparc_nop (code);
2313 if (item->has_target_code) {
2314 sparc_set (code, item->value.target_code, sparc_g5);
2315 } else {
2316 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2317 sparc_ld (code, sparc_g5, 0, sparc_g5);
2319 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2320 sparc_nop (code);
2322 if (fail_case) {
2323 sparc_patch (item->jmp_code, code);
2324 sparc_set (code, fail_tramp, sparc_g5);
2325 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2326 sparc_nop (code);
2327 item->jmp_code = NULL;
2329 } else {
2330 /* enable the commented code to assert on wrong method */
2331 #if ENABLE_WRONG_METHOD_CHECK
2332 g_assert_not_reached ();
2333 #endif
2334 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2335 sparc_ld (code, sparc_g5, 0, sparc_g5);
2336 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2337 sparc_nop (code);
2338 #if ENABLE_WRONG_METHOD_CHECK
2339 g_assert_not_reached ();
2340 #endif
2342 } else {
2343 sparc_set (code, (guint32)item->key, sparc_g5);
2344 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2345 item->jmp_code = (guint8*)code;
2346 sparc_branch (code, 0, sparc_beu, 0);
2347 sparc_nop (code);
2350 /* patch the branches to get to the target items */
2351 for (i = 0; i < count; ++i) {
2352 MonoIMTCheckItem *item = imt_entries [i];
2353 if (item->jmp_code) {
2354 if (item->check_target_idx) {
2355 sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
2360 mono_arch_flush_icache ((guint8*)start, (code - start));
2361 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL));
2363 UnlockedAdd (&mono_stats.imt_trampolines_size, (code - start));
2364 g_assert (code - start <= size);
2366 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);
2368 return start;
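/*
 * A rough sketch of the code emitted above for a single equality item with
 * a check_target_idx (the fail_tramp/target_code variants only differ in
 * how %g5 is loaded):
 *
 *   set   key, %g5
 *   cmp   %imt_reg, %g5
 *   bne   <next item>
 *    nop
 *   set   &vtable->vtable [slot], %g5
 *   ld    [%g5], %g5
 *   jmpl  %g5, %g0, %g0
 *    nop
 */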
2371 MonoMethod*
2372 mono_arch_find_imt_method (host_mgreg_t *regs, guint8 *code)
2374 #ifdef SPARCV9
2375 g_assert_not_reached ();
2376 #endif
2378 return (MonoMethod*)regs [sparc_g1];
2381 gpointer
2382 mono_arch_get_this_arg_from_call (host_mgreg_t *regs, guint8 *code)
2384 mono_sparc_flushw ();
2386 return (gpointer)regs [sparc_o0];
2390 * Some conventions used in the following code.
2391 * - The only scratch registers we have are o7 and g1. We try to
2392 * stick to o7 when we can, and use g1 when necessary.
2395 void
2396 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2398 MonoInst *ins;
2399 MonoCallInst *call;
2400 guint offset;
2401 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2402 MonoInst *last_ins = NULL;
2403 int max_len, cpos;
2404 const char *spec;
2406 if (cfg->verbose_level > 2)
2407 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2409 cpos = bb->max_offset;
2411 MONO_BB_FOR_EACH_INS (bb, ins) {
2412 guint8* code_start;
2414 offset = (guint8*)code - cfg->native_code;
2415 spec = ins_get_spec (ins->opcode);
2416 max_len = ins_get_size (ins->opcode);
2417 code = realloc_code (cfg, max_len);
2418 code_start = (guint8*)code;
2419 // if (ins->cil_code)
2420 // g_print ("cil code\n");
2421 mono_debug_record_line_number (cfg, ins, offset);
2423 switch (ins->opcode) {
2424 case OP_STOREI1_MEMBASE_IMM:
2425 EMIT_STORE_MEMBASE_IMM (ins, stb);
2426 break;
2427 case OP_STOREI2_MEMBASE_IMM:
2428 EMIT_STORE_MEMBASE_IMM (ins, sth);
2429 break;
2430 case OP_STORE_MEMBASE_IMM:
2431 EMIT_STORE_MEMBASE_IMM (ins, sti);
2432 break;
2433 case OP_STOREI4_MEMBASE_IMM:
2434 EMIT_STORE_MEMBASE_IMM (ins, st);
2435 break;
2436 case OP_STOREI8_MEMBASE_IMM:
2437 #ifdef SPARCV9
2438 EMIT_STORE_MEMBASE_IMM (ins, stx);
2439 #else
2440 /* Only generated by peephole opts */
2441 g_assert ((ins->inst_offset % 8) == 0);
2442 g_assert (ins->inst_imm == 0);
2443 EMIT_STORE_MEMBASE_IMM (ins, stx);
2444 #endif
2445 break;
2446 case OP_STOREI1_MEMBASE_REG:
2447 EMIT_STORE_MEMBASE_REG (ins, stb);
2448 break;
2449 case OP_STOREI2_MEMBASE_REG:
2450 EMIT_STORE_MEMBASE_REG (ins, sth);
2451 break;
2452 case OP_STOREI4_MEMBASE_REG:
2453 EMIT_STORE_MEMBASE_REG (ins, st);
2454 break;
2455 case OP_STOREI8_MEMBASE_REG:
2456 #ifdef SPARCV9
2457 EMIT_STORE_MEMBASE_REG (ins, stx);
2458 #else
2459 /* Only used by OP_MEMSET */
2460 EMIT_STORE_MEMBASE_REG (ins, std);
2461 #endif
2462 break;
2463 case OP_STORE_MEMBASE_REG:
2464 EMIT_STORE_MEMBASE_REG (ins, sti);
2465 break;
2466 case OP_LOADU4_MEM:
2467 sparc_set (code, ins->inst_c0, ins->dreg);
2468 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2469 break;
2470 case OP_LOADI4_MEMBASE:
2471 #ifdef SPARCV9
2472 EMIT_LOAD_MEMBASE (ins, ldsw);
2473 #else
2474 EMIT_LOAD_MEMBASE (ins, ld);
2475 #endif
2476 break;
2477 case OP_LOADU4_MEMBASE:
2478 EMIT_LOAD_MEMBASE (ins, ld);
2479 break;
2480 case OP_LOADU1_MEMBASE:
2481 EMIT_LOAD_MEMBASE (ins, ldub);
2482 break;
2483 case OP_LOADI1_MEMBASE:
2484 EMIT_LOAD_MEMBASE (ins, ldsb);
2485 break;
2486 case OP_LOADU2_MEMBASE:
2487 EMIT_LOAD_MEMBASE (ins, lduh);
2488 break;
2489 case OP_LOADI2_MEMBASE:
2490 EMIT_LOAD_MEMBASE (ins, ldsh);
2491 break;
2492 case OP_LOAD_MEMBASE:
2493 #ifdef SPARCV9
2494 EMIT_LOAD_MEMBASE (ins, ldx);
2495 #else
2496 EMIT_LOAD_MEMBASE (ins, ld);
2497 #endif
2498 break;
2499 #ifdef SPARCV9
2500 case OP_LOADI8_MEMBASE:
2501 EMIT_LOAD_MEMBASE (ins, ldx);
2502 break;
2503 #endif
2504 case OP_ICONV_TO_I1:
2505 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2506 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2507 break;
2508 case OP_ICONV_TO_I2:
2509 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2510 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2511 break;
2512 case OP_ICONV_TO_U1:
2513 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2514 break;
2515 case OP_ICONV_TO_U2:
2516 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2517 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2518 break;
2519 case OP_LCONV_TO_OVF_U4:
2520 case OP_ICONV_TO_OVF_U4:
2521 /* Only used on V9 */
2522 sparc_cmp_imm (code, ins->sreg1, 0);
2523 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2524 MONO_PATCH_INFO_EXC, "OverflowException");
2525 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2526 /* Delay slot */
2527 sparc_set (code, 1, sparc_o7);
2528 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2529 sparc_cmp (code, ins->sreg1, sparc_o7);
2530 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2531 MONO_PATCH_INFO_EXC, "OverflowException");
2532 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2533 sparc_nop (code);
2534 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2535 break;
2536 case OP_LCONV_TO_OVF_I4_UN:
2537 case OP_ICONV_TO_OVF_I4_UN:
2538 /* Only used on V9 */
2539 NOT_IMPLEMENTED;
2540 break;
2541 case OP_COMPARE:
2542 case OP_LCOMPARE:
2543 case OP_ICOMPARE:
2544 sparc_cmp (code, ins->sreg1, ins->sreg2);
2545 break;
2546 case OP_COMPARE_IMM:
2547 case OP_ICOMPARE_IMM:
2548 if (sparc_is_imm13 (ins->inst_imm))
2549 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2550 else {
2551 sparc_set (code, ins->inst_imm, sparc_o7);
2552 sparc_cmp (code, ins->sreg1, sparc_o7);
2554 break;
2555 case OP_BREAK:
2557 * gdb does not like encountering 'ta 1' in the debugged code. So
2558 * instead of emitting a trap, we emit a call to a C function and place a
2559 * breakpoint there.
2561 //sparc_ta (code, 1);
2562 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_JIT_ICALL_ID,
2563 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break));
2564 EMIT_CALL();
2565 break;
2566 case OP_ADDCC:
2567 case OP_IADDCC:
2568 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2569 break;
2570 case OP_IADD:
2571 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2572 break;
2573 case OP_ADDCC_IMM:
2574 case OP_ADD_IMM:
2575 case OP_IADD_IMM:
2576 /* according to inssel-long32.brg, this should set cc */
2577 EMIT_ALU_IMM (ins, add, TRUE);
2578 break;
2579 case OP_ADC:
2580 case OP_IADC:
2581 /* according to inssel-long32.brg, this should set cc */
2582 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2583 break;
2584 case OP_ADC_IMM:
2585 case OP_IADC_IMM:
2586 EMIT_ALU_IMM (ins, addx, TRUE);
2587 break;
2588 case OP_SUBCC:
2589 case OP_ISUBCC:
2590 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2591 break;
2592 case OP_ISUB:
2593 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2594 break;
2595 case OP_SUBCC_IMM:
2596 case OP_SUB_IMM:
2597 case OP_ISUB_IMM:
2598 /* according to inssel-long32.brg, this should set cc */
2599 EMIT_ALU_IMM (ins, sub, TRUE);
2600 break;
2601 case OP_SBB:
2602 case OP_ISBB:
2603 /* according to inssel-long32.brg, this should set cc */
2604 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2605 break;
2606 case OP_SBB_IMM:
2607 case OP_ISBB_IMM:
2608 EMIT_ALU_IMM (ins, subx, TRUE);
2609 break;
2610 case OP_IAND:
2611 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2612 break;
2613 case OP_AND_IMM:
2614 case OP_IAND_IMM:
2615 EMIT_ALU_IMM (ins, and, FALSE);
2616 break;
2617 case OP_IDIV:
2618 /* Sign extend sreg1 into %y */
2619 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2620 sparc_wry (code, sparc_o7, sparc_g0);
2621 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2622 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2623 break;
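/*
 * Note on the sequence above: the V8 sdiv instruction divides the 64 bit
 * value %y:%rs1 by %rs2, so %y must first be loaded with the sign
 * extension of the dividend (and with zero for the unsigned cases below).
 * For example, for -6 / 2:
 *
 *   sra  %sreg1, 31, %o7          ! %o7 = 0xffffffff
 *   wr   %o7, %g0, %y             ! dividend is now 0xffffffff:fffffffa
 *   sdiv %sreg1, %sreg2, %dreg    ! %dreg = -3
 */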
2624 case OP_IDIV_UN:
2625 sparc_wry (code, sparc_g0, sparc_g0);
2626 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2627 break;
2628 case OP_DIV_IMM:
2629 case OP_IDIV_IMM: {
2630 int i, imm;
2632 /* Transform division into a shift */
2633 for (i = 1; i < 30; ++i) {
2634 imm = (1 << i);
2635 if (ins->inst_imm == imm)
2636 break;
2638 if (i < 30) {
2639 if (i == 1) {
2640 /* gcc 2.95.3 */
2641 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2642 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2643 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2645 else {
2646 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2647 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2648 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2649 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2650 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2653 else {
2654 /* Sign extend sreg1 into %y */
2655 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2656 sparc_wry (code, sparc_o7, sparc_g0);
2657 EMIT_ALU_IMM (ins, sdiv, TRUE);
2658 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2660 break;
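/*
 * Worked example of the shift based signed division above, for
 * inst_imm == 4 (i == 2) and a dividend of -7:
 *
 *   sra  %sreg1, 31, %o7      ! %o7 = 0xffffffff
 *   srl  %o7, 30, %o7         ! %o7 = 3 (divisor - 1, only for negatives)
 *   add  %sreg1, %o7, %dreg   ! -7 + 3 = -4
 *   sra  %dreg, 2, %dreg      ! -4 >> 2 = -1, truncation towards zero
 *
 * without the bias the plain arithmetic shift would yield -2.
 */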
2662 case OP_IDIV_UN_IMM:
2663 sparc_wry (code, sparc_g0, sparc_g0);
2664 EMIT_ALU_IMM (ins, udiv, FALSE);
2665 break;
2666 case OP_IREM:
2667 /* Sign extend sreg1 into %y */
2668 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2669 sparc_wry (code, sparc_o7, sparc_g0);
2670 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2671 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2672 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2673 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2674 break;
2675 case OP_IREM_UN:
2676 sparc_wry (code, sparc_g0, sparc_g0);
2677 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2678 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2679 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2680 break;
2681 case OP_REM_IMM:
2682 case OP_IREM_IMM:
2683 /* Sign extend sreg1 into %y */
2684 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2685 sparc_wry (code, sparc_o7, sparc_g0);
2686 if (!sparc_is_imm13 (ins->inst_imm)) {
2687 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2688 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2689 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2690 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2692 else {
2693 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2694 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2695 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2697 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2698 break;
2699 case OP_IREM_UN_IMM:
2700 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2701 sparc_wry (code, sparc_g0, sparc_g0);
2702 sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2703 sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
2704 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2705 break;
2706 case OP_IOR:
2707 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2708 break;
2709 case OP_OR_IMM:
2710 case OP_IOR_IMM:
2711 EMIT_ALU_IMM (ins, or, FALSE);
2712 break;
2713 case OP_IXOR:
2714 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2715 break;
2716 case OP_XOR_IMM:
2717 case OP_IXOR_IMM:
2718 EMIT_ALU_IMM (ins, xor, FALSE);
2719 break;
2720 case OP_ISHL:
2721 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2722 break;
2723 case OP_SHL_IMM:
2724 case OP_ISHL_IMM:
2725 if (ins->inst_imm < (1 << 5))
2726 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2727 else {
2728 sparc_set (code, ins->inst_imm, sparc_o7);
2729 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2731 break;
2732 case OP_ISHR:
2733 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2734 break;
2735 case OP_ISHR_IMM:
2736 case OP_SHR_IMM:
2737 if (ins->inst_imm < (1 << 5))
2738 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2739 else {
2740 sparc_set (code, ins->inst_imm, sparc_o7);
2741 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2743 break;
2744 case OP_SHR_UN_IMM:
2745 case OP_ISHR_UN_IMM:
2746 if (ins->inst_imm < (1 << 5))
2747 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2748 else {
2749 sparc_set (code, ins->inst_imm, sparc_o7);
2750 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2752 break;
2753 case OP_ISHR_UN:
2754 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2755 break;
2756 case OP_LSHL:
2757 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2758 break;
2759 case OP_LSHL_IMM:
2760 if (ins->inst_imm < (1 << 6))
2761 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2762 else {
2763 sparc_set (code, ins->inst_imm, sparc_o7);
2764 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2766 break;
2767 case OP_LSHR:
2768 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2769 break;
2770 case OP_LSHR_IMM:
2771 if (ins->inst_imm < (1 << 6))
2772 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2773 else {
2774 sparc_set (code, ins->inst_imm, sparc_o7);
2775 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2777 break;
2778 case OP_LSHR_UN:
2779 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2780 break;
2781 case OP_LSHR_UN_IMM:
2782 if (ins->inst_imm < (1 << 6))
2783 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2784 else {
2785 sparc_set (code, ins->inst_imm, sparc_o7);
2786 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2788 break;
2789 case OP_INOT:
2790 /* can't use sparc_not */
2791 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2792 break;
2793 case OP_INEG:
2794 /* can't use sparc_neg */
2795 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2796 break;
2797 case OP_IMUL:
2798 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2799 break;
2800 case OP_IMUL_IMM:
2801 case OP_MUL_IMM: {
2802 int i, imm;
2804 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2805 break;
2807 /* Transform multiplication into a shift */
2808 for (i = 0; i < 30; ++i) {
2809 imm = (1 << i);
2810 if (ins->inst_imm == imm)
2811 break;
2813 if (i < 30)
2814 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2815 else
2816 EMIT_ALU_IMM (ins, smul, FALSE);
2817 break;
2819 case OP_IMUL_OVF:
2820 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2821 sparc_rdy (code, sparc_g1);
2822 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2823 sparc_cmp (code, sparc_g1, sparc_o7);
2824 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2825 break;
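/*
 * Note on the overflow check above: smul leaves the upper 32 bits of the
 * 64 bit product in %y. The product fits in a signed 32 bit integer
 * exactly when that upper word equals the sign extension of the lower
 * word, e.g. 0x10000 * 0x10000 gives a low word of 0 while %y == 1, so
 * the bne fires and OverflowException is thrown.
 */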
2826 case OP_IMUL_OVF_UN:
2827 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2828 sparc_rdy (code, sparc_o7);
2829 sparc_cmp (code, sparc_o7, sparc_g0);
2830 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2831 break;
2832 case OP_ICONST:
2833 sparc_set (code, ins->inst_c0, ins->dreg);
2834 break;
2835 case OP_I8CONST:
2836 sparc_set (code, ins->inst_l, ins->dreg);
2837 break;
2838 case OP_AOTCONST:
2839 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2840 sparc_set_template (code, ins->dreg);
2841 break;
2842 case OP_JUMP_TABLE:
2843 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2844 sparc_set_template (code, ins->dreg);
2845 break;
2846 case OP_ICONV_TO_I4:
2847 case OP_ICONV_TO_U4:
2848 case OP_MOVE:
2849 if (ins->sreg1 != ins->dreg)
2850 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2851 break;
2852 case OP_FMOVE:
2853 #ifdef SPARCV9
2854 if (ins->sreg1 != ins->dreg)
2855 sparc_fmovd (code, ins->sreg1, ins->dreg);
2856 #else
2857 sparc_fmovs (code, ins->sreg1, ins->dreg);
2858 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2859 #endif
2860 break;
2861 case OP_CHECK_THIS:
2862 /* ensure ins->sreg1 is not NULL */
2863 /* Might be misaligned in case of vtypes so use a byte load */
2864 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
2865 break;
2866 case OP_ARGLIST:
2867 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2868 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2869 break;
2870 case OP_FCALL:
2871 case OP_LCALL:
2872 case OP_VCALL:
2873 case OP_VCALL2:
2874 case OP_VOIDCALL:
2875 case OP_CALL: {
2876 call = (MonoCallInst*)ins;
2877 g_assert (!call->virtual);
2878 code = emit_save_sp_to_lmf (cfg, code);
2880 const MonoJumpInfoTarget patch = mono_call_to_patch (call);
2881 code = emit_call (cfg, code, patch.type, patch.target);
2882 code = emit_vret_token (ins, code);
2883 code = emit_move_return_value (ins, code);
2884 break;
2886 case OP_FCALL_REG:
2887 case OP_LCALL_REG:
2888 case OP_VCALL_REG:
2889 case OP_VCALL2_REG:
2890 case OP_VOIDCALL_REG:
2891 case OP_CALL_REG:
2892 call = (MonoCallInst*)ins;
2893 code = emit_save_sp_to_lmf (cfg, code);
2894 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2896 * We emit a special kind of nop in the delay slot to tell the
2897 * trampoline code that this is a virtual call, thus an unbox
2898 * trampoline might need to be called.
2900 if (call->virtual)
2901 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2902 else
2903 sparc_nop (code);
2905 code = emit_vret_token (ins, code);
2906 code = emit_move_return_value (ins, code);
2907 break;
2908 case OP_FCALL_MEMBASE:
2909 case OP_LCALL_MEMBASE:
2910 case OP_VCALL_MEMBASE:
2911 case OP_VCALL2_MEMBASE:
2912 case OP_VOIDCALL_MEMBASE:
2913 case OP_CALL_MEMBASE:
2914 call = (MonoCallInst*)ins;
2915 code = emit_save_sp_to_lmf (cfg, code);
2916 if (sparc_is_imm13 (ins->inst_offset)) {
2917 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2918 } else {
2919 sparc_set (code, ins->inst_offset, sparc_o7);
2920 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
2922 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2923 if (call->virtual)
2924 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2925 else
2926 sparc_nop (code);
2928 code = emit_vret_token (ins, code);
2929 code = emit_move_return_value (ins, code);
2930 break;
2931 case OP_SETFRET:
2932 if (mono_method_signature_internal (cfg->method)->ret->type == MONO_TYPE_R4)
2933 sparc_fdtos (code, ins->sreg1, sparc_f0);
2934 else {
2935 #ifdef SPARCV9
2936 sparc_fmovd (code, ins->sreg1, ins->dreg);
2937 #else
2938 /* FIXME: Why not use fmovd ? */
2939 sparc_fmovs (code, ins->sreg1, ins->dreg);
2940 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2941 #endif
2943 break;
2944 case OP_LOCALLOC: {
2945 guint32 size_reg;
2946 gint32 offset2;
2948 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2949 /* Perform stack touching */
2950 NOT_IMPLEMENTED;
2951 #endif
2953 /* Keep alignment */
2954 /* Add 4 to compensate for the rounding of localloc_offset */
2955 sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
2956 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
2957 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2959 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
2960 #ifdef SPARCV9
2961 size_reg = sparc_g4;
2962 #else
2963 size_reg = sparc_g1;
2964 #endif
2965 sparc_mov_reg_reg (code, ins->dreg, size_reg);
2967 else
2968 size_reg = ins->sreg1;
2970 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
2971 /* Keep %sp valid at all times */
2972 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
2973 /* Round localloc_offset too so the result is at least 8 aligned */
2974 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
2975 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
2976 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
2978 if (ins->flags & MONO_INST_INIT) {
2979 guint32 *br [3];
2980 /* Initialize memory region */
2981 sparc_cmp_imm (code, size_reg, 0);
2982 br [0] = code;
2983 sparc_branch (code, 0, sparc_be, 0);
2984 /* delay slot */
2985 sparc_set (code, 0, sparc_o7);
2986 sparc_sub_imm (code, 0, size_reg, mono_hwcap_sparc_is_v9 ? 8 : 4, size_reg);
2987 /* start of loop */
2988 br [1] = code;
2989 if (mono_hwcap_sparc_is_v9)
2990 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
2991 else
2992 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
2993 sparc_cmp (code, sparc_o7, size_reg);
2994 br [2] = code;
2995 sparc_branch (code, 0, sparc_bl, 0);
2996 sparc_patch (br [2], br [1]);
2997 /* delay slot */
2998 sparc_add_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
2999 sparc_patch (br [0], code);
3001 break;
3003 case OP_LOCALLOC_IMM: {
3004 gint32 offset = ins->inst_imm;
3005 gint32 offset2;
3007 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3008 /* Perform stack touching */
3009 NOT_IMPLEMENTED;
3010 #endif
3012 /* To compensate for the rounding of localloc_offset */
3013 offset += sizeof (target_mgreg_t);
3014 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3015 if (sparc_is_imm13 (offset))
3016 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3017 else {
3018 sparc_set (code, offset, sparc_o7);
3019 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3021 /* Round localloc_offset too so the result is at least 8 aligned */
3022 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3023 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3024 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3025 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
3026 guint32 *br [2];
3027 int i;
3029 if (offset <= 16) {
3030 i = 0;
3031 while (i < offset) {
3032 if (mono_hwcap_sparc_is_v9) {
3033 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3034 i += 8;
3036 else {
3037 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3038 i += 4;
3042 else {
3043 sparc_set (code, offset, sparc_o7);
3044 sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3045 /* beginning of loop */
3046 br [0] = code;
3047 if (mono_hwcap_sparc_is_v9)
3048 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3049 else
3050 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3051 sparc_cmp_imm (code, sparc_o7, 0);
3052 br [1] = code;
3053 sparc_branch (code, 0, sparc_bne, 0);
3054 /* delay slot */
3055 sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3056 sparc_patch (br [1], br [0]);
3059 break;
3061 case OP_THROW:
3062 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3063 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
3064 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception));
3065 EMIT_CALL ();
3066 break;
3067 case OP_RETHROW:
3068 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3069 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
3070 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception));
3071 EMIT_CALL ();
3072 break;
3073 case OP_START_HANDLER: {
3075 * The START_HANDLER instruction marks the beginning of a handler
3076 * block. It is called using a call instruction, so %o7 contains
3077 * the return address. Since the handler executes in the same stack
3078 * frame as the method itself, we can't use save/restore to save
3079 * the return address. Instead, we save it into a dedicated
3080 * variable.
3082 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3083 if (!sparc_is_imm13 (spvar->inst_offset)) {
3084 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3085 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3087 else
3088 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3089 break;
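/*
 * Sketch of the resulting handler protocol: OP_CALL_HANDLER below enters
 * the block with
 *
 *   call  <handler>      ! return address in %o7
 *    nop
 *
 * OP_START_HANDLER above spills %o7 into spvar, and OP_ENDFINALLY /
 * OP_ENDFILTER reload it and return with
 *
 *   jmpl  %o7 + 8, %g0   ! resume after the call and its delay slot
 *    nop
 */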
3091 case OP_ENDFILTER: {
3092 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3093 if (!sparc_is_imm13 (spvar->inst_offset)) {
3094 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3095 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3097 else
3098 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3099 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3100 /* Delay slot */
3101 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3102 break;
3104 case OP_ENDFINALLY: {
3105 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3106 if (!sparc_is_imm13 (spvar->inst_offset)) {
3107 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3108 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3110 else
3111 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3112 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3113 sparc_nop (code);
3114 break;
3116 case OP_CALL_HANDLER:
3117 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3118 /* This is a jump inside the method, so call_simple works even on V9 */
3119 sparc_call_simple (code, 0);
3120 sparc_nop (code);
3121 for (GList *tmp = ins->inst_eh_blocks; tmp != bb->clause_holes; tmp = tmp->prev)
3122 mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, code, bb);
3123 break;
3124 case OP_LABEL:
3125 ins->inst_c0 = (guint8*)code - cfg->native_code;
3126 break;
3127 case OP_RELAXED_NOP:
3128 case OP_NOP:
3129 case OP_DUMMY_USE:
3130 case OP_DUMMY_ICONST:
3131 case OP_DUMMY_I8CONST:
3132 case OP_DUMMY_R8CONST:
3133 case OP_DUMMY_R4CONST:
3134 case OP_NOT_REACHED:
3135 case OP_NOT_NULL:
3136 break;
3137 case OP_BR:
3138 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3139 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3140 break;
3141 if (ins->inst_target_bb->native_offset) {
3142 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3143 g_assert (sparc_is_imm22 (disp));
3144 sparc_branch (code, 1, sparc_ba, disp);
3145 } else {
3146 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3147 sparc_branch (code, 1, sparc_ba, 0);
3149 sparc_nop (code);
3150 break;
3151 case OP_BR_REG:
3152 sparc_jmp (code, ins->sreg1, sparc_g0);
3153 sparc_nop (code);
3154 break;
3155 case OP_CEQ:
3156 case OP_CLT:
3157 case OP_CLT_UN:
3158 case OP_CGT:
3159 case OP_CGT_UN:
3160 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3161 sparc_clr_reg (code, ins->dreg);
3162 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3164 else {
3165 sparc_clr_reg (code, ins->dreg);
3166 #ifdef SPARCV9
3167 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3168 #else
3169 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3170 #endif
3171 /* delay slot */
3172 sparc_set (code, 1, ins->dreg);
3174 break;
3175 case OP_ICEQ:
3176 case OP_ICLT:
3177 case OP_ICLT_UN:
3178 case OP_ICGT:
3179 case OP_ICGT_UN:
3180 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3181 sparc_clr_reg (code, ins->dreg);
3182 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3184 else {
3185 sparc_clr_reg (code, ins->dreg);
3186 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3187 /* delay slot */
3188 sparc_set (code, 1, ins->dreg);
3190 break;
3191 case OP_COND_EXC_EQ:
3192 case OP_COND_EXC_NE_UN:
3193 case OP_COND_EXC_LT:
3194 case OP_COND_EXC_LT_UN:
3195 case OP_COND_EXC_GT:
3196 case OP_COND_EXC_GT_UN:
3197 case OP_COND_EXC_GE:
3198 case OP_COND_EXC_GE_UN:
3199 case OP_COND_EXC_LE:
3200 case OP_COND_EXC_LE_UN:
3201 case OP_COND_EXC_OV:
3202 case OP_COND_EXC_NO:
3203 case OP_COND_EXC_C:
3204 case OP_COND_EXC_NC:
3205 case OP_COND_EXC_IEQ:
3206 case OP_COND_EXC_INE_UN:
3207 case OP_COND_EXC_ILT:
3208 case OP_COND_EXC_ILT_UN:
3209 case OP_COND_EXC_IGT:
3210 case OP_COND_EXC_IGT_UN:
3211 case OP_COND_EXC_IGE:
3212 case OP_COND_EXC_IGE_UN:
3213 case OP_COND_EXC_ILE:
3214 case OP_COND_EXC_ILE_UN:
3215 case OP_COND_EXC_IOV:
3216 case OP_COND_EXC_INO:
3217 case OP_COND_EXC_IC:
3218 case OP_COND_EXC_INC:
3219 #ifdef SPARCV9
3220 NOT_IMPLEMENTED;
3221 #else
3222 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3223 #endif
3224 break;
3225 case OP_SPARC_COND_EXC_EQZ:
3226 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3227 break;
3228 case OP_SPARC_COND_EXC_GEZ:
3229 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3230 break;
3231 case OP_SPARC_COND_EXC_GTZ:
3232 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3233 break;
3234 case OP_SPARC_COND_EXC_LEZ:
3235 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3236 break;
3237 case OP_SPARC_COND_EXC_LTZ:
3238 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3239 break;
3240 case OP_SPARC_COND_EXC_NEZ:
3241 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3242 break;
3244 case OP_IBEQ:
3245 case OP_IBNE_UN:
3246 case OP_IBLT:
3247 case OP_IBLT_UN:
3248 case OP_IBGT:
3249 case OP_IBGT_UN:
3250 case OP_IBGE:
3251 case OP_IBGE_UN:
3252 case OP_IBLE:
3253 case OP_IBLE_UN: {
3254 if (mono_hwcap_sparc_is_v9)
3255 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3256 else
3257 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3258 break;
3261 case OP_SPARC_BRZ:
3262 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3263 break;
3264 case OP_SPARC_BRLEZ:
3265 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3266 break;
3267 case OP_SPARC_BRLZ:
3268 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3269 break;
3270 case OP_SPARC_BRNZ:
3271 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3272 break;
3273 case OP_SPARC_BRGZ:
3274 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3275 break;
3276 case OP_SPARC_BRGEZ:
3277 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3278 break;
3280 /* floating point opcodes */
3281 case OP_R8CONST:
3282 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3283 #ifdef SPARCV9
3284 sparc_set_template (code, sparc_o7);
3285 #else
3286 sparc_sethi (code, 0, sparc_o7);
3287 #endif
3288 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3289 break;
3290 case OP_R4CONST:
3291 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3292 #ifdef SPARCV9
3293 sparc_set_template (code, sparc_o7);
3294 #else
3295 sparc_sethi (code, 0, sparc_o7);
3296 #endif
3297 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3299 /* Extend to double */
3300 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3301 break;
3302 case OP_STORER8_MEMBASE_REG:
3303 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3304 sparc_set (code, ins->inst_offset, sparc_o7);
3305 /* SPARCV9 handles misaligned fp loads/stores */
3306 if (!v64 && (ins->inst_offset % 8)) {
3307 /* Misaligned */
3308 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3309 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3310 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3311 } else
3312 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3314 else {
3315 if (!v64 && (ins->inst_offset % 8)) {
3316 /* Misaligned */
3317 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3318 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3319 } else
3320 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3322 break;
3323 case OP_LOADR8_MEMBASE:
3324 EMIT_LOAD_MEMBASE (ins, lddf);
3325 break;
3326 case OP_STORER4_MEMBASE_REG:
3327 /* This requires a double->single conversion */
3328 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3329 if (!sparc_is_imm13 (ins->inst_offset)) {
3330 sparc_set (code, ins->inst_offset, sparc_o7);
3331 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3333 else
3334 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3335 break;
3336 case OP_LOADR4_MEMBASE: {
3337 /* ldf needs a single precision register */
3338 int dreg = ins->dreg;
3339 ins->dreg = FP_SCRATCH_REG;
3340 EMIT_LOAD_MEMBASE (ins, ldf);
3341 ins->dreg = dreg;
3342 /* Extend to double */
3343 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3344 break;
3346 case OP_ICONV_TO_R4: {
3347 MonoInst *spill = cfg->arch.float_spill_slot;
3348 gint32 reg = spill->inst_basereg;
3349 gint32 offset = spill->inst_offset;
3351 g_assert (spill->opcode == OP_REGOFFSET);
3352 #ifdef SPARCV9
3353 if (!sparc_is_imm13 (offset)) {
3354 sparc_set (code, offset, sparc_o7);
3355 sparc_stx (code, ins->sreg1, reg, offset);
3356 sparc_lddf (code, reg, offset, FP_SCRATCH_REG);
3357 } else {
3358 sparc_stx_imm (code, ins->sreg1, reg, offset);
3359 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3361 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3362 #else
3363 if (!sparc_is_imm13 (offset)) {
3364 sparc_set (code, offset, sparc_o7);
3365 sparc_st (code, ins->sreg1, reg, sparc_o7);
3366 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3367 } else {
3368 sparc_st_imm (code, ins->sreg1, reg, offset);
3369 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3371 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3372 #endif
3373 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3374 break;
3376 case OP_ICONV_TO_R8: {
3377 MonoInst *spill = cfg->arch.float_spill_slot;
3378 gint32 reg = spill->inst_basereg;
3379 gint32 offset = spill->inst_offset;
3381 g_assert (spill->opcode == OP_REGOFFSET);
3383 #ifdef SPARCV9
3384 if (!sparc_is_imm13 (offset)) {
3385 sparc_set (code, offset, sparc_o7);
3386 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3387 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3388 } else {
3389 sparc_stx_imm (code, ins->sreg1, reg, offset);
3390 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3392 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3393 #else
3394 if (!sparc_is_imm13 (offset)) {
3395 sparc_set (code, offset, sparc_o7);
3396 sparc_st (code, ins->sreg1, reg, sparc_o7);
3397 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3398 } else {
3399 sparc_st_imm (code, ins->sreg1, reg, offset);
3400 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3402 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3403 #endif
3404 break;
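/*
 * Note on the two conversion opcodes above: SPARC has no direct move
 * between the integer and fp register files, so the value round-trips
 * through the spill slot, roughly (32 bit case):
 *
 *   st    %sreg1, [spill]
 *   ldf   [spill], %f_scratch
 *   fitod %f_scratch, %dreg     ! stx/lddf/fxtod on V9
 */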
3406 case OP_FCONV_TO_I1:
3407 case OP_FCONV_TO_U1:
3408 case OP_FCONV_TO_I2:
3409 case OP_FCONV_TO_U2:
3410 #ifndef SPARCV9
3411 case OP_FCONV_TO_I:
3412 case OP_FCONV_TO_U:
3413 #endif
3414 case OP_FCONV_TO_I4:
3415 case OP_FCONV_TO_U4: {
3416 MonoInst *spill = cfg->arch.float_spill_slot;
3417 gint32 reg = spill->inst_basereg;
3418 gint32 offset = spill->inst_offset;
3420 g_assert (spill->opcode == OP_REGOFFSET);
3422 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3423 if (!sparc_is_imm13 (offset)) {
3424 sparc_set (code, offset, sparc_o7);
3425 sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7);
3426 sparc_ld (code, reg, sparc_o7, ins->dreg);
3427 } else {
3428 sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset);
3429 sparc_ld_imm (code, reg, offset, ins->dreg);
3432 switch (ins->opcode) {
3433 case OP_FCONV_TO_I1:
3434 case OP_FCONV_TO_U1:
3435 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3436 break;
3437 case OP_FCONV_TO_I2:
3438 case OP_FCONV_TO_U2:
3439 sparc_set (code, 0xffff, sparc_o7);
3440 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3441 break;
3442 default:
3443 break;
3445 break;
3447 case OP_FCONV_TO_I8:
3448 case OP_FCONV_TO_U8:
3449 /* Emulated */
3450 g_assert_not_reached ();
3451 break;
3452 case OP_FCONV_TO_R4:
3453 /* FIXME: Change precision ? */
3454 #ifdef SPARCV9
3455 sparc_fmovd (code, ins->sreg1, ins->dreg);
3456 #else
3457 sparc_fmovs (code, ins->sreg1, ins->dreg);
3458 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3459 #endif
3460 break;
3461 case OP_LCONV_TO_R_UN: {
3462 /* Emulated */
3463 g_assert_not_reached ();
3464 break;
3466 case OP_LCONV_TO_OVF_I:
3467 case OP_LCONV_TO_OVF_I4_2: {
3468 guint32 *br [3], *label [1];
3471 * Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff
3473 sparc_cmp_imm (code, ins->sreg1, 0);
3474 br [0] = code;
3475 sparc_branch (code, 1, sparc_bneg, 0);
3476 sparc_nop (code);
3478 /* positive */
3479 /* ms word must be 0 */
3480 sparc_cmp_imm (code, ins->sreg2, 0);
3481 br [1] = code;
3482 sparc_branch (code, 1, sparc_be, 0);
3483 sparc_nop (code);
3485 label [0] = code;
3487 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3489 /* negative */
3490 sparc_patch (br [0], code);
3492 /* ms word must be 0xffffffff */
3493 sparc_cmp_imm (code, ins->sreg2, -1);
3494 br [2] = code;
3495 sparc_branch (code, 1, sparc_bne, 0);
3496 sparc_nop (code);
3497 sparc_patch (br [2], label [0]);
3499 /* Ok */
3500 sparc_patch (br [1], code);
3501 if (ins->sreg1 != ins->dreg)
3502 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3503 break;
3505 case OP_FADD:
3506 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3507 break;
3508 case OP_FSUB:
3509 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3510 break;
3511 case OP_FMUL:
3512 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3513 break;
3514 case OP_FDIV:
3515 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3516 break;
3517 case OP_FNEG:
3518 #ifdef SPARCV9
3519 sparc_fnegd (code, ins->sreg1, ins->dreg);
3520 #else
3521 /* FIXME: why not use fnegd ? */
3522 sparc_fnegs (code, ins->sreg1, ins->dreg);
3523 #endif
3524 break;
3525 case OP_FREM:
3526 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3527 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3528 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3529 break;
3530 case OP_FCOMPARE:
3531 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3532 break;
3533 case OP_FCEQ:
3534 case OP_FCLT:
3535 case OP_FCLT_UN:
3536 case OP_FCGT:
3537 case OP_FCGT_UN:
3538 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3539 sparc_clr_reg (code, ins->dreg);
3540 switch (ins->opcode) {
3541 case OP_FCLT_UN:
3542 case OP_FCGT_UN:
3543 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3544 /* delay slot */
3545 sparc_set (code, 1, ins->dreg);
3546 sparc_fbranch (code, 1, sparc_fbu, 2);
3547 /* delay slot */
3548 sparc_set (code, 1, ins->dreg);
3549 break;
3550 default:
3551 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3552 /* delay slot */
3553 sparc_set (code, 1, ins->dreg);
3555 break;
3556 case OP_FBEQ:
3557 case OP_FBLT:
3558 case OP_FBGT:
3559 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3560 break;
3561 case OP_FBGE: {
3562 /* clt.un + brfalse */
3563 guint32 *p = code;
3564 sparc_fbranch (code, 1, sparc_fbul, 0);
3565 /* delay slot */
3566 sparc_nop (code);
3567 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3568 sparc_patch (p, (guint8*)code);
3569 break;
3571 case OP_FBLE: {
3572 /* cgt.un + brfalse */
3573 guint32 *p = code;
3574 sparc_fbranch (code, 1, sparc_fbug, 0);
3575 /* delay slot */
3576 sparc_nop (code);
3577 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3578 sparc_patch (p, (guint8*)code);
3579 break;
3581 case OP_FBNE_UN:
3582 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3583 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3584 break;
3585 case OP_FBLT_UN:
3586 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3587 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3588 break;
3589 case OP_FBGT_UN:
3590 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3591 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3592 break;
3593 case OP_FBGE_UN:
3594 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3595 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3596 break;
3597 case OP_FBLE_UN:
3598 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3599 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3600 break;
3601 case OP_CKFINITE: {
3602 MonoInst *spill = cfg->arch.float_spill_slot;
3603 gint32 reg = spill->inst_basereg;
3604 gint32 offset = spill->inst_offset;
3606 g_assert (spill->opcode == OP_REGOFFSET);
3608 if (!sparc_is_imm13 (offset)) {
3609 sparc_set (code, offset, sparc_o7);
3610 sparc_stdf (code, ins->sreg1, reg, sparc_o7);
3611 sparc_lduh (code, reg, sparc_o7, sparc_o7);
3612 } else {
3613 sparc_stdf_imm (code, ins->sreg1, reg, offset);
3614 sparc_lduh_imm (code, reg, offset, sparc_o7);
3616 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3617 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3618 sparc_cmp_imm (code, sparc_o7, 2047);
3619 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "OverflowException");
3620 #ifdef SPARCV9
3621 sparc_fmovd (code, ins->sreg1, ins->dreg);
3622 #else
3623 sparc_fmovs (code, ins->sreg1, ins->dreg);
3624 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3625 #endif
3626 break;
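/*
 * Note on the finiteness check above (big endian IEEE 754 layout): lduh
 * fetches the most significant 16 bits of the stored double, i.e.
 * sign(1) | exponent(11) | mantissa(4). After "srl 4" and masking with
 * 2047 only the 11 exponent bits remain, and an all-ones exponent means
 * Inf or NaN, e.g.
 *
 *   1.0  -> upper half 0x3ff0 -> (0x3ff0 >> 4) & 0x7ff = 0x3ff  (finite)
 *   +Inf -> upper half 0x7ff0 -> (0x7ff0 >> 4) & 0x7ff = 0x7ff  (throws)
 */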
3629 case OP_MEMORY_BARRIER:
3630 sparc_membar (code, sparc_membar_all);
3631 break;
3632 case OP_LIVERANGE_START: {
3633 if (cfg->verbose_level > 1)
3634 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
3635 MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
3636 break;
3638 case OP_LIVERANGE_END: {
3639 if (cfg->verbose_level > 1)
3640 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
3641 MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
3642 break;
3644 case OP_GC_SAFE_POINT:
3645 break;
3647 default:
3648 #ifdef __GNUC__
3649 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3650 #else
3651 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3652 #endif
3653 g_assert_not_reached ();
3656 if ((((guint8*)code) - code_start) > max_len) {
3657 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3658 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3659 g_assert_not_reached ();
3662 cpos += max_len;
3664 last_ins = ins;
3666 set_code_cursor (cfg, code);
3669 void
3670 mono_arch_register_lowlevel_calls (void)
3672 mono_register_jit_icall (mono_arch_get_lmf_addr, NULL, TRUE);
3675 void
3676 mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors, MonoError *error)
3678 MonoJumpInfo *patch_info;
3680 error_init (error);
3682 /* FIXME: Move part of this to arch independent code */
3683 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3684 unsigned char *ip = patch_info->ip.i + code;
3685 gpointer target;
3687 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors, error);
3688 return_if_nok (error);
3690 switch (patch_info->type) {
3691 case MONO_PATCH_INFO_NONE:
3692 continue;
3693 case MONO_PATCH_INFO_METHOD_JUMP: {
3694 guint32 *ip2 = (guint32*)ip;
3695 /* Might already have been patched */
3696 sparc_set_template (ip2, sparc_o7);
3697 break;
3699 default:
3700 break;
3702 sparc_patch ((guint32*)ip, target);
3706 #error obsolete tracing?
3707 void*
3708 mono_arch_instrument_prolog (MonoCompile *cfg, MonoJitICallId func, void *p, gboolean enable_arguments)
3710 int i;
3711 guint32 *code = (guint32*)p;
3712 MonoMethodSignature *sig = mono_method_signature_internal (cfg->method);
3713 CallInfo *cinfo;
3715 /* Save registers to stack */
3716 for (i = 0; i < 6; ++i)
3717 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (target_mgreg_t)));
3719 cinfo = get_call_info (cfg, sig, FALSE);
3721 /* Save float regs on V9, since they are caller saved */
3722 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3723 ArgInfo *ainfo = cinfo->args + i;
3724 gint32 stack_offset;
3726 stack_offset = ainfo->offset + ARGS_OFFSET;
3728 if (ainfo->storage == ArgInFloatReg) {
3729 if (!sparc_is_imm13 (stack_offset))
3730 NOT_IMPLEMENTED;
3731 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3733 else if (ainfo->storage == ArgInDoubleReg) {
3734 /* The offset is guaranteed to be aligned by the ABI rules */
3735 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3739 sparc_set (code, cfg->method, sparc_o0);
3740 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3742 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (func));
3743 EMIT_CALL ();
3745 /* Restore float regs on V9 */
3746 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3747 ArgInfo *ainfo = cinfo->args + i;
3748 gint32 stack_offset;
3750 stack_offset = ainfo->offset + ARGS_OFFSET;
3752 if (ainfo->storage == ArgInFloatReg) {
3753 if (!sparc_is_imm13 (stack_offset))
3754 NOT_IMPLEMENTED;
3755 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3757 else if (ainfo->storage == ArgInDoubleReg) {
3758 /* The offset is guaranteed to be aligned by the ABI rules */
3759 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3763 g_free (cinfo);
3765 return code;
3768 enum {
3769 SAVE_NONE,
3770 SAVE_STRUCT,
3771 SAVE_ONE,
3772 SAVE_TWO,
3773 SAVE_FP
3776 #error obsolete tracing?
3777 void*
3778 mono_arch_instrument_epilog (MonoCompile *cfg, MonoJitICallId func, void *p, gboolean enable_arguments)
3780 guint32 *code = (guint32*)p;
3781 int save_mode = SAVE_NONE;
3782 MonoMethod *method = cfg->method;
3784 switch (mini_get_underlying_type (mono_method_signature_internal (method)->ret)->type) {
3785 case MONO_TYPE_VOID:
3786 /* special case string .ctor icall */
3787 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3788 save_mode = SAVE_ONE;
3789 else
3790 save_mode = SAVE_NONE;
3791 break;
3792 case MONO_TYPE_I8:
3793 case MONO_TYPE_U8:
3794 #ifdef SPARCV9
3795 save_mode = SAVE_ONE;
3796 #else
3797 save_mode = SAVE_TWO;
3798 #endif
3799 break;
3800 case MONO_TYPE_R4:
3801 case MONO_TYPE_R8:
3802 save_mode = SAVE_FP;
3803 break;
3804 case MONO_TYPE_VALUETYPE:
3805 save_mode = SAVE_STRUCT;
3806 break;
3807 default:
3808 save_mode = SAVE_ONE;
3809 break;
3812 /* Save the result to the stack and also put it into the output registers */
3814 switch (save_mode) {
3815 case SAVE_TWO:
3816 /* V8 only */
3817 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3818 sparc_st_imm (code, sparc_i1, sparc_fp, 72);
3819 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3820 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3821 break;
3822 case SAVE_ONE:
3823 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3824 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3825 break;
3826 case SAVE_FP:
3827 #ifdef SPARCV9
3828 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
3829 #else
3830 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3831 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3832 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3833 #endif
3834 break;
3835 case SAVE_STRUCT:
3836 #ifdef SPARCV9
3837 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3838 #else
3839 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
3840 #endif
3841 break;
3842 case SAVE_NONE:
3843 default:
3844 break;
3847 sparc_set (code, cfg->method, sparc_o0);
3849 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (func));
3850 EMIT_CALL ();
3852 /* Restore result */
3854 switch (save_mode) {
3855 case SAVE_TWO:
3856 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3857 sparc_ld_imm (code, sparc_fp, 72, sparc_i1);
3858 break;
3859 case SAVE_ONE:
3860 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3861 break;
3862 case SAVE_FP:
3863 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
3864 break;
3865 case SAVE_NONE:
3866 default:
3867 break;
3870 return code;
3873 guint8 *
3874 mono_arch_emit_prolog (MonoCompile *cfg)
3876 MonoMethod *method = cfg->method;
3877 MonoMethodSignature *sig;
3878 MonoInst *inst;
3879 guint32 *code;
3880 CallInfo *cinfo;
3881 guint32 i, offset;
3883 cfg->code_size = 256;
3884 cfg->native_code = g_malloc (cfg->code_size);
3885 code = (guint32*)cfg->native_code;
3887 /* FIXME: Generate intermediate code instead */
3889 offset = cfg->stack_offset;
3890 offset += (16 * sizeof (target_mgreg_t)); /* register save area */
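/* Every SPARC frame must reserve 16 slots at %sp where the %l and %i registers
 * are spilled on a register window overflow. */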
3891 #ifndef SPARCV9
3892 offset += 4; /* struct/union return pointer */
3893 #endif
3895 /* add parameter area size for called functions */
3896 if (cfg->param_area < (6 * sizeof (target_mgreg_t)))
3897 /* Reserve space for the first 6 arguments even if it is unused */
3898 offset += 6 * sizeof (target_mgreg_t);
3899 else
3900 offset += cfg->param_area;
3902 /* align the stack size */
3903 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3906 * localloc'd memory is stored between the local variables (whose
3907 * size is given by cfg->stack_offset) and the space reserved
3908 * by the ABI.
3910 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3912 cfg->stack_offset = offset;
3914 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3915 /* Perform stack touching */
3916 NOT_IMPLEMENTED;
3917 #endif
3919 if (!sparc_is_imm13 (- cfg->stack_offset)) {
3920 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3921 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
3922 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
3924 else
3925 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
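/* In both cases 'save' allocates the frame (by adding the negative size to %sp)
 * and switches to a fresh register window in a single instruction. */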
3928 if (strstr (cfg->method->name, "foo")) {
3929 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
3930 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_sparc_break));
3931 sparc_call_simple (code, 0);
3932 sparc_nop (code);
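/* The nop fills the delay slot of the call above. */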
3936 sig = mono_method_signature_internal (method);
3938 cinfo = get_call_info (cfg, sig, FALSE);
3940 /* Keep in sync with emit_load_volatile_arguments */
3941 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3942 ArgInfo *ainfo = cinfo->args + i;
3943 gint32 stack_offset;
3944 MonoType *arg_type;
3945 inst = cfg->args [i];
3947 if (sig->hasthis && (i == 0))
3948 arg_type = mono_get_object_type ();
3949 else
3950 arg_type = sig->params [i - sig->hasthis];
3952 stack_offset = ainfo->offset + ARGS_OFFSET;
3954 /* Save the split arguments so they will reside entirely on the stack */
3955 if (ainfo->storage == ArgInSplitRegStack) {
3956 /* Save the register to the stack */
3957 g_assert (inst->opcode == OP_REGOFFSET);
3958 if (!sparc_is_imm13 (stack_offset))
3959 NOT_IMPLEMENTED;
3960 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
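/* Only the last parameter register can be split between a register and the
 * stack, so the register half of a split argument is always %i5. */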
3963 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
3964 /* Save the argument to a dword aligned stack location */
3966 * stack_offset contains the offset of the argument on the stack.
3967 * inst->inst_offset contains the dword aligned offset where the value
3968 * should be stored.
3970 if (ainfo->storage == ArgInIRegPair) {
3971 if (!sparc_is_imm13 (inst->inst_offset + 4))
3972 NOT_IMPLEMENTED;
3973 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3974 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3976 else
3977 if (ainfo->storage == ArgInSplitRegStack) {
3978 #ifdef SPARCV9
3979 g_assert_not_reached ();
3980 #endif
3981 if (stack_offset != inst->inst_offset) {
3982 /* stack_offset is not dword aligned, so we need to make a copy */
3983 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
3984 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3985 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3988 else
3989 if (ainfo->storage == ArgOnStackPair) {
3990 #ifdef SPARCV9
3991 g_assert_not_reached ();
3992 #endif
3993 if (stack_offset != inst->inst_offset) {
3994 /* stack_offset is not dword aligned, so we need to make a copy */
3995 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
3996 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
3997 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3998 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4001 else
4002 g_assert_not_reached ();
4004 else
4005 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4006 /* Argument in a register, but needs to be saved to the stack */
4007 if (!sparc_is_imm13 (stack_offset))
4008 NOT_IMPLEMENTED;
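/* The low bits of the slot offset presumably encode the argument size: sub-word
 * arguments sit right-justified in their stack slot, so pick a byte, halfword,
 * word or extended-word store accordingly. */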
4009 if ((stack_offset - ARGS_OFFSET) & 0x1)
4010 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4011 else
4012 if ((stack_offset - ARGS_OFFSET) & 0x2)
4013 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4014 else
4015 if ((stack_offset - ARGS_OFFSET) & 0x4)
4016 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4017 else {
4018 if (v64)
4019 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4020 else
4021 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4024 else
4025 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4026 #ifdef SPARCV9
4027 NOT_IMPLEMENTED;
4028 #endif
4029 /* Argument in a register pair, but needs to be saved to the stack */
4030 if (!sparc_is_imm13 (inst->inst_offset + 4))
4031 NOT_IMPLEMENTED;
4032 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4033 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4035 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4036 if (!sparc_is_imm13 (stack_offset))
4037 NOT_IMPLEMENTED;
4038 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4040 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4041 /* The offset is guaranteed to be aligned by the ABI rules */
4042 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4045 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4046 /* Need to move it into a double precision register */
4047 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
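/* Single precision arguments arrive in odd-numbered %f registers on V9; fstod
 * widens the value into the corresponding double register (ainfo->reg - 1). */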
4050 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4051 if (inst->opcode == OP_REGVAR)
4052 /* FIXME: Load the argument from memory into its register */
4053 NOT_IMPLEMENTED;
4056 g_free (cinfo);
4058 if (cfg->method->save_lmf) {
4059 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
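/* Record enough state (ip, sp, fp, method) in the LMF so the runtime can walk
 * back over this frame when execution transitions to native code. */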
4061 /* Save ip */
4062 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4063 sparc_set_template (code, sparc_o7);
4064 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4065 /* Save sp */
4066 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4067 /* Save fp */
4068 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4069 /* Save method */
4070 /* FIXME: add a relocation for this */
4071 sparc_set (code, cfg->method, sparc_o7);
4072 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4074 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4075 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_get_lmf_addr));
4076 EMIT_CALL ();
4078 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4081 #error obsolete tracing?
4082 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4083 code = (guint32*)mono_arch_instrument_prolog (cfg, MONO_JIT_ICALL_mono_trace_enter_method, code, TRUE);
4085 set_code_cursor (cfg, code);
4087 return (guint8*)code;
4090 void
4091 mono_arch_emit_epilog (MonoCompile *cfg)
4093 MonoMethod *method = cfg->method;
4094 guint32 *code;
4095 int can_fold = 0;
4096 int max_epilog_size = 16 + 20 * 4;
4098 if (cfg->method->save_lmf)
4099 max_epilog_size += 128;
4101 if (mono_jit_trace_calls != NULL)
4102 max_epilog_size += 50;
4104 code = (guint32 *)realloc_code (cfg, max_epilog_size);
4106 #error obsolete tracing?
4107 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4108 code = (guint32*)mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4110 if (cfg->method->save_lmf) {
4111 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4113 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4117 * The V8 ABI requires that calls to functions which return a structure
4118 * return to %i7+12
4120 if (!v64 && mono_method_signature_internal (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature_internal (cfg->method)->ret))
4121 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
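/* A normal return goes to %i7+8; returning to %i7+12 skips the 'unimp <size>'
 * word which the V8 ABI places after calls to struct-returning functions. */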
4122 else
4123 sparc_ret (code);
4125 /* Only fold the last instruction into the restore if the exit block has an in count of 1
4126 and the previous block hasn't been optimized away, since it may have an in count > 1 */
4127 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4128 can_fold = 1;
4131 * FIXME: The last instruction might have a branch pointing into it like in
4132 * int_ceq sparc_i0 <-
4134 can_fold = 0;
4136 /* Try folding last instruction into the restore */
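/*
 * 'restore' reads its sources in the current window and writes its destination
 * in the caller's window, whose %o0 is this frame's %i0. So, for example,
 *     or  %l0, 5, %i0
 *     ret
 *     restore
 * can be folded into
 *     ret
 *     restore %l0, 5, %o0
 */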
4137 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4138 /* or reg, imm, %i0 */
4139 int reg = sparc_inst_rs1 (code [-2]);
4140 int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
4141 code [-2] = code [-1];
4142 code --;
4143 sparc_restore_imm (code, reg, imm, sparc_o0);
4145 else
4146 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4147 /* or reg, reg, %i0 */
4148 int reg1 = sparc_inst_rs1 (code [-2]);
4149 int reg2 = sparc_inst_rs2 (code [-2]);
4150 code [-2] = code [-1];
4151 code --;
4152 sparc_restore (code, reg1, reg2, sparc_o0);
4154 else
4155 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
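/* The restore (folded or not) executes in the delay slot of the ret/jmpl emitted above. */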
4157 set_code_cursor (cfg, code);
4160 void
4161 mono_arch_emit_exceptions (MonoCompile *cfg)
4163 MonoJumpInfo *patch_info;
4164 guint32 *code;
4165 int nthrows = 0, i;
4166 int exc_count = 0;
4167 guint32 code_size;
4168 MonoClass *exc_classes [16];
4169 guint8 *exc_throw_start [16], *exc_throw_end [16];
4171 /* Compute needed space */
4172 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4173 if (patch_info->type == MONO_PATCH_INFO_EXC)
4174 exc_count++;
4178 * make sure we have enough space for exceptions
4180 #ifdef SPARCV9
4181 code_size = exc_count * (20 * 4);
4182 #else
4183 code_size = exc_count * 24;
4184 #endif
4185 code = (guint32*)realloc_code (cfg, code_size);
4187 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4188 switch (patch_info->type) {
4189 case MONO_PATCH_INFO_EXC: {
4190 MonoClass *exc_class;
4191 guint32 *buf, *buf2;
4192 guint32 throw_ip, type_idx;
4193 gint32 disp;
4195 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4197 exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4198 type_idx = m_class_get_type_token (exc_class) - MONO_TOKEN_TYPE_DEF;
4199 throw_ip = patch_info->ip.i;
4201 /* Find a throw sequence for the same exception class */
4202 for (i = 0; i < nthrows; ++i)
4203 if (exc_classes [i] == exc_class)
4204 break;
4206 if (i < nthrows) {
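/* Reuse the existing throw sequence: branch back to it, and if the ip offset
 * fits in 13 bits the single-instruction sparc_set32 can go into the branch
 * delay slot; otherwise emit it before the branch and put a nop in the slot. */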
4207 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4208 if (!sparc_is_imm13 (throw_offset))
4209 sparc_set32 (code, throw_offset, sparc_o1);
4211 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4212 g_assert (sparc_is_imm22 (disp));
4213 sparc_branch (code, 0, sparc_ba, disp);
4214 if (sparc_is_imm13 (throw_offset))
4215 sparc_set32 (code, throw_offset, sparc_o1);
4216 else
4217 sparc_nop (code);
4218 patch_info->type = MONO_PATCH_INFO_NONE;
4220 else {
4221 /* Emit the template for setting o1 */
4222 buf = code;
4223 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4224 /* Can use a short form */
4225 sparc_nop (code);
4226 else
4227 sparc_set_template (code, sparc_o1);
4228 buf2 = code;
4230 if (nthrows < 16) {
4231 exc_classes [nthrows] = exc_class;
4232 exc_throw_start [nthrows] = (guint8*)code;
4236 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID,
4237 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_sparc_break));
4238 EMIT_CALL();
4241 /* first arg = type token */
4242 /* Pass the type index to reduce the size of the sparc_set */
4243 if (!sparc_is_imm13 (type_idx))
4244 sparc_set32 (code, type_idx, sparc_o0);
4246 /* second arg = offset between the throw ip and the current ip */
4247 /* On sparc, the saved ip points to the call instruction */
4248 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4249 sparc_set32 (buf, disp, sparc_o1);
4250 while (buf < buf2)
4251 sparc_nop (buf);
4253 if (nthrows < 16) {
4254 exc_throw_end [nthrows] = (guint8*)code;
4255 nthrows ++;
4258 patch_info->data.jit_icall_id = MONO_JIT_ICALL_mono_arch_throw_corlib_exception;
4259 patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ID;
4260 patch_info->ip.i = (guint8*)code - cfg->native_code;
4262 EMIT_CALL ();
4264 if (sparc_is_imm13 (type_idx)) {
4265 /* Put it into the delay slot */
4266 code --;
4267 buf = code;
4268 sparc_set32 (code, type_idx, sparc_o0);
4269 g_assert (code - buf == 1);
4272 break;
4274 default:
4275 /* do nothing */
4276 break;
4279 set_code_cursor (cfg, code);
4282 set_code_cursor (cfg, code);
4285 gboolean lmf_addr_key_inited = FALSE;
4287 #ifdef MONO_SPARC_THR_TLS
4288 thread_key_t lmf_addr_key;
4289 #else
4290 pthread_key_t lmf_addr_key;
4291 #endif
4293 gpointer
4294 mono_arch_get_lmf_addr (void)
4296 /* This is perf critical so we bypass the IO layer */
4297 /* The thr_... functions seem to be somewhat faster */
4298 #ifdef MONO_SPARC_THR_TLS
4299 gpointer res;
4300 thr_getspecific (lmf_addr_key, &res);
4301 return res;
4302 #else
4303 return pthread_getspecific (lmf_addr_key);
4304 #endif
4307 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4310 * There seems to be no way to determine stack boundaries under solaris,
4311 * so it's not possible to determine whether a SIGSEGV is caused by stack
4312 * overflow or not.
4314 #error "--with-sigaltstack=yes not supported on solaris"
4316 #endif
4318 void
4319 mono_arch_tls_init (void)
4321 MonoJitTlsData *jit_tls;
4323 if (!lmf_addr_key_inited) {
4324 int res;
4326 lmf_addr_key_inited = TRUE;
4328 #ifdef MONO_SPARC_THR_TLS
4329 res = thr_keycreate (&lmf_addr_key, NULL);
4330 #else
4331 res = pthread_key_create (&lmf_addr_key, NULL);
4332 #endif
4333 g_assert (res == 0);
4337 jit_tls = mono_get_jit_tls ();
4339 #ifdef MONO_SPARC_THR_TLS
4340 thr_setspecific (lmf_addr_key, &jit_tls->lmf);
4341 #else
4342 pthread_setspecific (lmf_addr_key, &jit_tls->lmf);
4343 #endif
4346 void
4347 mono_arch_finish_init (void)
4351 MonoInst*
4352 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4354 MonoInst *ins = NULL;
4356 return ins;
4360 * mono_arch_get_argument_info:
4361 * @csig: a method signature
4362 * @param_count: the number of parameters to consider
4363 * @arg_info: an array to store the result infos
4365 * Gathers information on parameters such as size, alignment and
4366 * padding. arg_info should be large enough to hold param_count + 1 entries.
4368 * Returns the size of the activation frame.
4371 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4373 int k, align;
4374 CallInfo *cinfo;
4375 ArgInfo *ainfo;
4377 cinfo = get_call_info (NULL, csig, FALSE);
4379 if (csig->hasthis) {
4380 ainfo = &cinfo->args [0];
4381 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4384 for (k = 0; k < param_count; k++) {
4385 ainfo = &cinfo->args [k + csig->hasthis];
4387 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4388 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
4391 g_free (cinfo);
4393 return 0;
4396 host_mgreg_t
4397 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4399 /* FIXME: implement */
4400 g_assert_not_reached ();
4403 gboolean
4404 mono_arch_opcode_supported (int opcode)
4406 return FALSE;
4409 gboolean
4410 mono_arch_tailcall_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig, gboolean virtual_)
4412 return FALSE;
4415 gpointer
4416 mono_arch_load_function (MonoJitICallId jit_icall_id)
4418 return NULL;