/*
 * mini-sparc.c: Sparc backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * Modified for SPARC:
 *   Christopher Taylor (ct@gentoo.org)
 *   Mark Crichton (crichton@gimp.org)
 *   Zoltan Varga (vargaz@freemail.hu)
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <pthread.h>
#include <unistd.h>

#ifndef __linux__
#include <sys/systeminfo.h>
#include <thread.h>
#endif

#include <unistd.h>
#include <sys/mman.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/tokentype.h>
#include <mono/utils/mono-math.h>

#include "mini-sparc.h"
#include "trace.h"
#include "cpu-sparc.h"
#include "jit-icalls.h"
#include "ir-emit.h"
/*
 * Sparc V9 means two things:
 * - the instruction set
 * - the ABI
 *
 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
 * processors in use are 64 bit processors. The V9 ABI is only usable if the
 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
 * instructions without using the 64 bit ABI.
 */

/*
 * Register usage:
 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
 *   code. Unused input registers are used for global register allocation.
 * - %o0..%o5 and %l7 are used for local register allocation and for passing arguments
 * - %l0..%l6 are used for global register allocation
 * - %o7 and %g1 are used as scratch registers in opcodes
 * - all floating point registers are used for local register allocation except %f0.
 *   Only double precision registers are used.
 * In 64 bit mode:
 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
 *   used for local allocation.
 */

/*
 * Alignment:
 * - doubles and longs must be stored in dword aligned locations
 */

/*
 * The following things are not implemented or do not work:
 * - some fp arithmetic corner cases
 * The following tests in mono/mini are expected to fail:
 * - test_0_simple_double_casts
 *   This test casts (guint64)-1 to double and then back to guint64 again.
 *   Under x86, it returns 0, while under sparc it returns -1.
 *
 * In addition to this, the runtime requires the trunc function, or its
 * solaris counterpart, aintl, to do some double->int conversions. If this
 * function is not available, it is emulated somewhat, but the results can be
 * strange.
 */

/*
 * SPARCV9 FIXME:
 * - optimize sparc_set according to the memory model
 * - when non-AOT compiling, compute patch targets immediately so we don't
 *   have to emit the 6 byte template.
 * - varargs
 * - struct arguments/returns
 */

/*
 * SPARCV9 ISSUES:
 * - sparc_call_simple can't be used in a lot of places since the displacement
 *   might not fit into an imm30.
 * - g1 can't be used in a lot of places since it is used as a scratch reg in
 *   sparc_set.
 * - sparc_f0 can't be used as a scratch register on V9
 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), i.e.
 *   %d36 = %f5.
 * - ldind.i4/u4 needs to sign extend/clear out the upper word -> slows things down
 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
 *   be a double precision register which has no single precision part.
 * - passing/returning structs is hard to implement, because:
 *   - the spec is very hard to understand
 *   - it requires knowledge about the fields of a structure, needs to handle
 *     nested structures etc.
 */

/*
 * Possible optimizations:
 * - delay slot scheduling
 * - allocate large constants to registers
 * - add more mul/div/rem optimizations
 */
#ifndef __linux__
#define MONO_SPARC_THR_TLS 1
#endif

/*
 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
 * causing infinite loops in dominator computation. So glib-2.4 is required.
 */
#ifdef SPARCV9
#if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
#error "glib 2.4 or later is required for 64 bit mode."
#endif
#endif

#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
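
/*
 * Added note, not from the original source: ALIGN_TO rounds val up to the next
 * multiple of align, which must be a power of two. For example,
 * ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16.
 */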
#define SIGNAL_STACK_SIZE (64 * 1024)

#define STACK_BIAS MONO_SPARC_STACK_BIAS

#ifdef SPARCV9

/* %g1 is used by sparc_set */
#define GP_SCRATCH_REG sparc_g4
/* %f0 is used for parameter passing */
#define FP_SCRATCH_REG sparc_f30
#define ARGS_OFFSET (STACK_BIAS + 128)

#else

#define FP_SCRATCH_REG sparc_f0
#define ARGS_OFFSET 68
#define GP_SCRATCH_REG sparc_g1

#endif

/* Whether the CPU supports v9 instructions */
static gboolean sparcv9 = FALSE;

/* Whether this is a 64 bit executable */
#if SPARCV9
static gboolean v64 = TRUE;
#else
static gboolean v64 = FALSE;
#endif
static gpointer mono_arch_get_lmf_addr (void);

const char*
mono_arch_regname (int reg) {
	static const char * rnames[] = {
		"sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
		"sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
		"sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
		"sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
		"sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
		"sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
		"sparc_fp", "sparc_retadr"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}

const char*
mono_arch_fregname (int reg) {
	static const char *rnames [] = {
		"sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
		"sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
		"sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
		"sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
		"sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
		"sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
		"sparc_f30", "sparc_f31"
	};

	if (reg >= 0 && reg < 32)
		return rnames [reg];
	else
		return "unknown";
}
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
	guint32 dummy;
	/* make sure sparcv9 is initialized for embedded use */
	mono_arch_cpu_optimizazions (&dummy);
}
/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
}

/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}
/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
	char buf [1024];
	guint32 opts = 0;

	*exclude_mask = 0;

#ifndef __linux__
	if (!sysinfo (SI_ISALIST, buf, 1024))
		g_assert_not_reached ();
#else
	/* From glibc. If the getpagesize () is 8192, we're on sparc64, which
	 * (in)directly implies that we're a v9 or better.
	 * Improvements to this are greatly appreciated...
	 * Also, we don't differentiate between v7 and v8. I sense SIGILL
	 * sniffing in my future.
	 */
	if (getpagesize() == 8192)
		strcpy (buf, "sparcv9");
	else
		strcpy (buf, "sparcv8");
#endif

	/*
	 * On some processors, the cmov instructions are even slower than the
	 * normal ones...
	 */
	if (strstr (buf, "sparcv9")) {
		opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
		sparcv9 = TRUE;
	}
	else
		*exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;

	return opts;
}
#ifdef __GNUC__
#define flushi(addr)    __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
#else /* assume Sun's compiler */
static void flushi(void *addr)
{
	asm("flush %i0");
}
#endif

#ifndef __linux__
void sync_instruction_memory(caddr_t addr, int len);
#endif
void
mono_arch_flush_icache (guint8 *code, gint size)
{
#ifndef __linux__
	/* Hopefully this is optimized based on the actual CPU */
	sync_instruction_memory (code, size);
#else
	gulong start = (gulong) code;
	gulong end = start + size;
	gulong align;

	/* Sparcv9 chips only need flushes on 32 byte
	 * cacheline boundaries.
	 *
	 * Sparcv8 needs a flush every 8 bytes.
	 */
	align = (sparcv9 ? 32 : 8);

	start &= ~(align - 1);
	end = (end + (align - 1)) & ~(align - 1);
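
	/*
	 * Added note, not from the original source: e.g. flushing 7 bytes starting
	 * at 0x1005 on a v9 chip gives start = 0x1000 and end = 0x1020, so the loop
	 * below issues exactly one flush for that 32 byte cacheline.
	 */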
	while (start < end) {
#ifdef __GNUC__
		__asm__ __volatile__ ("iflush %0"::"r"(start));
#else
		flushi (start);
#endif
		start += align;
	}
#endif
}
/*
 * mono_sparc_flushw:
 *
 * Flush all register windows to memory. Every register window is saved to
 * a 16 word area on the stack pointed to by its %sp register.
 */
void
mono_sparc_flushw (void)
{
	static guint32 start [64];
	static int inited = 0;
	guint32 *code;
	static void (*flushw) (void);

	if (!inited) {
		code = start;

		sparc_save_imm (code, sparc_sp, -160, sparc_sp);
		sparc_flushw (code);
		sparc_ret (code);
		sparc_restore_simple (code);

		g_assert ((code - start) < 64);

		mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);

		flushw = (gpointer)start;

		inited = 1;
	}

	flushw ();
}
void
mono_arch_flush_register_windows (void)
{
	mono_sparc_flushw ();
}

gboolean
mono_arch_is_inst_imm (gint64 imm)
{
	return sparc_is_imm13 (imm);
}

gboolean
mono_sparc_is_v9 (void) {
	return sparcv9;
}

gboolean
mono_sparc_is_sparc64 (void) {
	return v64;
}
typedef enum {
	ArgInIReg,
	ArgInIRegPair,
	ArgInSplitRegStack,
	ArgInFReg,
	ArgInFRegPair,
	ArgOnStack,
	ArgOnStackPair,
	ArgInFloatReg,  /* V9 only */
	ArgInDoubleReg  /* V9 only */
} ArgStorage;

typedef struct {
	gint16 offset;
	/* This needs to be offset by %i0 or %o0 depending on caller/callee */
	gint8  reg;
	ArgStorage storage;
	guint32 vt_offset; /* for valuetypes */
} ArgInfo;

typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 reg_usage;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;
#define DEBUG(a)

/* %o0..%o5 */
#define PARAM_REGS 6

static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
{
	ainfo->offset = *stack_size;

	if (!pair) {
		if (*gr >= PARAM_REGS) {
			ainfo->storage = ArgOnStack;
		}
		else {
			ainfo->storage = ArgInIReg;
			ainfo->reg = *gr;
			(*gr) ++;
		}

		/* Always reserve stack space for parameters passed in registers */
		(*stack_size) += sizeof (gpointer);
	}
	else {
		if (*gr < PARAM_REGS - 1) {
			/* A pair of registers */
			ainfo->storage = ArgInIRegPair;
			ainfo->reg = *gr;
			(*gr) += 2;
		}
		else if (*gr >= PARAM_REGS) {
			/* A pair of stack locations */
			ainfo->storage = ArgOnStackPair;
		}
		else {
			ainfo->storage = ArgInSplitRegStack;
			ainfo->reg = *gr;
			(*gr) ++;
		}

		(*stack_size) += 2 * sizeof (gpointer);
	}
}
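
/*
 * Worked example, added note (not from the original source): on V8, with *gr == 5
 * a pair argument no longer fits entirely in %o0..%o5, so it is classified as
 * ArgInSplitRegStack (high word in the last register, low word in the first stack
 * slot); with *gr >= 6 the whole pair goes on the stack (ArgOnStackPair). In both
 * cases two stack slots are reserved.
 */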
#ifdef SPARCV9

#define FLOAT_PARAM_REGS 32

static void inline
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
{
	ainfo->offset = *stack_size;

	if (single) {
		if (*gr >= FLOAT_PARAM_REGS) {
			ainfo->storage = ArgOnStack;
		}
		else {
			/* A single is passed in the odd numbered half of an fp register pair */
			ainfo->storage = ArgInFloatReg;
			ainfo->reg = *gr + 1;
			(*gr) += 2;
		}
	}
	else {
		if (*gr < FLOAT_PARAM_REGS) {
			/* A double register */
			ainfo->storage = ArgInDoubleReg;
			ainfo->reg = *gr;
			(*gr) += 2;
		}
		else {
			ainfo->storage = ArgOnStack;
		}
	}

	(*stack_size) += sizeof (gpointer);
}

#endif
/*
 * get_call_info:
 *
 * Obtain information about a call according to the calling convention.
 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
 * document for more information.
 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
 * the 'Sparc Compliance Definition 2.4' document.
 */
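/*
 * Example of the V8 classification below, added note (not from the original
 * source): for a managed signature (int, long, double), the int goes in %o0
 * (ArgInIReg), the long in the %o1/%o2 pair (ArgInIRegPair), and the double,
 * which is passed in integer registers on V8, in the %o3/%o4 pair (ArgInIRegPair).
 */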
static CallInfo*
get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
{
	guint32 i, gr, fr;
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	CallInfo *cinfo;
	MonoType *ret_type;
	MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;

	cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	gr = 0;
	fr = 0;

#ifdef SPARCV9
	if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
		/* The address of the return value is passed in %o0 */
		add_general (&gr, &stack_size, &cinfo->ret, FALSE);
		cinfo->ret.reg += sparc_i0;
	}
#endif

	/* this */
	if (sig->hasthis)
		add_general (&gr, &stack_size, cinfo->args + 0, FALSE);

	if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
		gr = PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
	}

	for (i = 0; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
		MonoType *ptype;

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			gr = PARAM_REGS;

			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
		}

		DEBUG(printf("param %d: ", i));
		if (sig->params [i]->byref) {
			DEBUG(printf("byref\n"));

			add_general (&gr, &stack_size, ainfo, FALSE);
			continue;
		}
		ptype = mono_type_get_underlying_type (sig->params [i]);
		ptype = mini_get_basic_type_from_generic (gsctx, ptype);
		switch (ptype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			add_general (&gr, &stack_size, ainfo, FALSE);
			/* the value is in the ls byte */
			ainfo->offset += sizeof (gpointer) - 1;
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			add_general (&gr, &stack_size, ainfo, FALSE);
			/* the value is in the ls word */
			ainfo->offset += sizeof (gpointer) - 2;
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			add_general (&gr, &stack_size, ainfo, FALSE);
			/* the value is in the ls dword */
			ainfo->offset += sizeof (gpointer) - 4;
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			add_general (&gr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ptype)) {
				add_general (&gr, &stack_size, ainfo, FALSE);
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
#ifdef SPARCV9
			if (sig->pinvoke)
				NOT_IMPLEMENTED;
#endif
			add_general (&gr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_TYPEDBYREF:
			add_general (&gr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
#ifdef SPARCV9
			add_general (&gr, &stack_size, ainfo, FALSE);
#else
			add_general (&gr, &stack_size, ainfo, TRUE);
#endif
			break;
		case MONO_TYPE_R4:
#ifdef SPARCV9
			add_float (&fr, &stack_size, ainfo, TRUE);
			gr ++;
#else
			/* single precision values are passed in integer registers */
			add_general (&gr, &stack_size, ainfo, FALSE);
#endif
			break;
		case MONO_TYPE_R8:
#ifdef SPARCV9
			add_float (&fr, &stack_size, ainfo, FALSE);
			gr ++;
#else
			/* double precision values are passed in a pair of registers */
			add_general (&gr, &stack_size, ainfo, TRUE);
#endif
			break;
		default:
			g_assert_not_reached ();
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
		gr = PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
	}

	/* return value */
	ret_type = mono_type_get_underlying_type (sig->ret);
	ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
	switch (ret_type->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_STRING:
		cinfo->ret.storage = ArgInIReg;
		cinfo->ret.reg = sparc_i0;
		if (gr < 1)
			gr = 1;
		break;
	case MONO_TYPE_U8:
	case MONO_TYPE_I8:
#ifdef SPARCV9
		cinfo->ret.storage = ArgInIReg;
		cinfo->ret.reg = sparc_i0;
		if (gr < 1)
			gr = 1;
#else
		cinfo->ret.storage = ArgInIRegPair;
		cinfo->ret.reg = sparc_i0;
		if (gr < 2)
			gr = 2;
#endif
		break;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		cinfo->ret.storage = ArgInFReg;
		cinfo->ret.reg = sparc_f0;
		break;
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (ret_type)) {
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = sparc_i0;
			if (gr < 1)
				gr = 1;
			break;
		}
		/* Fall through */
	case MONO_TYPE_VALUETYPE:
		if (v64) {
			if (sig->pinvoke)
				NOT_IMPLEMENTED;
			else
				/* Already done */
				;
		}
		else
			cinfo->ret.storage = ArgOnStack;
		break;
	case MONO_TYPE_TYPEDBYREF:
		if (v64) {
			if (sig->pinvoke)
				/* Same as a valuetype with size 24 */
				NOT_IMPLEMENTED;
			else
				/* Already done */
				;
		}
		else
			cinfo->ret.storage = ArgOnStack;
		break;
	case MONO_TYPE_VOID:
		break;
	default:
		g_error ("Can't handle as return value 0x%x", sig->ret->type);
	}

	cinfo->stack_usage = stack_size;
	cinfo->reg_usage = gr;
	return cinfo;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;

	/*
	 * FIXME: If an argument is allocated to a register, then load it from the
	 * stack in the prolog.
	 */

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		/* FIXME: Make arguments on stack allocatable to registers */
		if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);

			vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
		}
	}

	return vars;
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;
	int i;
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, sig, FALSE);

	/* Use unused input registers */
	for (i = cinfo->reg_usage; i < 6; ++i)
		regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));

	/* Use %l0..%l6 as global registers */
	for (i = sparc_l0; i < sparc_l7; ++i)
		regs = g_list_prepend (regs, GUINT_TO_POINTER (i));

	g_free (cinfo);

	return regs;
}
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	return 0;
}
/*
 * Set var information according to the calling convention. sparc version.
 * The locals var stuff should most likely be split in another method.
 */
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *inst;
	int i, offset, size, align, curinst;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, sig, FALSE);

	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
		case ArgInFReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgInIRegPair: {
			MonoType *t = mono_type_get_underlying_type (sig->ret);
			if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
				MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);

				low->opcode = OP_REGVAR;
				low->dreg = cinfo->ret.reg + 1;
				high->opcode = OP_REGVAR;
				high->dreg = cinfo->ret.reg;
			}
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		}
		case ArgOnStack:
#ifdef SPARCV9
			g_assert_not_reached ();
#else
			/* valuetypes */
			cfg->vret_addr->opcode = OP_REGOFFSET;
			cfg->vret_addr->inst_basereg = sparc_fp;
			cfg->vret_addr->inst_offset = 64;
#endif
			break;
		default:
			NOT_IMPLEMENTED;
		}
		cfg->ret->dreg = cfg->ret->inst_c0;
	}

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are never returned in registers on V9.
	 * FIXME: Use something more optimized.
	 */

	/* Locals are allocated backwards from %fp */
	cfg->frame_reg = sparc_fp;
	offset = 0;

	/*
	 * Reserve a stack slot for holding information used during exception
	 * handling.
	 */
	if (header->num_clauses)
		offset += sizeof (gpointer) * 2;

	if (cfg->method->save_lmf) {
		offset += sizeof (MonoLMF);
		cfg->arch.lmf_offset = offset;
	}

	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		inst = cfg->varinfo [i];

		if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
			//g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
			continue;
		}

		if (inst->flags & MONO_INST_IS_DEAD)
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structure */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
			size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
		else
			size = mini_type_stack_size (cfg->generic_sharing_context, inst->inst_vtype, &align);

		/*
		 * This is needed since structures containing doubles must be doubleword
		 * aligned.
		 * FIXME: Do this only if needed.
		 */
		if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
			align = 8;

		/*
		 * variables are accessed as negative offsets from %fp, so increase
		 * the offset before assigning it to a variable
		 */
		offset += size;

		offset += align - 1;
		offset &= ~(align - 1);
		inst->opcode = OP_REGOFFSET;
		inst->inst_basereg = sparc_fp;
		inst->inst_offset = STACK_BIAS + -offset;

		//g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
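		/*
		 * Added note, not from the original source: offsets grow downwards, e.g.
		 * with offset == 8 an 8-byte, 8-aligned local bumps offset to 16 and is
		 * addressed as [%fp + STACK_BIAS - 16].
		 */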
	}

	if (sig->call_convention == MONO_CALL_VARARG) {
		cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = cfg->args [i];
		if (inst->opcode != OP_REGVAR) {
			ArgInfo *ainfo = &cinfo->args [i];
			gboolean inreg = TRUE;
			MonoType *arg_type;
			ArgStorage storage;

			if (sig->hasthis && (i == 0))
				arg_type = &mono_defaults.object_class->byval_arg;
			else
				arg_type = sig->params [i - sig->hasthis];

#ifndef SPARCV9
			if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
									 || (arg_type->type == MONO_TYPE_R8)))
				/*
				 * Since float arguments are passed in integer registers, we need to
				 * save them to the stack in the prolog.
				 */
				inreg = FALSE;
#endif

			/* FIXME: Allocate volatile arguments to registers */
			/* FIXME: This makes the argument holding a vtype address into volatile */
			if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
				inreg = FALSE;

			if (MONO_TYPE_ISSTRUCT (arg_type))
				/* FIXME: this isn't needed */
				inreg = FALSE;

			inst->opcode = OP_REGOFFSET;

			if (!inreg)
				storage = ArgOnStack;
			else
				storage = ainfo->storage;

			switch (storage) {
			case ArgInIReg:
				inst->opcode = OP_REGVAR;
				inst->dreg = sparc_i0 + ainfo->reg;
				break;
			case ArgInIRegPair:
				if (inst->type == STACK_I8) {
					MonoInst *low = get_vreg_to_inst (cfg, inst->dreg + 1);
					MonoInst *high = get_vreg_to_inst (cfg, inst->dreg + 2);

					low->opcode = OP_REGVAR;
					low->dreg = sparc_i0 + ainfo->reg + 1;
					high->opcode = OP_REGVAR;
					high->dreg = sparc_i0 + ainfo->reg;
				}
				inst->opcode = OP_REGVAR;
				inst->dreg = sparc_i0 + ainfo->reg;
				break;
			case ArgInFloatReg:
			case ArgInDoubleReg:
				/*
				 * Since float regs are volatile, we save the arguments to
				 * the stack in the prolog.
				 * FIXME: Avoid this if the method contains no calls.
				 */
			case ArgOnStack:
			case ArgOnStackPair:
			case ArgInSplitRegStack:
				/* Split arguments are saved to the stack in the prolog */
				inst->opcode = OP_REGOFFSET;
				/* in parent frame */
				inst->inst_basereg = sparc_fp;
				inst->inst_offset = ainfo->offset + ARGS_OFFSET;

				if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
					/*
					 * It is very hard to load doubles from non-doubleword aligned
					 * memory locations. So if the offset is misaligned, we copy the
					 * argument to a stack location in the prolog.
					 */
					if ((inst->inst_offset - STACK_BIAS) % 8) {
						inst->inst_basereg = sparc_fp;
						offset += 8;
						align = 8;
						offset += align - 1;
						offset &= ~(align - 1);
						inst->inst_offset = STACK_BIAS + -offset;
					}
				}
				break;
			default:
				NOT_IMPLEMENTED;
			}

			if (MONO_TYPE_ISSTRUCT (arg_type)) {
				/* Add a level of indirection */
				/*
				 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
				 * are destructively modified in a lot of places in inssel.brg.
				 */
				MonoInst *indir;
				MONO_INST_NEW (cfg, indir, 0);
				*indir = *inst;
				inst->opcode = OP_VTARG_ADDR;
				inst->inst_left = indir;
			}
		}
	}

	/*
	 * spillvars are stored between the normal locals and the storage reserved
	 * by the ABI.
	 */

	cfg->stack_offset = offset;

	g_free (cinfo);
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;

	sig = mono_method_signature (cfg->method);

	if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}

	if (!sig->ret->byref && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
		MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
		MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);

		low->flags |= MONO_INST_VOLATILE;
		high->flags |= MONO_INST_VOLATILE;
	}

	/* Add a properly aligned dword for use by int<->float conversion opcodes */
	cfg->arch.float_spill_slot = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_ARG);
	((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE;
}
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
{
	MonoInst *arg;

	MONO_INST_NEW (cfg, arg, 0);
	arg->sreg1 = sreg;

	switch (storage) {
	case ArgInIReg:
		arg->opcode = OP_MOVE;
		arg->dreg = mono_alloc_ireg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
		break;
	case ArgInFloatReg:
		arg->opcode = OP_FMOVE;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}

	MONO_ADD_INS (cfg->cbb, arg);
}

static void
add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
{
	int dreg = mono_alloc_ireg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset);

	mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
}
static void
emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;

	switch (ainfo->storage) {
	case ArgInIRegPair:
		add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, in->dreg + 1);
		add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
		break;
	case ArgOnStackPair:
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, in->dreg + 2);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
		break;
	case ArgInSplitRegStack:
		add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
		break;
	default:
		g_assert_not_reached ();
	}
}
static void
emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;

	switch (ainfo->storage) {
	case ArgInIRegPair:
		/* floating-point <-> integer transfer must go through memory */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);

		/* Load into a register pair */
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
		break;
	case ArgOnStackPair:
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
		break;
	case ArgInSplitRegStack:
		/* floating-point <-> integer transfer must go through memory */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
		/* Load most significant word into register */
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
		break;
	default:
		g_assert_not_reached ();
	}
}
static void
emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;

	switch (ainfo->storage) {
	case ArgInIReg:
		/* floating-point <-> integer transfer must go through memory */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
		break;
	case ArgOnStack:
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
		break;
	default:
		g_assert_not_reached ();
	}
}
static void
emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);

static void
emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
{
	MonoInst *arg;
	guint32 align, offset, pad, size;

	if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
		size = sizeof (MonoTypedRef);
		align = sizeof (gpointer);
	}
	else if (pinvoke)
		size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
	else {
		/*
		 * Other backends use mono_type_stack_size (), but that
		 * aligns the size to 8, which is larger than the size of
		 * the source, leading to reads of invalid memory if the
		 * source is at the end of address space.
		 */
		size = mono_class_value_size (in->klass, &align);
	}

	/* The first 6 argument locations are reserved */
	if (cinfo->stack_usage < 6 * sizeof (gpointer))
		cinfo->stack_usage = 6 * sizeof (gpointer);

	offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
	pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);

	cinfo->stack_usage += size;
	cinfo->stack_usage += pad;

	/*
	 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
	 * use the normal OUTARG opcodes to pass the address of the location to
	 * the callee.
	 */
	if (size > 0) {
		MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
		arg->sreg1 = in->dreg;
		arg->klass = in->klass;
		arg->backend.size = size;
		arg->inst_p0 = call;
		arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
		memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
		((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
		MONO_ADD_INS (cfg->cbb, arg);

		MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
		arg->dreg = mono_alloc_preg (cfg);
		arg->sreg1 = sparc_sp;
		arg->inst_imm = STACK_BIAS + offset;
		MONO_ADD_INS (cfg->cbb, arg);

		emit_pass_other (cfg, call, ainfo, NULL, arg);
	}
}
static void
emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;
	int opcode;

	switch (ainfo->storage) {
	case ArgInIReg:
		add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
		break;
	case ArgOnStack:
#ifdef SPARCV9
		NOT_IMPLEMENTED;
#else
		if (offset & 0x1)
			opcode = OP_STOREI1_MEMBASE_REG;
		else if (offset & 0x2)
			opcode = OP_STOREI2_MEMBASE_REG;
		else
			opcode = OP_STOREI4_MEMBASE_REG;
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
#endif
		break;
	default:
		g_assert_not_reached ();
	}
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	/* FIXME: Add support for signature tokens to AOT */
	cfg->disable_aot = TRUE;
	/* We always pass the signature on the stack for simplicity */
	MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in;
	MonoMethodSignature *sig;
	int i, n;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	guint32 extra_space = 0;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg, sig, sig->pinvoke);

	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* Set the 'struct/union return pointer' location on the stack */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
	}

	for (i = 0; i < n; ++i) {
		MonoType *arg_type;

		ainfo = cinfo->args + i;

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the first implicit argument */
			emit_sig_cookie (cfg, call, cinfo);
		}

		in = call->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		arg_type = mono_type_get_underlying_type (arg_type);
		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
			emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
		else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
			emit_pass_long (cfg, call, ainfo, in);
		else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
			emit_pass_double (cfg, call, ainfo, in);
		else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
			emit_pass_float (cfg, call, ainfo, in);
		else
			emit_pass_other (cfg, call, ainfo, arg_type, in);
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
		emit_sig_cookie (cfg, call, cinfo);
	}

	call->stack_usage = cinfo->stack_usage + extra_space;

	g_free (cinfo);
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int size = ins->backend.size;

	mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, 0);
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
	MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);

	switch (cinfo->ret.storage) {
	case ArgInIReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInIRegPair:
		if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
			MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
		} else {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 2, val->dreg + 2);
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 1, val->dreg + 1);
		}
		break;
	case ArgInFReg:
		if (ret->type == MONO_TYPE_R4)
			MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
		else
			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
	}

	g_assert (cinfo);
}
int cond_to_sparc_cond [][3] = {
	{sparc_be, sparc_be, sparc_fbe},
	{sparc_bne, sparc_bne, 0},
	{sparc_ble, sparc_ble, sparc_fble},
	{sparc_bge, sparc_bge, sparc_fbge},
	{sparc_bl, sparc_bl, sparc_fbl},
	{sparc_bg, sparc_bg, sparc_fbg},
	{sparc_bleu, sparc_bleu, 0},
	{sparc_beu, sparc_beu, 0},
	{sparc_blu, sparc_blu, sparc_fbl},
	{sparc_bgu, sparc_bgu, sparc_fbg}
};
/* Map opcode to the sparc condition codes */
static inline SparcCond
opcode_to_sparc_cond (int opcode)
{
	CompRelation rel;
	CompType t;

	switch (opcode) {
	case OP_COND_EXC_OV:
	case OP_COND_EXC_IOV:
		return sparc_bvs;
	case OP_COND_EXC_C:
	case OP_COND_EXC_IC:
		return sparc_bcs;
	case OP_COND_EXC_NO:
	case OP_COND_EXC_NC:
		NOT_IMPLEMENTED;
	default:
		rel = mono_opcode_to_cond (opcode);
		t = mono_opcode_to_type (opcode, -1);

		return cond_to_sparc_cond [rel][t];
		break;
	}

	return -1;
}
#define COMPUTE_DISP(ins) \
	if (ins->inst_true_bb->native_offset) \
		disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
	else { \
		disp = 0; \
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	}
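
/*
 * Added note, not from the original source: disp is measured in 32 bit
 * instruction words, not bytes (hence the >> 2), matching the word-scaled
 * displacement fields checked below with sparc_is_imm19/sparc_is_imm22.
 */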

#ifdef SPARCV9
#define DEFAULT_ICC sparc_xcc_short
#else
#define DEFAULT_ICC sparc_icc_short
#endif

#ifdef SPARCV9
#define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
	do { \
		gint32 disp; \
		guint32 predict; \
		COMPUTE_DISP(ins); \
		predict = (disp != 0) ? 1 : 0; \
		g_assert (sparc_is_imm19 (disp)); \
		sparc_branchp (code, (annul), cond, icc, (predict), disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)
#define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
#define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
	do { \
		gint32 disp; \
		guint32 predict; \
		COMPUTE_DISP(ins); \
		predict = (disp != 0) ? 1 : 0; \
		g_assert (sparc_is_imm19 (disp)); \
		sparc_fbranch (code, (annul), cond, disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)
#else
#define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
#define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
	do { \
		gint32 disp; \
		COMPUTE_DISP(ins); \
		g_assert (sparc_is_imm22 (disp)); \
		sparc_ ## bop (code, (annul), cond, disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)
#define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
#define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
#endif
#define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
	do { \
		gint32 disp; \
		guint32 predict; \
		COMPUTE_DISP(ins); \
		predict = (disp != 0) ? 1 : 0; \
		g_assert (sparc_is_imm19 (disp)); \
		sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)

#define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
	do { \
		gint32 disp; \
		COMPUTE_DISP(ins); \
		g_assert (sparc_is_imm22 (disp)); \
		sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
		if (filldelay) sparc_nop (code); \
	} while (0)
/* emit an exception if the condition fails */
/*
 * We put the exception throwing code out-of-line, at the end of the method
 */
#define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
		mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
				    MONO_PATCH_INFO_EXC, sexc_name); \
		if (sparcv9 && ((icc) != sparc_icc_short)) { \
			sparc_branchp (code, 0, (cond), (icc), 0, 0); \
		} \
		else { \
			sparc_branch (code, 0, cond, 0); \
		} \
		if (filldelay) sparc_nop (code); \
	} while (0);

#define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)

#define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
		mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
				    MONO_PATCH_INFO_EXC, sexc_name); \
		sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
		sparc_nop (code); \
	} while (0);
#define EMIT_ALU_IMM(ins,op,setcc) do { \
		if (sparc_is_imm13 ((ins)->inst_imm)) \
			sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
		else { \
			sparc_set (code, ins->inst_imm, sparc_o7); \
			sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
		} \
	} while (0);

#define EMIT_LOAD_MEMBASE(ins,op) do { \
		if (sparc_is_imm13 (ins->inst_offset)) \
			sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
		else { \
			sparc_set (code, ins->inst_offset, sparc_o7); \
			sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
		} \
	} while (0);

/* max len = 5 */
#define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
		guint32 sreg; \
		if (ins->inst_imm == 0) \
			sreg = sparc_g0; \
		else { \
			sparc_set (code, ins->inst_imm, sparc_o7); \
			sreg = sparc_o7; \
		} \
		if (!sparc_is_imm13 (ins->inst_offset)) { \
			sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
			sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
		} \
		else \
			sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
	} while (0);

#define EMIT_STORE_MEMBASE_REG(ins,op) do { \
		if (!sparc_is_imm13 (ins->inst_offset)) { \
			sparc_set (code, ins->inst_offset, sparc_o7); \
			sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
		} \
		else \
			sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
	} while (0);
#define EMIT_CALL() do { \
		if (v64) { \
			sparc_set_template (code, sparc_o7); \
			sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
		} \
		else { \
			sparc_call_simple (code, 0); \
		} \
		sparc_nop (code); \
	} while (0);
/*
 * A call template is 7 instructions long, so we want to avoid it if possible.
 */
static guint32*
emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
{
	gpointer target;

	/* FIXME: This only works if the target method is already compiled */
	if (0 && v64 && !cfg->compile_aot) {
		MonoJumpInfo patch_info;

		patch_info.type = patch_type;
		patch_info.data.target = data;

		target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);

		/* FIXME: Add optimizations if the target is close enough */
		sparc_set (code, target, sparc_o7);
		sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
		sparc_nop (code);
	}
	else {
		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
		EMIT_CALL ();
	}

	return code;
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;
	ins = bb->code;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_MUL_IMM:
			/* remove unnecessary multiplication with 1 */
			if (ins->inst_imm == 1) {
				if (ins->dreg != ins->sreg1) {
					ins->opcode = OP_MOVE;
				} else {
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			}
			break;
#ifndef SPARCV9
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
			/*
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
					 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;
				}

			/*
			 * Note: reg1 must be different from the basereg in the second load
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_LOAD_MEMBASE offset(basereg), reg2
			 * -->
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_MOVE reg1, reg2
			 */
			} if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
					   || last_ins->opcode == OP_LOAD_MEMBASE) &&
			      ins->inst_basereg != last_ins->dreg &&
			      ins->inst_basereg == last_ins->inst_basereg &&
			      ins->inst_offset == last_ins->inst_offset) {

				if (ins->dreg == last_ins->dreg) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->dreg;
				}

				//g_assert_not_reached ();

#if 0
			/*
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 * -->
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_ICONST reg, imm
			 */
			} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
						|| last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
				   ins->inst_basereg == last_ins->inst_destbasereg &&
				   ins->inst_offset == last_ins->inst_offset) {
				//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
				ins->opcode = OP_ICONST;
				ins->inst_c0 = last_ins->inst_imm;
				g_assert_not_reached (); // check this rule
#endif
			}
			break;
#endif
		case OP_LOADI1_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;
				}
			}
			break;
		case OP_LOADI2_MEMBASE:
			if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;
				}
			}
			break;
		case OP_STOREI4_MEMBASE_IMM:
			/* Convert pairs of 0 stores to a dword 0 store */
			/* Used when initializing temporaries */
			/* We know sparc_fp is dword aligned */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
				(ins->inst_destbasereg == last_ins->inst_destbasereg) &&
				(ins->inst_destbasereg == sparc_fp) &&
				(ins->inst_offset < 0) &&
				((ins->inst_offset % 8) == 0) &&
				((ins->inst_offset == last_ins->inst_offset - 4)) &&
				(ins->inst_imm == 0) &&
				(last_ins->inst_imm == 0)) {
				if (sparcv9) {
					last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
					last_ins->inst_offset = ins->inst_offset;
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			}
			break;
		case OP_IBEQ:
		case OP_IBNE_UN:
		case OP_IBLT:
		case OP_IBGT:
		case OP_IBGE:
		case OP_IBLE:
		case OP_COND_EXC_EQ:
		case OP_COND_EXC_GE:
		case OP_COND_EXC_GT:
		case OP_COND_EXC_LE:
		case OP_COND_EXC_LT:
		case OP_COND_EXC_NE_UN:
			/*
			 * Convert compare with zero+branch to BRcc
			 */
			/*
			 * This only works in 64 bit mode, since it examines all 64
			 * bits of the register.
			 * Only do this if the method is small since BPr only has a 16bit
			 * displacement.
			 */
			if (v64 && (cfg->header->code_size < 10000) && last_ins &&
				(last_ins->opcode == OP_COMPARE_IMM) &&
				(last_ins->inst_imm == 0)) {
				switch (ins->opcode) {
				case OP_IBEQ:
					ins->opcode = OP_SPARC_BRZ;
					break;
				case OP_IBNE_UN:
					ins->opcode = OP_SPARC_BRNZ;
					break;
				case OP_IBLT:
					ins->opcode = OP_SPARC_BRLZ;
					break;
				case OP_IBGT:
					ins->opcode = OP_SPARC_BRGZ;
					break;
				case OP_IBGE:
					ins->opcode = OP_SPARC_BRGEZ;
					break;
				case OP_IBLE:
					ins->opcode = OP_SPARC_BRLEZ;
					break;
				case OP_COND_EXC_EQ:
					ins->opcode = OP_SPARC_COND_EXC_EQZ;
					break;
				case OP_COND_EXC_GE:
					ins->opcode = OP_SPARC_COND_EXC_GEZ;
					break;
				case OP_COND_EXC_GT:
					ins->opcode = OP_SPARC_COND_EXC_GTZ;
					break;
				case OP_COND_EXC_LE:
					ins->opcode = OP_SPARC_COND_EXC_LEZ;
					break;
				case OP_COND_EXC_LT:
					ins->opcode = OP_SPARC_COND_EXC_LTZ;
					break;
				case OP_COND_EXC_NE_UN:
					ins->opcode = OP_SPARC_COND_EXC_NEZ;
					break;
				default:
					g_assert_not_reached ();
				}
				ins->sreg1 = last_ins->sreg1;
				*last_ins = *ins;
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		case OP_MOVE:
			/*
			 * OP_MOVE reg, reg
			 */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			/*
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			 */
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		}
		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;
}
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
{
	switch (ins->opcode) {
	case OP_LNEG:
		MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, 0, ins->sreg1 + 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_SBB, ins->dreg + 2, 0, ins->sreg1 + 2);
		NULLIFY_INS (ins);
		break;
	default:
		break;
	}
}
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

/* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
static void
sparc_patch (guint32 *code, const gpointer target)
{
	guint32 *c = code;
	guint32 ins = *code;
	guint32 op = ins >> 30;
	guint32 op2 = (ins >> 22) & 0x7;
	guint32 rd = (ins >> 25) & 0x1f;
	guint8* target8 = (guint8*)target;
	gint64 disp = (target8 - (guint8*)code) >> 2;
	int reg;

	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);

	if ((op == 0) && (op2 == 2)) {
		if (!sparc_is_imm22 (disp))
			NOT_IMPLEMENTED;
		/* Bicc */
		*code = ((ins >> 22) << 22) | (disp & 0x3fffff);
	}
	else if ((op == 0) && (op2 == 1)) {
		if (!sparc_is_imm19 (disp))
			NOT_IMPLEMENTED;
		/* BPcc */
		*code = ((ins >> 19) << 19) | (disp & 0x7ffff);
	}
	else if ((op == 0) && (op2 == 3)) {
		if (!sparc_is_imm16 (disp))
			NOT_IMPLEMENTED;
		/* BPr */
		*code &= ~(0x180000 | 0x3fff);
		*code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
	}
	else if ((op == 0) && (op2 == 6)) {
		if (!sparc_is_imm22 (disp))
			NOT_IMPLEMENTED;
		/* FBicc */
		*code = ((ins >> 22) << 22) | (disp & 0x3fffff);
	}
	else if ((op == 0) && (op2 == 4)) {
		guint32 ins2 = code [1];

		if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
			/* sethi followed by or */
			guint32 *p = code;

			sparc_set (p, target8, rd);
			while (p <= (code + 1))
				sparc_nop (p);
		}
		else if (ins2 == 0x01000000) {
			/* sethi followed by nop */
			guint32 *p = code;

			sparc_set (p, target8, rd);
			while (p <= (code + 1))
				sparc_nop (p);
		}
		else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
			/* sethi followed by load/store */
#ifndef SPARCV9
			guint32 t = (guint32)target8;

			*code &= ~(0x3fffff);
			*code |= (t >> 10);
			*(code + 1) &= ~(0x3ff);
			*(code + 1) |= (t & 0x3ff);
#endif
		}
		else if (v64 &&
				 (sparc_inst_rd (ins) == sparc_g1) &&
				 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
				 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
				 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
		{
			/* sparc_set */
			guint32 *p = c;

			reg = sparc_inst_rd (c [1]);
			sparc_set (p, target8, reg);
			while (p < (c + 6))
				sparc_nop (p);
		}
		else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
				 (sparc_inst_imm (ins2))) {
			/* sethi followed by jmpl */
#ifndef SPARCV9
			guint32 t = (guint32)target8;

			*code &= ~(0x3fffff);
			*code |= (t >> 10);
			*(code + 1) &= ~(0x3ff);
			*(code + 1) |= (t & 0x3ff);
#endif
		}
		else
			NOT_IMPLEMENTED;
	}
	else if (op == 01) {
		gint64 disp = (target8 - (guint8*)code) >> 2;

		if (!sparc_is_imm30 (disp))
			NOT_IMPLEMENTED;
		sparc_call_simple (code, target8 - (guint8*)code);
	}
	else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
		/* mov imm, reg */
		g_assert (sparc_is_imm13 (target8));
		*code &= ~(0x1fff);
		*code |= (guint32)target8;
	}
	else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
		/* sparc_set case 5. */
		guint32 *p = c;

		g_assert (v64);
		reg = sparc_inst_rd (c [3]);
		sparc_set (p, target, reg);
		while (p < (c + 6))
			sparc_nop (p);
	}
	else
		NOT_IMPLEMENTED;

	// g_print ("patched with 0x%08x\n", ins);
}
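
/*
 * Added note, not from the original source: in the Bicc case above, a branch at
 * 0x4000 patched to target 0x4100 gets disp = (0x4100 - 0x4000) >> 2 = 64 words,
 * which is then masked into the low 22 bits of the instruction.
 */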
/*
 * mono_sparc_emit_save_lmf:
 *
 * Emit the code necessary to push a new entry onto the lmf stack. Used by
 * trampolines as well.
 */
guint32*
mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
{
	/* Save lmf_addr */
	sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* Save previous_lmf */
	sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
	sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* Set new lmf */
	sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
	sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);

	return code;
}
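
/*
 * Added note, not from the original source: on entry %o0 is expected to hold the
 * lmf_addr pointer (the address of the thread's current LMF pointer), so the code
 * above links the frame-local MonoLMF at %fp + lmf_offset in front of the previous
 * list head.
 */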
guint32*
mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
{
	/* Load previous_lmf */
	sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
	/* Load lmf_addr */
	sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
	/* *(lmf) = previous_lmf */
	sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
	return code;
}
static guint32*
emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
{
	/*
	 * Since register windows are saved to the current value of %sp, we need to
	 * set the sp field in the lmf before the call, not in the prolog.
	 */
	if (cfg->method->save_lmf) {
		gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;

		/* Save sp */
		sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
	}

	return code;
}
2026 static guint32*
2027 emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
2029 MonoCallInst *call = (MonoCallInst*)ins;
2030 guint32 size;
2033 * The sparc ABI requires that calls to functions which return a structure
2034  * contain an additional unimp instruction which is checked by the callee.
2036 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2037 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2038 size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
2039 else
2040 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
2041 sparc_unimp (code, size & 0xfff);
2044 return code;
2047 static guint32*
2048 emit_move_return_value (MonoInst *ins, guint32 *code)
2050 /* Move return value to the target register */
2051 /* FIXME: do more things in the local reg allocator */
2052 switch (ins->opcode) {
2053 case OP_VOIDCALL:
2054 case OP_VOIDCALL_REG:
2055 case OP_VOIDCALL_MEMBASE:
2056 break;
2057 case OP_CALL:
2058 case OP_CALL_REG:
2059 case OP_CALL_MEMBASE:
2060 g_assert (ins->dreg == sparc_o0);
2061 break;
2062 case OP_LCALL:
2063 case OP_LCALL_REG:
2064 case OP_LCALL_MEMBASE:
2066 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2067 * in inssel-long32.brg.
2069 #ifdef SPARCV9
2070 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2071 #else
2072 g_assert (ins->dreg == sparc_o1);
2073 #endif
2074 break;
2075 case OP_FCALL:
2076 case OP_FCALL_REG:
2077 case OP_FCALL_MEMBASE:
2078 #ifdef SPARCV9
2079 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2080 sparc_fmovs (code, sparc_f0, ins->dreg);
2081 sparc_fstod (code, ins->dreg, ins->dreg);
2083 else
2084 sparc_fmovd (code, sparc_f0, ins->dreg);
2085 #else
2086 sparc_fmovs (code, sparc_f0, ins->dreg);
2087 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2088 sparc_fstod (code, ins->dreg, ins->dreg);
2089 else
2090 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2091 #endif
2092 break;
2093 case OP_VCALL:
2094 case OP_VCALL_REG:
2095 case OP_VCALL_MEMBASE:
2096 case OP_VCALL2:
2097 case OP_VCALL2_REG:
2098 case OP_VCALL2_MEMBASE:
2099 break;
2100 default:
2101 NOT_IMPLEMENTED;
2104 return code;
2108 * emit_load_volatile_arguments:
2110 * Load volatile arguments from the stack to the original input registers.
2111 * Required before a tail call.
2113 static guint32*
2114 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2116 MonoMethod *method = cfg->method;
2117 MonoMethodSignature *sig;
2118 MonoInst *inst;
2119 CallInfo *cinfo;
2120 guint32 i, ireg;
2122 /* FIXME: Generate intermediate code instead */
2124 sig = mono_method_signature (method);
2126 cinfo = get_call_info (cfg, sig, FALSE);
2128 /* This is the opposite of the code in emit_prolog */
2130 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2131 ArgInfo *ainfo = cinfo->args + i;
2132 gint32 stack_offset;
2133 MonoType *arg_type;
2135 inst = cfg->args [i];
2137 if (sig->hasthis && (i == 0))
2138 arg_type = &mono_defaults.object_class->byval_arg;
2139 else
2140 arg_type = sig->params [i - sig->hasthis];
2142 stack_offset = ainfo->offset + ARGS_OFFSET;
2143 ireg = sparc_i0 + ainfo->reg;
2145 if (ainfo->storage == ArgInSplitRegStack) {
2146 g_assert (inst->opcode == OP_REGOFFSET);
2148 if (!sparc_is_imm13 (stack_offset))
2149 NOT_IMPLEMENTED;
2150 sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
2153 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2154 if (ainfo->storage == ArgInIRegPair) {
2155 if (!sparc_is_imm13 (inst->inst_offset + 4))
2156 NOT_IMPLEMENTED;
2157 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2158 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2160 else
2161 if (ainfo->storage == ArgInSplitRegStack) {
2162 if (stack_offset != inst->inst_offset) {
2163 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2164 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2165 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2169 else
2170 if (ainfo->storage == ArgOnStackPair) {
2171 if (stack_offset != inst->inst_offset) {
2172 /* stack_offset is not dword aligned, so we need to make a copy */
2173 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2174 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2176 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2177 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2181 else
2182 g_assert_not_reached ();
2184 else
2185 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2186 /* Argument passed in a register but spilled in the prolog; reload it into its input register */
2187 if (!sparc_is_imm13 (stack_offset))
2188 NOT_IMPLEMENTED;
2189 if ((stack_offset - ARGS_OFFSET) & 0x1)
2190 /* FIXME: Is this ldsb or ldub ? */
2191 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2192 else
2193 if ((stack_offset - ARGS_OFFSET) & 0x2)
2194 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2195 else
2196 if ((stack_offset - ARGS_OFFSET) & 0x4)
2197 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2198 else {
2199 if (v64)
2200 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2201 else
2202 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2205 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2206 /* Argument passed in a register pair; reload both halves into the input registers */
2207 if (!sparc_is_imm13 (inst->inst_offset + 4))
2208 NOT_IMPLEMENTED;
2209 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2210 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2212 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2213 NOT_IMPLEMENTED;
2215 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2216 NOT_IMPLEMENTED;
2219 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2220 if (inst->opcode == OP_REGVAR)
2221 /* FIXME: Load the argument into memory */
2222 NOT_IMPLEMENTED;
2225 g_free (cinfo);
2227 return code;
2231 * mono_sparc_is_virtual_call:
2233  * Determine whether the instruction at CODE is a virtual call.
2235 gboolean
2236 mono_sparc_is_virtual_call (guint32 *code)
2238 guint32 buf[1];
2239 guint32 *p;
2241 p = buf;
2243 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2245 * Register indirect call. If it is a virtual call, then the
2246 * instruction in the delay slot is a special kind of nop.
2249 /* Construct special nop */
2250 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
2251 p --;
2253 if (code [1] == p [0])
2254 return TRUE;
2257 return FALSE;
2261 * mono_arch_get_vcall_slot:
2263 * Determine the vtable slot used by a virtual call.
2265 gpointer
2266 mono_arch_get_vcall_slot (guint8 *code8, mgreg_t *regs, int *displacement)
2268 guint32 *code = (guint32*)(gpointer)code8;
2269 guint32 ins = code [0];
2270 guint32 prev_ins = code [-1];
2272 mono_sparc_flushw ();
2274 *displacement = 0;
2276 if (!mono_sparc_is_virtual_call (code))
2277 return NULL;
2279 if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
2280 if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 1) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
2281 /* ld [r1 + CONST ], r2; call r2 */
2282 guint32 base = sparc_inst_rs1 (prev_ins);
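/* Sign extend the 13-bit immediate field: shift it up so bit 12 becomes bit 31, then arithmetic shift back down */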
2283 gint32 disp = (((gint32)(sparc_inst_imm13 (prev_ins))) << 19) >> 19;
2284 gpointer base_val;
2286 g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
2288 g_assert ((base >= sparc_o0) && (base <= sparc_i7));
2290 base_val = regs [base];
2292 *displacement = disp;
2294 return (gpointer)base_val;
2296 else if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 0) && (sparc_inst_op3 (prev_ins) == 0)) {
2297 /* set r1, ICONST; ld [r1 + r2], r2; call r2 */
2298 /* Decode a sparc_set32 */
2299 guint32 base = sparc_inst_rs1 (prev_ins);
2300 guint32 disp;
2301 gpointer base_val;
2302 guint32 s1 = code [-3];
2303 guint32 s2 = code [-2];
2305 #ifdef SPARCV9
2306 NOT_IMPLEMENTED;
2307 #endif
2309 /* sparc_sethi */
2310 g_assert (sparc_inst_op (s1) == 0);
2311 g_assert (sparc_inst_op2 (s1) == 4);
2313 /* sparc_or_imm */
2314 g_assert (sparc_inst_op (s2) == 2);
2315 g_assert (sparc_inst_op3 (s2) == 2);
2316 g_assert (sparc_inst_i (s2) == 1);
2317 g_assert (sparc_inst_rs1 (s2) == sparc_inst_rd (s2));
2318 g_assert (sparc_inst_rd (s1) == sparc_inst_rs1 (s2));
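/* Recombine the 32-bit constant: the sethi imm22 supplies the upper 22 bits, the or immediate the lower 10 */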
2320 disp = ((s1 & 0x3fffff) << 10) | sparc_inst_imm13 (s2);
2322 g_assert ((base >= sparc_o0) && (base <= sparc_i7));
2324 base_val = regs [base];
2326 *displacement = disp;
2328 return (gpointer)base_val;
2329 } else
2330 g_assert_not_reached ();
2332 else
2333 g_assert_not_reached ();
2335 return NULL;
2338 #define CMP_SIZE 3
2339 #define BR_SMALL_SIZE 2
2340 #define BR_LARGE_SIZE 2
2341 #define JUMP_IMM_SIZE 5
2342 #define ENABLE_WRONG_METHOD_CHECK 0
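/* The chunk sizes above count 4-byte instruction words; the thunk buffer below is allocated as size * 4 bytes */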
2345 * LOCKING: called with the domain lock held
2347 gpointer
2348 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
2349 gpointer fail_tramp)
2351 int i;
2352 int size = 0;
2353 guint32 *code, *start;
2355 for (i = 0; i < count; ++i) {
2356 MonoIMTCheckItem *item = imt_entries [i];
2357 if (item->is_equals) {
2358 if (item->check_target_idx) {
2359 if (!item->compare_done)
2360 item->chunk_size += CMP_SIZE;
2361 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
2362 } else {
2363 if (fail_tramp)
2364 item->chunk_size += 16;
2365 item->chunk_size += JUMP_IMM_SIZE;
2366 #if ENABLE_WRONG_METHOD_CHECK
2367 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
2368 #endif
2370 } else {
2371 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
2372 imt_entries [item->check_target_idx]->compare_done = TRUE;
2374 size += item->chunk_size;
2376 if (fail_tramp)
2377 code = mono_method_alloc_generic_virtual_thunk (domain, size * 4);
2378 else
2379 code = mono_domain_code_reserve (domain, size * 4);
2380 start = code;
2381 for (i = 0; i < count; ++i) {
2382 MonoIMTCheckItem *item = imt_entries [i];
2383 item->code_target = (guint8*)code;
2384 if (item->is_equals) {
2385 gboolean fail_case = !item->check_target_idx && fail_tramp;
2387 if (item->check_target_idx || fail_case) {
2388 if (!item->compare_done || fail_case) {
2389 sparc_set (code, (guint32)item->key, sparc_g5);
2390 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2392 item->jmp_code = (guint8*)code;
2393 sparc_branch (code, 0, sparc_bne, 0);
2394 sparc_nop (code);
2395 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2396 sparc_ld (code, sparc_g5, 0, sparc_g5);
2397 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2398 sparc_nop (code);
2400 if (fail_case) {
2401 sparc_patch (item->jmp_code, code);
2402 sparc_set (code, fail_tramp, sparc_g5);
2403 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2404 sparc_nop (code);
2405 item->jmp_code = NULL;
2407 } else {
2408 /* enable the commented code to assert on wrong method */
2409 #if ENABLE_WRONG_METHOD_CHECK
2410 g_assert_not_reached ();
2411 #endif
2412 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2413 sparc_ld (code, sparc_g5, 0, sparc_g5);
2414 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2415 sparc_nop (code);
2416 #if ENABLE_WRONG_METHOD_CHECK
2417 g_assert_not_reached ();
2418 #endif
2420 } else {
2421 sparc_set (code, (guint32)item->key, sparc_g5);
2422 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2423 item->jmp_code = (guint8*)code;
2424 sparc_branch (code, 0, sparc_beu, 0);
2425 sparc_nop (code);
2428 /* patch the branches to get to the target items */
2429 for (i = 0; i < count; ++i) {
2430 MonoIMTCheckItem *item = imt_entries [i];
2431 if (item->jmp_code) {
2432 if (item->check_target_idx) {
2433 sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
2438 mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
2440 mono_stats.imt_thunks_size += (code - start) * 4;
2441 g_assert (code - start <= size);
2442 return start;
2445 MonoMethod*
2446 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
2448 #ifdef SPARCV9
2449 g_assert_not_reached ();
2450 #endif
2452 return (MonoMethod*)regs [sparc_g1];
2455 gpointer
2456 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
2458 mono_sparc_flushw ();
2460 return (gpointer)regs [sparc_o0];
2464 * Some conventions used in the following code.
2465  * - The only scratch registers we have are o7 and g1. We try to
2466 * stick to o7 when we can, and use g1 when necessary.
2469 void
2470 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2472 MonoInst *ins;
2473 MonoCallInst *call;
2474 guint offset;
2475 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2476 MonoInst *last_ins = NULL;
2477 int max_len, cpos;
2478 const char *spec;
2480 if (cfg->verbose_level > 2)
2481 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2483 cpos = bb->max_offset;
2485 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2486 NOT_IMPLEMENTED;
2489 MONO_BB_FOR_EACH_INS (bb, ins) {
2490 guint8* code_start;
2492 offset = (guint8*)code - cfg->native_code;
2494 spec = ins_get_spec (ins->opcode);
2496 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2498 if (offset > (cfg->code_size - max_len - 16)) {
2499 cfg->code_size *= 2;
2500 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2501 code = (guint32*)(cfg->native_code + offset);
2503 code_start = (guint8*)code;
2504 // if (ins->cil_code)
2505 // g_print ("cil code\n");
2506 mono_debug_record_line_number (cfg, ins, offset);
2508 switch (ins->opcode) {
2509 case OP_STOREI1_MEMBASE_IMM:
2510 EMIT_STORE_MEMBASE_IMM (ins, stb);
2511 break;
2512 case OP_STOREI2_MEMBASE_IMM:
2513 EMIT_STORE_MEMBASE_IMM (ins, sth);
2514 break;
2515 case OP_STORE_MEMBASE_IMM:
2516 EMIT_STORE_MEMBASE_IMM (ins, sti);
2517 break;
2518 case OP_STOREI4_MEMBASE_IMM:
2519 EMIT_STORE_MEMBASE_IMM (ins, st);
2520 break;
2521 case OP_STOREI8_MEMBASE_IMM:
2522 #ifdef SPARCV9
2523 EMIT_STORE_MEMBASE_IMM (ins, stx);
2524 #else
2525 /* Only generated by peephole opts */
2526 g_assert ((ins->inst_offset % 8) == 0);
2527 g_assert (ins->inst_imm == 0);
2528 EMIT_STORE_MEMBASE_IMM (ins, stx);
2529 #endif
2530 break;
2531 case OP_STOREI1_MEMBASE_REG:
2532 EMIT_STORE_MEMBASE_REG (ins, stb);
2533 break;
2534 case OP_STOREI2_MEMBASE_REG:
2535 EMIT_STORE_MEMBASE_REG (ins, sth);
2536 break;
2537 case OP_STOREI4_MEMBASE_REG:
2538 EMIT_STORE_MEMBASE_REG (ins, st);
2539 break;
2540 case OP_STOREI8_MEMBASE_REG:
2541 #ifdef SPARCV9
2542 EMIT_STORE_MEMBASE_REG (ins, stx);
2543 #else
2544 /* Only used by OP_MEMSET */
2545 EMIT_STORE_MEMBASE_REG (ins, std);
2546 #endif
2547 break;
2548 case OP_STORE_MEMBASE_REG:
2549 EMIT_STORE_MEMBASE_REG (ins, sti);
2550 break;
2551 case OP_LOADU4_MEM:
2552 sparc_set (code, ins->inst_c0, ins->dreg);
2553 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2554 break;
2555 case OP_LOADI4_MEMBASE:
2556 #ifdef SPARCV9
2557 EMIT_LOAD_MEMBASE (ins, ldsw);
2558 #else
2559 EMIT_LOAD_MEMBASE (ins, ld);
2560 #endif
2561 break;
2562 case OP_LOADU4_MEMBASE:
2563 EMIT_LOAD_MEMBASE (ins, ld);
2564 break;
2565 case OP_LOADU1_MEMBASE:
2566 EMIT_LOAD_MEMBASE (ins, ldub);
2567 break;
2568 case OP_LOADI1_MEMBASE:
2569 EMIT_LOAD_MEMBASE (ins, ldsb);
2570 break;
2571 case OP_LOADU2_MEMBASE:
2572 EMIT_LOAD_MEMBASE (ins, lduh);
2573 break;
2574 case OP_LOADI2_MEMBASE:
2575 EMIT_LOAD_MEMBASE (ins, ldsh);
2576 break;
2577 case OP_LOAD_MEMBASE:
2578 #ifdef SPARCV9
2579 EMIT_LOAD_MEMBASE (ins, ldx);
2580 #else
2581 EMIT_LOAD_MEMBASE (ins, ld);
2582 #endif
2583 break;
2584 #ifdef SPARCV9
2585 case OP_LOADI8_MEMBASE:
2586 EMIT_LOAD_MEMBASE (ins, ldx);
2587 break;
2588 #endif
2589 case OP_ICONV_TO_I1:
2590 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2591 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2592 break;
2593 case OP_ICONV_TO_I2:
2594 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2595 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2596 break;
2597 case OP_ICONV_TO_U1:
2598 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2599 break;
2600 case OP_ICONV_TO_U2:
2601 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2602 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2603 break;
2604 case OP_LCONV_TO_OVF_U4:
2605 case OP_ICONV_TO_OVF_U4:
2606 /* Only used on V9 */
2607 sparc_cmp_imm (code, ins->sreg1, 0);
2608 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2609 MONO_PATCH_INFO_EXC, "OverflowException");
2610 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2611 /* Delay slot */
2612 sparc_set (code, 1, sparc_o7);
2613 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2614 sparc_cmp (code, ins->sreg1, sparc_o7);
2615 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2616 MONO_PATCH_INFO_EXC, "OverflowException");
2617 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2618 sparc_nop (code);
2619 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2620 break;
2621 case OP_LCONV_TO_OVF_I4_UN:
2622 case OP_ICONV_TO_OVF_I4_UN:
2623 /* Only used on V9 */
2624 NOT_IMPLEMENTED;
2625 break;
2626 case OP_COMPARE:
2627 case OP_LCOMPARE:
2628 case OP_ICOMPARE:
2629 sparc_cmp (code, ins->sreg1, ins->sreg2);
2630 break;
2631 case OP_COMPARE_IMM:
2632 case OP_ICOMPARE_IMM:
2633 if (sparc_is_imm13 (ins->inst_imm))
2634 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2635 else {
2636 sparc_set (code, ins->inst_imm, sparc_o7);
2637 sparc_cmp (code, ins->sreg1, sparc_o7);
2639 break;
2640 case OP_BREAK:
2642 * gdb does not like encountering 'ta 1' in the debugged code. So
2643  * instead of emitting a trap, we emit a call to a C function and place a
2644 * breakpoint there.
2646 //sparc_ta (code, 1);
2647 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
2648 EMIT_CALL();
2649 break;
2650 case OP_ADDCC:
2651 case OP_IADDCC:
2652 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2653 break;
2654 case OP_IADD:
2655 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2656 break;
2657 case OP_ADDCC_IMM:
2658 case OP_ADD_IMM:
2659 case OP_IADD_IMM:
2660 /* according to inssel-long32.brg, this should set cc */
2661 EMIT_ALU_IMM (ins, add, TRUE);
2662 break;
2663 case OP_ADC:
2664 case OP_IADC:
2665 /* according to inssel-long32.brg, this should set cc */
2666 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2667 break;
2668 case OP_ADC_IMM:
2669 case OP_IADC_IMM:
2670 EMIT_ALU_IMM (ins, addx, TRUE);
2671 break;
2672 case OP_SUBCC:
2673 case OP_ISUBCC:
2674 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2675 break;
2676 case OP_ISUB:
2677 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2678 break;
2679 case OP_SUBCC_IMM:
2680 case OP_SUB_IMM:
2681 case OP_ISUB_IMM:
2682 /* according to inssel-long32.brg, this should set cc */
2683 EMIT_ALU_IMM (ins, sub, TRUE);
2684 break;
2685 case OP_SBB:
2686 case OP_ISBB:
2687 /* according to inssel-long32.brg, this should set cc */
2688 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2689 break;
2690 case OP_SBB_IMM:
2691 case OP_ISBB_IMM:
2692 EMIT_ALU_IMM (ins, subx, TRUE);
2693 break;
2694 case OP_IAND:
2695 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2696 break;
2697 case OP_AND_IMM:
2698 case OP_IAND_IMM:
2699 EMIT_ALU_IMM (ins, and, FALSE);
2700 break;
2701 case OP_IDIV:
2702 /* Sign extend sreg1 into %y */
2703 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2704 sparc_wry (code, sparc_o7, sparc_g0);
2705 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2706 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2707 break;
2708 case OP_IDIV_UN:
2709 sparc_wry (code, sparc_g0, sparc_g0);
2710 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2711 break;
2712 case OP_DIV_IMM:
2713 case OP_IDIV_IMM: {
2714 int i, imm;
2716 /* Transform division into a shift */
2717 for (i = 1; i < 30; ++i) {
2718 imm = (1 << i);
2719 if (ins->inst_imm == imm)
2720 break;
2722 if (i < 30) {
2723 if (i == 1) {
2724 /* gcc 2.95.3 */
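/* Divide by 2 rounding toward zero: srl 31 extracts the sign bit, which is added to the dividend before the arithmetic shift */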
2725 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2726 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2727 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2729 else {
2730 /* http://compilers.iecc.com/comparch/article/93-04-079 */
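/* For 2^i: negative dividends get (2^i - 1) added (via the sra/srl pair) so the arithmetic shift rounds toward zero */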
2731 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2732 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2733 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2734 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2737 else {
2738 /* Sign extend sreg1 into %y */
2739 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2740 sparc_wry (code, sparc_o7, sparc_g0);
2741 EMIT_ALU_IMM (ins, sdiv, TRUE);
2742 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2744 break;
2746 case OP_IDIV_UN_IMM:
2747 sparc_wry (code, sparc_g0, sparc_g0);
2748 EMIT_ALU_IMM (ins, udiv, FALSE);
2749 break;
2750 case OP_IREM:
2751 /* Sign extend sreg1 into %y */
2752 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2753 sparc_wry (code, sparc_o7, sparc_g0);
2754 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2755 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2756 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2757 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2758 break;
2759 case OP_IREM_UN:
2760 sparc_wry (code, sparc_g0, sparc_g0);
2761 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2762 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2763 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2764 break;
2765 case OP_REM_IMM:
2766 case OP_IREM_IMM:
2767 /* Sign extend sreg1 into %y */
2768 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2769 sparc_wry (code, sparc_o7, sparc_g0);
2770 if (!sparc_is_imm13 (ins->inst_imm)) {
2771 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2772 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2773 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2774 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2776 else {
2777 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2778 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2779 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2781 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2782 break;
2783 case OP_IREM_UN_IMM:
2784 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2785 sparc_wry (code, sparc_g0, sparc_g0);
2786 sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2787 sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
2788 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2789 break;
2790 case OP_IOR:
2791 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2792 break;
2793 case OP_OR_IMM:
2794 case OP_IOR_IMM:
2795 EMIT_ALU_IMM (ins, or, FALSE);
2796 break;
2797 case OP_IXOR:
2798 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2799 break;
2800 case OP_XOR_IMM:
2801 case OP_IXOR_IMM:
2802 EMIT_ALU_IMM (ins, xor, FALSE);
2803 break;
2804 case OP_ISHL:
2805 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2806 break;
2807 case OP_SHL_IMM:
2808 case OP_ISHL_IMM:
2809 if (ins->inst_imm < (1 << 5))
2810 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2811 else {
2812 sparc_set (code, ins->inst_imm, sparc_o7);
2813 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2815 break;
2816 case OP_ISHR:
2817 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2818 break;
2819 case OP_ISHR_IMM:
2820 case OP_SHR_IMM:
2821 if (ins->inst_imm < (1 << 5))
2822 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2823 else {
2824 sparc_set (code, ins->inst_imm, sparc_o7);
2825 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2827 break;
2828 case OP_SHR_UN_IMM:
2829 case OP_ISHR_UN_IMM:
2830 if (ins->inst_imm < (1 << 5))
2831 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2832 else {
2833 sparc_set (code, ins->inst_imm, sparc_o7);
2834 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2836 break;
2837 case OP_ISHR_UN:
2838 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2839 break;
2840 case OP_LSHL:
2841 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2842 break;
2843 case OP_LSHL_IMM:
2844 if (ins->inst_imm < (1 << 6))
2845 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2846 else {
2847 sparc_set (code, ins->inst_imm, sparc_o7);
2848 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2850 break;
2851 case OP_LSHR:
2852 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2853 break;
2854 case OP_LSHR_IMM:
2855 if (ins->inst_imm < (1 << 6))
2856 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2857 else {
2858 sparc_set (code, ins->inst_imm, sparc_o7);
2859 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2861 break;
2862 case OP_LSHR_UN:
2863 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2864 break;
2865 case OP_LSHR_UN_IMM:
2866 if (ins->inst_imm < (1 << 6))
2867 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2868 else {
2869 sparc_set (code, ins->inst_imm, sparc_o7);
2870 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2872 break;
2873 case OP_INOT:
2874 /* can't use sparc_not */
2875 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2876 break;
2877 case OP_INEG:
2878 /* can't use sparc_neg */
2879 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2880 break;
2881 case OP_IMUL:
2882 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2883 break;
2884 case OP_IMUL_IMM:
2885 case OP_MUL_IMM: {
2886 int i, imm;
2888 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2889 break;
2891 /* Transform multiplication into a shift */
2892 for (i = 0; i < 30; ++i) {
2893 imm = (1 << i);
2894 if (ins->inst_imm == imm)
2895 break;
2897 if (i < 30)
2898 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2899 else
2900 EMIT_ALU_IMM (ins, smul, FALSE);
2901 break;
2903 case OP_IMUL_OVF:
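/* %y receives the high 32 bits of the 64-bit product; overflow occurred iff it differs from the sign extension of the low word in dreg */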
2904 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2905 sparc_rdy (code, sparc_g1);
2906 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2907 sparc_cmp (code, sparc_g1, sparc_o7);
2908 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2909 break;
2910 case OP_IMUL_OVF_UN:
2911 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2912 sparc_rdy (code, sparc_o7);
2913 sparc_cmp (code, sparc_o7, sparc_g0);
2914 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2915 break;
2916 case OP_ICONST:
2917 sparc_set (code, ins->inst_c0, ins->dreg);
2918 break;
2919 case OP_I8CONST:
2920 sparc_set (code, ins->inst_l, ins->dreg);
2921 break;
2922 case OP_AOTCONST:
2923 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2924 sparc_set_template (code, ins->dreg);
2925 break;
2926 case OP_JUMP_TABLE:
2927 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2928 sparc_set_template (code, ins->dreg);
2929 break;
2930 case OP_ICONV_TO_I4:
2931 case OP_ICONV_TO_U4:
2932 case OP_MOVE:
2933 if (ins->sreg1 != ins->dreg)
2934 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2935 break;
2936 case OP_FMOVE:
2937 #ifdef SPARCV9
2938 if (ins->sreg1 != ins->dreg)
2939 sparc_fmovd (code, ins->sreg1, ins->dreg);
2940 #else
2941 sparc_fmovs (code, ins->sreg1, ins->dreg);
2942 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2943 #endif
2944 break;
2945 case OP_JMP:
2946 if (cfg->method->save_lmf)
2947 NOT_IMPLEMENTED;
2949 code = emit_load_volatile_arguments (cfg, code);
2950 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2951 sparc_set_template (code, sparc_o7);
2952 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2953 /* Restore parent frame in delay slot */
2954 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2955 break;
2956 case OP_CHECK_THIS:
2957 /* ensure ins->sreg1 is not NULL */
2958 /* Might be misaligned in case of vtypes so use a byte load */
2959 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
2960 break;
2961 case OP_ARGLIST:
2962 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2963 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2964 break;
2965 case OP_FCALL:
2966 case OP_LCALL:
2967 case OP_VCALL:
2968 case OP_VCALL2:
2969 case OP_VOIDCALL:
2970 case OP_CALL:
2971 call = (MonoCallInst*)ins;
2972 g_assert (!call->virtual);
2973 code = emit_save_sp_to_lmf (cfg, code);
2974 if (ins->flags & MONO_INST_HAS_METHOD)
2975 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2976 else
2977 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2979 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2980 code = emit_move_return_value (ins, code);
2981 break;
2982 case OP_FCALL_REG:
2983 case OP_LCALL_REG:
2984 case OP_VCALL_REG:
2985 case OP_VCALL2_REG:
2986 case OP_VOIDCALL_REG:
2987 case OP_CALL_REG:
2988 call = (MonoCallInst*)ins;
2989 code = emit_save_sp_to_lmf (cfg, code);
2990 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2992 * We emit a special kind of nop in the delay slot to tell the
2993  * trampoline code that this is a virtual call, so an unbox
2994 * trampoline might need to be called.
2996 if (call->virtual)
2997 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2998 else
2999 sparc_nop (code);
3001 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
3002 code = emit_move_return_value (ins, code);
3003 break;
3004 case OP_FCALL_MEMBASE:
3005 case OP_LCALL_MEMBASE:
3006 case OP_VCALL_MEMBASE:
3007 case OP_VCALL2_MEMBASE:
3008 case OP_VOIDCALL_MEMBASE:
3009 case OP_CALL_MEMBASE:
3010 call = (MonoCallInst*)ins;
3011 code = emit_save_sp_to_lmf (cfg, code);
3012 if (sparc_is_imm13 (ins->inst_offset)) {
3013 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
3014 } else {
3015 sparc_set (code, ins->inst_offset, sparc_o7);
3016 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
3018 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
3019 if (call->virtual)
3020 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3021 else
3022 sparc_nop (code);
3024 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
3025 code = emit_move_return_value (ins, code);
3026 break;
3027 case OP_SETFRET:
3028 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
3029 sparc_fdtos (code, ins->sreg1, sparc_f0);
3030 else {
3031 #ifdef SPARCV9
3032 sparc_fmovd (code, ins->sreg1, ins->dreg);
3033 #else
3034 /* FIXME: Why not use fmovd ? */
3035 sparc_fmovs (code, ins->sreg1, ins->dreg);
3036 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3037 #endif
3039 break;
3040 case OP_LOCALLOC: {
3041 guint32 size_reg;
3042 gint32 offset2;
3044 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3045 /* Perform stack touching */
3046 NOT_IMPLEMENTED;
3047 #endif
3049 /* Keep alignment */
3050 /* Add 4 to compensate for the rounding of localloc_offset */
3051 sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
3052 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
3053 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
3055 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
3056 #ifdef SPARCV9
3057 size_reg = sparc_g4;
3058 #else
3059 size_reg = sparc_g1;
3060 #endif
3061 sparc_mov_reg_reg (code, ins->dreg, size_reg);
3063 else
3064 size_reg = ins->sreg1;
3066 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
3067 /* Keep %sp valid at all times */
3068 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
3069 /* Round localloc_offset too so the result is at least 8-byte aligned */
3070 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3071 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3072 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3074 if (ins->flags & MONO_INST_INIT) {
3075 guint32 *br [3];
3076 /* Initialize memory region */
3077 sparc_cmp_imm (code, size_reg, 0);
3078 br [0] = code;
3079 sparc_branch (code, 0, sparc_be, 0);
3080 /* delay slot */
3081 sparc_set (code, 0, sparc_o7);
3082 sparc_sub_imm (code, 0, size_reg, sparcv9 ? 8 : 4, size_reg);
3083 /* start of loop */
3084 br [1] = code;
3085 if (sparcv9)
3086 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3087 else
3088 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3089 sparc_cmp (code, sparc_o7, size_reg);
3090 br [2] = code;
3091 sparc_branch (code, 0, sparc_bl, 0);
3092 sparc_patch (br [2], br [1]);
3093 /* delay slot */
3094 sparc_add_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3095 sparc_patch (br [0], code);
3097 break;
3099 case OP_LOCALLOC_IMM: {
3100 gint32 offset = ins->inst_imm;
3101 gint32 offset2;
3103 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3104 /* Perform stack touching */
3105 NOT_IMPLEMENTED;
3106 #endif
3108 /* To compensate for the rounding of localloc_offset */
3109 offset += sizeof (gpointer);
3110 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3111 if (sparc_is_imm13 (offset))
3112 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3113 else {
3114 sparc_set (code, offset, sparc_o7);
3115 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3117 /* Round localloc_offset too so the result is at least 8-byte aligned */
3118 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3119 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3120 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3121 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
3122 guint32 *br [2];
3123 int i;
3125 if (offset <= 16) {
3126 i = 0;
3127 while (i < offset) {
3128 if (sparcv9) {
3129 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3130 i += 8;
3132 else {
3133 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3134 i += 4;
3138 else {
3139 sparc_set (code, offset, sparc_o7);
3140 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3141 /* beginning of loop */
3142 br [0] = code;
3143 if (sparcv9)
3144 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3145 else
3146 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3147 sparc_cmp_imm (code, sparc_o7, 0);
3148 br [1] = code;
3149 sparc_branch (code, 0, sparc_bne, 0);
3150 /* delay slot */
3151 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3152 sparc_patch (br [1], br [0]);
3155 break;
3157 case OP_THROW:
3158 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3159 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3160 (gpointer)"mono_arch_throw_exception");
3161 EMIT_CALL ();
3162 break;
3163 case OP_RETHROW:
3164 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3165 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3166 (gpointer)"mono_arch_rethrow_exception");
3167 EMIT_CALL ();
3168 break;
3169 case OP_START_HANDLER: {
3171 * The START_HANDLER instruction marks the beginning of a handler
3172 * block. It is called using a call instruction, so %o7 contains
3173 * the return address. Since the handler executes in the same stack
3174 * frame as the method itself, we can't use save/restore to save
3175 * the return address. Instead, we save it into a dedicated
3176 * variable.
3178 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3179 if (!sparc_is_imm13 (spvar->inst_offset)) {
3180 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3181 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3183 else
3184 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3185 break;
3187 case OP_ENDFILTER: {
3188 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3189 if (!sparc_is_imm13 (spvar->inst_offset)) {
3190 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3191 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3193 else
3194 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3195 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3196 /* Delay slot */
3197 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3198 break;
3200 case OP_ENDFINALLY: {
3201 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3202 if (!sparc_is_imm13 (spvar->inst_offset)) {
3203 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3204 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3206 else
3207 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3208 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3209 sparc_nop (code);
3210 break;
3212 case OP_CALL_HANDLER:
3213 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3214 /* This is a jump inside the method, so call_simple works even on V9 */
3215 sparc_call_simple (code, 0);
3216 sparc_nop (code);
3217 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3218 break;
3219 case OP_LABEL:
3220 ins->inst_c0 = (guint8*)code - cfg->native_code;
3221 break;
3222 case OP_RELAXED_NOP:
3223 case OP_NOP:
3224 case OP_DUMMY_USE:
3225 case OP_DUMMY_STORE:
3226 case OP_NOT_REACHED:
3227 case OP_NOT_NULL:
3228 break;
3229 case OP_BR:
3230 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3231 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3232 break;
3233 if (ins->inst_target_bb->native_offset) {
3234 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3235 g_assert (sparc_is_imm22 (disp));
3236 sparc_branch (code, 1, sparc_ba, disp);
3237 } else {
3238 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3239 sparc_branch (code, 1, sparc_ba, 0);
3241 sparc_nop (code);
3242 break;
3243 case OP_BR_REG:
3244 sparc_jmp (code, ins->sreg1, sparc_g0);
3245 sparc_nop (code);
3246 break;
3247 case OP_CEQ:
3248 case OP_CLT:
3249 case OP_CLT_UN:
3250 case OP_CGT:
3251 case OP_CGT_UN:
3252 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3253 sparc_clr_reg (code, ins->dreg);
3254 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3256 else {
3257 sparc_clr_reg (code, ins->dreg);
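/* The branch below is annulled: the 'set 1' in its delay slot executes only when the branch is taken, so dreg keeps the 0 set above otherwise */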
3258 #ifdef SPARCV9
3259 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3260 #else
3261 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3262 #endif
3263 /* delay slot */
3264 sparc_set (code, 1, ins->dreg);
3266 break;
3267 case OP_ICEQ:
3268 case OP_ICLT:
3269 case OP_ICLT_UN:
3270 case OP_ICGT:
3271 case OP_ICGT_UN:
3272 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3273 sparc_clr_reg (code, ins->dreg);
3274 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3276 else {
3277 sparc_clr_reg (code, ins->dreg);
3278 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3279 /* delay slot */
3280 sparc_set (code, 1, ins->dreg);
3282 break;
3283 case OP_COND_EXC_EQ:
3284 case OP_COND_EXC_NE_UN:
3285 case OP_COND_EXC_LT:
3286 case OP_COND_EXC_LT_UN:
3287 case OP_COND_EXC_GT:
3288 case OP_COND_EXC_GT_UN:
3289 case OP_COND_EXC_GE:
3290 case OP_COND_EXC_GE_UN:
3291 case OP_COND_EXC_LE:
3292 case OP_COND_EXC_LE_UN:
3293 case OP_COND_EXC_OV:
3294 case OP_COND_EXC_NO:
3295 case OP_COND_EXC_C:
3296 case OP_COND_EXC_NC:
3297 case OP_COND_EXC_IEQ:
3298 case OP_COND_EXC_INE_UN:
3299 case OP_COND_EXC_ILT:
3300 case OP_COND_EXC_ILT_UN:
3301 case OP_COND_EXC_IGT:
3302 case OP_COND_EXC_IGT_UN:
3303 case OP_COND_EXC_IGE:
3304 case OP_COND_EXC_IGE_UN:
3305 case OP_COND_EXC_ILE:
3306 case OP_COND_EXC_ILE_UN:
3307 case OP_COND_EXC_IOV:
3308 case OP_COND_EXC_INO:
3309 case OP_COND_EXC_IC:
3310 case OP_COND_EXC_INC:
3311 #ifdef SPARCV9
3312 NOT_IMPLEMENTED;
3313 #else
3314 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3315 #endif
3316 break;
3317 case OP_SPARC_COND_EXC_EQZ:
3318 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3319 break;
3320 case OP_SPARC_COND_EXC_GEZ:
3321 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3322 break;
3323 case OP_SPARC_COND_EXC_GTZ:
3324 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3325 break;
3326 case OP_SPARC_COND_EXC_LEZ:
3327 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3328 break;
3329 case OP_SPARC_COND_EXC_LTZ:
3330 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3331 break;
3332 case OP_SPARC_COND_EXC_NEZ:
3333 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3334 break;
3336 case OP_IBEQ:
3337 case OP_IBNE_UN:
3338 case OP_IBLT:
3339 case OP_IBLT_UN:
3340 case OP_IBGT:
3341 case OP_IBGT_UN:
3342 case OP_IBGE:
3343 case OP_IBGE_UN:
3344 case OP_IBLE:
3345 case OP_IBLE_UN: {
3346 if (sparcv9)
3347 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3348 else
3349 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3350 break;
3353 case OP_SPARC_BRZ:
3354 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3355 break;
3356 case OP_SPARC_BRLEZ:
3357 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3358 break;
3359 case OP_SPARC_BRLZ:
3360 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3361 break;
3362 case OP_SPARC_BRNZ:
3363 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3364 break;
3365 case OP_SPARC_BRGZ:
3366 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3367 break;
3368 case OP_SPARC_BRGEZ:
3369 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3370 break;
3372 /* floating point opcodes */
3373 case OP_R8CONST:
3374 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3375 #ifdef SPARCV9
3376 sparc_set_template (code, sparc_o7);
3377 #else
3378 sparc_sethi (code, 0, sparc_o7);
3379 #endif
3380 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3381 break;
3382 case OP_R4CONST:
3383 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3384 #ifdef SPARCV9
3385 sparc_set_template (code, sparc_o7);
3386 #else
3387 sparc_sethi (code, 0, sparc_o7);
3388 #endif
3389 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3391 /* Extend to double */
3392 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3393 break;
3394 case OP_STORER8_MEMBASE_REG:
3395 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3396 sparc_set (code, ins->inst_offset, sparc_o7);
3397 /* SPARCV9 handles misaligned fp loads/stores */
3398 if (!v64 && (ins->inst_offset % 8)) {
3399 /* Misaligned */
3400 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3401 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3402 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3403 } else
3404 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3406 else {
3407 if (!v64 && (ins->inst_offset % 8)) {
3408 /* Misaligned */
3409 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3410 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3411 } else
3412 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3414 break;
3415 case OP_LOADR8_MEMBASE:
3416 EMIT_LOAD_MEMBASE (ins, lddf);
3417 break;
3418 case OP_STORER4_MEMBASE_REG:
3419 /* This requires a double->single conversion */
3420 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3421 if (!sparc_is_imm13 (ins->inst_offset)) {
3422 sparc_set (code, ins->inst_offset, sparc_o7);
3423 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3425 else
3426 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3427 break;
3428 case OP_LOADR4_MEMBASE: {
3429 /* ldf needs a single precision register */
3430 int dreg = ins->dreg;
3431 ins->dreg = FP_SCRATCH_REG;
3432 EMIT_LOAD_MEMBASE (ins, ldf);
3433 ins->dreg = dreg;
3434 /* Extend to double */
3435 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3436 break;
3438 case OP_ICONV_TO_R4: {
3439 MonoInst *spill = cfg->arch.float_spill_slot;
3440 gint32 reg = spill->inst_basereg;
3441 gint32 offset = spill->inst_offset;
3443 g_assert (spill->opcode == OP_REGOFFSET);
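/* There is no direct move between integer and fp registers here, so the value is bounced through the float spill slot in memory */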
3444 #ifdef SPARCV9
3445 if (!sparc_is_imm13 (offset)) {
3446 sparc_set (code, offset, sparc_o7);
3447 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3448 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3449 } else {
3450 sparc_stx_imm (code, ins->sreg1, reg, offset);
3451 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3453 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3454 #else
3455 if (!sparc_is_imm13 (offset)) {
3456 sparc_set (code, offset, sparc_o7);
3457 sparc_st (code, ins->sreg1, reg, sparc_o7);
3458 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3459 } else {
3460 sparc_st_imm (code, ins->sreg1, reg, offset);
3461 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3463 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3464 #endif
3465 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3466 break;
3468 case OP_ICONV_TO_R8: {
3469 MonoInst *spill = cfg->arch.float_spill_slot;
3470 gint32 reg = spill->inst_basereg;
3471 gint32 offset = spill->inst_offset;
3473 g_assert (spill->opcode == OP_REGOFFSET);
3475 #ifdef SPARCV9
3476 if (!sparc_is_imm13 (offset)) {
3477 sparc_set (code, offset, sparc_o7);
3478 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3479 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3480 } else {
3481 sparc_stx_imm (code, ins->sreg1, reg, offset);
3482 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3484 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3485 #else
3486 if (!sparc_is_imm13 (offset)) {
3487 sparc_set (code, offset, sparc_o7);
3488 sparc_st (code, ins->sreg1, reg, sparc_o7);
3489 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3490 } else {
3491 sparc_st_imm (code, ins->sreg1, reg, offset);
3492 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3494 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3495 #endif
3496 break;
3498 case OP_FCONV_TO_I1:
3499 case OP_FCONV_TO_U1:
3500 case OP_FCONV_TO_I2:
3501 case OP_FCONV_TO_U2:
3502 #ifndef SPARCV9
3503 case OP_FCONV_TO_I:
3504 case OP_FCONV_TO_U:
3505 #endif
3506 case OP_FCONV_TO_I4:
3507 case OP_FCONV_TO_U4: {
3508 MonoInst *spill = cfg->arch.float_spill_slot;
3509 gint32 reg = spill->inst_basereg;
3510 gint32 offset = spill->inst_offset;
3512 g_assert (spill->opcode == OP_REGOFFSET);
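/* fdtoi leaves the truncated value in an fp register; it is stored to the spill slot and reloaded into the integer destination register */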
3514 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3515 if (!sparc_is_imm13 (offset)) {
3516 sparc_set (code, offset, sparc_o7);
3517 sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7);
3518 sparc_ld (code, reg, sparc_o7, ins->dreg);
3519 } else {
3520 sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset);
3521 sparc_ld_imm (code, reg, offset, ins->dreg);
3524 switch (ins->opcode) {
3525 case OP_FCONV_TO_I1:
3526 case OP_FCONV_TO_U1:
3527 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3528 break;
3529 case OP_FCONV_TO_I2:
3530 case OP_FCONV_TO_U2:
3531 sparc_set (code, 0xffff, sparc_o7);
3532 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3533 break;
3534 default:
3535 break;
3537 break;
3539 case OP_FCONV_TO_I8:
3540 case OP_FCONV_TO_U8:
3541 /* Emulated */
3542 g_assert_not_reached ();
3543 break;
3544 case OP_FCONV_TO_R4:
3545 /* FIXME: Change precision ? */
3546 #ifdef SPARCV9
3547 sparc_fmovd (code, ins->sreg1, ins->dreg);
3548 #else
3549 sparc_fmovs (code, ins->sreg1, ins->dreg);
3550 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3551 #endif
3552 break;
3553 case OP_LCONV_TO_R_UN: {
3554 /* Emulated */
3555 g_assert_not_reached ();
3556 break;
3558 case OP_LCONV_TO_OVF_I:
3559 case OP_LCONV_TO_OVF_I4_2: {
3560 guint32 *br [3], *label [1];
3563  * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
3565 sparc_cmp_imm (code, ins->sreg1, 0);
3566 br [0] = code;
3567 sparc_branch (code, 1, sparc_bneg, 0);
3568 sparc_nop (code);
3570 /* positive */
3571 /* ms word must be 0 */
3572 sparc_cmp_imm (code, ins->sreg2, 0);
3573 br [1] = code;
3574 sparc_branch (code, 1, sparc_be, 0);
3575 sparc_nop (code);
3577 label [0] = code;
3579 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3581 /* negative */
3582 sparc_patch (br [0], code);
3584 /* ms word must be 0xffffffff */
3585 sparc_cmp_imm (code, ins->sreg2, -1);
3586 br [2] = code;
3587 sparc_branch (code, 1, sparc_bne, 0);
3588 sparc_nop (code);
3589 sparc_patch (br [2], label [0]);
3591 /* Ok */
3592 sparc_patch (br [1], code);
3593 if (ins->sreg1 != ins->dreg)
3594 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3595 break;
3597 case OP_FADD:
3598 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3599 break;
3600 case OP_FSUB:
3601 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3602 break;
3603 case OP_FMUL:
3604 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3605 break;
3606 case OP_FDIV:
3607 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3608 break;
3609 case OP_FNEG:
3610 #ifdef SPARCV9
3611 sparc_fnegd (code, ins->sreg1, ins->dreg);
3612 #else
3613 /* FIXME: why not use fnegd? */
3614 sparc_fnegs (code, ins->sreg1, ins->dreg);
3615 #endif
3616 break;
3617 case OP_FREM:
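/* Approximated as sreg1 - (sreg1 / sreg2) * sreg2 without truncating the quotient, so this may not match fmod/IEEE remainder semantics in all cases */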
3618 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3619 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3620 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3621 break;
3622 case OP_FCOMPARE:
3623 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3624 break;
3625 case OP_FCEQ:
3626 case OP_FCLT:
3627 case OP_FCLT_UN:
3628 case OP_FCGT:
3629 case OP_FCGT_UN:
3630 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3631 sparc_clr_reg (code, ins->dreg);
3632 switch (ins->opcode) {
3633 case OP_FCLT_UN:
3634 case OP_FCGT_UN:
3635 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3636 /* delay slot */
3637 sparc_set (code, 1, ins->dreg);
3638 sparc_fbranch (code, 1, sparc_fbu, 2);
3639 /* delay slot */
3640 sparc_set (code, 1, ins->dreg);
3641 break;
3642 default:
3643 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3644 /* delay slot */
3645 sparc_set (code, 1, ins->dreg);
3647 break;
3648 case OP_FBEQ:
3649 case OP_FBLT:
3650 case OP_FBGT:
3651 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3652 break;
3653 case OP_FBGE: {
3654 /* clt.un + brfalse */
3655 guint32 *p = code;
3656 sparc_fbranch (code, 1, sparc_fbul, 0);
3657 /* delay slot */
3658 sparc_nop (code);
3659 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3660 sparc_patch (p, (guint8*)code);
3661 break;
3663 case OP_FBLE: {
3664 /* cgt.un + brfalse */
3665 guint32 *p = code;
3666 sparc_fbranch (code, 1, sparc_fbug, 0);
3667 /* delay slot */
3668 sparc_nop (code);
3669 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3670 sparc_patch (p, (guint8*)code);
3671 break;
3673 case OP_FBNE_UN:
3674 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3675 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3676 break;
3677 case OP_FBLT_UN:
3678 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3679 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3680 break;
3681 case OP_FBGT_UN:
3682 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3683 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3684 break;
3685 case OP_FBGE_UN:
3686 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3687 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3688 break;
3689 case OP_FBLE_UN:
3690 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3691 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3692 break;
3693 case OP_CKFINITE: {
3694 MonoInst *spill = cfg->arch.float_spill_slot;
3695 gint32 reg = spill->inst_basereg;
3696 gint32 offset = spill->inst_offset;
3698 g_assert (spill->opcode == OP_REGOFFSET);
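/* Store the double and reload its high 16 bits; (bits >> 4) & 2047 isolates the 11-bit exponent, which is all ones (2047) only for NaN or infinity */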
3700 if (!sparc_is_imm13 (offset)) {
3701 sparc_set (code, offset, sparc_o7);
3702 sparc_stdf (code, ins->sreg1, reg, sparc_o7);
3703 sparc_lduh (code, reg, sparc_o7, sparc_o7);
3704 } else {
3705 sparc_stdf_imm (code, ins->sreg1, reg, offset);
3706 sparc_lduh_imm (code, reg, offset, sparc_o7);
3708 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3709 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3710 sparc_cmp_imm (code, sparc_o7, 2047);
3711 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
3712 #ifdef SPARCV9
3713 sparc_fmovd (code, ins->sreg1, ins->dreg);
3714 #else
3715 sparc_fmovs (code, ins->sreg1, ins->dreg);
3716 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3717 #endif
3718 break;
3721 case OP_MEMORY_BARRIER:
3722 sparc_membar (code, sparc_membar_all);
3723 break;
3725 default:
3726 #ifdef __GNUC__
3727 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3728 #else
3729 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3730 #endif
3731 g_assert_not_reached ();
3734 if ((((guint8*)code) - code_start) > max_len) {
3735 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3736 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3737 g_assert_not_reached ();
3740 cpos += max_len;
3742 last_ins = ins;
3745 cfg->code_len = (guint8*)code - cfg->native_code;
3748 void
3749 mono_arch_register_lowlevel_calls (void)
3751 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
3754 void
3755 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3757 MonoJumpInfo *patch_info;
3759 /* FIXME: Move part of this to arch independent code */
3760 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3761 unsigned char *ip = patch_info->ip.i + code;
3762 gpointer target;
3764 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3766 switch (patch_info->type) {
3767 case MONO_PATCH_INFO_NONE:
3768 continue;
3769 case MONO_PATCH_INFO_CLASS_INIT: {
3770 guint32 *ip2 = (guint32*)ip;
3771 /* Might already have been changed to a nop */
3772 #ifdef SPARCV9
3773 sparc_set_template (ip2, sparc_o7);
3774 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
3775 #else
3776 sparc_call_simple (ip2, 0);
3777 #endif
3778 break;
3780 case MONO_PATCH_INFO_METHOD_JUMP: {
3781 guint32 *ip2 = (guint32*)ip;
3782 /* Might already have been patched */
3783 sparc_set_template (ip2, sparc_o7);
3784 break;
3786 default:
3787 break;
3789 sparc_patch ((guint32*)ip, target);
3793 void*
3794 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3796 int i;
3797 guint32 *code = (guint32*)p;
3798 MonoMethodSignature *sig = mono_method_signature (cfg->method);
3799 CallInfo *cinfo;
3801 /* Save registers to stack */
3802 for (i = 0; i < 6; ++i)
3803 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
3805 cinfo = get_call_info (cfg, sig, FALSE);
3807 /* Save float regs on V9, since they are caller saved */
3808 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3809 ArgInfo *ainfo = cinfo->args + i;
3810 gint32 stack_offset;
3812 stack_offset = ainfo->offset + ARGS_OFFSET;
3814 if (ainfo->storage == ArgInFloatReg) {
3815 if (!sparc_is_imm13 (stack_offset))
3816 NOT_IMPLEMENTED;
3817 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3819 else if (ainfo->storage == ArgInDoubleReg) {
3820 /* The offset is guaranteed to be aligned by the ABI rules */
3821 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3825 sparc_set (code, cfg->method, sparc_o0);
3826 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3828 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3829 EMIT_CALL ();
3831 /* Restore float regs on V9 */
3832 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3833 ArgInfo *ainfo = cinfo->args + i;
3834 gint32 stack_offset;
3836 stack_offset = ainfo->offset + ARGS_OFFSET;
3838 if (ainfo->storage == ArgInFloatReg) {
3839 if (!sparc_is_imm13 (stack_offset))
3840 NOT_IMPLEMENTED;
3841 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3843 else if (ainfo->storage == ArgInDoubleReg) {
3844 /* The offset is guaranteed to be aligned by the ABI rules */
3845 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3849 g_free (cinfo);
3851 return code;
3854 enum {
3855 SAVE_NONE,
3856 SAVE_STRUCT,
3857 SAVE_ONE,
3858 SAVE_TWO,
3859 SAVE_FP
3862 void*
3863 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
3865 guint32 *code = (guint32*)p;
3866 int save_mode = SAVE_NONE;
3867 MonoMethod *method = cfg->method;
3869 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
3870 case MONO_TYPE_VOID:
3871 /* special case string .ctor icall */
3872 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3873 save_mode = SAVE_ONE;
3874 else
3875 save_mode = SAVE_NONE;
3876 break;
3877 case MONO_TYPE_I8:
3878 case MONO_TYPE_U8:
3879 #ifdef SPARCV9
3880 save_mode = SAVE_ONE;
3881 #else
3882 save_mode = SAVE_TWO;
3883 #endif
3884 break;
3885 case MONO_TYPE_R4:
3886 case MONO_TYPE_R8:
3887 save_mode = SAVE_FP;
3888 break;
3889 case MONO_TYPE_VALUETYPE:
3890 save_mode = SAVE_STRUCT;
3891 break;
3892 default:
3893 save_mode = SAVE_ONE;
3894 break;
3897 /* Save the result to the stack and also put it into the output registers */
3899 switch (save_mode) {
3900 case SAVE_TWO:
3901 /* V8 only */
3902 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3903 sparc_st_imm (code, sparc_i1, sparc_fp, 72);
3904 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3905 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3906 break;
3907 case SAVE_ONE:
3908 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3909 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3910 break;
3911 case SAVE_FP:
3912 #ifdef SPARCV9
3913 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
3914 #else
3915 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3916 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3917 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3918 #endif
3919 break;
3920 case SAVE_STRUCT:
3921 #ifdef SPARCV9
3922 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3923 #else
3924 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
3925 #endif
3926 break;
3927 case SAVE_NONE:
3928 default:
3929 break;
3932 sparc_set (code, cfg->method, sparc_o0);
3934 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
3935 EMIT_CALL ();
3937 /* Restore result */
3939 switch (save_mode) {
3940 case SAVE_TWO:
3941 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3942 sparc_ld_imm (code, sparc_fp, 72, sparc_i1);
3943 break;
3944 case SAVE_ONE:
3945 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3946 break;
3947 case SAVE_FP:
3948 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
3949 break;
3950 case SAVE_NONE:
3951 default:
3952 break;
3955 return code;
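/*
 * mono_arch_emit_prolog:
 *
 * Emits the method prolog: allocates the initial native code buffer, computes
 * the final frame size (register save area + outgoing parameter area + locals,
 * aligned to MONO_ARCH_FRAME_ALIGNMENT), emits the save instruction, stores
 * incoming arguments into their stack slots according to their ArgInfo, and
 * sets up the LMF if the method requires one.
 */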
3958 guint8 *
3959 mono_arch_emit_prolog (MonoCompile *cfg)
3961 MonoMethod *method = cfg->method;
3962 MonoMethodSignature *sig;
3963 MonoInst *inst;
3964 guint32 *code;
3965 CallInfo *cinfo;
3966 guint32 i, offset;
3968 cfg->code_size = 256;
3969 cfg->native_code = g_malloc (cfg->code_size);
3970 code = (guint32*)cfg->native_code;
3972 /* FIXME: Generate intermediate code instead */
3974 offset = cfg->stack_offset;
3975 offset += (16 * sizeof (gpointer)); /* register save area */
3976 #ifndef SPARCV9
3977 offset += 4; /* struct/union return pointer */
3978 #endif
3980 /* add parameter area size for called functions */
3981 if (cfg->param_area < (6 * sizeof (gpointer)))
3982 /* Reserve space for the first 6 arguments even if it is unused */
3983 offset += 6 * sizeof (gpointer);
3984 else
3985 offset += cfg->param_area;
3987 /* align the stack size */
3988 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3991 * localloc'd memory is stored between the local variables (whose
3992 * size is given by cfg->stack_offset) and the space reserved
3993 * by the ABI.
3995 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3997 cfg->stack_offset = offset;
3999 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4000 /* Perform stack touching */
4001 NOT_IMPLEMENTED;
4002 #endif
4004 if (!sparc_is_imm13 (- cfg->stack_offset)) {
4005 /* Can't use sparc_o7 here, since we're still in the caller's frame */
4006 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
4007 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
4009 else
4010 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
4013 if (strstr (cfg->method->name, "foo")) {
4014 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4015 sparc_call_simple (code, 0);
4016 sparc_nop (code);
4020 sig = mono_method_signature (method);
4022 cinfo = get_call_info (cfg, sig, FALSE);
4024 /* Keep in sync with emit_load_volatile_arguments */
4025 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4026 ArgInfo *ainfo = cinfo->args + i;
4027 gint32 stack_offset;
4028 MonoType *arg_type;
4029 inst = cfg->args [i];
4031 if (sig->hasthis && (i == 0))
4032 arg_type = &mono_defaults.object_class->byval_arg;
4033 else
4034 arg_type = sig->params [i - sig->hasthis];
4036 stack_offset = ainfo->offset + ARGS_OFFSET;
4038 /* Save the split arguments so they will reside entirely on the stack */
4039 if (ainfo->storage == ArgInSplitRegStack) {
4040 /* Save the register to the stack */
4041 g_assert (inst->opcode == OP_REGOFFSET);
4042 if (!sparc_is_imm13 (stack_offset))
4043 NOT_IMPLEMENTED;
4044 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
4047 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
4048 /* Save the argument to a dword aligned stack location */
4050 * stack_offset contains the offset of the argument on the stack.
4051 * inst->inst_offset contains the dword aligned offset where the value
4052 * should be stored.
4054 if (ainfo->storage == ArgInIRegPair) {
4055 if (!sparc_is_imm13 (inst->inst_offset + 4))
4056 NOT_IMPLEMENTED;
4057 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4058 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4060 else
4061 if (ainfo->storage == ArgInSplitRegStack) {
4062 #ifdef SPARCV9
4063 g_assert_not_reached ();
4064 #endif
4065 if (stack_offset != inst->inst_offset) {
4066 /* stack_offset is not dword aligned, so we need to make a copy */
4067 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
4068 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4069 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4072 else
4073 if (ainfo->storage == ArgOnStackPair) {
4074 #ifdef SPARCV9
4075 g_assert_not_reached ();
4076 #endif
4077 if (stack_offset != inst->inst_offset) {
4078 /* stack_offset is not dword aligned, so we need to make a copy */
4079 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
4080 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
4081 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4082 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4085 else
4086 g_assert_not_reached ();
4088 else
4089 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4090 /* Argument is in a register, but needs to be saved to the stack */
4091 if (!sparc_is_imm13 (stack_offset))
4092 NOT_IMPLEMENTED;
4093 if ((stack_offset - ARGS_OFFSET) & 0x1)
4094 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4095 else
4096 if ((stack_offset - ARGS_OFFSET) & 0x2)
4097 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4098 else
4099 if ((stack_offset - ARGS_OFFSET) & 0x4)
4100 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4101 else {
4102 if (v64)
4103 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4104 else
4105 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4108 else
4109 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4110 #ifdef SPARCV9
4111 NOT_IMPLEMENTED;
4112 #endif
4113 /* Argument is in a register pair, but needs to be saved to the stack */
4114 if (!sparc_is_imm13 (inst->inst_offset + 4))
4115 NOT_IMPLEMENTED;
4116 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4117 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4119 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4120 if (!sparc_is_imm13 (stack_offset))
4121 NOT_IMPLEMENTED;
4122 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4124 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4125 /* The offset is guaranteed to be aligned by the ABI rules */
4126 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4129 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4130 /* Need to move it into a double precision register */
4131 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4134 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4135 if (inst->opcode == OP_REGVAR)
4136 /* FIXME: Load the argument from the stack into its register */
4137 NOT_IMPLEMENTED;
4140 g_free (cinfo);
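/*
 * If the method saves an LMF (Last Managed Frame), fill it in: the current
 * ip, sp, fp and method are stored into the MonoLMF structure in the frame,
 * and mono_arch_get_lmf_addr () is called so the frame can be linked into
 * the per thread LMF list.
 */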
4142 if (cfg->method->save_lmf) {
4143 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4145 /* Save ip */
4146 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4147 sparc_set_template (code, sparc_o7);
4148 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4149 /* Save sp */
4150 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4151 /* Save fp */
4152 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4153 /* Save method */
4154 /* FIXME: add a relocation for this */
4155 sparc_set (code, cfg->method, sparc_o7);
4156 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4158 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4159 (gpointer)"mono_arch_get_lmf_addr");
4160 EMIT_CALL ();
4162 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4165 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4166 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4168 cfg->code_len = (guint8*)code - cfg->native_code;
4170 g_assert (cfg->code_len <= cfg->code_size);
4172 return (guint8*)code;
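/*
 * mono_arch_emit_epilog:
 *
 * Emits the method epilog: calls the trace leave hook if tracing is enabled,
 * restores the LMF if one was saved, then returns with ret/restore. When it
 * is safe to do so, the instruction computing the return value is folded into
 * the restore (see below).
 */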
4175 void
4176 mono_arch_emit_epilog (MonoCompile *cfg)
4178 MonoMethod *method = cfg->method;
4179 guint32 *code;
4180 int can_fold = 0;
4181 int max_epilog_size = 16 + 20 * 4;
4183 if (cfg->method->save_lmf)
4184 max_epilog_size += 128;
4186 if (mono_jit_trace_calls != NULL)
4187 max_epilog_size += 50;
4189 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4190 max_epilog_size += 50;
4192 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4193 cfg->code_size *= 2;
4194 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4195 mono_jit_stats.code_reallocs++;
4198 code = (guint32*)(cfg->native_code + cfg->code_len);
4200 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4201 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4203 if (cfg->method->save_lmf) {
4204 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4206 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4210 * The V8 ABI requires that calls to functions which return a structure
4211 * return to %i7+12
4213 if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
4214 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4215 else
4216 sparc_ret (code);
4218 /* Only fold the last instruction into the restore if the exit block has an in count of 1
4219 and the previous block hasn't been optimized away, since it may have an in count > 1 */
4220 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4221 can_fold = 1;
4224 * FIXME: The last instruction might have a branch pointing into it like in
4225 * int_ceq sparc_i0 <-
4227 can_fold = 0;
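/*
 * Folding works because the destination register of a restore is written in
 * the caller's register window. For example (illustrative), the sequence
 *   or %l0, 10, %i0 ; ret ; restore %g0, %g0, %g0
 * can be rewritten as
 *   ret ; restore %l0, 10, %o0
 * since the caller's %o0 is the callee's %i0.
 */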
4229 /* Try folding last instruction into the restore */
4230 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4231 /* or reg, imm, %i0 */
4232 int reg = sparc_inst_rs1 (code [-2]);
4233 int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
4234 code [-2] = code [-1];
4235 code --;
4236 sparc_restore_imm (code, reg, imm, sparc_o0);
4238 else
4239 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4240 /* or reg, reg, %i0 */
4241 int reg1 = sparc_inst_rs1 (code [-2]);
4242 int reg2 = sparc_inst_rs2 (code [-2]);
4243 code [-2] = code [-1];
4244 code --;
4245 sparc_restore (code, reg1, reg2, sparc_o0);
4247 else
4248 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4250 cfg->code_len = (guint8*)code - cfg->native_code;
4252 g_assert (cfg->code_len < cfg->code_size);
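/*
 * mono_arch_emit_exceptions:
 *
 * Emits the out of line code which throws corlib exceptions. For each
 * MONO_PATCH_INFO_EXC patch a small throw sequence is appended after the
 * method body; sequences for the same exception class are shared. The type
 * index is passed in %o0 and the per call site throw offset in %o1, and the
 * call goes to mono_arch_throw_corlib_exception.
 */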
4256 void
4257 mono_arch_emit_exceptions (MonoCompile *cfg)
4259 MonoJumpInfo *patch_info;
4260 guint32 *code;
4261 int nthrows = 0, i;
4262 int exc_count = 0;
4263 guint32 code_size;
4264 MonoClass *exc_classes [16];
4265 guint8 *exc_throw_start [16], *exc_throw_end [16];
4267 /* Compute needed space */
4268 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4269 if (patch_info->type == MONO_PATCH_INFO_EXC)
4270 exc_count++;
4274 * make sure we have enough space for exceptions
4276 #ifdef SPARCV9
4277 code_size = exc_count * (20 * 4);
4278 #else
4279 code_size = exc_count * 24;
4280 #endif
4282 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4283 cfg->code_size *= 2;
4284 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4285 mono_jit_stats.code_reallocs++;
4288 code = (guint32*)(cfg->native_code + cfg->code_len);
4290 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4291 switch (patch_info->type) {
4292 case MONO_PATCH_INFO_EXC: {
4293 MonoClass *exc_class;
4294 guint32 *buf, *buf2;
4295 guint32 throw_ip, type_idx;
4296 gint32 disp;
4298 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4300 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4301 g_assert (exc_class);
4302 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4303 throw_ip = patch_info->ip.i;
4305 /* Find a throw sequence for the same exception class */
4306 for (i = 0; i < nthrows; ++i)
4307 if (exc_classes [i] == exc_class)
4308 break;
4310 if (i < nthrows) {
4311 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4312 if (!sparc_is_imm13 (throw_offset))
4313 sparc_set32 (code, throw_offset, sparc_o1);
4315 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4316 g_assert (sparc_is_imm22 (disp));
4317 sparc_branch (code, 0, sparc_ba, disp);
4318 if (sparc_is_imm13 (throw_offset))
4319 sparc_set32 (code, throw_offset, sparc_o1);
4320 else
4321 sparc_nop (code);
4322 patch_info->type = MONO_PATCH_INFO_NONE;
4324 else {
4325 /* Emit the template for setting o1 */
4326 buf = code;
4327 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4328 /* Can use a short form */
4329 sparc_nop (code);
4330 else
4331 sparc_set_template (code, sparc_o1);
4332 buf2 = code;
4334 if (nthrows < 16) {
4335 exc_classes [nthrows] = exc_class;
4336 exc_throw_start [nthrows] = (guint8*)code;
4340 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4341 EMIT_CALL();
4344 /* first arg = type token */
4345 /* Pass the type index to reduce the size of the sparc_set */
4346 if (!sparc_is_imm13 (type_idx))
4347 sparc_set32 (code, type_idx, sparc_o0);
4349 /* second arg = offset between the throw ip and the current ip */
4350 /* On sparc, the saved ip points to the call instruction */
4351 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4352 sparc_set32 (buf, disp, sparc_o1);
4353 while (buf < buf2)
4354 sparc_nop (buf);
4356 if (nthrows < 16) {
4357 exc_throw_end [nthrows] = (guint8*)code;
4358 nthrows ++;
4361 patch_info->data.name = "mono_arch_throw_corlib_exception";
4362 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4363 patch_info->ip.i = (guint8*)code - cfg->native_code;
4365 EMIT_CALL ();
4367 if (sparc_is_imm13 (type_idx)) {
4368 /* Put it into the delay slot */
4369 code --;
4370 buf = code;
4371 sparc_set32 (code, type_idx, sparc_o0);
4372 g_assert (code - buf == 1);
4375 break;
4377 default:
4378 /* do nothing */
4379 break;
4383 cfg->code_len = (guint8*)code - cfg->native_code;
4385 g_assert (cfg->code_len < cfg->code_size);
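/*
 * The address of the per thread LMF is kept in a thread specific key: the
 * Solaris thr_* TLS functions are used when MONO_SPARC_THR_TLS is defined,
 * otherwise plain pthread keys are used.
 */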
4389 gboolean lmf_addr_key_inited = FALSE;
4391 #ifdef MONO_SPARC_THR_TLS
4392 thread_key_t lmf_addr_key;
4393 #else
4394 pthread_key_t lmf_addr_key;
4395 #endif
4397 gpointer
4398 mono_arch_get_lmf_addr (void)
4400 /* This is perf critical so we bypass the IO layer */
4401 /* The thr_... functions seem to be somewhat faster */
4402 #ifdef MONO_SPARC_THR_TLS
4403 gpointer res;
4404 thr_getspecific (lmf_addr_key, &res);
4405 return res;
4406 #else
4407 return pthread_getspecific (lmf_addr_key);
4408 #endif
4411 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4414 * There seems to be no way to determine stack boundaries under Solaris,
4415 * so it's not possible to determine whether a SIGSEGV is caused by stack
4416 * overflow or not.
4418 #error "--with-sigaltstack=yes not supported on solaris"
4420 #endif
4422 void
4423 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4425 if (!lmf_addr_key_inited) {
4426 int res;
4428 lmf_addr_key_inited = TRUE;
4430 #ifdef MONO_SPARC_THR_TLS
4431 res = thr_keycreate (&lmf_addr_key, NULL);
4432 #else
4433 res = pthread_key_create (&lmf_addr_key, NULL);
4434 #endif
4435 g_assert (res == 0);
4439 #ifdef MONO_SPARC_THR_TLS
4440 thr_setspecific (lmf_addr_key, &tls->lmf);
4441 #else
4442 pthread_setspecific (lmf_addr_key, &tls->lmf);
4443 #endif
4446 void
4447 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4451 MonoInst*
4452 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4454 MonoInst *ins = NULL;
4456 return ins;
4460 * mono_arch_get_argument_info:
4461 * @csig: a method signature
4462 * @param_count: the number of parameters to consider
4463 * @arg_info: an array to store the result infos
4465 * Gathers information on parameters such as size, alignment and
4466 * padding. arg_info should be large enough to hold param_count + 1 entries.
4468 * Returns the size of the activation frame.
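 *
 * Illustrative usage (not part of the original code): callers are expected
 * to provide param_count + 1 entries, e.g.:
 *
 *   MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, csig->param_count + 1);
 *   mono_arch_get_argument_info (csig, csig->param_count, arg_info);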
4471 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4473 int k, align;
4474 CallInfo *cinfo;
4475 ArgInfo *ainfo;
4477 cinfo = get_call_info (NULL, csig, FALSE);
4479 if (csig->hasthis) {
4480 ainfo = &cinfo->args [0];
4481 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4484 for (k = 0; k < param_count; k++) {
4485 ainfo = &cinfo->args [k + csig->hasthis];
4487 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4488 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
4491 g_free (cinfo);
4493 return 0;
4496 gboolean
4497 mono_arch_print_tree (MonoInst *tree, int arity)
4499 return 0;
4502 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4504 return NULL;
4507 gpointer
4508 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4510 /* FIXME: implement */
4511 g_assert_not_reached ();