1 /*
2 * mini-sparc.c: Sparc backend for the Mono code generator
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * Modified for SPARC:
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
15 #include "mini.h"
16 #include <string.h>
17 #include <pthread.h>
18 #include <unistd.h>
20 #ifndef __linux__
21 #include <sys/systeminfo.h>
22 #include <thread.h>
23 #endif
25 #include <unistd.h>
26 #include <sys/mman.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/debug-helpers.h>
30 #include <mono/metadata/tokentype.h>
31 #include <mono/utils/mono-math.h>
33 #include "mini-sparc.h"
34 #include "trace.h"
35 #include "cpu-sparc.h"
36 #include "jit-icalls.h"
37 #include "ir-emit.h"
40 * Sparc V9 means two things:
41 * - the instruction set
42 * - the ABI
44 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
45 * processors in use are 64 bit processors. The V9 ABI is only usable if the
46 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
47 * instructions without using the 64 bit ABI.
51 * Register usage:
52 * - %i0..%i<n> hold the incoming arguments; these are never written by JITted
53 * code. Unused input registers are used for global register allocation.
54 * - %o0..%o5 and %l7 are used for local register allocation and passing arguments
55 * - %l0..%l6 are used for global register allocation
56 * - %o7 and %g1 are used as scratch registers in opcodes
57 * - all floating point registers are used for local register allocation except %f0.
58 * Only double precision registers are used.
59 * In 64 bit mode:
60 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
61 * used for local allocation.
65 * Alignment:
66 * - doubles and longs must be stored in dword aligned locations
70 * The following things are not implemented or do not work:
71 * - some fp arithmetic corner cases
72 * The following tests in mono/mini are expected to fail:
73 * - test_0_simple_double_casts
74 * This test casts (guint64)-1 to double and then back to guint64 again.
75 * Under x86, it returns 0, while under sparc it returns -1.
77 * In addition to this, the runtime requires the trunc function, or its
78 * Solaris counterpart, aintl, to do some double->int conversions. If this
79 * function is not available, it is emulated somewhat, but the results can be
80 * strange.
84 * SPARCV9 FIXME:
85 * - optimize sparc_set according to the memory model
86 * - when non-AOT compiling, compute patch targets immediately so we don't
87 * have to emit the 6 instruction template.
88 * - varargs
89 * - struct arguments/returns
93 * SPARCV9 ISSUES:
94 * - sparc_call_simple can't be used in a lot of places since the displacement
95 * might not fit into an imm30.
96 * - g1 can't be used in a lot of places since it is used as a scratch reg in
97 * sparc_set.
98 * - sparc_f0 can't be used as a scratch register on V9
99 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), i.e.
100 * %d36 = %f5.
101 * - ldind.i4/u4 needs to sign extend/clear out the upper word -> slows things down
102 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
103 * be a double precision register which has no single precision part.
104 * - passing/returning structs is hard to implement, because:
105 * - the spec is very hard to understand
106 * - it requires knowledge about the fields of the structure, and needs to handle
107 * nested structures etc.
111 * Possible optimizations:
112 * - delay slot scheduling
113 * - allocate large constants to registers
114 * - add more mul/div/rem optimizations
117 #ifndef __linux__
118 #define MONO_SPARC_THR_TLS 1
119 #endif
122 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
123 * causing infinite loops in dominator computation. So glib-2.4 is required.
125 #ifdef SPARCV9
126 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
127 #error "glib 2.4 or later is required for 64 bit mode."
128 #endif
129 #endif
131 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
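/*
 * Worked example (added for illustration, not in the original source):
 * ALIGN_TO rounds val up to the next multiple of align, which must be a
 * power of two, e.g. ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16.
 */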
133 #define SIGNAL_STACK_SIZE (64 * 1024)
135 #define STACK_BIAS MONO_SPARC_STACK_BIAS
137 #ifdef SPARCV9
139 /* %g1 is used by sparc_set */
140 #define GP_SCRATCH_REG sparc_g4
141 /* %f0 is used for parameter passing */
142 #define FP_SCRATCH_REG sparc_f30
143 #define ARGS_OFFSET (STACK_BIAS + 128)
145 #else
147 #define FP_SCRATCH_REG sparc_f0
148 #define ARGS_OFFSET 68
149 #define GP_SCRATCH_REG sparc_g1
151 #endif
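/*
 * Illustrative note (an assumption, not from the original source): ARGS_OFFSET
 * is the offset from %sp (caller side) or %fp (callee side) to the argument
 * area. On V8, the first 64 bytes of a frame are the register window save area
 * and the next word holds the struct-return address, giving 68; on V9 the
 * frame is addressed through the 2047 byte stack bias and the register save
 * area is 16 * 8 = 128 bytes.
 */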
153 /* Whether the CPU supports v9 instructions */
154 static gboolean sparcv9 = FALSE;
156 /* Whether this is a 64 bit executable */
157 #if SPARCV9
158 static gboolean v64 = TRUE;
159 #else
160 static gboolean v64 = FALSE;
161 #endif
163 static gpointer mono_arch_get_lmf_addr (void);
165 const char*
166 mono_arch_regname (int reg) {
167 static const char * rnames[] = {
168 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
169 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
170 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
171 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
172 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
173 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
174 "sparc_fp", "sparc_retadr"
176 if (reg >= 0 && reg < 32)
177 return rnames [reg];
178 return "unknown";
181 const char*
182 mono_arch_fregname (int reg) {
183 static const char *rnames [] = {
184 "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
185 "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
186 "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
187 "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
188 "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
189 "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
190 "sparc_f30", "sparc_f31"
193 if (reg >= 0 && reg < 32)
194 return rnames [reg];
195 else
196 return "unknown";
200 * Initialize the cpu to execute managed code.
202 void
203 mono_arch_cpu_init (void)
205 guint32 dummy;
206 /* make sure sparcv9 is initialized for embedded use */
207 mono_arch_cpu_optimizazions(&dummy);
211 * Initialize architecture specific code.
213 void
214 mono_arch_init (void)
219 * Cleanup architecture specific code.
221 void
222 mono_arch_cleanup (void)
227 * This function returns the optimizations supported on this cpu.
229 guint32
230 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
232 char buf [1024];
233 guint32 opts = 0;
235 *exclude_mask = 0;
237 #ifndef __linux__
238 if (!sysinfo (SI_ISALIST, buf, 1024))
239 g_assert_not_reached ();
240 #else
241 /* From glibc. If getpagesize () returns 8192, we're on sparc64, which
242 * (in)directly implies that we're a v9 or better.
243 * Improvements to this are greatly appreciated...
244 * Also, we don't differentiate between v7 and v8. I sense SIGILL
245 * sniffing in my future.
247 if (getpagesize() == 8192)
248 strcpy (buf, "sparcv9");
249 else
250 strcpy (buf, "sparcv8");
251 #endif
254 * On some processors, the cmov instructions are even slower than the
255 * normal ones...
257 if (strstr (buf, "sparcv9")) {
258 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
259 sparcv9 = TRUE;
261 else
262 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
264 return opts;
267 #ifdef __GNUC__
268 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
269 #else /* assume Sun's compiler */
270 static void flushi(void *addr)
272 asm("flush %i0");
274 #endif
276 #ifndef __linux__
277 void sync_instruction_memory(caddr_t addr, int len);
278 #endif
280 void
281 mono_arch_flush_icache (guint8 *code, gint size)
283 #ifndef __linux__
284 /* Hopefully this is optimized based on the actual CPU */
285 sync_instruction_memory (code, size);
286 #else
287 gulong start = (gulong) code;
288 gulong end = start + size;
289 gulong align;
291 /* Sparcv9 chips only need flushes on 32 byte
292 * cacheline boundaries.
294 * Sparcv8 needs a flush every 8 bytes.
296 align = (sparcv9 ? 32 : 8);
298 start &= ~(align - 1);
299 end = (end + (align - 1)) & ~(align - 1);
301 while (start < end) {
302 #ifdef __GNUC__
303 __asm__ __volatile__ ("iflush %0"::"r"(start));
304 #else
305 flushi (start);
306 #endif
307 start += align;
309 #endif
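/*
 * Worked example (added for illustration): with sparcv9 the flush granularity
 * is a 32 byte cache line, so for code = 0x1010 and size = 40 the loop above
 * rounds the range to [0x1000, 0x1040) and issues two iflush instructions,
 * one per line.
 */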
313 * mono_sparc_flushw:
315 * Flush all register windows to memory. Every register window is saved to
316 * a 16 word area on the stack pointed to by its %sp register.
318 void
319 mono_sparc_flushw (void)
321 static guint32 start [64];
322 static int inited = 0;
323 guint32 *code;
324 static void (*flushw) (void);
326 if (!inited) {
327 code = start;
329 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
330 sparc_flushw (code);
331 sparc_ret (code);
332 sparc_restore_simple (code);
334 g_assert ((code - start) < 64);
336 mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
338 flushw = (gpointer)start;
340 inited = 1;
343 flushw ();
346 void
347 mono_arch_flush_register_windows (void)
349 mono_sparc_flushw ();
352 gboolean
353 mono_arch_is_inst_imm (gint64 imm)
355 return sparc_is_imm13 (imm);
358 gboolean
359 mono_sparc_is_v9 (void) {
360 return sparcv9;
363 gboolean
364 mono_sparc_is_sparc64 (void) {
365 return v64;
368 typedef enum {
369 ArgInIReg,
370 ArgInIRegPair,
371 ArgInSplitRegStack,
372 ArgInFReg,
373 ArgInFRegPair,
374 ArgOnStack,
375 ArgOnStackPair,
376 ArgInFloatReg, /* V9 only */
377 ArgInDoubleReg /* V9 only */
378 } ArgStorage;
380 typedef struct {
381 gint16 offset;
382 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
383 gint8 reg;
384 ArgStorage storage;
385 guint32 vt_offset; /* for valuetypes */
386 } ArgInfo;
388 typedef struct {
389 int nargs;
390 guint32 stack_usage;
391 guint32 reg_usage;
392 ArgInfo ret;
393 ArgInfo sig_cookie;
394 ArgInfo args [1];
395 } CallInfo;
397 #define DEBUG(a)
399 /* %o0..%o5 */
400 #define PARAM_REGS 6
402 static void inline
403 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
405 ainfo->offset = *stack_size;
407 if (!pair) {
408 if (*gr >= PARAM_REGS) {
409 ainfo->storage = ArgOnStack;
411 else {
412 ainfo->storage = ArgInIReg;
413 ainfo->reg = *gr;
414 (*gr) ++;
417 /* Always reserve stack space for parameters passed in registers */
418 (*stack_size) += sizeof (gpointer);
420 else {
421 if (*gr < PARAM_REGS - 1) {
422 /* A pair of registers */
423 ainfo->storage = ArgInIRegPair;
424 ainfo->reg = *gr;
425 (*gr) += 2;
427 else if (*gr >= PARAM_REGS) {
428 /* A pair of stack locations */
429 ainfo->storage = ArgOnStackPair;
431 else {
432 ainfo->storage = ArgInSplitRegStack;
433 ainfo->reg = *gr;
434 (*gr) ++;
437 (*stack_size) += 2 * sizeof (gpointer);
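/*
 * Illustrative sketch (not part of the original source): on V8, for the
 * managed signature "void m (int a, long b, double c)" add_general ()
 * classifies the arguments as
 *   a -> ArgInIReg     %o0
 *   b -> ArgInIRegPair %o1/%o2
 *   c -> ArgInIRegPair %o3/%o4   (R8 is passed like a 64 bit integer on V8)
 * and stack slots are reserved for all of them even though they travel in
 * registers.
 */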
441 #ifdef SPARCV9
443 #define FLOAT_PARAM_REGS 32
445 static void inline
446 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
448 ainfo->offset = *stack_size;
450 if (single) {
451 if (*gr >= FLOAT_PARAM_REGS) {
452 ainfo->storage = ArgOnStack;
454 else {
455 /* A single is passed in an odd numbered fp register (%f1, %f3, ...) */
456 ainfo->storage = ArgInFloatReg;
457 ainfo->reg = *gr + 1;
458 (*gr) += 2;
461 else {
462 if (*gr < FLOAT_PARAM_REGS) {
463 /* A double register */
464 ainfo->storage = ArgInDoubleReg;
465 ainfo->reg = *gr;
466 (*gr) += 2;
468 else {
469 ainfo->storage = ArgOnStack;
473 (*stack_size) += sizeof (gpointer);
476 #endif
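/*
 * Illustrative sketch (not part of the original source): under the V9 rules
 * above, the first fp argument lands in %d0 (ArgInDoubleReg, reg 0) if it is
 * a double, or in %f1 (ArgInFloatReg, reg 1) if it is a single; either way it
 * consumes two entries of the fp register counter, mirroring the psABI.
 */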
479 * get_call_info:
481 * Obtain information about a call according to the calling convention.
482 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
483 * document for more information.
484 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
485 * the 'Sparc Compliance Definition 2.4' document.
487 static CallInfo*
488 get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
490 guint32 i, gr, fr;
491 int n = sig->hasthis + sig->param_count;
492 guint32 stack_size = 0;
493 CallInfo *cinfo;
494 MonoType *ret_type;
495 MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
497 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
499 gr = 0;
500 fr = 0;
502 #ifdef SPARCV9
503 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
504 /* The address of the return value is passed in %o0 */
505 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
506 cinfo->ret.reg += sparc_i0;
507 /* FIXME: Pass the vret address after 'this', as on other platforms */
508 NOT_IMPLEMENTED;
510 #endif
512 /* this */
513 if (sig->hasthis)
514 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
516 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
517 gr = PARAM_REGS;
519 /* Emit the signature cookie just before the implicit arguments */
520 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
523 for (i = 0; i < sig->param_count; ++i) {
524 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
525 MonoType *ptype;
527 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
528 gr = PARAM_REGS;
530 /* Emit the signature cookie just before the implicit arguments */
531 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
534 DEBUG(printf("param %d: ", i));
535 if (sig->params [i]->byref) {
536 DEBUG(printf("byref\n"));
538 add_general (&gr, &stack_size, ainfo, FALSE);
539 continue;
541 ptype = mono_type_get_underlying_type (sig->params [i]);
542 ptype = mini_get_basic_type_from_generic (gsctx, ptype);
543 switch (ptype->type) {
544 case MONO_TYPE_BOOLEAN:
545 case MONO_TYPE_I1:
546 case MONO_TYPE_U1:
547 add_general (&gr, &stack_size, ainfo, FALSE);
548 /* the value is in the ls byte */
549 ainfo->offset += sizeof (gpointer) - 1;
550 break;
551 case MONO_TYPE_I2:
552 case MONO_TYPE_U2:
553 case MONO_TYPE_CHAR:
554 add_general (&gr, &stack_size, ainfo, FALSE);
555 /* the value is in the ls word */
556 ainfo->offset += sizeof (gpointer) - 2;
557 break;
558 case MONO_TYPE_I4:
559 case MONO_TYPE_U4:
560 add_general (&gr, &stack_size, ainfo, FALSE);
561 /* the value is in the ls dword */
562 ainfo->offset += sizeof (gpointer) - 4;
563 break;
564 case MONO_TYPE_I:
565 case MONO_TYPE_U:
566 case MONO_TYPE_PTR:
567 case MONO_TYPE_FNPTR:
568 case MONO_TYPE_CLASS:
569 case MONO_TYPE_OBJECT:
570 case MONO_TYPE_STRING:
571 case MONO_TYPE_SZARRAY:
572 case MONO_TYPE_ARRAY:
573 add_general (&gr, &stack_size, ainfo, FALSE);
574 break;
575 case MONO_TYPE_GENERICINST:
576 if (!mono_type_generic_inst_is_valuetype (ptype)) {
577 add_general (&gr, &stack_size, ainfo, FALSE);
578 break;
580 /* Fall through */
581 case MONO_TYPE_VALUETYPE:
582 #ifdef SPARCV9
583 if (sig->pinvoke)
584 NOT_IMPLEMENTED;
585 #endif
586 add_general (&gr, &stack_size, ainfo, FALSE);
587 break;
588 case MONO_TYPE_TYPEDBYREF:
589 add_general (&gr, &stack_size, ainfo, FALSE);
590 break;
591 case MONO_TYPE_U8:
592 case MONO_TYPE_I8:
593 #ifdef SPARCV9
594 add_general (&gr, &stack_size, ainfo, FALSE);
595 #else
596 add_general (&gr, &stack_size, ainfo, TRUE);
597 #endif
598 break;
599 case MONO_TYPE_R4:
600 #ifdef SPARCV9
601 add_float (&fr, &stack_size, ainfo, TRUE);
602 gr ++;
603 #else
604 /* single precision values are passed in integer registers */
605 add_general (&gr, &stack_size, ainfo, FALSE);
606 #endif
607 break;
608 case MONO_TYPE_R8:
609 #ifdef SPARCV9
610 add_float (&fr, &stack_size, ainfo, FALSE);
611 gr ++;
612 #else
613 /* double precision values are passed in a pair of registers */
614 add_general (&gr, &stack_size, ainfo, TRUE);
615 #endif
616 break;
617 default:
618 g_assert_not_reached ();
622 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
623 gr = PARAM_REGS;
625 /* Emit the signature cookie just before the implicit arguments */
626 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
629 /* return value */
630 ret_type = mono_type_get_underlying_type (sig->ret);
631 ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
632 switch (ret_type->type) {
633 case MONO_TYPE_BOOLEAN:
634 case MONO_TYPE_I1:
635 case MONO_TYPE_U1:
636 case MONO_TYPE_I2:
637 case MONO_TYPE_U2:
638 case MONO_TYPE_CHAR:
639 case MONO_TYPE_I4:
640 case MONO_TYPE_U4:
641 case MONO_TYPE_I:
642 case MONO_TYPE_U:
643 case MONO_TYPE_PTR:
644 case MONO_TYPE_FNPTR:
645 case MONO_TYPE_CLASS:
646 case MONO_TYPE_OBJECT:
647 case MONO_TYPE_SZARRAY:
648 case MONO_TYPE_ARRAY:
649 case MONO_TYPE_STRING:
650 cinfo->ret.storage = ArgInIReg;
651 cinfo->ret.reg = sparc_i0;
652 if (gr < 1)
653 gr = 1;
654 break;
655 case MONO_TYPE_U8:
656 case MONO_TYPE_I8:
657 #ifdef SPARCV9
658 cinfo->ret.storage = ArgInIReg;
659 cinfo->ret.reg = sparc_i0;
660 if (gr < 1)
661 gr = 1;
662 #else
663 cinfo->ret.storage = ArgInIRegPair;
664 cinfo->ret.reg = sparc_i0;
665 if (gr < 2)
666 gr = 2;
667 #endif
668 break;
669 case MONO_TYPE_R4:
670 case MONO_TYPE_R8:
671 cinfo->ret.storage = ArgInFReg;
672 cinfo->ret.reg = sparc_f0;
673 break;
674 case MONO_TYPE_GENERICINST:
675 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
676 cinfo->ret.storage = ArgInIReg;
677 cinfo->ret.reg = sparc_i0;
678 if (gr < 1)
679 gr = 1;
680 break;
682 /* Fall through */
683 case MONO_TYPE_VALUETYPE:
684 if (v64) {
685 if (sig->pinvoke)
686 NOT_IMPLEMENTED;
687 else
688 /* Already done */
691 else
692 cinfo->ret.storage = ArgOnStack;
693 break;
694 case MONO_TYPE_TYPEDBYREF:
695 if (v64) {
696 if (sig->pinvoke)
697 /* Same as a valuetype with size 24 */
698 NOT_IMPLEMENTED;
699 else
700 /* Already done */
703 else
704 cinfo->ret.storage = ArgOnStack;
705 break;
706 case MONO_TYPE_VOID:
707 break;
708 default:
709 g_error ("Can't handle as return value 0x%x", sig->ret->type);
712 cinfo->stack_usage = stack_size;
713 cinfo->reg_usage = gr;
714 return cinfo;
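/*
 * Example (added for illustration, not in the original source): for the V8
 * signature "long m (object this, double d)", get_call_info () returns a
 * CallInfo whose args[] describe this -> ArgInIReg %o0 and
 * d -> ArgInIRegPair %o1/%o2, and whose ret is ArgInIRegPair in %i0/%i1 as
 * seen from the callee; stack_usage still reserves the slots for the register
 * arguments, as the V8 ABI requires.
 */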
717 GList *
718 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
720 GList *vars = NULL;
721 int i;
724 * FIXME: If an argument is allocated to a register, then load it from the
725 * stack in the prolog.
728 for (i = 0; i < cfg->num_varinfo; i++) {
729 MonoInst *ins = cfg->varinfo [i];
730 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
732 /* unused vars */
733 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
734 continue;
736 /* FIXME: Make arguments on stack allocateable to registers */
737 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
738 continue;
740 if (mono_is_regsize_var (ins->inst_vtype)) {
741 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
742 g_assert (i == vmv->idx);
744 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
748 return vars;
751 GList *
752 mono_arch_get_global_int_regs (MonoCompile *cfg)
754 GList *regs = NULL;
755 int i;
756 MonoMethodSignature *sig;
757 CallInfo *cinfo;
759 sig = mono_method_signature (cfg->method);
761 cinfo = get_call_info (cfg, sig, FALSE);
763 /* Use unused input registers */
764 for (i = cinfo->reg_usage; i < 6; ++i)
765 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
767 /* Use %l0..%l6 as global registers */
768 for (i = sparc_l0; i < sparc_l7; ++i)
769 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
771 g_free (cinfo);
773 return regs;
777 * mono_arch_regalloc_cost:
779 * Return the cost, in number of memory references, of the action of
780 * allocating the variable VMV into a register during global register
781 * allocation.
783 guint32
784 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
786 return 0;
790 * Set var information according to the calling convention. Sparc version.
791 * The locals var stuff should most likely be split into another method.
794 void
795 mono_arch_allocate_vars (MonoCompile *cfg)
797 MonoMethodSignature *sig;
798 MonoMethodHeader *header;
799 MonoInst *inst;
800 int i, offset, size, align, curinst;
801 CallInfo *cinfo;
803 header = cfg->header;
805 sig = mono_method_signature (cfg->method);
807 cinfo = get_call_info (cfg, sig, FALSE);
809 if (sig->ret->type != MONO_TYPE_VOID) {
810 switch (cinfo->ret.storage) {
811 case ArgInIReg:
812 case ArgInFReg:
813 cfg->ret->opcode = OP_REGVAR;
814 cfg->ret->inst_c0 = cinfo->ret.reg;
815 break;
816 case ArgInIRegPair: {
817 MonoType *t = mono_type_get_underlying_type (sig->ret);
818 if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
819 MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
820 MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);
822 low->opcode = OP_REGVAR;
823 low->dreg = cinfo->ret.reg + 1;
824 high->opcode = OP_REGVAR;
825 high->dreg = cinfo->ret.reg;
827 cfg->ret->opcode = OP_REGVAR;
828 cfg->ret->inst_c0 = cinfo->ret.reg;
829 break;
831 case ArgOnStack:
832 #ifdef SPARCV9
833 g_assert_not_reached ();
834 #else
835 /* valuetypes */
836 cfg->vret_addr->opcode = OP_REGOFFSET;
837 cfg->vret_addr->inst_basereg = sparc_fp;
838 cfg->vret_addr->inst_offset = 64;
839 #endif
840 break;
841 default:
842 NOT_IMPLEMENTED;
844 cfg->ret->dreg = cfg->ret->inst_c0;
848 * We use the ABI calling conventions for managed code as well.
849 * Exception: valuetypes are never returned in registers on V9.
850 * FIXME: Use something more optimized.
853 /* Locals are allocated backwards from %fp */
854 cfg->frame_reg = sparc_fp;
855 offset = 0;
858 * Reserve a stack slot for holding information used during exception
859 * handling.
861 if (header->num_clauses)
862 offset += sizeof (gpointer) * 2;
864 if (cfg->method->save_lmf) {
865 offset += sizeof (MonoLMF);
866 cfg->arch.lmf_offset = offset;
869 curinst = cfg->locals_start;
870 for (i = curinst; i < cfg->num_varinfo; ++i) {
871 inst = cfg->varinfo [i];
873 if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
874 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
875 continue;
878 if (inst->flags & MONO_INST_IS_DEAD)
879 continue;
881 /* inst->backend.is_pinvoke indicates native sized value types; this is used by the
882 * pinvoke wrappers when they call functions returning structures */
883 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
884 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
885 else
886 size = mini_type_stack_size (cfg->generic_sharing_context, inst->inst_vtype, &align);
889 * This is needed since structures containing doubles must be doubleword
890 * aligned.
891 * FIXME: Do this only if needed.
893 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
894 align = 8;
897 * variables are accessed as negative offsets from %fp, so increase
898 * the offset before assigning it to a variable
900 offset += size;
902 offset += align - 1;
903 offset &= ~(align - 1);
904 inst->opcode = OP_REGOFFSET;
905 inst->inst_basereg = sparc_fp;
906 inst->inst_offset = STACK_BIAS + -offset;
908 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
911 if (sig->call_convention == MONO_CALL_VARARG) {
912 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
915 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
916 inst = cfg->args [i];
917 if (inst->opcode != OP_REGVAR) {
918 ArgInfo *ainfo = &cinfo->args [i];
919 gboolean inreg = TRUE;
920 MonoType *arg_type;
921 ArgStorage storage;
923 if (sig->hasthis && (i == 0))
924 arg_type = &mono_defaults.object_class->byval_arg;
925 else
926 arg_type = sig->params [i - sig->hasthis];
928 #ifndef SPARCV9
929 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
930 || (arg_type->type == MONO_TYPE_R8)))
932 * Since float arguments are passed in integer registers, we need to
933 * save them to the stack in the prolog.
935 inreg = FALSE;
936 #endif
938 /* FIXME: Allocate volatile arguments to registers */
939 /* FIXME: This makes the argument holding a vtype address volatile */
940 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
941 inreg = FALSE;
943 if (MONO_TYPE_ISSTRUCT (arg_type))
944 /* FIXME: this isn't needed */
945 inreg = FALSE;
947 inst->opcode = OP_REGOFFSET;
949 if (!inreg)
950 storage = ArgOnStack;
951 else
952 storage = ainfo->storage;
954 switch (storage) {
955 case ArgInIReg:
956 inst->opcode = OP_REGVAR;
957 inst->dreg = sparc_i0 + ainfo->reg;
958 break;
959 case ArgInIRegPair:
960 if (inst->type == STACK_I8) {
961 MonoInst *low = get_vreg_to_inst (cfg, inst->dreg + 1);
962 MonoInst *high = get_vreg_to_inst (cfg, inst->dreg + 2);
964 low->opcode = OP_REGVAR;
965 low->dreg = sparc_i0 + ainfo->reg + 1;
966 high->opcode = OP_REGVAR;
967 high->dreg = sparc_i0 + ainfo->reg;
969 inst->opcode = OP_REGVAR;
970 inst->dreg = sparc_i0 + ainfo->reg;
971 break;
972 case ArgInFloatReg:
973 case ArgInDoubleReg:
975 * Since float regs are volatile, we save the arguments to
976 * the stack in the prolog.
977 * FIXME: Avoid this if the method contains no calls.
979 case ArgOnStack:
980 case ArgOnStackPair:
981 case ArgInSplitRegStack:
982 /* Split arguments are saved to the stack in the prolog */
983 inst->opcode = OP_REGOFFSET;
984 /* in parent frame */
985 inst->inst_basereg = sparc_fp;
986 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
988 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
990 * It is very hard to load doubles from non-doubleword aligned
991 * memory locations. So if the offset is misaligned, we copy the
992 * argument to a stack location in the prolog.
994 if ((inst->inst_offset - STACK_BIAS) % 8) {
995 inst->inst_basereg = sparc_fp;
996 offset += 8;
997 align = 8;
998 offset += align - 1;
999 offset &= ~(align - 1);
1000 inst->inst_offset = STACK_BIAS + -offset;
1004 break;
1005 default:
1006 NOT_IMPLEMENTED;
1009 if (MONO_TYPE_ISSTRUCT (arg_type)) {
1010 /* Add a level of indirection */
1012 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1013 * are destructively modified in a lot of places in inssel.brg.
1015 MonoInst *indir;
1016 MONO_INST_NEW (cfg, indir, 0);
1017 *indir = *inst;
1018 inst->opcode = OP_VTARG_ADDR;
1019 inst->inst_left = indir;
1025 * spillvars are stored between the normal locals and the storage reserved
1026 * by the ABI.
1029 cfg->stack_offset = offset;
1031 g_free (cinfo);
1034 void
1035 mono_arch_create_vars (MonoCompile *cfg)
1037 MonoMethodSignature *sig;
1039 sig = mono_method_signature (cfg->method);
1041 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
1042 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1043 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1044 printf ("vret_addr = ");
1045 mono_print_ins (cfg->vret_addr);
1049 if (!sig->ret->byref && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
1050 MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
1051 MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);
1053 low->flags |= MONO_INST_VOLATILE;
1054 high->flags |= MONO_INST_VOLATILE;
1057 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1058 cfg->arch.float_spill_slot = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_ARG);
1059 ((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE;
1062 static void
1063 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
1065 MonoInst *arg;
1067 MONO_INST_NEW (cfg, arg, 0);
1069 arg->sreg1 = sreg;
1071 switch (storage) {
1072 case ArgInIReg:
1073 arg->opcode = OP_MOVE;
1074 arg->dreg = mono_alloc_ireg (cfg);
1076 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
1077 break;
1078 case ArgInFloatReg:
1079 arg->opcode = OP_FMOVE;
1080 arg->dreg = mono_alloc_freg (cfg);
1082 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1083 break;
1084 default:
1085 g_assert_not_reached ();
1088 MONO_ADD_INS (cfg->cbb, arg);
1091 static void
1092 add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
1094 int dreg = mono_alloc_ireg (cfg);
1096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset);
1098 mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
1101 static void
1102 emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1104 int offset = ARGS_OFFSET + ainfo->offset;
1106 switch (ainfo->storage) {
1107 case ArgInIRegPair:
1108 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, in->dreg + 1);
1109 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
1110 break;
1111 case ArgOnStackPair:
1112 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, in->dreg + 2);
1113 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
1114 break;
1115 case ArgInSplitRegStack:
1116 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
1117 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
1118 break;
1119 default:
1120 g_assert_not_reached ();
1124 static void
1125 emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1127 int offset = ARGS_OFFSET + ainfo->offset;
1129 switch (ainfo->storage) {
1130 case ArgInIRegPair:
1131 /* floating-point <-> integer transfer must go through memory */
1132 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1134 /* Load into a register pair */
1135 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1136 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
1137 break;
1138 case ArgOnStackPair:
1139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1140 break;
1141 case ArgInSplitRegStack:
1142 /* floating-point <-> integer transfer must go through memory */
1143 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1144 /* Load most significant word into register */
1145 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1146 break;
1147 default:
1148 g_assert_not_reached ();
1152 static void
1153 emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1155 int offset = ARGS_OFFSET + ainfo->offset;
1157 switch (ainfo->storage) {
1158 case ArgInIReg:
1159 /* floating-point <-> integer transfer must go through memory */
1160 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1161 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1162 break;
1163 case ArgOnStack:
1164 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1165 break;
1166 default:
1167 g_assert_not_reached ();
1171 static void
1172 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
1174 static void
1175 emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
1177 MonoInst *arg;
1178 guint32 align, offset, pad, size;
1180 if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
1181 size = sizeof (MonoTypedRef);
1182 align = sizeof (gpointer);
1184 else if (pinvoke)
1185 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1186 else {
1188 * Other backends use mono_type_stack_size (), but that
1189 * aligns the size to 8, which is larger than the size of
1190 * the source, leading to reads of invalid memory if the
1191 * source is at the end of address space.
1193 size = mono_class_value_size (in->klass, &align);
1196 /* The first 6 argument locations are reserved */
1197 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1198 cinfo->stack_usage = 6 * sizeof (gpointer);
1200 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1201 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1203 cinfo->stack_usage += size;
1204 cinfo->stack_usage += pad;
1207 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1208 * use the normal OUTARG opcodes to pass the address of the location to
1209 * the callee.
1211 if (size > 0) {
1212 MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
1213 arg->sreg1 = in->dreg;
1214 arg->klass = in->klass;
1215 arg->backend.size = size;
1216 arg->inst_p0 = call;
1217 arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1218 memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
1219 ((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
1220 MONO_ADD_INS (cfg->cbb, arg);
1222 MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
1223 arg->dreg = mono_alloc_preg (cfg);
1224 arg->sreg1 = sparc_sp;
1225 arg->inst_imm = STACK_BIAS + offset;
1226 MONO_ADD_INS (cfg->cbb, arg);
1228 emit_pass_other (cfg, call, ainfo, NULL, arg);
1232 static void
1233 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
1235 int offset = ARGS_OFFSET + ainfo->offset;
1236 int opcode;
1238 switch (ainfo->storage) {
1239 case ArgInIReg:
1240 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
1241 break;
1242 case ArgOnStack:
1243 #ifdef SPARCV9
1244 NOT_IMPLEMENTED;
1245 #else
1246 if (offset & 0x1)
1247 opcode = OP_STOREI1_MEMBASE_REG;
1248 else if (offset & 0x2)
1249 opcode = OP_STOREI2_MEMBASE_REG;
1250 else
1251 opcode = OP_STOREI4_MEMBASE_REG;
1252 MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
1253 #endif
1254 break;
1255 default:
1256 g_assert_not_reached ();
1260 static void
1261 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1263 MonoMethodSignature *tmp_sig;
1266 * mono_ArgIterator_Setup assumes the signature cookie is
1267 * passed first and all the arguments which were before it are
1268 * passed on the stack after the signature. So compensate by
1269 * passing a different signature.
1271 tmp_sig = mono_metadata_signature_dup (call->signature);
1272 tmp_sig->param_count -= call->signature->sentinelpos;
1273 tmp_sig->sentinelpos = 0;
1274 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1276 /* FIXME: Add support for signature tokens to AOT */
1277 cfg->disable_aot = TRUE;
1278 /* We always pass the signature on the stack for simplicity */
1279 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
1282 void
1283 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1285 MonoInst *in;
1286 MonoMethodSignature *sig;
1287 int i, n;
1288 CallInfo *cinfo;
1289 ArgInfo *ainfo;
1290 guint32 extra_space = 0;
1292 sig = call->signature;
1293 n = sig->param_count + sig->hasthis;
1295 cinfo = get_call_info (cfg, sig, sig->pinvoke);
1297 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1298 /* Set the 'struct/union return pointer' location on the stack */
1299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
1302 for (i = 0; i < n; ++i) {
1303 MonoType *arg_type;
1305 ainfo = cinfo->args + i;
1307 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1308 /* Emit the signature cookie just before the first implicit argument */
1309 emit_sig_cookie (cfg, call, cinfo);
1312 in = call->args [i];
1314 if (sig->hasthis && (i == 0))
1315 arg_type = &mono_defaults.object_class->byval_arg;
1316 else
1317 arg_type = sig->params [i - sig->hasthis];
1319 arg_type = mono_type_get_underlying_type (arg_type);
1320 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
1321 emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
1322 else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
1323 emit_pass_long (cfg, call, ainfo, in);
1324 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
1325 emit_pass_double (cfg, call, ainfo, in);
1326 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
1327 emit_pass_float (cfg, call, ainfo, in);
1328 else
1329 emit_pass_other (cfg, call, ainfo, arg_type, in);
1332 /* Handle the case where there are no implicit arguments */
1333 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1334 emit_sig_cookie (cfg, call, cinfo);
1337 call->stack_usage = cinfo->stack_usage + extra_space;
1339 g_free (cinfo);
1342 void
1343 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1345 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
1346 int size = ins->backend.size;
1348 mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, 0);
1351 void
1352 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1354 CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
1355 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1357 switch (cinfo->ret.storage) {
1358 case ArgInIReg:
1359 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1360 break;
1361 case ArgInIRegPair:
1362 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1363 MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
1364 } else {
1365 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 2, val->dreg + 2);
1366 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 1, val->dreg + 1);
1368 break;
1369 case ArgInFReg:
1370 if (ret->type == MONO_TYPE_R4)
1371 MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
1372 else
1373 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1374 break;
1375 default:
1376 g_assert_not_reached ();
1379 g_assert (cinfo);
1382 int cond_to_sparc_cond [][3] = {
1383 {sparc_be, sparc_be, sparc_fbe},
1384 {sparc_bne, sparc_bne, 0},
1385 {sparc_ble, sparc_ble, sparc_fble},
1386 {sparc_bge, sparc_bge, sparc_fbge},
1387 {sparc_bl, sparc_bl, sparc_fbl},
1388 {sparc_bg, sparc_bg, sparc_fbg},
1389 {sparc_bleu, sparc_bleu, 0},
1390 {sparc_beu, sparc_beu, 0},
1391 {sparc_blu, sparc_blu, sparc_fbl},
1392 {sparc_bgu, sparc_bgu, sparc_fbg}
1395 /* Map opcode to the sparc condition codes */
1396 static inline SparcCond
1397 opcode_to_sparc_cond (int opcode)
1399 CompRelation rel;
1400 CompType t;
1402 switch (opcode) {
1403 case OP_COND_EXC_OV:
1404 case OP_COND_EXC_IOV:
1405 return sparc_bvs;
1406 case OP_COND_EXC_C:
1407 case OP_COND_EXC_IC:
1408 return sparc_bcs;
1409 case OP_COND_EXC_NO:
1410 case OP_COND_EXC_NC:
1411 NOT_IMPLEMENTED;
1412 default:
1413 rel = mono_opcode_to_cond (opcode);
1414 t = mono_opcode_to_type (opcode, -1);
1416 return cond_to_sparc_cond [rel][t];
1417 break;
1420 return -1;
1423 #define COMPUTE_DISP(ins) \
1424 if (ins->inst_true_bb->native_offset) \
1425 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1426 else { \
1427 disp = 0; \
1428 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1431 #ifdef SPARCV9
1432 #define DEFAULT_ICC sparc_xcc_short
1433 #else
1434 #define DEFAULT_ICC sparc_icc_short
1435 #endif
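/*
 * Note (added for illustration): COMPUTE_DISP produces the branch displacement
 * in 32 bit instruction words, i.e. (target - pc) >> 2; a not-yet-emitted basic
 * block gets disp = 0 plus a patch entry and sparc_patch () fills in the real
 * offset later. DEFAULT_ICC merely selects the 64 bit (%xcc) or 32 bit (%icc)
 * condition codes depending on the build.
 */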
1437 #ifdef SPARCV9
1438 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1439 do { \
1440 gint32 disp; \
1441 guint32 predict; \
1442 COMPUTE_DISP(ins); \
1443 predict = (disp != 0) ? 1 : 0; \
1444 g_assert (sparc_is_imm19 (disp)); \
1445 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1446 if (filldelay) sparc_nop (code); \
1447 } while (0)
1448 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1449 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1450 do { \
1451 gint32 disp; \
1452 guint32 predict; \
1453 COMPUTE_DISP(ins); \
1454 predict = (disp != 0) ? 1 : 0; \
1455 g_assert (sparc_is_imm19 (disp)); \
1456 sparc_fbranch (code, (annul), cond, disp); \
1457 if (filldelay) sparc_nop (code); \
1458 } while (0)
1459 #else
1460 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1461 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1462 do { \
1463 gint32 disp; \
1464 COMPUTE_DISP(ins); \
1465 g_assert (sparc_is_imm22 (disp)); \
1466 sparc_ ## bop (code, (annul), cond, disp); \
1467 if (filldelay) sparc_nop (code); \
1468 } while (0)
1469 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1470 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1471 #endif
1473 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1474 do { \
1475 gint32 disp; \
1476 guint32 predict; \
1477 COMPUTE_DISP(ins); \
1478 predict = (disp != 0) ? 1 : 0; \
1479 g_assert (sparc_is_imm19 (disp)); \
1480 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1481 if (filldelay) sparc_nop (code); \
1482 } while (0)
1484 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1485 do { \
1486 gint32 disp; \
1487 COMPUTE_DISP(ins); \
1488 g_assert (sparc_is_imm22 (disp)); \
1489 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1490 if (filldelay) sparc_nop (code); \
1491 } while (0)
1493 /* Emit an exception if the condition fails */
1495 * We put the exception throwing code out-of-line, at the end of the method
1497 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1498 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1499 MONO_PATCH_INFO_EXC, sexc_name); \
1500 if (sparcv9 && ((icc) != sparc_icc_short)) { \
1501 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1503 else { \
1504 sparc_branch (code, 0, cond, 0); \
1506 if (filldelay) sparc_nop (code); \
1507 } while (0);
1509 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1511 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1512 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1513 MONO_PATCH_INFO_EXC, sexc_name); \
1514 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1515 sparc_nop (code); \
1516 } while (0);
1518 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1519 if (sparc_is_imm13 ((ins)->inst_imm)) \
1520 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1521 else { \
1522 sparc_set (code, ins->inst_imm, sparc_o7); \
1523 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1525 } while (0);
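/*
 * Illustrative example (not in the original source): a simm13 immediate covers
 * -4096..4095, so something like "add %i0, 100, %l0" is emitted directly, while
 * an immediate such as 0x12345 takes the fallback path: sparc_set loads it into
 * %o7 and the register form of the opcode is used instead.
 */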
1527 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1528 if (sparc_is_imm13 (ins->inst_offset)) \
1529 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1530 else { \
1531 sparc_set (code, ins->inst_offset, sparc_o7); \
1532 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1534 } while (0);
1536 /* max len = 5 */
1537 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1538 guint32 sreg; \
1539 if (ins->inst_imm == 0) \
1540 sreg = sparc_g0; \
1541 else { \
1542 sparc_set (code, ins->inst_imm, sparc_o7); \
1543 sreg = sparc_o7; \
1545 if (!sparc_is_imm13 (ins->inst_offset)) { \
1546 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1547 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1549 else \
1550 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1551 } while (0);
1553 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1554 if (!sparc_is_imm13 (ins->inst_offset)) { \
1555 sparc_set (code, ins->inst_offset, sparc_o7); \
1556 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1558 else \
1559 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1560 } while (0);
1562 #define EMIT_CALL() do { \
1563 if (v64) { \
1564 sparc_set_template (code, sparc_o7); \
1565 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1567 else { \
1568 sparc_call_simple (code, 0); \
1570 sparc_nop (code); \
1571 } while (0);
1574 * A call template is 7 instructions long, so we want to avoid it if possible.
1576 static guint32*
1577 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1579 gpointer target;
1581 /* FIXME: This only works if the target method is already compiled */
1582 if (0 && v64 && !cfg->compile_aot) {
1583 MonoJumpInfo patch_info;
1585 patch_info.type = patch_type;
1586 patch_info.data.target = data;
1588 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);
1590 /* FIXME: Add optimizations if the target is close enough */
1591 sparc_set (code, target, sparc_o7);
1592 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1593 sparc_nop (code);
1595 else {
1596 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
1597 EMIT_CALL ();
1600 return code;
1603 void
1604 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
1608 void
1609 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1611 MonoInst *ins, *n, *last_ins = NULL;
1612 ins = bb->code;
1614 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1615 switch (ins->opcode) {
1616 case OP_MUL_IMM:
1617 /* remove unnecessary multiplication by 1 */
1618 if (ins->inst_imm == 1) {
1619 if (ins->dreg != ins->sreg1) {
1620 ins->opcode = OP_MOVE;
1621 } else {
1622 MONO_DELETE_INS (bb, ins);
1623 continue;
1626 break;
1627 #ifndef SPARCV9
1628 case OP_LOAD_MEMBASE:
1629 case OP_LOADI4_MEMBASE:
1631 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1632 * OP_LOAD_MEMBASE offset(basereg), reg
1634 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1635 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1636 ins->inst_basereg == last_ins->inst_destbasereg &&
1637 ins->inst_offset == last_ins->inst_offset) {
1638 if (ins->dreg == last_ins->sreg1) {
1639 MONO_DELETE_INS (bb, ins);
1640 continue;
1641 } else {
1642 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1643 ins->opcode = OP_MOVE;
1644 ins->sreg1 = last_ins->sreg1;
1648 * Note: reg1 must be different from the basereg in the second load
1649 * OP_LOAD_MEMBASE offset(basereg), reg1
1650 * OP_LOAD_MEMBASE offset(basereg), reg2
1651 * -->
1652 * OP_LOAD_MEMBASE offset(basereg), reg1
1653 * OP_MOVE reg1, reg2
1655 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1656 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1657 ins->inst_basereg != last_ins->dreg &&
1658 ins->inst_basereg == last_ins->inst_basereg &&
1659 ins->inst_offset == last_ins->inst_offset) {
1661 if (ins->dreg == last_ins->dreg) {
1662 MONO_DELETE_INS (bb, ins);
1663 continue;
1664 } else {
1665 ins->opcode = OP_MOVE;
1666 ins->sreg1 = last_ins->dreg;
1669 //g_assert_not_reached ();
1671 #if 0
1673 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1674 * OP_LOAD_MEMBASE offset(basereg), reg
1675 * -->
1676 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1677 * OP_ICONST reg, imm
1679 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1680 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1681 ins->inst_basereg == last_ins->inst_destbasereg &&
1682 ins->inst_offset == last_ins->inst_offset) {
1683 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1684 ins->opcode = OP_ICONST;
1685 ins->inst_c0 = last_ins->inst_imm;
1686 g_assert_not_reached (); // check this rule
1687 #endif
1689 break;
1690 #endif
1691 case OP_LOADI1_MEMBASE:
1692 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1693 ins->inst_basereg == last_ins->inst_destbasereg &&
1694 ins->inst_offset == last_ins->inst_offset) {
1695 if (ins->dreg == last_ins->sreg1) {
1696 MONO_DELETE_INS (bb, ins);
1697 continue;
1698 } else {
1699 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1700 ins->opcode = OP_MOVE;
1701 ins->sreg1 = last_ins->sreg1;
1704 break;
1705 case OP_LOADI2_MEMBASE:
1706 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1707 ins->inst_basereg == last_ins->inst_destbasereg &&
1708 ins->inst_offset == last_ins->inst_offset) {
1709 if (ins->dreg == last_ins->sreg1) {
1710 MONO_DELETE_INS (bb, ins);
1711 continue;
1712 } else {
1713 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1714 ins->opcode = OP_MOVE;
1715 ins->sreg1 = last_ins->sreg1;
1718 break;
1719 case OP_STOREI4_MEMBASE_IMM:
1720 /* Convert pairs of 0 stores to a dword 0 store */
1721 /* Used when initializing temporaries */
1722 /* We know sparc_fp is dword aligned */
1723 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1724 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1725 (ins->inst_destbasereg == sparc_fp) &&
1726 (ins->inst_offset < 0) &&
1727 ((ins->inst_offset % 8) == 0) &&
1728 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1729 (ins->inst_imm == 0) &&
1730 (last_ins->inst_imm == 0)) {
1731 if (sparcv9) {
1732 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1733 last_ins->inst_offset = ins->inst_offset;
1734 MONO_DELETE_INS (bb, ins);
1735 continue;
1738 break;
1739 case OP_IBEQ:
1740 case OP_IBNE_UN:
1741 case OP_IBLT:
1742 case OP_IBGT:
1743 case OP_IBGE:
1744 case OP_IBLE:
1745 case OP_COND_EXC_EQ:
1746 case OP_COND_EXC_GE:
1747 case OP_COND_EXC_GT:
1748 case OP_COND_EXC_LE:
1749 case OP_COND_EXC_LT:
1750 case OP_COND_EXC_NE_UN:
1752 * Convert compare with zero+branch to BRcc
1755 * This only works in 64 bit mode, since it examines all 64
1756 * bits of the register.
1757 * Only do this if the method is small since BPr only has a 16bit
1758 * displacement.
1760 if (v64 && (cfg->header->code_size < 10000) && last_ins &&
1761 (last_ins->opcode == OP_COMPARE_IMM) &&
1762 (last_ins->inst_imm == 0)) {
1763 switch (ins->opcode) {
1764 case OP_IBEQ:
1765 ins->opcode = OP_SPARC_BRZ;
1766 break;
1767 case OP_IBNE_UN:
1768 ins->opcode = OP_SPARC_BRNZ;
1769 break;
1770 case OP_IBLT:
1771 ins->opcode = OP_SPARC_BRLZ;
1772 break;
1773 case OP_IBGT:
1774 ins->opcode = OP_SPARC_BRGZ;
1775 break;
1776 case OP_IBGE:
1777 ins->opcode = OP_SPARC_BRGEZ;
1778 break;
1779 case OP_IBLE:
1780 ins->opcode = OP_SPARC_BRLEZ;
1781 break;
1782 case OP_COND_EXC_EQ:
1783 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1784 break;
1785 case OP_COND_EXC_GE:
1786 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1787 break;
1788 case OP_COND_EXC_GT:
1789 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1790 break;
1791 case OP_COND_EXC_LE:
1792 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1793 break;
1794 case OP_COND_EXC_LT:
1795 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1796 break;
1797 case OP_COND_EXC_NE_UN:
1798 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1799 break;
1800 default:
1801 g_assert_not_reached ();
1803 ins->sreg1 = last_ins->sreg1;
1804 *last_ins = *ins;
1805 MONO_DELETE_INS (bb, ins);
1806 continue;
1808 break;
1809 case OP_MOVE:
1811 * OP_MOVE reg, reg
1813 if (ins->dreg == ins->sreg1) {
1814 MONO_DELETE_INS (bb, ins);
1815 continue;
1818 * OP_MOVE sreg, dreg
1819 * OP_MOVE dreg, sreg
1821 if (last_ins && last_ins->opcode == OP_MOVE &&
1822 ins->sreg1 == last_ins->dreg &&
1823 ins->dreg == last_ins->sreg1) {
1824 MONO_DELETE_INS (bb, ins);
1825 continue;
1827 break;
1829 last_ins = ins;
1830 ins = ins->next;
1832 bb->last_ins = last_ins;
1835 void
1836 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
1838 switch (ins->opcode) {
1839 case OP_LNEG:
1840 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, 0, ins->sreg1 + 1);
1841 MONO_EMIT_NEW_BIALU (cfg, OP_SBB, ins->dreg + 2, 0, ins->sreg1 + 2);
1842 NULLIFY_INS (ins);
1843 break;
1844 default:
1845 break;
1849 void
1850 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1854 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
1856 static void
1857 sparc_patch (guint32 *code, const gpointer target)
1859 guint32 *c = code;
1860 guint32 ins = *code;
1861 guint32 op = ins >> 30;
1862 guint32 op2 = (ins >> 22) & 0x7;
1863 guint32 rd = (ins >> 25) & 0x1f;
1864 guint8* target8 = (guint8*)target;
1865 gint64 disp = (target8 - (guint8*)code) >> 2;
1866 int reg;
1868 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1870 if ((op == 0) && (op2 == 2)) {
1871 if (!sparc_is_imm22 (disp))
1872 NOT_IMPLEMENTED;
1873 /* Bicc */
1874 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
1876 else if ((op == 0) && (op2 == 1)) {
1877 if (!sparc_is_imm19 (disp))
1878 NOT_IMPLEMENTED;
1879 /* BPcc */
1880 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
1882 else if ((op == 0) && (op2 == 3)) {
1883 if (!sparc_is_imm16 (disp))
1884 NOT_IMPLEMENTED;
1885 /* BPr */
1886 *code &= ~(0x180000 | 0x3fff);
1887 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
1889 else if ((op == 0) && (op2 == 6)) {
1890 if (!sparc_is_imm22 (disp))
1891 NOT_IMPLEMENTED;
1892 /* FBicc */
1893 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
1895 else if ((op == 0) && (op2 == 4)) {
1896 guint32 ins2 = code [1];
1898 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
1899 /* sethi followed by or */
1900 guint32 *p = code;
1901 sparc_set (p, target8, rd);
1902 while (p <= (code + 1))
1903 sparc_nop (p);
1905 else if (ins2 == 0x01000000) {
1906 /* sethi followed by nop */
1907 guint32 *p = code;
1908 sparc_set (p, target8, rd);
1909 while (p <= (code + 1))
1910 sparc_nop (p);
1912 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
1913 /* sethi followed by load/store */
1914 #ifndef SPARCV9
1915 guint32 t = (guint32)target8;
1916 *code &= ~(0x3fffff);
1917 *code |= (t >> 10);
1918 *(code + 1) &= ~(0x3ff);
1919 *(code + 1) |= (t & 0x3ff);
1920 #endif
1922 else if (v64 &&
1923 (sparc_inst_rd (ins) == sparc_g1) &&
1924 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
1925 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
1926 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
1928 /* sparc_set */
1929 guint32 *p = c;
1930 reg = sparc_inst_rd (c [1]);
1931 sparc_set (p, target8, reg);
1932 while (p < (c + 6))
1933 sparc_nop (p);
1935 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
1936 (sparc_inst_imm (ins2))) {
1937 /* sethi followed by jmpl */
1938 #ifndef SPARCV9
1939 guint32 t = (guint32)target8;
1940 *code &= ~(0x3fffff);
1941 *code |= (t >> 10);
1942 *(code + 1) &= ~(0x3ff);
1943 *(code + 1) |= (t & 0x3ff);
1944 #endif
1946 else
1947 NOT_IMPLEMENTED;
1949 else if (op == 01) {
1950 gint64 disp = (target8 - (guint8*)code) >> 2;
1952 if (!sparc_is_imm30 (disp))
1953 NOT_IMPLEMENTED;
1954 sparc_call_simple (code, target8 - (guint8*)code);
1956 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
1957 /* mov imm, reg */
1958 g_assert (sparc_is_imm13 (target8));
1959 *code &= ~(0x1fff);
1960 *code |= (guint32)target8;
1962 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
1963 /* sparc_set case 5. */
1964 guint32 *p = c;
1966 g_assert (v64);
1967 reg = sparc_inst_rd (c [3]);
1968 sparc_set (p, target, reg);
1969 while (p < (c + 6))
1970 sparc_nop (p);
1972 else
1973 NOT_IMPLEMENTED;
1975 // g_print ("patched with 0x%08x\n", ins);
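/*
 * Worked example (added for illustration): patching a Bicc branch keeps the
 * top 10 bits of the instruction and rewrites its imm22 field with the word
 * displacement, so a branch at 0x1000 retargeted to 0x1040 is given
 * disp = (0x1040 - 0x1000) >> 2 = 16.
 */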
1979 * mono_sparc_emit_save_lmf:
1981 * Emit the code necessary to push a new entry onto the lmf stack. Used by
1982 * trampolines as well.
1984 guint32*
1985 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
1987 /* Save lmf_addr */
1988 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
1989 /* Save previous_lmf */
1990 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
1991 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1992 /* Set new lmf */
1993 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
1994 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
1996 return code;
1999 guint32*
2000 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
2002 /* Load previous_lmf */
2003 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
2004 /* Load lmf_addr */
2005 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2006 /* *(lmf) = previous_lmf */
2007 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
2008 return code;
2011 static guint32*
2012 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2015 * Since register windows are saved to the current value of %sp, we need to
2016 * set the sp field in the lmf before the call, not in the prolog.
2018 if (cfg->method->save_lmf) {
2019 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2021 /* Save sp */
2022 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
2025 return code;
2028 static guint32*
2029 emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
2031 MonoCallInst *call = (MonoCallInst*)ins;
2032 guint32 size;
2035 * The sparc ABI requires that calls to functions which return a structure
2036 * contain an additional unimpl instruction which is checked by the callee.
2038 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2039 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2040 size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
2041 else
2042 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
2043 sparc_unimp (code, size & 0xfff);
2046 return code;
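/*
 * Illustrative call-site shape implied by the check above, written with the
 * emitter macros used elsewhere in this file ('disp' and 'size' are
 * placeholders, this is a sketch and not part of the JIT):
 */
#if 0
sparc_call_simple (code, disp);    /* call the struct-returning pinvoke */
sparc_nop (code);                  /* delay slot */
sparc_unimp (code, size & 0xfff);  /* struct size word checked by the callee */
#endif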
2049 static guint32*
2050 emit_move_return_value (MonoInst *ins, guint32 *code)
2052 /* Move return value to the target register */
2053 /* FIXME: do more things in the local reg allocator */
2054 switch (ins->opcode) {
2055 case OP_VOIDCALL:
2056 case OP_VOIDCALL_REG:
2057 case OP_VOIDCALL_MEMBASE:
2058 break;
2059 case OP_CALL:
2060 case OP_CALL_REG:
2061 case OP_CALL_MEMBASE:
2062 g_assert (ins->dreg == sparc_o0);
2063 break;
2064 case OP_LCALL:
2065 case OP_LCALL_REG:
2066 case OP_LCALL_MEMBASE:
2068 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2069 * in inssel-long32.brg.
2071 #ifdef SPARCV9
2072 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2073 #else
2074 g_assert (ins->dreg == sparc_o1);
2075 #endif
2076 break;
2077 case OP_FCALL:
2078 case OP_FCALL_REG:
2079 case OP_FCALL_MEMBASE:
2080 #ifdef SPARCV9
2081 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2082 sparc_fmovs (code, sparc_f0, ins->dreg);
2083 sparc_fstod (code, ins->dreg, ins->dreg);
2085 else
2086 sparc_fmovd (code, sparc_f0, ins->dreg);
2087 #else
2088 sparc_fmovs (code, sparc_f0, ins->dreg);
2089 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2090 sparc_fstod (code, ins->dreg, ins->dreg);
2091 else
2092 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2093 #endif
2094 break;
2095 case OP_VCALL:
2096 case OP_VCALL_REG:
2097 case OP_VCALL_MEMBASE:
2098 case OP_VCALL2:
2099 case OP_VCALL2_REG:
2100 case OP_VCALL2_MEMBASE:
2101 break;
2102 default:
2103 NOT_IMPLEMENTED;
2106 return code;
2110 * emit_load_volatile_arguments:
2112 * Load volatile arguments from the stack to the original input registers.
2113 * Required before a tail call.
2115 static guint32*
2116 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2118 MonoMethod *method = cfg->method;
2119 MonoMethodSignature *sig;
2120 MonoInst *inst;
2121 CallInfo *cinfo;
2122 guint32 i, ireg;
2124 /* FIXME: Generate intermediate code instead */
2126 sig = mono_method_signature (method);
2128 cinfo = get_call_info (cfg, sig, FALSE);
2130 /* This is the opposite of the code in emit_prolog */
2132 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2133 ArgInfo *ainfo = cinfo->args + i;
2134 gint32 stack_offset;
2135 MonoType *arg_type;
2137 inst = cfg->args [i];
2139 if (sig->hasthis && (i == 0))
2140 arg_type = &mono_defaults.object_class->byval_arg;
2141 else
2142 arg_type = sig->params [i - sig->hasthis];
2144 stack_offset = ainfo->offset + ARGS_OFFSET;
2145 ireg = sparc_i0 + ainfo->reg;
2147 if (ainfo->storage == ArgInSplitRegStack) {
2148 g_assert (inst->opcode == OP_REGOFFSET);
2150 if (!sparc_is_imm13 (stack_offset))
2151 NOT_IMPLEMENTED;
2152 sparc_ld_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
2155 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2156 if (ainfo->storage == ArgInIRegPair) {
2157 if (!sparc_is_imm13 (inst->inst_offset + 4))
2158 NOT_IMPLEMENTED;
2159 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2160 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2162 else
2163 if (ainfo->storage == ArgInSplitRegStack) {
2164 if (stack_offset != inst->inst_offset) {
2165 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2166 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2167 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2171 else
2172 if (ainfo->storage == ArgOnStackPair) {
2173 if (stack_offset != inst->inst_offset) {
2174 /* stack_offset is not dword aligned, so we need to make a copy */
2175 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2176 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2178 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2179 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2183 else
2184 g_assert_not_reached ();
2186 else
2187 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2188 /* Argument arrived in a register but lives on the stack; reload it */
2189 if (!sparc_is_imm13 (stack_offset))
2190 NOT_IMPLEMENTED;
2191 if ((stack_offset - ARGS_OFFSET) & 0x1)
2192 /* FIXME: Is this ldsb or ldub ? */
2193 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2194 else
2195 if ((stack_offset - ARGS_OFFSET) & 0x2)
2196 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2197 else
2198 if ((stack_offset - ARGS_OFFSET) & 0x4)
2199 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2200 else {
2201 if (v64)
2202 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2203 else
2204 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2207 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2208 /* Argument arrived in a register pair but lives on the stack; reload it */
2209 if (!sparc_is_imm13 (inst->inst_offset + 4))
2210 NOT_IMPLEMENTED;
2211 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2212 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2214 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2215 NOT_IMPLEMENTED;
2217 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2218 NOT_IMPLEMENTED;
2221 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2222 if (inst->opcode == OP_REGVAR)
2223 /* FIXME: Load the argument into memory */
2224 NOT_IMPLEMENTED;
2227 g_free (cinfo);
2229 return code;
2233 * mono_sparc_is_virtual_call:
2235 * Determine whether the instruction at CODE is a virtual call.
2237 gboolean
2238 mono_sparc_is_virtual_call (guint32 *code)
2240 guint32 buf[1];
2241 guint32 *p;
2243 p = buf;
2245 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2247 * Register indirect call. If it is a virtual call, then the
2248 * instruction in the delay slot is a special kind of nop.
2251 /* Construct special nop */
2252 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
2253 p --;
2255 if (code [1] == p [0])
2256 return TRUE;
2259 return FALSE;
2262 #define CMP_SIZE 3
2263 #define BR_SMALL_SIZE 2
2264 #define BR_LARGE_SIZE 2
2265 #define JUMP_IMM_SIZE 5
2266 #define ENABLE_WRONG_METHOD_CHECK 0
2269 * LOCKING: called with the domain lock held
2271 gpointer
2272 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
2273 gpointer fail_tramp)
2275 int i;
2276 int size = 0;
2277 guint32 *code, *start;
2279 for (i = 0; i < count; ++i) {
2280 MonoIMTCheckItem *item = imt_entries [i];
2281 if (item->is_equals) {
2282 if (item->check_target_idx) {
2283 if (!item->compare_done)
2284 item->chunk_size += CMP_SIZE;
2285 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
2286 } else {
2287 if (fail_tramp)
2288 item->chunk_size += 16;
2289 item->chunk_size += JUMP_IMM_SIZE;
2290 #if ENABLE_WRONG_METHOD_CHECK
2291 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
2292 #endif
2294 } else {
2295 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
2296 imt_entries [item->check_target_idx]->compare_done = TRUE;
2298 size += item->chunk_size;
2300 if (fail_tramp)
2301 code = mono_method_alloc_generic_virtual_thunk (domain, size * 4);
2302 else
2303 code = mono_domain_code_reserve (domain, size * 4);
2304 start = code;
2305 for (i = 0; i < count; ++i) {
2306 MonoIMTCheckItem *item = imt_entries [i];
2307 item->code_target = (guint8*)code;
2308 if (item->is_equals) {
2309 gboolean fail_case = !item->check_target_idx && fail_tramp;
2311 if (item->check_target_idx || fail_case) {
2312 if (!item->compare_done || fail_case) {
2313 sparc_set (code, (guint32)item->key, sparc_g5);
2314 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2316 item->jmp_code = (guint8*)code;
2317 sparc_branch (code, 0, sparc_bne, 0);
2318 sparc_nop (code);
2319 if (item->has_target_code) {
2320 sparc_set (code, item->value.target_code, sparc_g5);
2321 } else {
2322 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2323 sparc_ld (code, sparc_g5, 0, sparc_g5);
2325 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2326 sparc_nop (code);
2328 if (fail_case) {
2329 sparc_patch (item->jmp_code, code);
2330 sparc_set (code, fail_tramp, sparc_g5);
2331 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2332 sparc_nop (code);
2333 item->jmp_code = NULL;
2335 } else {
2336 /* enable the commented code to assert on wrong method */
2337 #if ENABLE_WRONG_METHOD_CHECK
2338 g_assert_not_reached ();
2339 #endif
2340 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2341 sparc_ld (code, sparc_g5, 0, sparc_g5);
2342 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2343 sparc_nop (code);
2344 #if ENABLE_WRONG_METHOD_CHECK
2345 g_assert_not_reached ();
2346 #endif
2348 } else {
2349 sparc_set (code, (guint32)item->key, sparc_g5);
2350 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2351 item->jmp_code = (guint8*)code;
2352 sparc_branch (code, 0, sparc_beu, 0);
2353 sparc_nop (code);
2356 /* patch the branches to get to the target items */
2357 for (i = 0; i < count; ++i) {
2358 MonoIMTCheckItem *item = imt_entries [i];
2359 if (item->jmp_code) {
2360 if (item->check_target_idx) {
2361 sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
2366 mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
2368 mono_stats.imt_thunks_size += (code - start) * 4;
2369 g_assert (code - start <= size);
2370 return start;
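/*
 * Behavioural sketch of the thunk built above for a two-entry table
 * (hypothetical keys/slots, not compiled). MONO_ARCH_IMT_REG plays the role
 * of 'imt_method'; "return" here means "jump to the code address".
 */
#if 0
static gpointer
imt_thunk_sketch (gpointer imt_method)
{
	if (imt_method == key0)             /* sparc_set key0, %g5; cmp; bne */
		return *vtable_slot0_addr;  /* sparc_set &vtable->vtable [slot0], %g5; ld; jmpl */
	if (imt_method == key1)
		return *vtable_slot1_addr;
	return fail_tramp;                  /* only emitted when fail_tramp != NULL */
}
#endif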
2373 MonoMethod*
2374 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
2376 #ifdef SPARCV9
2377 g_assert_not_reached ();
2378 #endif
2380 return (MonoMethod*)regs [sparc_g1];
2383 gpointer
2384 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
2386 mono_sparc_flushw ();
2388 return (gpointer)regs [sparc_o0];
2392 * Some conventions used in the following code.
2393 * - The only scratch registers we have are o7 and g1. We try to
2394 * stick to o7 when we can, and use g1 when necessary.
2397 void
2398 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2400 MonoInst *ins;
2401 MonoCallInst *call;
2402 guint offset;
2403 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2404 MonoInst *last_ins = NULL;
2405 int max_len, cpos;
2406 const char *spec;
2408 if (cfg->verbose_level > 2)
2409 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2411 cpos = bb->max_offset;
2413 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2414 NOT_IMPLEMENTED;
2417 MONO_BB_FOR_EACH_INS (bb, ins) {
2418 guint8* code_start;
2420 offset = (guint8*)code - cfg->native_code;
2422 spec = ins_get_spec (ins->opcode);
2424 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2426 if (offset > (cfg->code_size - max_len - 16)) {
2427 cfg->code_size *= 2;
2428 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2429 code = (guint32*)(cfg->native_code + offset);
2431 code_start = (guint8*)code;
2432 // if (ins->cil_code)
2433 // g_print ("cil code\n");
2434 mono_debug_record_line_number (cfg, ins, offset);
2436 switch (ins->opcode) {
2437 case OP_STOREI1_MEMBASE_IMM:
2438 EMIT_STORE_MEMBASE_IMM (ins, stb);
2439 break;
2440 case OP_STOREI2_MEMBASE_IMM:
2441 EMIT_STORE_MEMBASE_IMM (ins, sth);
2442 break;
2443 case OP_STORE_MEMBASE_IMM:
2444 EMIT_STORE_MEMBASE_IMM (ins, sti);
2445 break;
2446 case OP_STOREI4_MEMBASE_IMM:
2447 EMIT_STORE_MEMBASE_IMM (ins, st);
2448 break;
2449 case OP_STOREI8_MEMBASE_IMM:
2450 #ifdef SPARCV9
2451 EMIT_STORE_MEMBASE_IMM (ins, stx);
2452 #else
2453 /* Only generated by peephole opts */
2454 g_assert ((ins->inst_offset % 8) == 0);
2455 g_assert (ins->inst_imm == 0);
2456 EMIT_STORE_MEMBASE_IMM (ins, stx);
2457 #endif
2458 break;
2459 case OP_STOREI1_MEMBASE_REG:
2460 EMIT_STORE_MEMBASE_REG (ins, stb);
2461 break;
2462 case OP_STOREI2_MEMBASE_REG:
2463 EMIT_STORE_MEMBASE_REG (ins, sth);
2464 break;
2465 case OP_STOREI4_MEMBASE_REG:
2466 EMIT_STORE_MEMBASE_REG (ins, st);
2467 break;
2468 case OP_STOREI8_MEMBASE_REG:
2469 #ifdef SPARCV9
2470 EMIT_STORE_MEMBASE_REG (ins, stx);
2471 #else
2472 /* Only used by OP_MEMSET */
2473 EMIT_STORE_MEMBASE_REG (ins, std);
2474 #endif
2475 break;
2476 case OP_STORE_MEMBASE_REG:
2477 EMIT_STORE_MEMBASE_REG (ins, sti);
2478 break;
2479 case OP_LOADU4_MEM:
2480 sparc_set (code, ins->inst_c0, ins->dreg);
2481 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2482 break;
2483 case OP_LOADI4_MEMBASE:
2484 #ifdef SPARCV9
2485 EMIT_LOAD_MEMBASE (ins, ldsw);
2486 #else
2487 EMIT_LOAD_MEMBASE (ins, ld);
2488 #endif
2489 break;
2490 case OP_LOADU4_MEMBASE:
2491 EMIT_LOAD_MEMBASE (ins, ld);
2492 break;
2493 case OP_LOADU1_MEMBASE:
2494 EMIT_LOAD_MEMBASE (ins, ldub);
2495 break;
2496 case OP_LOADI1_MEMBASE:
2497 EMIT_LOAD_MEMBASE (ins, ldsb);
2498 break;
2499 case OP_LOADU2_MEMBASE:
2500 EMIT_LOAD_MEMBASE (ins, lduh);
2501 break;
2502 case OP_LOADI2_MEMBASE:
2503 EMIT_LOAD_MEMBASE (ins, ldsh);
2504 break;
2505 case OP_LOAD_MEMBASE:
2506 #ifdef SPARCV9
2507 EMIT_LOAD_MEMBASE (ins, ldx);
2508 #else
2509 EMIT_LOAD_MEMBASE (ins, ld);
2510 #endif
2511 break;
2512 #ifdef SPARCV9
2513 case OP_LOADI8_MEMBASE:
2514 EMIT_LOAD_MEMBASE (ins, ldx);
2515 break;
2516 #endif
2517 case OP_ICONV_TO_I1:
2518 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2519 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2520 break;
2521 case OP_ICONV_TO_I2:
2522 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2523 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2524 break;
2525 case OP_ICONV_TO_U1:
2526 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2527 break;
2528 case OP_ICONV_TO_U2:
2529 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2530 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2531 break;
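/*
 * Assumed-equivalent C for the four narrowing conversions above, for a
 * 32-bit value x (sketch only; x, i1, i2, u1, u2 are placeholders):
 */
#if 0
i1 = ((gint32)(x << 24)) >> 24;   /* OP_ICONV_TO_I1: sign-extend the low byte  */
i2 = ((gint32)(x << 16)) >> 16;   /* OP_ICONV_TO_I2: sign-extend the low short */
u1 = x & 0xff;                    /* OP_ICONV_TO_U1 */
u2 = ((guint32)(x << 16)) >> 16;  /* OP_ICONV_TO_U2: same as x & 0xffff        */
#endif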
2532 case OP_LCONV_TO_OVF_U4:
2533 case OP_ICONV_TO_OVF_U4:
2534 /* Only used on V9 */
2535 sparc_cmp_imm (code, ins->sreg1, 0);
2536 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2537 MONO_PATCH_INFO_EXC, "OverflowException");
2538 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2539 /* Delay slot */
2540 sparc_set (code, 1, sparc_o7);
2541 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2542 sparc_cmp (code, ins->sreg1, sparc_o7);
2543 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2544 MONO_PATCH_INFO_EXC, "OverflowException");
2545 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2546 sparc_nop (code);
2547 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2548 break;
2549 case OP_LCONV_TO_OVF_I4_UN:
2550 case OP_ICONV_TO_OVF_I4_UN:
2551 /* Only used on V9 */
2552 NOT_IMPLEMENTED;
2553 break;
2554 case OP_COMPARE:
2555 case OP_LCOMPARE:
2556 case OP_ICOMPARE:
2557 sparc_cmp (code, ins->sreg1, ins->sreg2);
2558 break;
2559 case OP_COMPARE_IMM:
2560 case OP_ICOMPARE_IMM:
2561 if (sparc_is_imm13 (ins->inst_imm))
2562 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2563 else {
2564 sparc_set (code, ins->inst_imm, sparc_o7);
2565 sparc_cmp (code, ins->sreg1, sparc_o7);
2567 break;
2568 case OP_BREAK:
2570 * gdb does not like encountering 'ta 1' in the debugged code. So
2571 * instead of emitting a trap, we emit a call to a C function and place a
2572 * breakpoint there.
2574 //sparc_ta (code, 1);
2575 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
2576 EMIT_CALL();
2577 break;
2578 case OP_ADDCC:
2579 case OP_IADDCC:
2580 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2581 break;
2582 case OP_IADD:
2583 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2584 break;
2585 case OP_ADDCC_IMM:
2586 case OP_ADD_IMM:
2587 case OP_IADD_IMM:
2588 /* according to inssel-long32.brg, this should set cc */
2589 EMIT_ALU_IMM (ins, add, TRUE);
2590 break;
2591 case OP_ADC:
2592 case OP_IADC:
2593 /* according to inssel-long32.brg, this should set cc */
2594 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2595 break;
2596 case OP_ADC_IMM:
2597 case OP_IADC_IMM:
2598 EMIT_ALU_IMM (ins, addx, TRUE);
2599 break;
2600 case OP_SUBCC:
2601 case OP_ISUBCC:
2602 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2603 break;
2604 case OP_ISUB:
2605 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2606 break;
2607 case OP_SUBCC_IMM:
2608 case OP_SUB_IMM:
2609 case OP_ISUB_IMM:
2610 /* according to inssel-long32.brg, this should set cc */
2611 EMIT_ALU_IMM (ins, sub, TRUE);
2612 break;
2613 case OP_SBB:
2614 case OP_ISBB:
2615 /* according to inssel-long32.brg, this should set cc */
2616 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2617 break;
2618 case OP_SBB_IMM:
2619 case OP_ISBB_IMM:
2620 EMIT_ALU_IMM (ins, subx, TRUE);
2621 break;
2622 case OP_IAND:
2623 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2624 break;
2625 case OP_AND_IMM:
2626 case OP_IAND_IMM:
2627 EMIT_ALU_IMM (ins, and, FALSE);
2628 break;
2629 case OP_IDIV:
2630 /* Sign extend sreg1 into %y */
2631 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2632 sparc_wry (code, sparc_o7, sparc_g0);
2633 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2634 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2635 break;
2636 case OP_IDIV_UN:
2637 sparc_wry (code, sparc_g0, sparc_g0);
2638 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2639 break;
2640 case OP_DIV_IMM:
2641 case OP_IDIV_IMM: {
2642 int i, imm;
2644 /* Transform division into a shift */
2645 for (i = 1; i < 30; ++i) {
2646 imm = (1 << i);
2647 if (ins->inst_imm == imm)
2648 break;
2650 if (i < 30) {
2651 if (i == 1) {
2652 /* gcc 2.95.3 */
2653 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2654 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2655 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2657 else {
2658 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2659 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2660 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2661 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2662 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2665 else {
2666 /* Sign extend sreg1 into %y */
2667 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2668 sparc_wry (code, sparc_o7, sparc_g0);
2669 EMIT_ALU_IMM (ins, sdiv, TRUE);
2670 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2672 break;
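/*
 * The shift sequences above implement truncating signed division by 2^i;
 * an assumed-equivalent C sketch (32-bit x, 1 <= i < 30; 'bias' and 'q'
 * are placeholders):
 */
#if 0
bias = ((guint32)(x >> 31)) >> (32 - i);  /* 2^i - 1 when x < 0, otherwise 0 */
q = (x + bias) >> i;                      /* rounds toward zero, like OP_IDIV */
#endif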
2674 case OP_IDIV_UN_IMM:
2675 sparc_wry (code, sparc_g0, sparc_g0);
2676 EMIT_ALU_IMM (ins, udiv, FALSE);
2677 break;
2678 case OP_IREM:
2679 /* Sign extend sreg1 into %y */
2680 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2681 sparc_wry (code, sparc_o7, sparc_g0);
2682 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2683 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2684 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2685 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2686 break;
2687 case OP_IREM_UN:
2688 sparc_wry (code, sparc_g0, sparc_g0);
2689 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2690 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2691 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2692 break;
2693 case OP_REM_IMM:
2694 case OP_IREM_IMM:
2695 /* Sign extend sreg1 into %y */
2696 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2697 sparc_wry (code, sparc_o7, sparc_g0);
2698 if (!sparc_is_imm13 (ins->inst_imm)) {
2699 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2700 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2701 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2702 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2704 else {
2705 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2706 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2707 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2709 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2710 break;
2711 case OP_IREM_UN_IMM:
2712 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2713 sparc_wry (code, sparc_g0, sparc_g0);
2714 sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2715 sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
2716 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2717 break;
2718 case OP_IOR:
2719 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2720 break;
2721 case OP_OR_IMM:
2722 case OP_IOR_IMM:
2723 EMIT_ALU_IMM (ins, or, FALSE);
2724 break;
2725 case OP_IXOR:
2726 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2727 break;
2728 case OP_XOR_IMM:
2729 case OP_IXOR_IMM:
2730 EMIT_ALU_IMM (ins, xor, FALSE);
2731 break;
2732 case OP_ISHL:
2733 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2734 break;
2735 case OP_SHL_IMM:
2736 case OP_ISHL_IMM:
2737 if (ins->inst_imm < (1 << 5))
2738 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2739 else {
2740 sparc_set (code, ins->inst_imm, sparc_o7);
2741 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2743 break;
2744 case OP_ISHR:
2745 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2746 break;
2747 case OP_ISHR_IMM:
2748 case OP_SHR_IMM:
2749 if (ins->inst_imm < (1 << 5))
2750 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2751 else {
2752 sparc_set (code, ins->inst_imm, sparc_o7);
2753 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2755 break;
2756 case OP_SHR_UN_IMM:
2757 case OP_ISHR_UN_IMM:
2758 if (ins->inst_imm < (1 << 5))
2759 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2760 else {
2761 sparc_set (code, ins->inst_imm, sparc_o7);
2762 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2764 break;
2765 case OP_ISHR_UN:
2766 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2767 break;
2768 case OP_LSHL:
2769 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2770 break;
2771 case OP_LSHL_IMM:
2772 if (ins->inst_imm < (1 << 6))
2773 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2774 else {
2775 sparc_set (code, ins->inst_imm, sparc_o7);
2776 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2778 break;
2779 case OP_LSHR:
2780 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2781 break;
2782 case OP_LSHR_IMM:
2783 if (ins->inst_imm < (1 << 6))
2784 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2785 else {
2786 sparc_set (code, ins->inst_imm, sparc_o7);
2787 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2789 break;
2790 case OP_LSHR_UN:
2791 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2792 break;
2793 case OP_LSHR_UN_IMM:
2794 if (ins->inst_imm < (1 << 6))
2795 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2796 else {
2797 sparc_set (code, ins->inst_imm, sparc_o7);
2798 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2800 break;
2801 case OP_INOT:
2802 /* can't use sparc_not */
2803 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2804 break;
2805 case OP_INEG:
2806 /* can't use sparc_neg */
2807 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2808 break;
2809 case OP_IMUL:
2810 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2811 break;
2812 case OP_IMUL_IMM:
2813 case OP_MUL_IMM: {
2814 int i, imm;
2816 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2817 break;
2819 /* Transform multiplication into a shift */
2820 for (i = 0; i < 30; ++i) {
2821 imm = (1 << i);
2822 if (ins->inst_imm == imm)
2823 break;
2825 if (i < 30)
2826 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2827 else
2828 EMIT_ALU_IMM (ins, smul, FALSE);
2829 break;
2831 case OP_IMUL_OVF:
2832 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2833 sparc_rdy (code, sparc_g1);
2834 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2835 sparc_cmp (code, sparc_g1, sparc_o7);
2836 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2837 break;
2838 case OP_IMUL_OVF_UN:
2839 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2840 sparc_rdy (code, sparc_o7);
2841 sparc_cmp (code, sparc_o7, sparc_g0);
2842 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2843 break;
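/*
 * Overflow tests used above, as a C sketch (the high word of the 64-bit
 * product lands in %y after smul/umul; a, b, ua, ub and throw_overflow are
 * placeholders):
 */
#if 0
gint64 p = (gint64)a * (gint64)b;        /* smul: low word -> dreg, high word -> %y */
if ((gint32)(p >> 32) != ((gint32)p >> 31))
	throw_overflow ();               /* OP_IMUL_OVF: high word must be the sign copy */
guint64 up = (guint64)ua * (guint64)ub;  /* umul */
if ((up >> 32) != 0)
	throw_overflow ();               /* OP_IMUL_OVF_UN: high word must be zero */
#endif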
2844 case OP_ICONST:
2845 sparc_set (code, ins->inst_c0, ins->dreg);
2846 break;
2847 case OP_I8CONST:
2848 sparc_set (code, ins->inst_l, ins->dreg);
2849 break;
2850 case OP_AOTCONST:
2851 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2852 sparc_set_template (code, ins->dreg);
2853 break;
2854 case OP_JUMP_TABLE:
2855 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2856 sparc_set_template (code, ins->dreg);
2857 break;
2858 case OP_ICONV_TO_I4:
2859 case OP_ICONV_TO_U4:
2860 case OP_MOVE:
2861 if (ins->sreg1 != ins->dreg)
2862 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2863 break;
2864 case OP_FMOVE:
2865 #ifdef SPARCV9
2866 if (ins->sreg1 != ins->dreg)
2867 sparc_fmovd (code, ins->sreg1, ins->dreg);
2868 #else
2869 sparc_fmovs (code, ins->sreg1, ins->dreg);
2870 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2871 #endif
2872 break;
2873 case OP_JMP:
2874 if (cfg->method->save_lmf)
2875 NOT_IMPLEMENTED;
2877 code = emit_load_volatile_arguments (cfg, code);
2878 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2879 sparc_set_template (code, sparc_o7);
2880 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2881 /* Restore parent frame in delay slot */
2882 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2883 break;
2884 case OP_CHECK_THIS:
2885 /* ensure ins->sreg1 is not NULL */
2886 /* Might be misaligned in case of vtypes so use a byte load */
2887 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
2888 break;
2889 case OP_ARGLIST:
2890 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2891 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2892 break;
2893 case OP_FCALL:
2894 case OP_LCALL:
2895 case OP_VCALL:
2896 case OP_VCALL2:
2897 case OP_VOIDCALL:
2898 case OP_CALL:
2899 call = (MonoCallInst*)ins;
2900 g_assert (!call->virtual);
2901 code = emit_save_sp_to_lmf (cfg, code);
2902 if (ins->flags & MONO_INST_HAS_METHOD)
2903 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2904 else
2905 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2907 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2908 code = emit_move_return_value (ins, code);
2909 break;
2910 case OP_FCALL_REG:
2911 case OP_LCALL_REG:
2912 case OP_VCALL_REG:
2913 case OP_VCALL2_REG:
2914 case OP_VOIDCALL_REG:
2915 case OP_CALL_REG:
2916 call = (MonoCallInst*)ins;
2917 code = emit_save_sp_to_lmf (cfg, code);
2918 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2920 * We emit a special kind of nop in the delay slot to tell the
2921 * trampoline code that this is a virtual call, thus an unbox
2922 * trampoline might need to be called.
2924 if (call->virtual)
2925 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2926 else
2927 sparc_nop (code);
2929 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2930 code = emit_move_return_value (ins, code);
2931 break;
2932 case OP_FCALL_MEMBASE:
2933 case OP_LCALL_MEMBASE:
2934 case OP_VCALL_MEMBASE:
2935 case OP_VCALL2_MEMBASE:
2936 case OP_VOIDCALL_MEMBASE:
2937 case OP_CALL_MEMBASE:
2938 call = (MonoCallInst*)ins;
2939 code = emit_save_sp_to_lmf (cfg, code);
2940 if (sparc_is_imm13 (ins->inst_offset)) {
2941 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2942 } else {
2943 sparc_set (code, ins->inst_offset, sparc_o7);
2944 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
2946 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2947 if (call->virtual)
2948 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2949 else
2950 sparc_nop (code);
2952 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2953 code = emit_move_return_value (ins, code);
2954 break;
2955 case OP_SETFRET:
2956 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
2957 sparc_fdtos (code, ins->sreg1, sparc_f0);
2958 else {
2959 #ifdef SPARCV9
2960 sparc_fmovd (code, ins->sreg1, ins->dreg);
2961 #else
2962 /* FIXME: Why not use fmovd ? */
2963 sparc_fmovs (code, ins->sreg1, ins->dreg);
2964 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2965 #endif
2967 break;
2968 case OP_LOCALLOC: {
2969 guint32 size_reg;
2970 gint32 offset2;
2972 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2973 /* Perform stack touching */
2974 NOT_IMPLEMENTED;
2975 #endif
2977 /* Keep alignment */
2978 /* Add 4 to compensate for the rounding of localloc_offset */
2979 sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
2980 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
2981 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2983 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
2984 #ifdef SPARCV9
2985 size_reg = sparc_g4;
2986 #else
2987 size_reg = sparc_g1;
2988 #endif
2989 sparc_mov_reg_reg (code, ins->dreg, size_reg);
2991 else
2992 size_reg = ins->sreg1;
2994 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
2995 /* Keep %sp valid at all times */
2996 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
2997 /* Round localloc_offset too so the result is at least 8 aligned */
2998 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
2999 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3000 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3002 if (ins->flags & MONO_INST_INIT) {
3003 guint32 *br [3];
3004 /* Initialize memory region */
3005 sparc_cmp_imm (code, size_reg, 0);
3006 br [0] = code;
3007 sparc_branch (code, 0, sparc_be, 0);
3008 /* delay slot */
3009 sparc_set (code, 0, sparc_o7);
3010 sparc_sub_imm (code, 0, size_reg, sparcv9 ? 8 : 4, size_reg);
3011 /* start of loop */
3012 br [1] = code;
3013 if (sparcv9)
3014 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3015 else
3016 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3017 sparc_cmp (code, sparc_o7, size_reg);
3018 br [2] = code;
3019 sparc_branch (code, 0, sparc_bl, 0);
3020 sparc_patch (br [2], br [1]);
3021 /* delay slot */
3022 sparc_add_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3023 sparc_patch (br [0], code);
3025 break;
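/*
 * C sketch of the pointer arithmetic above ('size', 'sp', 'aligned' and
 * 'result' are placeholders; the real values live in ins->sreg1, %sp and
 * ins->dreg):
 */
#if 0
aligned = (size + 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1) & ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1);
sp = sp - aligned;  /* %sp stays valid at all times */
result = sp + MONO_SPARC_STACK_BIAS + ALIGN_TO (cfg->arch.localloc_offset, 8);
#endif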
3027 case OP_LOCALLOC_IMM: {
3028 gint32 offset = ins->inst_imm;
3029 gint32 offset2;
3031 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3032 /* Perform stack touching */
3033 NOT_IMPLEMENTED;
3034 #endif
3036 /* To compensate for the rounding of localloc_offset */
3037 offset += sizeof (gpointer);
3038 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3039 if (sparc_is_imm13 (offset))
3040 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3041 else {
3042 sparc_set (code, offset, sparc_o7);
3043 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3045 /* Round localloc_offset too so the result is at least 8 aligned */
3046 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3047 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3048 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3049 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
3050 guint32 *br [2];
3051 int i;
3053 if (offset <= 16) {
3054 i = 0;
3055 while (i < offset) {
3056 if (sparcv9) {
3057 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3058 i += 8;
3060 else {
3061 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3062 i += 4;
3066 else {
3067 sparc_set (code, offset, sparc_o7);
3068 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3069 /* beginning of loop */
3070 br [0] = code;
3071 if (sparcv9)
3072 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3073 else
3074 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3075 sparc_cmp_imm (code, sparc_o7, 0);
3076 br [1] = code;
3077 sparc_branch (code, 0, sparc_bne, 0);
3078 /* delay slot */
3079 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3080 sparc_patch (br [1], br [0]);
3083 break;
3085 case OP_THROW:
3086 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3087 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3088 (gpointer)"mono_arch_throw_exception");
3089 EMIT_CALL ();
3090 break;
3091 case OP_RETHROW:
3092 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3093 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3094 (gpointer)"mono_arch_rethrow_exception");
3095 EMIT_CALL ();
3096 break;
3097 case OP_START_HANDLER: {
3099 * The START_HANDLER instruction marks the beginning of a handler
3100 * block. It is called using a call instruction, so %o7 contains
3101 * the return address. Since the handler executes in the same stack
3102 * frame as the method itself, we can't use save/restore to save
3103 * the return address. Instead, we save it into a dedicated
3104 * variable.
3106 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3107 if (!sparc_is_imm13 (spvar->inst_offset)) {
3108 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3109 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3111 else
3112 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3113 break;
3115 case OP_ENDFILTER: {
3116 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3117 if (!sparc_is_imm13 (spvar->inst_offset)) {
3118 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3119 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3121 else
3122 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3123 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3124 /* Delay slot */
3125 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3126 break;
3128 case OP_ENDFINALLY: {
3129 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3130 if (!sparc_is_imm13 (spvar->inst_offset)) {
3131 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3132 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3134 else
3135 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3136 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3137 sparc_nop (code);
3138 break;
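/*
 * Sketch of the handler protocol implied by the three opcodes above
 * (illustrative instruction stream only):
 *
 *   call  <handler>        OP_CALL_HANDLER: %o7 = address of the call
 *   nop                      (delay slot)
 *   ...
 *   st    %o7, [spvar]     OP_START_HANDLER: stash the return address
 *   ...
 *   ld    [spvar], %o7     OP_ENDFINALLY / OP_ENDFILTER
 *   jmpl  %o7 + 8, %g0     resume after the call and its delay slot
 */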
3140 case OP_CALL_HANDLER:
3141 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3142 /* This is a jump inside the method, so call_simple works even on V9 */
3143 sparc_call_simple (code, 0);
3144 sparc_nop (code);
3145 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3146 break;
3147 case OP_LABEL:
3148 ins->inst_c0 = (guint8*)code - cfg->native_code;
3149 break;
3150 case OP_RELAXED_NOP:
3151 case OP_NOP:
3152 case OP_DUMMY_USE:
3153 case OP_DUMMY_STORE:
3154 case OP_NOT_REACHED:
3155 case OP_NOT_NULL:
3156 break;
3157 case OP_BR:
3158 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3159 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3160 break;
3161 if (ins->inst_target_bb->native_offset) {
3162 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3163 g_assert (sparc_is_imm22 (disp));
3164 sparc_branch (code, 1, sparc_ba, disp);
3165 } else {
3166 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3167 sparc_branch (code, 1, sparc_ba, 0);
3169 sparc_nop (code);
3170 break;
3171 case OP_BR_REG:
3172 sparc_jmp (code, ins->sreg1, sparc_g0);
3173 sparc_nop (code);
3174 break;
3175 case OP_CEQ:
3176 case OP_CLT:
3177 case OP_CLT_UN:
3178 case OP_CGT:
3179 case OP_CGT_UN:
3180 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3181 sparc_clr_reg (code, ins->dreg);
3182 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3184 else {
3185 sparc_clr_reg (code, ins->dreg);
3186 #ifdef SPARCV9
3187 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3188 #else
3189 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3190 #endif
3191 /* delay slot */
3192 sparc_set (code, 1, ins->dreg);
3194 break;
3195 case OP_ICEQ:
3196 case OP_ICLT:
3197 case OP_ICLT_UN:
3198 case OP_ICGT:
3199 case OP_ICGT_UN:
3200 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3201 sparc_clr_reg (code, ins->dreg);
3202 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3204 else {
3205 sparc_clr_reg (code, ins->dreg);
3206 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3207 /* delay slot */
3208 sparc_set (code, 1, ins->dreg);
3210 break;
3211 case OP_COND_EXC_EQ:
3212 case OP_COND_EXC_NE_UN:
3213 case OP_COND_EXC_LT:
3214 case OP_COND_EXC_LT_UN:
3215 case OP_COND_EXC_GT:
3216 case OP_COND_EXC_GT_UN:
3217 case OP_COND_EXC_GE:
3218 case OP_COND_EXC_GE_UN:
3219 case OP_COND_EXC_LE:
3220 case OP_COND_EXC_LE_UN:
3221 case OP_COND_EXC_OV:
3222 case OP_COND_EXC_NO:
3223 case OP_COND_EXC_C:
3224 case OP_COND_EXC_NC:
3225 case OP_COND_EXC_IEQ:
3226 case OP_COND_EXC_INE_UN:
3227 case OP_COND_EXC_ILT:
3228 case OP_COND_EXC_ILT_UN:
3229 case OP_COND_EXC_IGT:
3230 case OP_COND_EXC_IGT_UN:
3231 case OP_COND_EXC_IGE:
3232 case OP_COND_EXC_IGE_UN:
3233 case OP_COND_EXC_ILE:
3234 case OP_COND_EXC_ILE_UN:
3235 case OP_COND_EXC_IOV:
3236 case OP_COND_EXC_INO:
3237 case OP_COND_EXC_IC:
3238 case OP_COND_EXC_INC:
3239 #ifdef SPARCV9
3240 NOT_IMPLEMENTED;
3241 #else
3242 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3243 #endif
3244 break;
3245 case OP_SPARC_COND_EXC_EQZ:
3246 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3247 break;
3248 case OP_SPARC_COND_EXC_GEZ:
3249 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3250 break;
3251 case OP_SPARC_COND_EXC_GTZ:
3252 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3253 break;
3254 case OP_SPARC_COND_EXC_LEZ:
3255 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3256 break;
3257 case OP_SPARC_COND_EXC_LTZ:
3258 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3259 break;
3260 case OP_SPARC_COND_EXC_NEZ:
3261 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3262 break;
3264 case OP_IBEQ:
3265 case OP_IBNE_UN:
3266 case OP_IBLT:
3267 case OP_IBLT_UN:
3268 case OP_IBGT:
3269 case OP_IBGT_UN:
3270 case OP_IBGE:
3271 case OP_IBGE_UN:
3272 case OP_IBLE:
3273 case OP_IBLE_UN: {
3274 if (sparcv9)
3275 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3276 else
3277 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3278 break;
3281 case OP_SPARC_BRZ:
3282 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3283 break;
3284 case OP_SPARC_BRLEZ:
3285 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3286 break;
3287 case OP_SPARC_BRLZ:
3288 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3289 break;
3290 case OP_SPARC_BRNZ:
3291 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3292 break;
3293 case OP_SPARC_BRGZ:
3294 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3295 break;
3296 case OP_SPARC_BRGEZ:
3297 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3298 break;
3300 /* floating point opcodes */
3301 case OP_R8CONST:
3302 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3303 #ifdef SPARCV9
3304 sparc_set_template (code, sparc_o7);
3305 #else
3306 sparc_sethi (code, 0, sparc_o7);
3307 #endif
3308 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3309 break;
3310 case OP_R4CONST:
3311 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3312 #ifdef SPARCV9
3313 sparc_set_template (code, sparc_o7);
3314 #else
3315 sparc_sethi (code, 0, sparc_o7);
3316 #endif
3317 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3319 /* Extend to double */
3320 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3321 break;
3322 case OP_STORER8_MEMBASE_REG:
3323 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3324 sparc_set (code, ins->inst_offset, sparc_o7);
3325 /* SPARCV9 handles misaligned fp loads/stores */
3326 if (!v64 && (ins->inst_offset % 8)) {
3327 /* Misaligned */
3328 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3329 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3330 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3331 } else
3332 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3334 else {
3335 if (!v64 && (ins->inst_offset % 8)) {
3336 /* Misaligned */
3337 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3338 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3339 } else
3340 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3342 break;
3343 case OP_LOADR8_MEMBASE:
3344 EMIT_LOAD_MEMBASE (ins, lddf);
3345 break;
3346 case OP_STORER4_MEMBASE_REG:
3347 /* This requires a double->single conversion */
3348 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3349 if (!sparc_is_imm13 (ins->inst_offset)) {
3350 sparc_set (code, ins->inst_offset, sparc_o7);
3351 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3353 else
3354 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3355 break;
3356 case OP_LOADR4_MEMBASE: {
3357 /* ldf needs a single precision register */
3358 int dreg = ins->dreg;
3359 ins->dreg = FP_SCRATCH_REG;
3360 EMIT_LOAD_MEMBASE (ins, ldf);
3361 ins->dreg = dreg;
3362 /* Extend to double */
3363 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3364 break;
3366 case OP_ICONV_TO_R4: {
3367 MonoInst *spill = cfg->arch.float_spill_slot;
3368 gint32 reg = spill->inst_basereg;
3369 gint32 offset = spill->inst_offset;
3371 g_assert (spill->opcode == OP_REGOFFSET);
3372 #ifdef SPARCV9
3373 if (!sparc_is_imm13 (offset)) {
3374 sparc_set (code, offset, sparc_o7);
3375 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3376 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3377 } else {
3378 sparc_stx_imm (code, ins->sreg1, reg, offset);
3379 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3381 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3382 #else
3383 if (!sparc_is_imm13 (offset)) {
3384 sparc_set (code, offset, sparc_o7);
3385 sparc_st (code, ins->sreg1, reg, sparc_o7);
3386 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3387 } else {
3388 sparc_st_imm (code, ins->sreg1, reg, offset);
3389 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3391 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3392 #endif
3393 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3394 break;
3396 case OP_ICONV_TO_R8: {
3397 MonoInst *spill = cfg->arch.float_spill_slot;
3398 gint32 reg = spill->inst_basereg;
3399 gint32 offset = spill->inst_offset;
3401 g_assert (spill->opcode == OP_REGOFFSET);
3403 #ifdef SPARCV9
3404 if (!sparc_is_imm13 (offset)) {
3405 sparc_set (code, offset, sparc_o7);
3406 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3407 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3408 } else {
3409 sparc_stx_imm (code, ins->sreg1, reg, offset);
3410 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3412 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3413 #else
3414 if (!sparc_is_imm13 (offset)) {
3415 sparc_set (code, offset, sparc_o7);
3416 sparc_st (code, ins->sreg1, reg, sparc_o7);
3417 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3418 } else {
3419 sparc_st_imm (code, ins->sreg1, reg, offset);
3420 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3422 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3423 #endif
3424 break;
3426 case OP_FCONV_TO_I1:
3427 case OP_FCONV_TO_U1:
3428 case OP_FCONV_TO_I2:
3429 case OP_FCONV_TO_U2:
3430 #ifndef SPARCV9
3431 case OP_FCONV_TO_I:
3432 case OP_FCONV_TO_U:
3433 #endif
3434 case OP_FCONV_TO_I4:
3435 case OP_FCONV_TO_U4: {
3436 MonoInst *spill = cfg->arch.float_spill_slot;
3437 gint32 reg = spill->inst_basereg;
3438 gint32 offset = spill->inst_offset;
3440 g_assert (spill->opcode == OP_REGOFFSET);
3442 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3443 if (!sparc_is_imm13 (offset)) {
3444 sparc_set (code, offset, sparc_o7);
3445 sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7);
3446 sparc_ld (code, reg, sparc_o7, ins->dreg);
3447 } else {
3448 sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset);
3449 sparc_ld_imm (code, reg, offset, ins->dreg);
3452 switch (ins->opcode) {
3453 case OP_FCONV_TO_I1:
3454 case OP_FCONV_TO_U1:
3455 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3456 break;
3457 case OP_FCONV_TO_I2:
3458 case OP_FCONV_TO_U2:
3459 sparc_set (code, 0xffff, sparc_o7);
3460 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3461 break;
3462 default:
3463 break;
3465 break;
3467 case OP_FCONV_TO_I8:
3468 case OP_FCONV_TO_U8:
3469 /* Emulated */
3470 g_assert_not_reached ();
3471 break;
3472 case OP_FCONV_TO_R4:
3473 /* FIXME: Change precision ? */
3474 #ifdef SPARCV9
3475 sparc_fmovd (code, ins->sreg1, ins->dreg);
3476 #else
3477 sparc_fmovs (code, ins->sreg1, ins->dreg);
3478 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3479 #endif
3480 break;
3481 case OP_LCONV_TO_R_UN: {
3482 /* Emulated */
3483 g_assert_not_reached ();
3484 break;
3486 case OP_LCONV_TO_OVF_I:
3487 case OP_LCONV_TO_OVF_I4_2: {
3488 guint32 *br [3], *label [1];
3491 * Valid longs: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff (values that fit in a signed 32-bit int)
3493 sparc_cmp_imm (code, ins->sreg1, 0);
3494 br [0] = code;
3495 sparc_branch (code, 1, sparc_bneg, 0);
3496 sparc_nop (code);
3498 /* positive */
3499 /* ms word must be 0 */
3500 sparc_cmp_imm (code, ins->sreg2, 0);
3501 br [1] = code;
3502 sparc_branch (code, 1, sparc_be, 0);
3503 sparc_nop (code);
3505 label [0] = code;
3507 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3509 /* negative */
3510 sparc_patch (br [0], code);
3512 /* ms word must be 0xffffffff */
3513 sparc_cmp_imm (code, ins->sreg2, -1);
3514 br [2] = code;
3515 sparc_branch (code, 1, sparc_bne, 0);
3516 sparc_nop (code);
3517 sparc_patch (br [2], label [0]);
3519 /* Ok */
3520 sparc_patch (br [1], code);
3521 if (ins->sreg1 != ins->dreg)
3522 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3523 break;
3525 case OP_FADD:
3526 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3527 break;
3528 case OP_FSUB:
3529 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3530 break;
3531 case OP_FMUL:
3532 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3533 break;
3534 case OP_FDIV:
3535 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3536 break;
3537 case OP_FNEG:
3538 #ifdef SPARCV9
3539 sparc_fnegd (code, ins->sreg1, ins->dreg);
3540 #else
3541 /* FIXME: why not use fnegd ? */
3542 sparc_fnegs (code, ins->sreg1, ins->dreg);
3543 #endif
3544 break;
3545 case OP_FREM:
3546 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3547 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3548 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3549 break;
3550 case OP_FCOMPARE:
3551 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3552 break;
3553 case OP_FCEQ:
3554 case OP_FCLT:
3555 case OP_FCLT_UN:
3556 case OP_FCGT:
3557 case OP_FCGT_UN:
3558 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3559 sparc_clr_reg (code, ins->dreg);
3560 switch (ins->opcode) {
3561 case OP_FCLT_UN:
3562 case OP_FCGT_UN:
3563 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3564 /* delay slot */
3565 sparc_set (code, 1, ins->dreg);
3566 sparc_fbranch (code, 1, sparc_fbu, 2);
3567 /* delay slot */
3568 sparc_set (code, 1, ins->dreg);
3569 break;
3570 default:
3571 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3572 /* delay slot */
3573 sparc_set (code, 1, ins->dreg);
3575 break;
3576 case OP_FBEQ:
3577 case OP_FBLT:
3578 case OP_FBGT:
3579 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3580 break;
3581 case OP_FBGE: {
3582 /* clt.un + brfalse */
3583 guint32 *p = code;
3584 sparc_fbranch (code, 1, sparc_fbul, 0);
3585 /* delay slot */
3586 sparc_nop (code);
3587 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3588 sparc_patch (p, (guint8*)code);
3589 break;
3591 case OP_FBLE: {
3592 /* cgt.un + brfalse */
3593 guint32 *p = code;
3594 sparc_fbranch (code, 1, sparc_fbug, 0);
3595 /* delay slot */
3596 sparc_nop (code);
3597 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3598 sparc_patch (p, (guint8*)code);
3599 break;
3601 case OP_FBNE_UN:
3602 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3603 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3604 break;
3605 case OP_FBLT_UN:
3606 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3607 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3608 break;
3609 case OP_FBGT_UN:
3610 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3611 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3612 break;
3613 case OP_FBGE_UN:
3614 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3615 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3616 break;
3617 case OP_FBLE_UN:
3618 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3619 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3620 break;
3621 case OP_CKFINITE: {
3622 MonoInst *spill = cfg->arch.float_spill_slot;
3623 gint32 reg = spill->inst_basereg;
3624 gint32 offset = spill->inst_offset;
3626 g_assert (spill->opcode == OP_REGOFFSET);
3628 if (!sparc_is_imm13 (offset)) {
3629 sparc_set (code, offset, sparc_o7);
3630 sparc_stdf (code, ins->sreg1, reg, sparc_o7);
3631 sparc_lduh (code, reg, sparc_o7, sparc_o7);
3632 } else {
3633 sparc_stdf_imm (code, ins->sreg1, reg, offset);
3634 sparc_lduh_imm (code, reg, offset, sparc_o7);
3636 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3637 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3638 sparc_cmp_imm (code, sparc_o7, 2047);
3639 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
3640 #ifdef SPARCV9
3641 sparc_fmovd (code, ins->sreg1, ins->dreg);
3642 #else
3643 sparc_fmovs (code, ins->sreg1, ins->dreg);
3644 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3645 #endif
3646 break;
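/*
 * The check above extracts the IEEE-754 exponent field; a C sketch
 * ('high16' stands for the first 16 bits of the stored big-endian double,
 * 'exponent' and throw_arithmetic_exception are placeholders):
 */
#if 0
exponent = (high16 >> 4) & 0x7ff;  /* bits 62..52 of the double */
if (exponent == 0x7ff)             /* all ones: NaN or +/- infinity */
	throw_arithmetic_exception ();
#endif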
3649 case OP_MEMORY_BARRIER:
3650 sparc_membar (code, sparc_membar_all);
3651 break;
3653 default:
3654 #ifdef __GNUC__
3655 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3656 #else
3657 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3658 #endif
3659 g_assert_not_reached ();
3662 if ((((guint8*)code) - code_start) > max_len) {
3663 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3664 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3665 g_assert_not_reached ();
3668 cpos += max_len;
3670 last_ins = ins;
3673 cfg->code_len = (guint8*)code - cfg->native_code;
3676 void
3677 mono_arch_register_lowlevel_calls (void)
3679 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
3682 void
3683 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
3685 MonoJumpInfo *patch_info;
3687 /* FIXME: Move part of this to arch independent code */
3688 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3689 unsigned char *ip = patch_info->ip.i + code;
3690 gpointer target;
3692 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3694 switch (patch_info->type) {
3695 case MONO_PATCH_INFO_NONE:
3696 continue;
3697 case MONO_PATCH_INFO_CLASS_INIT: {
3698 guint32 *ip2 = (guint32*)ip;
3699 /* Might already have been changed to a nop */
3700 #ifdef SPARCV9
3701 sparc_set_template (ip2, sparc_o7);
3702 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
3703 #else
3704 sparc_call_simple (ip2, 0);
3705 #endif
3706 break;
3708 case MONO_PATCH_INFO_METHOD_JUMP: {
3709 guint32 *ip2 = (guint32*)ip;
3710 /* Might already have been patched */
3711 sparc_set_template (ip2, sparc_o7);
3712 break;
3714 default:
3715 break;
3717 sparc_patch ((guint32*)ip, target);
3721 void*
3722 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3724 int i;
3725 guint32 *code = (guint32*)p;
3726 MonoMethodSignature *sig = mono_method_signature (cfg->method);
3727 CallInfo *cinfo;
3729 /* Save registers to stack */
3730 for (i = 0; i < 6; ++i)
3731 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
3733 cinfo = get_call_info (cfg, sig, FALSE);
3735 /* Save float regs on V9, since they are caller saved */
3736 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3737 ArgInfo *ainfo = cinfo->args + i;
3738 gint32 stack_offset;
3740 stack_offset = ainfo->offset + ARGS_OFFSET;
3742 if (ainfo->storage == ArgInFloatReg) {
3743 if (!sparc_is_imm13 (stack_offset))
3744 NOT_IMPLEMENTED;
3745 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3747 else if (ainfo->storage == ArgInDoubleReg) {
3748 /* The offset is guaranteed to be aligned by the ABI rules */
3749 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3753 sparc_set (code, cfg->method, sparc_o0);
3754 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3756 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3757 EMIT_CALL ();
3759 /* Restore float regs on V9 */
3760 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3761 ArgInfo *ainfo = cinfo->args + i;
3762 gint32 stack_offset;
3764 stack_offset = ainfo->offset + ARGS_OFFSET;
3766 if (ainfo->storage == ArgInFloatReg) {
3767 if (!sparc_is_imm13 (stack_offset))
3768 NOT_IMPLEMENTED;
3769 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3771 else if (ainfo->storage == ArgInDoubleReg) {
3772 /* The offset is guaranteed to be aligned by the ABI rules */
3773 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3777 g_free (cinfo);
3779 return code;
3782 enum {
3783 SAVE_NONE,
3784 SAVE_STRUCT,
3785 SAVE_ONE,
3786 SAVE_TWO,
3787 SAVE_FP
3790 void*
3791 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
3793 guint32 *code = (guint32*)p;
3794 int save_mode = SAVE_NONE;
3795 MonoMethod *method = cfg->method;
3797 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
3798 case MONO_TYPE_VOID:
3799 /* special case string .ctor icall */
3800 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3801 save_mode = SAVE_ONE;
3802 else
3803 save_mode = SAVE_NONE;
3804 break;
3805 case MONO_TYPE_I8:
3806 case MONO_TYPE_U8:
3807 #ifdef SPARCV9
3808 save_mode = SAVE_ONE;
3809 #else
3810 save_mode = SAVE_TWO;
3811 #endif
3812 break;
3813 case MONO_TYPE_R4:
3814 case MONO_TYPE_R8:
3815 save_mode = SAVE_FP;
3816 break;
3817 case MONO_TYPE_VALUETYPE:
3818 save_mode = SAVE_STRUCT;
3819 break;
3820 default:
3821 save_mode = SAVE_ONE;
3822 break;
3825 /* Save the result to the stack and also put it into the output registers */
3827 switch (save_mode) {
3828 case SAVE_TWO:
3829 /* V8 only */
3830 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3831 sparc_st_imm (code, sparc_i1, sparc_fp, 72);
3832 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3833 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3834 break;
3835 case SAVE_ONE:
3836 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3837 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3838 break;
3839 case SAVE_FP:
3840 #ifdef SPARCV9
3841 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
3842 #else
3843 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3844 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3845 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3846 #endif
3847 break;
3848 case SAVE_STRUCT:
3849 #ifdef SPARCV9
3850 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3851 #else
3852 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
3853 #endif
3854 break;
3855 case SAVE_NONE:
3856 default:
3857 break;
3860 sparc_set (code, cfg->method, sparc_o0);
3862 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
3863 EMIT_CALL ();
3865 /* Restore result */
3867 switch (save_mode) {
3868 case SAVE_TWO:
3869 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3870 sparc_ld_imm (code, sparc_fp, 72, sparc_i1);
3871 break;
3872 case SAVE_ONE:
3873 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3874 break;
3875 case SAVE_FP:
3876 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
3877 break;
3878 case SAVE_NONE:
3879 default:
3880 break;
3883 return code;
3886 guint8 *
3887 mono_arch_emit_prolog (MonoCompile *cfg)
3889 MonoMethod *method = cfg->method;
3890 MonoMethodSignature *sig;
3891 MonoInst *inst;
3892 guint32 *code;
3893 CallInfo *cinfo;
3894 guint32 i, offset;
3896 cfg->code_size = 256;
3897 cfg->native_code = g_malloc (cfg->code_size);
3898 code = (guint32*)cfg->native_code;
3900 /* FIXME: Generate intermediate code instead */
3902 offset = cfg->stack_offset;
3903 offset += (16 * sizeof (gpointer)); /* register save area */
3904 #ifndef SPARCV9
3905 offset += 4; /* struct/union return pointer */
3906 #endif
3908 /* add parameter area size for called functions */
3909 if (cfg->param_area < (6 * sizeof (gpointer)))
3910 /* Reserve space for the first 6 arguments even if it is unused */
3911 offset += 6 * sizeof (gpointer);
3912 else
3913 offset += cfg->param_area;
3915 /* align the stack size */
3916 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3919 * localloc'd memory is stored between the local variables (whose
3920 * size is given by cfg->stack_offset) and the space reserved
3921 * by the ABI.
3923 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3925 cfg->stack_offset = offset;
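/*
 * Rough sketch of the resulting frame, from %sp upwards (both %sp and %fp
 * are offset by the stack bias on V9):
 *   16 * sizeof (gpointer)   register window save area
 *   4 bytes (V8 only)        struct/union return pointer slot
 *   >= 6 words               outgoing parameter area
 *   ...                      localloc'd memory, starting at localloc_offset
 *   locals and spill slots   addressed off %fp
 * The total, rounded up to MONO_ARCH_FRAME_ALIGNMENT, is the new
 * cfg->stack_offset used by the save instruction below.
 */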
3927 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3928 /* Perform stack touching */
3929 NOT_IMPLEMENTED;
3930 #endif
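/*
 * The immediate form of save takes a signed 13 bit displacement
 * (-4096..4095), so frames larger than 4K have to load the negated frame
 * size into a scratch register and use the register form instead.
 */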
3932 if (!sparc_is_imm13 (- cfg->stack_offset)) {
3933 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3934 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
3935 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
3937 else
3938 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
3941 if (strstr (cfg->method->name, "foo")) {
3942 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
3943 sparc_call_simple (code, 0);
3944 sparc_nop (code);
3948 sig = mono_method_signature (method);
3950 cinfo = get_call_info (cfg, sig, FALSE);
3952 /* Keep in sync with emit_load_volatile_arguments */
3953 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3954 ArgInfo *ainfo = cinfo->args + i;
3955 gint32 stack_offset;
3956 MonoType *arg_type;
3957 inst = cfg->args [i];
3959 if (sig->hasthis && (i == 0))
3960 arg_type = &mono_defaults.object_class->byval_arg;
3961 else
3962 arg_type = sig->params [i - sig->hasthis];
3964 stack_offset = ainfo->offset + ARGS_OFFSET;
3966 /* Save the split arguments so they will reside entirely on the stack */
3967 if (ainfo->storage == ArgInSplitRegStack) {
3968 /* Save the register to the stack */
3969 g_assert (inst->opcode == OP_REGOFFSET);
3970 if (!sparc_is_imm13 (stack_offset))
3971 NOT_IMPLEMENTED;
3972 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
3975 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
3976 /* Save the argument to a dword aligned stack location */
3978 * stack_offset contains the offset of the argument on the stack.
3979 * inst->inst_offset contains the dword aligned offset where the value
3980 * should be stored.
3982 if (ainfo->storage == ArgInIRegPair) {
3983 if (!sparc_is_imm13 (inst->inst_offset + 4))
3984 NOT_IMPLEMENTED;
3985 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3986 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3988 else
3989 if (ainfo->storage == ArgInSplitRegStack) {
3990 #ifdef SPARCV9
3991 g_assert_not_reached ();
3992 #endif
3993 if (stack_offset != inst->inst_offset) {
3994 /* stack_offset is not dword aligned, so we need to make a copy */
3995 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
3996 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3997 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4000 else
4001 if (ainfo->storage == ArgOnStackPair) {
4002 #ifdef SPARCV9
4003 g_assert_not_reached ();
4004 #endif
4005 if (stack_offset != inst->inst_offset) {
4006 /* stack_offset is not dword aligned, so we need to make a copy */
4007 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
4008 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
4009 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4010 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4013 else
4014 g_assert_not_reached ();
4016 else
4017 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4018 /* Argument is in a register, but needs to be saved to the stack */
4019 if (!sparc_is_imm13 (stack_offset))
4020 NOT_IMPLEMENTED;
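/*
 * The low bits of (stack_offset - ARGS_OFFSET) appear to encode the natural
 * size of the slot assigned by get_call_info (byte arguments end up on a
 * byte aligned offset, halfwords on a 2 byte aligned one, etc.), so the
 * matching store width is picked below; fully aligned slots get a whole
 * register (stx on V9, st on V8).
 */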
4021 if ((stack_offset - ARGS_OFFSET) & 0x1)
4022 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4023 else
4024 if ((stack_offset - ARGS_OFFSET) & 0x2)
4025 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4026 else
4027 if ((stack_offset - ARGS_OFFSET) & 0x4)
4028 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4029 else {
4030 if (v64)
4031 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4032 else
4033 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4036 else
4037 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4038 #ifdef SPARCV9
4039 NOT_IMPLEMENTED;
4040 #endif
4041 /* Argument is in a register pair, but needs to be saved to the stack */
4042 if (!sparc_is_imm13 (inst->inst_offset + 4))
4043 NOT_IMPLEMENTED;
4044 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4045 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4047 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4048 if (!sparc_is_imm13 (stack_offset))
4049 NOT_IMPLEMENTED;
4050 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4052 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4053 /* The offset is guaranteed to be aligned by the ABI rules */
4054 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4057 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4058 /* Need to move it into a double precision register */
4059 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4062 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4063 if (inst->opcode == OP_REGVAR)
4064 /* FIXME: Load the argument from the stack into the register */
4065 NOT_IMPLEMENTED;
4068 g_free (cinfo);
4070 if (cfg->method->save_lmf) {
4071 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4073 /* Save ip */
4074 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4075 sparc_set_template (code, sparc_o7);
4076 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4077 /* Save sp */
4078 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4079 /* Save fp */
4080 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4081 /* Save method */
4082 /* FIXME: add a relocation for this */
4083 sparc_set (code, cfg->method, sparc_o7);
4084 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4086 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4087 (gpointer)"mono_arch_get_lmf_addr");
4088 EMIT_CALL ();
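/*
 * The LMF (Last Managed Frame) records ip/sp/fp and the method so the
 * runtime can unwind from native code back into managed frames;
 * mono_sparc_emit_save_lmf () below links the MonoLMF at lmf_offset into
 * the per-thread list whose address was just returned by
 * mono_arch_get_lmf_addr ().
 */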
4090 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4093 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4094 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4096 cfg->code_len = (guint8*)code - cfg->native_code;
4098 g_assert (cfg->code_len <= cfg->code_size);
4100 return (guint8*)code;
4103 void
4104 mono_arch_emit_epilog (MonoCompile *cfg)
4106 MonoMethod *method = cfg->method;
4107 guint32 *code;
4108 int can_fold = 0;
4109 int max_epilog_size = 16 + 20 * 4;
4111 if (cfg->method->save_lmf)
4112 max_epilog_size += 128;
4114 if (mono_jit_trace_calls != NULL)
4115 max_epilog_size += 50;
4117 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4118 max_epilog_size += 50;
4120 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4121 cfg->code_size *= 2;
4122 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4123 cfg->stat_code_reallocs++;
4126 code = (guint32*)(cfg->native_code + cfg->code_len);
4128 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4129 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4131 if (cfg->method->save_lmf) {
4132 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4134 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4138 * The V8 ABI requires that calls to functions which return a structure
4139 * return to %i7+12
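* (the V8 caller places an "unimp <struct size>" word right after the
* call's delay slot, so the usual return address %i7+8 would land on it;
* returning to %i7+12 skips over it)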
4141 if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
4142 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4143 else
4144 sparc_ret (code);
4146 /* Only fold the last instruction into the restore if the exit block has an in count of 1
4147 and the previous block hasn't been optimized away, since it may have an in count > 1 */
4148 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4149 can_fold = 1;
4152 * FIXME: The last instruction might have a branch pointing into it like in
4153 * int_ceq sparc_i0 <-
4155 can_fold = 0;
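/*
 * A sketch of what the fold would do (register and immediate made up):
 *
 *     or   %l3, 10, %i0      ! move the result into the return register
 *     jmpl %i7 + 8, %g0      ! ret
 *     restore %g0, %g0, %g0
 *
 * becomes
 *
 *     jmpl %i7 + 8, %g0      ! ret
 *     restore %l3, 10, %o0   ! restore pops the window and writes the
 *                            ! result into the caller's %o0 in one go
 */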
4157 /* Try folding last instruction into the restore */
4158 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4159 /* or reg, imm, %i0 */
4160 int reg = sparc_inst_rs1 (code [-2]);
4161 int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
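/* The simm13 field occupies the low 13 bits of the instruction word;
   shifting left by 19 and arithmetically right by 19 (19 == 32 - 13)
   sign extends it to a full 32 bit value. */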
4162 code [-2] = code [-1];
4163 code --;
4164 sparc_restore_imm (code, reg, imm, sparc_o0);
4166 else
4167 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4168 /* or reg, reg, %i0 */
4169 int reg1 = sparc_inst_rs1 (code [-2]);
4170 int reg2 = sparc_inst_rs2 (code [-2]);
4171 code [-2] = code [-1];
4172 code --;
4173 sparc_restore (code, reg1, reg2, sparc_o0);
4175 else
4176 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4178 cfg->code_len = (guint8*)code - cfg->native_code;
4180 g_assert (cfg->code_len < cfg->code_size);
4184 void
4185 mono_arch_emit_exceptions (MonoCompile *cfg)
4187 MonoJumpInfo *patch_info;
4188 guint32 *code;
4189 int nthrows = 0, i;
4190 int exc_count = 0;
4191 guint32 code_size;
4192 MonoClass *exc_classes [16];
4193 guint8 *exc_throw_start [16], *exc_throw_end [16];
4195 /* Compute needed space */
4196 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4197 if (patch_info->type == MONO_PATCH_INFO_EXC)
4198 exc_count++;
4202 * make sure we have enough space for exceptions
4204 #ifdef SPARCV9
4205 code_size = exc_count * (20 * 4);
4206 #else
4207 code_size = exc_count * 24;
4208 #endif
4210 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4211 cfg->code_size *= 2;
4212 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4213 cfg->stat_code_reallocs++;
4216 code = (guint32*)(cfg->native_code + cfg->code_len);
4218 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4219 switch (patch_info->type) {
4220 case MONO_PATCH_INFO_EXC: {
4221 MonoClass *exc_class;
4222 guint32 *buf, *buf2;
4223 guint32 throw_ip, type_idx;
4224 gint32 disp;
4226 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4228 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4229 g_assert (exc_class);
4230 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4231 throw_ip = patch_info->ip.i;
4233 /* Find a throw sequence for the same exception class */
4234 for (i = 0; i < nthrows; ++i)
4235 if (exc_classes [i] == exc_class)
4236 break;
4238 if (i < nthrows) {
4239 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4240 if (!sparc_is_imm13 (throw_offset))
4241 sparc_set32 (code, throw_offset, sparc_o1);
4243 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4244 g_assert (sparc_is_imm22 (disp));
4245 sparc_branch (code, 0, sparc_ba, disp);
4246 if (sparc_is_imm13 (throw_offset))
4247 sparc_set32 (code, throw_offset, sparc_o1);
4248 else
4249 sparc_nop (code);
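/*
 * When throw_offset fits in a simm13 the single-instruction sparc_set32
 * above is emitted after the ba so it executes in the branch delay slot;
 * otherwise the longer set sequence is emitted before the branch and a
 * nop fills the slot.
 */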
4250 patch_info->type = MONO_PATCH_INFO_NONE;
4252 else {
4253 /* Emit the template for setting o1 */
4254 buf = code;
4255 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4256 /* Can use a short form */
4257 sparc_nop (code);
4258 else
4259 sparc_set_template (code, sparc_o1);
4260 buf2 = code;
4262 if (nthrows < 16) {
4263 exc_classes [nthrows] = exc_class;
4264 exc_throw_start [nthrows] = (guint8*)code;
4268 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4269 EMIT_CALL();
4272 /* first arg = type token */
4273 /* Pass the type index to reduce the size of the sparc_set */
4274 if (!sparc_is_imm13 (type_idx))
4275 sparc_set32 (code, type_idx, sparc_o0);
4277 /* second arg = offset between the throw ip and the current ip */
4278 /* On sparc, the saved ip points to the call instruction */
4279 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4280 sparc_set32 (buf, disp, sparc_o1);
4281 while (buf < buf2)
4282 sparc_nop (buf);
4284 if (nthrows < 16) {
4285 exc_throw_end [nthrows] = (guint8*)code;
4286 nthrows ++;
4289 patch_info->data.name = "mono_arch_throw_corlib_exception";
4290 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4291 patch_info->ip.i = (guint8*)code - cfg->native_code;
4293 EMIT_CALL ();
4295 if (sparc_is_imm13 (type_idx)) {
4296 /* Put it into the delay slot */
4297 code --;
4298 buf = code;
4299 sparc_set32 (code, type_idx, sparc_o0);
4300 g_assert (code - buf == 1);
4303 break;
4305 default:
4306 /* do nothing */
4307 break;
4311 cfg->code_len = (guint8*)code - cfg->native_code;
4313 g_assert (cfg->code_len < cfg->code_size);
4317 gboolean lmf_addr_key_inited = FALSE;
4319 #ifdef MONO_SPARC_THR_TLS
4320 thread_key_t lmf_addr_key;
4321 #else
4322 pthread_key_t lmf_addr_key;
4323 #endif
4325 gpointer
4326 mono_arch_get_lmf_addr (void)
4328 /* This is perf critical so we bypass the IO layer */
4329 /* The thr_... functions seem to be somewhat faster */
4330 #ifdef MONO_SPARC_THR_TLS
4331 gpointer res;
4332 thr_getspecific (lmf_addr_key, &res);
4333 return res;
4334 #else
4335 return pthread_getspecific (lmf_addr_key);
4336 #endif
4339 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4342 * There seems to be no way to determine stack boundaries under solaris,
4343 * so it's not possible to determine whether a SIGSEGV is caused by stack
4344 * overflow or not.
4346 #error "--with-sigaltstack=yes not supported on solaris"
4348 #endif
4350 void
4351 mono_arch_finish_init (void)
4353 if (!lmf_addr_key_inited) {
4354 int res;
4356 lmf_addr_key_inited = TRUE;
4358 #ifdef MONO_SPARC_THR_TLS
4359 res = thr_keycreate (&lmf_addr_key, NULL);
4360 #else
4361 res = pthread_key_create (&lmf_addr_key, NULL);
4362 #endif
4363 g_assert (res == 0);
4367 #ifdef MONO_SPARC_THR_TLS
4368 thr_setspecific (lmf_addr_key, &tls->lmf);
4369 #else
4370 pthread_setspecific (lmf_addr_key, &tls->lmf);
4371 #endif
4374 void
4375 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4379 MonoInst*
4380 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4382 MonoInst *ins = NULL;
4384 return ins;
4388 * mono_arch_get_argument_info:
4389 * @csig: a method signature
4390 * @param_count: the number of parameters to consider
4391 * @arg_info: an array to store the result infos
4393 * Gathers information on parameters such as size, alignment and
4394 * padding. arg_info should be large enough to hold param_count + 1 entries.
4396 * Returns the size of the activation frame.
4399 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4401 int k, align;
4402 CallInfo *cinfo;
4403 ArgInfo *ainfo;
4405 cinfo = get_call_info (NULL, csig, FALSE);
4407 if (csig->hasthis) {
4408 ainfo = &cinfo->args [0];
4409 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4412 for (k = 0; k < param_count; k++) {
4413 ainfo = &cinfo->args [k + csig->hasthis];
4415 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4416 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
4419 g_free (cinfo);
4421 return 0;
4424 gboolean
4425 mono_arch_print_tree (MonoInst *tree, int arity)
4427 return 0;
4430 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4432 return NULL;
4435 mgreg_t
4436 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4438 /* FIXME: implement */
4439 g_assert_not_reached ();