Revert "update rx to the latest rx-oss-v1.1 build."
[mono-project.git] / mono / mini / mini-ia64.c
blobc45305e2668a99e960a45797daec97f6011df4a3
/*
 * mini-ia64.c: IA64 backend for the Mono code generator
 *
 * Authors:
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <sys/mman.h>

#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>

#include "trace.h"
#include "mini-ia64.h"
#include "cpu-ia64.h"
#include "jit-icalls.h"
#include "ir-emit.h"
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
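
/*
 * Example: ALIGN_TO (13, 8) == 16 and ALIGN_TO (16, 8) == 16; the alignment
 * must be a power of two. IS_IMM32 (val) is TRUE when the upper 32 bits of
 * val are zero, i.e. the value fits into an unsigned 32 bit immediate.
 */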
/*
 * IA64 register usage:
 * - local registers are used for global register allocation
 * - r8..r11, r14..r30 are used for local register allocation
 * - r31 is a scratch register used within opcode implementations
 * - FIXME: Use out registers as well
 * - the first three locals are used for saving ar.pfs, b0, and sp
 * - compare instructions always set p6 and p7
 */
/*
 * There are a lot of places where generated code is disassembled/patched.
 * The automatic bundling of instructions done by the code generation macros
 * could complicate things, so it is best to call
 * ia64_codegen_set_one_ins_per_bundle () at those places.
 */
#define ARGS_OFFSET 16

#define GP_SCRATCH_REG 31
#define GP_SCRATCH_REG2 30
#define FP_SCRATCH_REG 32
#define FP_SCRATCH_REG2 33

#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
static const char* gregs [] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
	"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
	"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29",
	"r30", "r31", "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
	"r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", "r48", "r49",
	"r50", "r51", "r52", "r53", "r54", "r55", "r56", "r57", "r58", "r59",
	"r60", "r61", "r62", "r63", "r64", "r65", "r66", "r67", "r68", "r69",
	"r70", "r71", "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
	"r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87", "r88", "r89",
	"r90", "r91", "r92", "r93", "r94", "r95", "r96", "r97", "r98", "r99",
	"r100", "r101", "r102", "r103", "r104", "r105", "r106", "r107", "r108", "r109",
	"r110", "r111", "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
	"r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127"
};
const char*
mono_arch_regname (int reg)
{
	if (reg < 128)
		return gregs [reg];
	else
		return "unknown";
}
static const char* fregs [] = {
	"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9",
	"f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19",
	"f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29",
	"f30", "f31", "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
	"f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47", "f48", "f49",
	"f50", "f51", "f52", "f53", "f54", "f55", "f56", "f57", "f58", "f59",
	"f60", "f61", "f62", "f63", "f64", "f65", "f66", "f67", "f68", "f69",
	"f70", "f71", "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
	"f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87", "f88", "f89",
	"f90", "f91", "f92", "f93", "f94", "f95", "f96", "f97", "f98", "f99",
	"f100", "f101", "f102", "f103", "f104", "f105", "f106", "f107", "f108", "f109",
	"f110", "f111", "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
	"f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127"
};
const char*
mono_arch_fregname (int reg)
{
	if (reg < 128)
		return fregs [reg];
	else
		return "unknown";
}
static gboolean
debug_ins_sched (void)
{
#if 0
	return mono_debug_count ();
#else
	return TRUE;
#endif
}

static gboolean
debug_omit_fp (void)
{
#if 0
	return mono_debug_count ();
#else
	return TRUE;
#endif
}
static void
ia64_patch (unsigned char* code, gpointer target);
typedef enum {
	ArgInIReg,
	ArgInFloatReg,
	ArgInFloatRegR4,
	ArgOnStack,
	ArgValuetypeAddrInIReg,
	ArgAggregate,
	ArgSingleHFA,
	ArgDoubleHFA,
	ArgNone
} ArgStorage;

typedef enum {
	AggregateNormal,
	AggregateSingleHFA,
	AggregateDoubleHFA
} AggregateType;

typedef struct {
	gint16 offset;
	gint8 reg;
	ArgStorage storage;

	/* Only if storage == ArgAggregate */
	int nregs, nslots;
	AggregateType atype;
} ArgInfo;
typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 reg_usage;
	guint32 freg_usage;
	gboolean need_stack_align;
	gboolean vtype_retaddr;
	/* The index of the vret arg in the argument list */
	int vret_arg_index;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;

#define DEBUG(a) if (cfg->verbose_level > 1) a

#define PARAM_REGS 8
static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = ArgInIReg;
		ainfo->reg = *gr;
		*(gr) += 1;
	}
}
#define FLOAT_PARAM_REGS 8

static void inline
add_float (guint32 *gr, guint32 *fr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = is_double ? ArgInFloatReg : ArgInFloatRegR4;
		ainfo->reg = 8 + *fr;
		(*fr) += 1;
		(*gr) += 1;
	}
}
static void
add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
	       gboolean is_return,
	       guint32 *gr, guint32 *fr, guint32 *stack_size)
{
	guint32 size, i;
	MonoClass *klass;
	MonoMarshalType *info;
	gboolean is_hfa = TRUE;
	guint32 hfa_type = 0;

	klass = mono_class_from_mono_type (type);
	if (type->type == MONO_TYPE_TYPEDBYREF)
		size = 3 * sizeof (gpointer);
	else if (sig->pinvoke)
		size = mono_type_native_stack_size (&klass->byval_arg, NULL);
	else
		size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);

	if (!sig->pinvoke || (size == 0)) {
		/* Always pass in memory */
		ainfo->offset = *stack_size;
		*stack_size += ALIGN_TO (size, 8);
		ainfo->storage = ArgOnStack;

		return;
	}

	/* Determine whether it is an HFA (Homogeneous Floating Point Aggregate) */
	info = mono_marshal_load_type_info (klass);
	g_assert (info);
	for (i = 0; i < info->num_fields; ++i) {
		guint32 ftype = info->fields [i].field->type->type;
		if (!(info->fields [i].field->type->byref) &&
		    ((ftype == MONO_TYPE_R4) || (ftype == MONO_TYPE_R8))) {
			if (hfa_type == 0)
				hfa_type = ftype;
			else if (hfa_type != ftype)
				is_hfa = FALSE;
		}
		else
			is_hfa = FALSE;
	}
	if (hfa_type == 0)
		is_hfa = FALSE;
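
	/*
	 * Example: struct { float x, y; } is a single precision HFA and
	 * struct { double re, im; } is a double precision HFA, so their fields
	 * travel in consecutive floating point registers. Mixing field types,
	 * as in struct { float x; int y; }, disables the HFA treatment and the
	 * aggregate is handled as AggregateNormal below.
	 */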
	ainfo->storage = ArgAggregate;
	ainfo->atype = AggregateNormal;

	if (is_hfa) {
		ainfo->atype = hfa_type == MONO_TYPE_R4 ? AggregateSingleHFA : AggregateDoubleHFA;
		if (is_return) {
			if (info->num_fields <= 8) {
				ainfo->reg = 8;
				ainfo->nregs = info->num_fields;
				ainfo->nslots = ainfo->nregs;
				return;
			}
			/* Fall through */
		}
		else {
			if ((*fr) + info->num_fields > 8)
				NOT_IMPLEMENTED;

			ainfo->reg = 8 + (*fr);
			ainfo->nregs = info->num_fields;
			ainfo->nslots = ainfo->nregs;
			(*fr) += info->num_fields;
			if (ainfo->atype == AggregateSingleHFA) {
				/*
				 * FIXME: Have to keep track of the parameter slot number, which is
				 * not the same as *gr.
				 */
				(*gr) += ALIGN_TO (info->num_fields, 2) / 2;
			} else {
				(*gr) += info->num_fields;
			}
			return;
		}
	}

	/* This also handles returning of TypedByRef used by some icalls */
	if (is_return) {
		if (size <= 32) {
			ainfo->reg = IA64_R8;
			ainfo->nregs = (size + 7) / 8;
			ainfo->nslots = ainfo->nregs;
			return;
		}
		NOT_IMPLEMENTED;
	}

	ainfo->reg = (*gr);
	ainfo->offset = *stack_size;
	ainfo->nslots = (size + 7) / 8;

	if (((*gr) + ainfo->nslots) <= 8) {
		/* Fits entirely in registers */
		ainfo->nregs = ainfo->nslots;
		(*gr) += ainfo->nregs;
		return;
	}

	ainfo->nregs = 8 - (*gr);
	(*gr) = 8;
	(*stack_size) += (ainfo->nslots - ainfo->nregs) * 8;
}
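
/*
 * Worked example for the non-HFA argument path above: a 20 byte struct needs
 * nslots = (20 + 7) / 8 = 3 slots. With *gr == 6 only 8 - 6 = 2 slots still
 * fit into parameter registers, so nregs = 2 and the remaining
 * (3 - 2) * 8 = 8 bytes are passed in the stack area.
 */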
/*
 * get_call_info:
 *
 * Obtain information about a call according to the calling convention.
 * For IA64, see the "Itanium Software Conventions and Runtime Architecture
 * Guide" document for more information.
 */
static CallInfo*
get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
	guint32 i, gr, fr, pstart;
	MonoType *ret_type;
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	CallInfo *cinfo;
	MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;

	if (mp)
		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	else
		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	gr = 0;
	fr = 0;

	/* return value */
	{
		ret_type = mono_type_get_underlying_type (sig->ret);
		ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
		switch (ret_type->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_STRING:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = IA64_R8;
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = IA64_R8;
			break;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			cinfo->ret.storage = ArgInFloatReg;
			cinfo->ret.reg = 8;
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ret_type)) {
				cinfo->ret.storage = ArgInIReg;
				cinfo->ret.reg = IA64_R8;
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_TYPEDBYREF: {
			guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

			if (sig->ret->byref) {
				/* This seems to happen with ldfld wrappers */
				cinfo->ret.storage = ArgInIReg;
			} else {
				add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
				if (cinfo->ret.storage == ArgOnStack) {
					/* The caller passes the address where the value is stored */
					cinfo->vtype_retaddr = TRUE;
				}
			}
			break;
		}
		case MONO_TYPE_VOID:
			cinfo->ret.storage = ArgNone;
			break;
		default:
			g_error ("Can't handle as return value 0x%x", sig->ret->type);
		}
	}

	pstart = 0;
	/*
	 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
	 * the first argument, allowing 'this' to be always passed in the first arg reg.
	 * Also do this if the first argument is a reference type, since virtual calls
	 * are sometimes made using calli without sig->hasthis set, like in the delegate
	 * invoke wrappers.
	 */
	if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
		if (sig->hasthis) {
			add_general (&gr, &stack_size, cinfo->args + 0);
		} else {
			add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
			pstart = 1;
		}
		add_general (&gr, &stack_size, &cinfo->ret);
		if (cinfo->ret.storage == ArgInIReg)
			cinfo->ret.storage = ArgValuetypeAddrInIReg;
		cinfo->vret_arg_index = 1;
	} else {
		/* this */
		if (sig->hasthis)
			add_general (&gr, &stack_size, cinfo->args + 0);

		if (cinfo->vtype_retaddr) {
			add_general (&gr, &stack_size, &cinfo->ret);
			if (cinfo->ret.storage == ArgInIReg)
				cinfo->ret.storage = ArgValuetypeAddrInIReg;
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	for (i = pstart; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
		MonoType *ptype;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* We always pass the sig cookie on the stack for simplicity */

			/*
			 * Prevent implicit arguments + the sig cookie from being passed
			 * in registers.
			 */
			gr = PARAM_REGS;
			fr = FLOAT_PARAM_REGS;

			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie);
		}

		if (sig->params [i]->byref) {
			add_general (&gr, &stack_size, ainfo);
			continue;
		}
		ptype = mono_type_get_underlying_type (sig->params [i]);
		ptype = mini_get_basic_type_from_generic (gsctx, ptype);
		switch (ptype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ptype)) {
				add_general (&gr, &stack_size, ainfo);
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_TYPEDBYREF:
			/* FIXME: */
			/* We always pass valuetypes on the stack */
			add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_R4:
			add_float (&gr, &fr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_R8:
			add_float (&gr, &fr, &stack_size, ainfo, TRUE);
			break;
		default:
			g_assert_not_reached ();
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	cinfo->stack_usage = stack_size;
	cinfo->reg_usage = gr;
	cinfo->freg_usage = fr;
	return cinfo;
}
/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k;
	CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
	guint32 args_size = cinfo->stack_usage;

	/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
	if (csig->hasthis) {
		arg_info [0].offset = 0;
	}

	for (k = 0; k < param_count; k++) {
		arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
		/* FIXME: */
		arg_info [k + 1].size = 0;
	}

	g_free (cinfo);

	return args_size;
}
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
}

/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
}

/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}
/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
	*exclude_mask = 0;

	return 0;
}

/*
 * This function tests for all SIMD functions supported.
 *
 * Returns a bitmask corresponding to all supported versions.
 */
guint32
mono_arch_cpu_enumerate_simd_versions (void)
{
	/* SIMD is currently unimplemented */
	return 0;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoInst *ins = cfg->args [i];

		ArgInfo *ainfo = &cinfo->args [i];

		if (ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT))
			continue;

		if (ainfo->storage == ArgInIReg) {
			/* The input registers are non-volatile */
			ins->opcode = OP_REGVAR;
			ins->dreg = 32 + ainfo->reg;
		}
	}

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
		    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = g_list_prepend (vars, vmv);
		}
	}

	vars = mono_varlist_sort (cfg, vars, 0);

	return vars;
}
static void
mono_ia64_alloc_stacked_registers (MonoCompile *cfg)
{
	CallInfo *cinfo;
	guint32 reserved_regs;
	MonoMethodHeader *header;

	if (cfg->arch.reg_local0 > 0)
		/* Already done */
		return;

	cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (cfg->method), FALSE);

	header = cfg->header;

	/* Some registers are reserved for use by the prolog/epilog */
	reserved_regs = header->num_clauses ? 4 : 3;

	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
	    (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
		/* One register is needed by instrument_epilog to save the return value */
		reserved_regs ++;
		if (cinfo->reg_usage < 2)
			/* Number of arguments passed to function call in instrument_prolog */
			cinfo->reg_usage = 2;
	}

	cfg->arch.reg_in0 = 32;
	cfg->arch.reg_local0 = cfg->arch.reg_in0 + cinfo->reg_usage + reserved_regs;
	cfg->arch.reg_out0 = cfg->arch.reg_local0 + 16;

	cfg->arch.reg_saved_ar_pfs = cfg->arch.reg_local0 - 1;
	cfg->arch.reg_saved_b0 = cfg->arch.reg_local0 - 2;
	cfg->arch.reg_fp = cfg->arch.reg_local0 - 3;

	/*
	 * Frames without handlers save sp to fp, frames with handlers save it into
	 * a dedicated register.
	 */
	if (header->num_clauses)
		cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 4;
	else
		cfg->arch.reg_saved_sp = cfg->arch.reg_fp;

	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
	    (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
		cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - reserved_regs;
	}

	/*
	 * Need to allocate at least 2 out registers for use by OP_THROW / the system
	 * exception throwing code.
	 */
	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, 2);
}
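
/*
 * Example layout derived from the assignments above: with reg_usage == 2 and
 * reserved_regs == 3 (no clauses, no tracing), reg_in0 = r32 (inputs r32-r33),
 * reg_fp = r34, reg_saved_b0 = r35, reg_saved_ar_pfs = r36, reg_local0 = r37
 * (allocator locals r37-r52) and reg_out0 = r53.
 */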
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;
	int i;

	mono_ia64_alloc_stacked_registers (cfg);

	for (i = cfg->arch.reg_local0; i < cfg->arch.reg_out0; ++i) {
		/* FIXME: regmask */
		g_assert (i < 64);
		regs = g_list_prepend (regs, (gpointer)(gssize)(i));
	}

	return regs;
}
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	/* FIXME: Increase costs linearly to avoid using all local registers */

	return 0;
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *inst;
	int i, offset;
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/*
	 * Determine whether the frame pointer can be eliminated.
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;

	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;

	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == ArgOnStack) {
			/*
			 * The stack offset can only be determined when the frame
			 * size is known.
			 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	mono_ia64_alloc_stacked_registers (cfg);

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are never passed or returned in registers.
	 */

	if (cfg->arch.omit_fp) {
		cfg->flags |= MONO_CFG_HAS_SPILLUP;
		cfg->frame_reg = IA64_SP;
		offset = ARGS_OFFSET;
	}
	else {
		/* Locals are allocated backwards from %fp */
		cfg->frame_reg = cfg->arch.reg_fp;
		offset = 0;
	}

	if (cfg->method->save_lmf) {
		/* No LMF on IA64 */
	}

	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgInFloatReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgValuetypeAddrInIReg:
			cfg->vret_addr->opcode = OP_REGVAR;
			cfg->vret_addr->dreg = cfg->arch.reg_in0 + cinfo->ret.reg;
			break;
		case ArgAggregate:
			/* Allocate a local to hold the result, the epilog will copy it to the correct place */
			if (cfg->arch.omit_fp)
				g_assert_not_reached ();
			offset = ALIGN_TO (offset, 8);
			offset += cinfo->ret.nslots * 8;
			cfg->ret->opcode = OP_REGOFFSET;
			cfg->ret->inst_basereg = cfg->frame_reg;
			cfg->ret->inst_offset = - offset;
			break;
		default:
			g_assert_not_reached ();
		}
		cfg->ret->dreg = cfg->ret->inst_c0;
	}

	/* Allocate locals */
	offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
	if (locals_stack_align) {
		offset = ALIGN_TO (offset, locals_stack_align);
	}
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		if (offsets [i] != -1) {
			MonoInst *inst = cfg->varinfo [i];
			inst->opcode = OP_REGOFFSET;
			inst->inst_basereg = cfg->frame_reg;
			if (cfg->arch.omit_fp)
				inst->inst_offset = (offset + offsets [i]);
			else
				inst->inst_offset = - (offset + offsets [i]);
			// printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
		}
	}
	offset += locals_stack_size;

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
		if (cfg->arch.omit_fp)
			g_assert_not_reached ();
		g_assert (cinfo->sig_cookie.storage == ArgOnStack);
		cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = cfg->args [i];
		if (inst->opcode != OP_REGVAR) {
			ArgInfo *ainfo = &cinfo->args [i];
			gboolean inreg = TRUE;
			MonoType *arg_type;

			if (sig->hasthis && (i == 0))
				arg_type = &mono_defaults.object_class->byval_arg;
			else
				arg_type = sig->params [i - sig->hasthis];

			/* FIXME: VOLATILE is only set if the liveness pass runs */
			if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
				inreg = FALSE;

			inst->opcode = OP_REGOFFSET;

			switch (ainfo->storage) {
			case ArgInIReg:
				inst->opcode = OP_REGVAR;
				inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
				break;
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				/*
				 * Since float regs are volatile, we save the arguments to
				 * the stack in the prolog.
				 */
				inreg = FALSE;
				break;
			case ArgOnStack:
				if (cfg->arch.omit_fp)
					g_assert_not_reached ();
				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = cfg->frame_reg;
				inst->inst_offset = ARGS_OFFSET + ainfo->offset;
				break;
			case ArgAggregate:
				inreg = FALSE;
				break;
			default:
				NOT_IMPLEMENTED;
			}

			if (!inreg && (ainfo->storage != ArgOnStack)) {
				guint32 size = 0;

				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = cfg->frame_reg;
				/* These arguments are saved to the stack in the prolog */
				switch (ainfo->storage) {
				case ArgAggregate:
					if (ainfo->atype == AggregateSingleHFA)
						size = ainfo->nslots * 4;
					else
						size = ainfo->nslots * 8;
					break;
				default:
					size = sizeof (gpointer);
					break;
				}

				offset = ALIGN_TO (offset, sizeof (gpointer));

				if (cfg->arch.omit_fp) {
					inst->inst_offset = offset;
					offset += size;
				} else {
					offset += size;
					inst->inst_offset = - offset;
				}
			}
		}
	}

	/*
	 * FIXME: This doesn't work because some variables are allocated during local
	 * regalloc.
	 */
	/*
	if (cfg->arch.omit_fp && offset == 16)
		offset = 0;
	*/

	cfg->stack_offset = offset;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	if (cinfo->ret.storage == ArgAggregate)
		cfg->ret_var_is_local = TRUE;
	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}
}
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
	MonoInst *arg;

	MONO_INST_NEW (cfg, arg, OP_NOP);
	arg->sreg1 = tree->dreg;

	switch (storage) {
	case ArgInIReg:
		arg->opcode = OP_MOVE;
		arg->dreg = mono_alloc_ireg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
		break;
	case ArgInFloatReg:
		arg->opcode = OP_FMOVE;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	case ArgInFloatRegR4:
		arg->opcode = OP_FCONV_TO_R4;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}

	MONO_ADD_INS (cfg->cbb, arg);
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;

	/* Emit the signature cookie just before the implicit arguments */
	MonoInst *sig_arg;
	/* FIXME: Add support for signature tokens to AOT */
	cfg->disable_aot = TRUE;

	g_assert (cinfo->sig_cookie.storage == ArgOnStack);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
	sig_arg->dreg = mono_alloc_ireg (cfg);
	sig_arg->inst_p0 = tmp_sig;
	MONO_ADD_INS (cfg->cbb, sig_arg);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
}
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in;
	MonoMethodSignature *sig;
	int i, n, stack_size;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	stack_size = 0;

	mono_ia64_alloc_stacked_registers (cfg);

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);

	if (cinfo->ret.storage == ArgAggregate) {
		MonoInst *vtarg;
		MonoInst *local;

		/*
		 * The valuetype is in registers after the call, and needs to be copied
		 * to the stack. Save the address to a local here, so the call
		 * instruction can access it.
		 */
		local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		local->flags |= MONO_INST_VOLATILE;
		cfg->arch.ret_var_addr_local = local;

		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
		vtarg->sreg1 = call->vret_var->dreg;
		vtarg->dreg = local->dreg;
		MONO_ADD_INS (cfg->cbb, vtarg);
	}

	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		add_outarg_reg (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
	}

	for (i = 0; i < n; ++i) {
		MonoType *arg_type;

		ainfo = cinfo->args + i;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
		}

		in = call->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
			guint32 align;
			guint32 size;

			if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
				size = sizeof (MonoTypedRef);
				align = sizeof (gpointer);
			}
			else if (sig->pinvoke)
				size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
			else {
				/*
				 * Other backends use mono_type_stack_size (), but that
				 * aligns the size to 8, which is larger than the size of
				 * the source, leading to reads of invalid memory if the
				 * source is at the end of address space.
				 */
				size = mono_class_value_size (in->klass, &align);
			}

			if (size > 0) {
				MonoInst *arg;

				MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
				arg->sreg1 = in->dreg;
				arg->klass = in->klass;
				arg->backend.size = size;
				arg->inst_p0 = call;
				arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
				memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));

				MONO_ADD_INS (cfg->cbb, arg);
			}
		}
		else {
			switch (ainfo->storage) {
			case ArgInIReg:
				add_outarg_reg (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
				break;
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
				break;
			case ArgOnStack:
				if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				else
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				break;
			default:
				g_assert_not_reached ();
			}
		}
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
		emit_sig_cookie (cfg, call, cinfo);
	}

	call->stack_usage = cinfo->stack_usage;
	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int size = ins->backend.size;

	if (ainfo->storage == ArgAggregate) {
		MonoInst *load, *store;
		int i, slot;

		/*
		 * Part of the structure is passed in registers.
		 */
		for (i = 0; i < ainfo->nregs; ++i) {
			slot = ainfo->reg + i;

			if (ainfo->atype == AggregateSingleHFA) {
				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 4;
				load->dreg = mono_alloc_freg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
			} else if (ainfo->atype == AggregateDoubleHFA) {
				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 8;
				load->dreg = mono_alloc_freg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
			} else {
				MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 8;
				load->dreg = mono_alloc_ireg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
			}
			MONO_ADD_INS (cfg->cbb, load);
		}

		/*
		 * Part of the structure is passed on the stack.
		 */
		for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
			slot = ainfo->reg + i;

			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * sizeof (gpointer);
			load->dreg = mono_alloc_preg (cfg);
			MONO_ADD_INS (cfg->cbb, load);

			MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
			store->sreg1 = load->dreg;
			store->inst_destbasereg = IA64_SP;
			store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
			MONO_ADD_INS (cfg->cbb, store);
		}
	} else {
		mini_emit_memcpy (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 4);
	}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);

	switch (cinfo->ret.storage) {
	case ArgInIReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFloatReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
	}
}
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;
	ins = bb->code;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_MOVE:
		case OP_FMOVE:
			/*
			 * Removes:
			 *
			 * OP_MOVE reg, reg
			 */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			/*
			 * Removes:
			 *
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			 */
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* remove unnecessary multiplication with 1 */
			if (ins->inst_imm == 1) {
				if (ins->dreg != ins->sreg1) {
					ins->opcode = OP_MOVE;
				} else {
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			}
			break;
		}
		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;
}
int cond_to_ia64_cmp [][3] = {
	{OP_IA64_CMP_EQ, OP_IA64_CMP4_EQ, OP_IA64_FCMP_EQ},
	{OP_IA64_CMP_NE, OP_IA64_CMP4_NE, OP_IA64_FCMP_NE},
	{OP_IA64_CMP_LE, OP_IA64_CMP4_LE, OP_IA64_FCMP_LE},
	{OP_IA64_CMP_GE, OP_IA64_CMP4_GE, OP_IA64_FCMP_GE},
	{OP_IA64_CMP_LT, OP_IA64_CMP4_LT, OP_IA64_FCMP_LT},
	{OP_IA64_CMP_GT, OP_IA64_CMP4_GT, OP_IA64_FCMP_GT},
	{OP_IA64_CMP_LE_UN, OP_IA64_CMP4_LE_UN, OP_IA64_FCMP_LE_UN},
	{OP_IA64_CMP_GE_UN, OP_IA64_CMP4_GE_UN, OP_IA64_FCMP_GE_UN},
	{OP_IA64_CMP_LT_UN, OP_IA64_CMP4_LT_UN, OP_IA64_FCMP_LT_UN},
	{OP_IA64_CMP_GT_UN, OP_IA64_CMP4_GT_UN, OP_IA64_FCMP_GT_UN}
};

static int
opcode_to_ia64_cmp (int opcode, int cmp_opcode)
{
	return cond_to_ia64_cmp [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
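
/*
 * Sketch of a lookup (assuming the usual mono CompRelation/CompType
 * ordering): for an OP_IBEQ branch following an OP_ICOMPARE,
 * mono_opcode_to_cond () yields CMP_EQ and mono_opcode_to_type () selects
 * the 32 bit column, so the table returns OP_IA64_CMP4_EQ.
 */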
int cond_to_ia64_cmp_imm [][3] = {
	{OP_IA64_CMP_EQ_IMM, OP_IA64_CMP4_EQ_IMM, 0},
	{OP_IA64_CMP_NE_IMM, OP_IA64_CMP4_NE_IMM, 0},
	{OP_IA64_CMP_GE_IMM, OP_IA64_CMP4_GE_IMM, 0},
	{OP_IA64_CMP_LE_IMM, OP_IA64_CMP4_LE_IMM, 0},
	{OP_IA64_CMP_GT_IMM, OP_IA64_CMP4_GT_IMM, 0},
	{OP_IA64_CMP_LT_IMM, OP_IA64_CMP4_LT_IMM, 0},
	{OP_IA64_CMP_GE_UN_IMM, OP_IA64_CMP4_GE_UN_IMM, 0},
	{OP_IA64_CMP_LE_UN_IMM, OP_IA64_CMP4_LE_UN_IMM, 0},
	{OP_IA64_CMP_GT_UN_IMM, OP_IA64_CMP4_GT_UN_IMM, 0},
	{OP_IA64_CMP_LT_UN_IMM, OP_IA64_CMP4_LT_UN_IMM, 0},
};

static int
opcode_to_ia64_cmp_imm (int opcode, int cmp_opcode)
{
	/* The condition needs to be reversed */
	return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
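
/*
 * The reversal mentioned above is baked into the table: the IA64
 * compare-immediate forms take the immediate as their first operand, so a
 * source-level "a <= imm" is emitted as "imm >= a". Hence the CMP_LE row
 * holds the GE immediate opcodes (and vice versa), while EQ/NE, being
 * symmetric, keep their own rows.
 */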
#define NEW_INS(cfg,dest,op) do {	\
	(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));	\
	(dest)->opcode = (op);	\
	mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
	last_ins = (dest); \
} while (0)
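
/*
 * NEW_INS inserts the new instruction after last_ins, i.e. just in front of
 * the instruction currently being lowered, and makes it the new last_ins, so
 * several helper instructions can be chained before the original one.
 */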
/*
 * mono_arch_lowering_pass:
 *
 * Converts complex opcodes into simpler ones so that each IR instruction
 * corresponds to one machine instruction.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;
	ins = bb->code;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
		case OP_STOREI8_MEMBASE_IMM:
		case OP_STORE_MEMBASE_IMM:
			/* There are no store_membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				temp2 = NULL;
			} else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);

				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			switch (ins->opcode) {
			case OP_STOREI1_MEMBASE_IMM:
				ins->opcode = OP_STOREI1_MEMBASE_REG;
				break;
			case OP_STOREI2_MEMBASE_IMM:
				ins->opcode = OP_STOREI2_MEMBASE_REG;
				break;
			case OP_STOREI4_MEMBASE_IMM:
				ins->opcode = OP_STOREI4_MEMBASE_REG;
				break;
			case OP_STOREI8_MEMBASE_IMM:
			case OP_STORE_MEMBASE_IMM:
				ins->opcode = OP_STOREI8_MEMBASE_REG;
				break;
			default:
				g_assert_not_reached ();
			}

			if (ins->inst_imm == 0)
				ins->sreg1 = IA64_R0;
			else {
				NEW_INS (cfg, temp3, OP_I8CONST);
				temp3->inst_c0 = ins->inst_imm;
				temp3->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = temp3->dreg;
			}

			ins->inst_offset = 0;
			if (temp2)
				ins->inst_destbasereg = temp2->dreg;
			break;
		case OP_STOREI1_MEMBASE_REG:
		case OP_STOREI2_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI8_MEMBASE_REG:
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
		case OP_STORE_MEMBASE_REG:
			/* There are no store_membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				break;
			}
			else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			ins->inst_offset = 0;
			ins->inst_destbasereg = temp2->dreg;
			break;
		case OP_LOADI1_MEMBASE:
		case OP_LOADU1_MEMBASE:
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADI8_MEMBASE:
		case OP_LOAD_MEMBASE:
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
		case OP_ATOMIC_EXCHANGE_I4:
		case OP_ATOMIC_EXCHANGE_I8:
		case OP_ATOMIC_ADD_NEW_I4:
		case OP_ATOMIC_ADD_NEW_I8:
		case OP_ATOMIC_ADD_IMM_NEW_I4:
		case OP_ATOMIC_ADD_IMM_NEW_I8:
			/* There are no membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				break;
			}
			else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_basereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_basereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			ins->inst_offset = 0;
			ins->inst_basereg = temp2->dreg;
			break;
		case OP_ADD_IMM:
		case OP_IADD_IMM:
		case OP_LADD_IMM:
		case OP_ISUB_IMM:
		case OP_LSUB_IMM:
		case OP_AND_IMM:
		case OP_IAND_IMM:
		case OP_LAND_IMM:
		case OP_IOR_IMM:
		case OP_LOR_IMM:
		case OP_IXOR_IMM:
		case OP_LXOR_IMM:
		case OP_SHL_IMM:
		case OP_SHR_IMM:
		case OP_ISHL_IMM:
		case OP_LSHL_IMM:
		case OP_ISHR_IMM:
		case OP_LSHR_IMM:
		case OP_ISHR_UN_IMM:
		case OP_LSHR_UN_IMM: {
			gboolean is_imm = FALSE;
			gboolean switched = FALSE;

			if (ins->opcode == OP_AND_IMM && ins->inst_imm == 255) {
				ins->opcode = OP_ZEXT_I1;
				break;
			}

			switch (ins->opcode) {
			case OP_ADD_IMM:
			case OP_IADD_IMM:
			case OP_LADD_IMM:
				is_imm = ia64_is_imm14 (ins->inst_imm);
				switched = TRUE;
				break;
			case OP_ISUB_IMM:
			case OP_LSUB_IMM:
				is_imm = ia64_is_imm14 (- (ins->inst_imm));
				if (is_imm) {
					/* A = B - IMM -> A = B + (-IMM) */
					ins->inst_imm = - ins->inst_imm;
					ins->opcode = OP_IADD_IMM;
				}
				switched = TRUE;
				break;
			case OP_IAND_IMM:
			case OP_IOR_IMM:
			case OP_IXOR_IMM:
			case OP_AND_IMM:
			case OP_LAND_IMM:
			case OP_LOR_IMM:
			case OP_LXOR_IMM:
				is_imm = ia64_is_imm8 (ins->inst_imm);
				switched = TRUE;
				break;
			case OP_SHL_IMM:
			case OP_SHR_IMM:
			case OP_ISHL_IMM:
			case OP_LSHL_IMM:
			case OP_ISHR_IMM:
			case OP_LSHR_IMM:
			case OP_ISHR_UN_IMM:
			case OP_LSHR_UN_IMM:
				is_imm = (ins->inst_imm >= 0) && (ins->inst_imm < 64);
				break;
			default:
				break;
			}

			if (is_imm) {
				if (switched)
					ins->sreg2 = ins->sreg1;
				break;
			}

			ins->opcode = mono_op_imm_to_op (ins->opcode);

			if (ins->inst_imm == 0)
				ins->sreg2 = IA64_R0;
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
			}
			break;
		}
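
		/*
		 * Example of the fallback path above: "OP_ADD_IMM dreg <- sreg1 + 100000"
		 * does not fit into imm14, so it is rewritten (via mono_op_imm_to_op)
		 * into "OP_I8CONST temp <- 100000" followed by the three operand
		 * "OP_LADD dreg <- sreg1 + temp".
		 */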
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		case OP_LCOMPARE_IMM: {
			/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
			gboolean imm;
			CompRelation cond;

			next = ins->next;

			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				NULLIFY_INS (ins);
				break;
			}

			/*
			 * The compare_imm instructions have switched up arguments, and
			 * some of them take an imm between -127 and 128.
			 */
			next = ins->next;
			cond = mono_opcode_to_cond (next->opcode);
			if ((cond == CMP_LT) || (cond == CMP_GE))
				imm = ia64_is_imm8 (ins->inst_imm - 1);
			else if ((cond == CMP_LT_UN) || (cond == CMP_GE_UN))
				imm = ia64_is_imm8 (ins->inst_imm - 1) && (ins->inst_imm > 0);
			else
				imm = ia64_is_imm8 (ins->inst_imm);

			if (imm) {
				ins->opcode = opcode_to_ia64_cmp_imm (next->opcode, ins->opcode);
				ins->sreg2 = ins->sreg1;
			}
			else {
				ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);

				if (ins->inst_imm == 0)
					ins->sreg2 = IA64_R0;
				else {
					NEW_INS (cfg, temp, OP_I8CONST);
					temp->inst_c0 = ins->inst_imm;
					temp->dreg = mono_alloc_ireg (cfg);
					ins->sreg2 = temp->dreg;
				}
			}

			if (MONO_IS_COND_BRANCH_OP (next)) {
				next->opcode = OP_IA64_BR_COND;
				next->inst_target_bb = next->inst_true_bb;
			} else if (MONO_IS_COND_EXC (next)) {
				next->opcode = OP_IA64_COND_EXC;
			} else if (MONO_IS_SETCC (next)) {
				next->opcode = OP_IA64_CSET;
			} else {
				printf ("%s\n", mono_inst_name (next->opcode));
				NOT_IMPLEMENTED;
			}

			break;
		}
		case OP_COMPARE:
		case OP_ICOMPARE:
		case OP_LCOMPARE:
		case OP_FCOMPARE: {
			/* Instead of compare+b<cond>, ia64 has compare<cond>+br */

			next = ins->next;

			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				NULLIFY_INS (ins);
				break;
			}

			ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);

			if (MONO_IS_COND_BRANCH_OP (next)) {
				next->opcode = OP_IA64_BR_COND;
				next->inst_target_bb = next->inst_true_bb;
			} else if (MONO_IS_COND_EXC (next)) {
				next->opcode = OP_IA64_COND_EXC;
			} else if (MONO_IS_SETCC (next)) {
				next->opcode = OP_IA64_CSET;
			} else {
				printf ("%s\n", mono_inst_name (next->opcode));
				NOT_IMPLEMENTED;
			}

			break;
		}
		case OP_FCEQ:
		case OP_FCGT:
		case OP_FCGT_UN:
		case OP_FCLT:
		case OP_FCLT_UN:
			/* The front end removes the fcompare, so introduce it again */
			NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = ins->sreg2;

			ins->opcode = OP_IA64_CSET;
			MONO_INST_NULLIFY_SREGS (ins);
			break;
		case OP_MUL_IMM:
		case OP_LMUL_IMM:
		case OP_IMUL_IMM: {
			int i, sum_reg;
			gboolean found = FALSE;
			int shl_op = ins->opcode == OP_IMUL_IMM ? OP_ISHL_IMM : OP_SHL_IMM;

			/* First the easy cases */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			for (i = 1; i < 64; ++i)
				if (ins->inst_imm == (((gint64)1) << i)) {
					ins->opcode = shl_op;
					ins->inst_imm = i;
					found = TRUE;
					break;
				}

			/* This could be optimized */
			if (!found) {
				sum_reg = 0;
				for (i = 0; i < 64; ++i) {
					if (ins->inst_imm & (((gint64)1) << i)) {
						NEW_INS (cfg, temp, shl_op);
						temp->dreg = mono_alloc_ireg (cfg);
						temp->sreg1 = ins->sreg1;
						temp->inst_imm = i;

						if (sum_reg == 0)
							sum_reg = temp->dreg;
						else {
							NEW_INS (cfg, temp2, OP_LADD);
							temp2->dreg = mono_alloc_ireg (cfg);
							temp2->sreg1 = sum_reg;
							temp2->sreg2 = temp->dreg;
							sum_reg = temp2->dreg;
						}
					}
				}
				ins->opcode = OP_MOVE;
				ins->sreg1 = sum_reg;
			}
			break;
		}
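
		/*
		 * Example of the shift/add decomposition above: inst_imm == 10
		 * (binary 1010) has bits 1 and 3 set, so the multiplication becomes
		 * (sreg1 << 1) + (sreg1 << 3): two OP_SHL_IMM instructions combined
		 * by an OP_LADD, with the final OP_MOVE copying the sum.
		 */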
		case OP_LCONV_TO_OVF_U4:
			NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = IA64_R0;

			NEW_INS (cfg, temp, OP_IA64_COND_EXC);
			temp->inst_p1 = (char*)"OverflowException";

			ins->opcode = OP_MOVE;
			break;
		case OP_LCONV_TO_OVF_I4_UN:
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = 0x7fffffff;
			temp->dreg = mono_alloc_ireg (cfg);

			NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
			temp2->sreg1 = ins->sreg1;
			temp2->sreg2 = temp->dreg;

			NEW_INS (cfg, temp, OP_IA64_COND_EXC);
			temp->inst_p1 = (char*)"OverflowException";

			ins->opcode = OP_MOVE;
			break;
		case OP_FCONV_TO_I4:
		case OP_FCONV_TO_I2:
		case OP_FCONV_TO_U2:
		case OP_FCONV_TO_I1:
		case OP_FCONV_TO_U1:
			NEW_INS (cfg, temp, OP_FCONV_TO_I8);
			temp->sreg1 = ins->sreg1;
			temp->dreg = ins->dreg;

			switch (ins->opcode) {
			case OP_FCONV_TO_I4:
				ins->opcode = OP_SEXT_I4;
				break;
			case OP_FCONV_TO_I2:
				ins->opcode = OP_SEXT_I2;
				break;
			case OP_FCONV_TO_U2:
				ins->opcode = OP_ZEXT_I4;
				break;
			case OP_FCONV_TO_I1:
				ins->opcode = OP_SEXT_I1;
				break;
			case OP_FCONV_TO_U1:
				ins->opcode = OP_ZEXT_I1;
				break;
			default:
				g_assert_not_reached ();
			}
			ins->sreg1 = ins->dreg;
			break;
		default:
			break;
		}
		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;

	bb->max_vreg = cfg->next_vreg;
}
/*
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
 */
static Ia64CodegenState
emit_load_volatile_arguments (MonoCompile *cfg, Ia64CodegenState code)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *ins;
	CallInfo *cinfo;
	guint32 i;

	/* FIXME: Generate intermediate code instead */

	sig = mono_method_signature (method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/* This is the opposite of the code in emit_prolog */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		gint32 stack_offset;
		MonoType *arg_type;

		ins = cfg->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		arg_type = mono_type_get_underlying_type (arg_type);

		stack_offset = ainfo->offset + ARGS_OFFSET;

		/* Load volatile arguments back from the stack */
		if (ins->opcode != OP_REGVAR) {
			switch (ainfo->storage) {
			case ArgInIReg:
			case ArgInFloatReg:
				/* FIXME: big offsets */
				g_assert (ins->opcode == OP_REGOFFSET);
				ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
				if (arg_type->byref)
					ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
				else {
					switch (arg_type->type) {
					case MONO_TYPE_R4:
						ia64_ldfs (code, ainfo->reg, GP_SCRATCH_REG);
						break;
					case MONO_TYPE_R8:
						ia64_ldfd (code, ainfo->reg, GP_SCRATCH_REG);
						break;
					default:
						ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
						break;
					}
				}
				break;
			case ArgOnStack:
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}

		if (ins->opcode == OP_REGVAR) {
			/* Argument allocated to (non-volatile) register */
			switch (ainfo->storage) {
			case ArgInIReg:
				if (ins->dreg != cfg->arch.reg_in0 + ainfo->reg)
					ia64_mov (code, cfg->arch.reg_in0 + ainfo->reg, ins->dreg);
				break;
			case ArgOnStack:
				ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
				ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}
	}

	return code;
}
static Ia64CodegenState
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, Ia64CodegenState code)
{
	CallInfo *cinfo;
	int i;

	/* Move return value to the target register */
	switch (ins->opcode) {
	case OP_VOIDCALL:
	case OP_VOIDCALL_REG:
	case OP_VOIDCALL_MEMBASE:
		break;
	case OP_CALL:
	case OP_CALL_REG:
	case OP_CALL_MEMBASE:
	case OP_LCALL:
	case OP_LCALL_REG:
	case OP_LCALL_MEMBASE:
		g_assert (ins->dreg == IA64_R8);
		break;
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
		g_assert (ins->dreg == 8);
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
			ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
		break;
	case OP_VCALL:
	case OP_VCALL_REG:
	case OP_VCALL_MEMBASE:
	case OP_VCALL2:
	case OP_VCALL2_REG:
	case OP_VCALL2_MEMBASE: {
		ArgStorage storage;

		cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
		storage = cinfo->ret.storage;

		if (storage == ArgAggregate) {
			MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;

			/* Load address of stack space allocated for the return value */
			ia64_movl (code, GP_SCRATCH_REG, local->inst_offset);
			ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, local->inst_basereg);
			ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);

			for (i = 0; i < cinfo->ret.nregs; ++i) {
				switch (cinfo->ret.atype) {
				case AggregateNormal:
					ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
					break;
				case AggregateSingleHFA:
					ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 4, 0);
					break;
				case AggregateDoubleHFA:
					ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
					break;
				default:
					g_assert_not_reached ();
				}
			}
		}
		break;
	}
	default:
		g_assert_not_reached ();
	}

	return code;
}
#define add_patch_info(cfg,code,patch_type,data) do { \
	mono_add_patch_info (cfg, code.buf + code.nins - cfg->native_code, patch_type, data); \
} while (0)

#define emit_cond_system_exception(cfg,code,exc_name,predicate) do { \
	MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
	if (tins == NULL) \
		add_patch_info (cfg, code, MONO_PATCH_INFO_EXC, exc_name); \
	else \
		add_patch_info (cfg, code, MONO_PATCH_INFO_BB, tins->inst_true_bb); \
	ia64_br_cond_pred (code, (predicate), 0); \
} while (0)
static Ia64CodegenState
emit_call (MonoCompile *cfg, Ia64CodegenState code, guint32 patch_type, gconstpointer data)
{
	add_patch_info (cfg, code, patch_type, data);

	if ((patch_type == MONO_PATCH_INFO_ABS) || (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD)) {
		/* Indirect call */
		/* mono_arch_patch_callsite will patch this */
		/* mono_arch_nullify_class_init_trampoline will patch this */
		ia64_movl (code, GP_SCRATCH_REG, 0);
		ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
		ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
		ia64_br_call_reg (code, IA64_B0, IA64_B6);
	}
	else {
		/* Can't use a direct call since the displacement might be too small */
		/* mono_arch_patch_callsite will patch this */
		ia64_movl (code, GP_SCRATCH_REG, 0);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
		ia64_br_call_reg (code, IA64_B0, IA64_B6);
	}

	return code;
}

#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
2017 void
2018 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2020 MonoInst *ins;
2021 MonoCallInst *call;
2022 guint offset;
2023 Ia64CodegenState code;
2024 guint8 *code_start = cfg->native_code + cfg->code_len;
2025 MonoInst *last_ins = NULL;
2026 guint last_offset = 0;
2027 int max_len, cpos;
2029 if (cfg->opt & MONO_OPT_LOOP) {
2030 /* FIXME: */
2033 if (cfg->verbose_level > 2)
2034 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2036 cpos = bb->max_offset;
2038 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2039 NOT_IMPLEMENTED;
2042 offset = code_start - cfg->native_code;
2044 ia64_codegen_init (code, code_start);
2046 #if 0
2047 if (strstr (cfg->method->name, "conv_ovf_i1") && (bb->block_num == 2))
2048 break_count ();
2049 #endif
2051 MONO_BB_FOR_EACH_INS (bb, ins) {
2052 offset = code.buf - cfg->native_code;
2054 max_len = ((int)(((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN])) + 128;
2056 while (offset + max_len + 16 > cfg->code_size) {
2057 ia64_codegen_close (code);
2059 offset = code.buf - cfg->native_code;
2061 cfg->code_size *= 2;
2062 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2063 code_start = cfg->native_code + offset;
2064 cfg->stat_code_reallocs++;
2066 ia64_codegen_init (code, code_start);
2069 mono_debug_record_line_number (cfg, ins, offset);
2071 switch (ins->opcode) {
2072 case OP_ICONST:
2073 case OP_I8CONST:
2074 if (ia64_is_imm14 (ins->inst_c0))
2075 ia64_adds_imm (code, ins->dreg, ins->inst_c0, IA64_R0);
2076 else
2077 ia64_movl (code, ins->dreg, ins->inst_c0);
2078 break;
2079 case OP_JUMP_TABLE:
2080 add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2081 ia64_movl (code, ins->dreg, 0);
2082 break;
2083 case OP_MOVE:
2084 ia64_mov (code, ins->dreg, ins->sreg1);
2085 break;
2086 case OP_BR:
2087 case OP_IA64_BR_COND: {
2088 int pred = 0;
2089 if (ins->opcode == OP_IA64_BR_COND)
2090 pred = 6;
2091 if (ins->inst_target_bb->native_offset) {
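/* Backward branch: the target block is already emitted, so the displacement can be patched in immediately */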
2092 guint8 *pos = code.buf + code.nins;
2094 ia64_br_cond_pred (code, pred, 0);
2095 ia64_begin_bundle (code);
2096 ia64_patch (pos, cfg->native_code + ins->inst_target_bb->native_offset);
2097 } else {
2098 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2099 ia64_br_cond_pred (code, pred, 0);
2101 break;
2103 case OP_LABEL:
2104 ia64_begin_bundle (code);
2105 ins->inst_c0 = code.buf - cfg->native_code;
2106 break;
2107 case OP_NOP:
2108 case OP_RELAXED_NOP:
2109 case OP_DUMMY_USE:
2110 case OP_DUMMY_STORE:
2111 case OP_NOT_REACHED:
2112 case OP_NOT_NULL:
2113 break;
2114 case OP_BR_REG:
2115 ia64_mov_to_br (code, IA64_B6, ins->sreg1);
2116 ia64_br_cond_reg (code, IA64_B6);
2117 break;
2118 case OP_IADD:
2119 case OP_LADD:
2120 ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
2121 break;
2122 case OP_ISUB:
2123 case OP_LSUB:
2124 ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
2125 break;
2126 case OP_IAND:
2127 case OP_LAND:
2128 ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
2129 break;
2130 case OP_IOR:
2131 case OP_LOR:
2132 ia64_or (code, ins->dreg, ins->sreg1, ins->sreg2);
2133 break;
2134 case OP_IXOR:
2135 case OP_LXOR:
2136 ia64_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
2137 break;
2138 case OP_INEG:
2139 case OP_LNEG:
2140 ia64_sub (code, ins->dreg, IA64_R0, ins->sreg1);
2141 break;
2142 case OP_INOT:
2143 case OP_LNOT:
2144 ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
2145 break;
2146 case OP_ISHL:
2147 case OP_LSHL:
2148 ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
2149 break;
2150 case OP_ISHR:
2151 ia64_sxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2152 ia64_shr (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2153 break;
2154 case OP_LSHR:
2155 ia64_shr (code, ins->dreg, ins->sreg1, ins->sreg2);
2156 break;
2157 case OP_ISHR_UN:
2158 ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2159 ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2160 break;
2161 case OP_LSHR_UN:
2162 ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
2163 break;
2164 case OP_IADDCC:
2165 /* p6 and p7 are set if there is signed/unsigned overflow */
2167 /* Set p8-p9 == (sreg2 > 0) */
2168 ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
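/* IA64 compares write a complementary predicate pair: p8 = (0 < sreg2), p9 = !p8 */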
2170 ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2172 /* (sreg2 > 0) && (res < ins->sreg1) => signed overflow */
2173 ia64_cmp4_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2174 /* (sreg2 <= 0) && (res > ins->sreg1) => signed overflow */
2175 ia64_cmp4_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
2177 /* res <u sreg1 => unsigned overflow */
2178 ia64_cmp4_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
2180 /* FIXME: Predicate this since this is a side effect */
2181 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2182 break;
2183 case OP_ISUBCC:
2184 /* p6 and p7 are set if there is signed/unsigned overflow */
2186 /* Set p8-p9 == (sreg2 > 0) */
2187 ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
2189 ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2191 /* (sreg2 > 0) && (res > ins->sreg1) => signed overflow */
2192 ia64_cmp4_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2193 /* (sreg2 <= 0) && (res < ins->sreg1) => signed overflow */
2194 ia64_cmp4_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2196 /* sreg1 <u sreg2 => unsigned overflow */
2197 ia64_cmp4_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
2199 /* FIXME: Predicate this since this is a side effect */
2200 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2201 break;
2202 case OP_ADDCC:
2203 /* Same as OP_IADDCC */
2204 ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
2206 ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2208 ia64_cmp_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2209 ia64_cmp_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
2211 ia64_cmp_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
2213 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2214 break;
2215 case OP_SUBCC:
2216 /* Same as OP_ISUBCC */
2218 ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
2220 ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2222 ia64_cmp_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2223 ia64_cmp_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2225 ia64_cmp_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
2227 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2228 break;
2229 case OP_ADD_IMM:
2230 case OP_IADD_IMM:
2231 case OP_LADD_IMM:
2232 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2233 break;
2234 case OP_IAND_IMM:
2235 case OP_AND_IMM:
2236 case OP_LAND_IMM:
2237 ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2238 break;
2239 case OP_IOR_IMM:
2240 case OP_LOR_IMM:
2241 ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2242 break;
2243 case OP_IXOR_IMM:
2244 case OP_LXOR_IMM:
2245 ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2246 break;
2247 case OP_SHL_IMM:
2248 case OP_ISHL_IMM:
2249 case OP_LSHL_IMM:
2250 ia64_shl_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2251 break;
2252 case OP_SHR_IMM:
2253 case OP_LSHR_IMM:
2254 ia64_shr_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2255 break;
2256 case OP_ISHR_IMM:
2257 g_assert (ins->inst_imm <= 64);
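/* extr extracts bits [inst_imm, 31] with sign extension, i.e. an arithmetic right shift of the low 32 bits; inst_imm is presumably always in the 0-31 range here */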
2258 ia64_extr (code, ins->dreg, ins->sreg1, ins->inst_imm, 32 - ins->inst_imm);
2259 break;
2260 case OP_ISHR_UN_IMM:
2261 ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2262 ia64_shr_u_imm (code, ins->dreg, GP_SCRATCH_REG, ins->inst_imm);
2263 break;
2264 case OP_LSHR_UN_IMM:
2265 ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2266 break;
2267 case OP_LMUL:
2268 /* Based on gcc code */
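/*
 * Itanium has no integer multiply in the ALU: the operands are moved to
 * FP registers and xmpy.l computes the low 64 bits of the product.
 */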
2269 ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
2270 ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
2271 ia64_xmpy_l (code, FP_SCRATCH_REG, FP_SCRATCH_REG, FP_SCRATCH_REG2);
2272 ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
2273 break;
2275 case OP_STOREI1_MEMBASE_REG:
2276 ia64_st1_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2277 break;
2278 case OP_STOREI2_MEMBASE_REG:
2279 ia64_st2_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2280 break;
2281 case OP_STOREI4_MEMBASE_REG:
2282 ia64_st4_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2283 break;
2284 case OP_STOREI8_MEMBASE_REG:
2285 case OP_STORE_MEMBASE_REG:
2286 if (ins->inst_offset != 0) {
2287 /* This is generated by local regalloc */
2288 if (ia64_is_imm14 (ins->inst_offset)) {
2289 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
2290 } else {
2291 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2292 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
2294 ins->inst_destbasereg = GP_SCRATCH_REG;
2296 ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2297 break;
2299 case OP_IA64_STOREI1_MEMBASE_INC_REG:
2300 ia64_st1_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 1, 0);
2301 break;
2302 case OP_IA64_STOREI2_MEMBASE_INC_REG:
2303 ia64_st2_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 2, 0);
2304 break;
2305 case OP_IA64_STOREI4_MEMBASE_INC_REG:
2306 ia64_st4_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 4, 0);
2307 break;
2308 case OP_IA64_STOREI8_MEMBASE_INC_REG:
2309 ia64_st8_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 8, 0);
2310 break;
2312 case OP_LOADU1_MEMBASE:
2313 ia64_ld1 (code, ins->dreg, ins->inst_basereg);
2314 break;
2315 case OP_LOADU2_MEMBASE:
2316 ia64_ld2 (code, ins->dreg, ins->inst_basereg);
2317 break;
2318 case OP_LOADU4_MEMBASE:
2319 ia64_ld4 (code, ins->dreg, ins->inst_basereg);
2320 break;
2321 case OP_LOADI1_MEMBASE:
2322 ia64_ld1 (code, ins->dreg, ins->inst_basereg);
2323 ia64_sxt1 (code, ins->dreg, ins->dreg);
2324 break;
2325 case OP_LOADI2_MEMBASE:
2326 ia64_ld2 (code, ins->dreg, ins->inst_basereg);
2327 ia64_sxt2 (code, ins->dreg, ins->dreg);
2328 break;
2329 case OP_LOADI4_MEMBASE:
2330 ia64_ld4 (code, ins->dreg, ins->inst_basereg);
2331 ia64_sxt4 (code, ins->dreg, ins->dreg);
2332 break;
2333 case OP_LOAD_MEMBASE:
2334 case OP_LOADI8_MEMBASE:
2335 if (ins->inst_offset != 0) {
2336 /* This is generated by local regalloc */
2337 if (ia64_is_imm14 (ins->inst_offset)) {
2338 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
2339 } else {
2340 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2341 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
2343 ins->inst_basereg = GP_SCRATCH_REG;
2345 ia64_ld8 (code, ins->dreg, ins->inst_basereg);
2346 break;
2348 case OP_IA64_LOADU1_MEMBASE_INC:
2349 ia64_ld1_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 1, 0);
2350 break;
2351 case OP_IA64_LOADU2_MEMBASE_INC:
2352 ia64_ld2_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 2, 0);
2353 break;
2354 case OP_IA64_LOADU4_MEMBASE_INC:
2355 ia64_ld4_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 4, 0);
2356 break;
2357 case OP_IA64_LOADI8_MEMBASE_INC:
2358 ia64_ld8_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 8, 0);
2359 break;
2361 case OP_SEXT_I1:
2362 ia64_sxt1 (code, ins->dreg, ins->sreg1);
2363 break;
2364 case OP_SEXT_I2:
2365 ia64_sxt2 (code, ins->dreg, ins->sreg1);
2366 break;
2367 case OP_SEXT_I4:
2368 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2369 break;
2370 case OP_ZEXT_I1:
2371 ia64_zxt1 (code, ins->dreg, ins->sreg1);
2372 break;
2373 case OP_ZEXT_I2:
2374 ia64_zxt2 (code, ins->dreg, ins->sreg1);
2375 break;
2376 case OP_ZEXT_I4:
2377 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2378 break;
2380 /* Compare opcodes */
2381 case OP_IA64_CMP4_EQ:
2382 ia64_cmp4_eq (code, 6, 7, ins->sreg1, ins->sreg2);
2383 break;
2384 case OP_IA64_CMP4_NE:
2385 ia64_cmp4_ne (code, 6, 7, ins->sreg1, ins->sreg2);
2386 break;
2387 case OP_IA64_CMP4_LE:
2388 ia64_cmp4_le (code, 6, 7, ins->sreg1, ins->sreg2);
2389 break;
2390 case OP_IA64_CMP4_LT:
2391 ia64_cmp4_lt (code, 6, 7, ins->sreg1, ins->sreg2);
2392 break;
2393 case OP_IA64_CMP4_GE:
2394 ia64_cmp4_ge (code, 6, 7, ins->sreg1, ins->sreg2);
2395 break;
2396 case OP_IA64_CMP4_GT:
2397 ia64_cmp4_gt (code, 6, 7, ins->sreg1, ins->sreg2);
2398 break;
2399 case OP_IA64_CMP4_LT_UN:
2400 ia64_cmp4_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
2401 break;
2402 case OP_IA64_CMP4_LE_UN:
2403 ia64_cmp4_leu (code, 6, 7, ins->sreg1, ins->sreg2);
2404 break;
2405 case OP_IA64_CMP4_GT_UN:
2406 ia64_cmp4_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
2407 break;
2408 case OP_IA64_CMP4_GE_UN:
2409 ia64_cmp4_geu (code, 6, 7, ins->sreg1, ins->sreg2);
2410 break;
2411 case OP_IA64_CMP_EQ:
2412 ia64_cmp_eq (code, 6, 7, ins->sreg1, ins->sreg2);
2413 break;
2414 case OP_IA64_CMP_NE:
2415 ia64_cmp_ne (code, 6, 7, ins->sreg1, ins->sreg2);
2416 break;
2417 case OP_IA64_CMP_LE:
2418 ia64_cmp_le (code, 6, 7, ins->sreg1, ins->sreg2);
2419 break;
2420 case OP_IA64_CMP_LT:
2421 ia64_cmp_lt (code, 6, 7, ins->sreg1, ins->sreg2);
2422 break;
2423 case OP_IA64_CMP_GE:
2424 ia64_cmp_ge (code, 6, 7, ins->sreg1, ins->sreg2);
2425 break;
2426 case OP_IA64_CMP_GT:
2427 ia64_cmp_gt (code, 6, 7, ins->sreg1, ins->sreg2);
2428 break;
2429 case OP_IA64_CMP_GT_UN:
2430 ia64_cmp_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
2431 break;
2432 case OP_IA64_CMP_LT_UN:
2433 ia64_cmp_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
2434 break;
2435 case OP_IA64_CMP_GE_UN:
2436 ia64_cmp_geu (code, 6, 7, ins->sreg1, ins->sreg2);
2437 break;
2438 case OP_IA64_CMP_LE_UN:
2439 ia64_cmp_leu (code, 6, 7, ins->sreg1, ins->sreg2);
2440 break;
2441 case OP_IA64_CMP4_EQ_IMM:
2442 ia64_cmp4_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2443 break;
2444 case OP_IA64_CMP4_NE_IMM:
2445 ia64_cmp4_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2446 break;
2447 case OP_IA64_CMP4_LE_IMM:
2448 ia64_cmp4_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2449 break;
2450 case OP_IA64_CMP4_LT_IMM:
2451 ia64_cmp4_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2452 break;
2453 case OP_IA64_CMP4_GE_IMM:
2454 ia64_cmp4_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2455 break;
2456 case OP_IA64_CMP4_GT_IMM:
2457 ia64_cmp4_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2458 break;
2459 case OP_IA64_CMP4_LT_UN_IMM:
2460 ia64_cmp4_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2461 break;
2462 case OP_IA64_CMP4_LE_UN_IMM:
2463 ia64_cmp4_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2464 break;
2465 case OP_IA64_CMP4_GT_UN_IMM:
2466 ia64_cmp4_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2467 break;
2468 case OP_IA64_CMP4_GE_UN_IMM:
2469 ia64_cmp4_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2470 break;
2471 case OP_IA64_CMP_EQ_IMM:
2472 ia64_cmp_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2473 break;
2474 case OP_IA64_CMP_NE_IMM:
2475 ia64_cmp_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2476 break;
2477 case OP_IA64_CMP_LE_IMM:
2478 ia64_cmp_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2479 break;
2480 case OP_IA64_CMP_LT_IMM:
2481 ia64_cmp_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2482 break;
2483 case OP_IA64_CMP_GE_IMM:
2484 ia64_cmp_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2485 break;
2486 case OP_IA64_CMP_GT_IMM:
2487 ia64_cmp_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2488 break;
2489 case OP_IA64_CMP_GT_UN_IMM:
2490 ia64_cmp_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2491 break;
2492 case OP_IA64_CMP_LT_UN_IMM:
2493 ia64_cmp_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2494 break;
2495 case OP_IA64_CMP_GE_UN_IMM:
2496 ia64_cmp_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2497 break;
2498 case OP_IA64_CMP_LE_UN_IMM:
2499 ia64_cmp_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2500 break;
2501 case OP_IA64_FCMP_EQ:
2502 ia64_fcmp_eq_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2503 break;
2504 case OP_IA64_FCMP_NE:
2505 ia64_fcmp_ne_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2506 break;
2507 case OP_IA64_FCMP_LT:
2508 ia64_fcmp_lt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2509 break;
2510 case OP_IA64_FCMP_GT:
2511 ia64_fcmp_gt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2512 break;
2513 case OP_IA64_FCMP_LE:
2514 ia64_fcmp_le_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2515 break;
2516 case OP_IA64_FCMP_GE:
2517 ia64_fcmp_ge_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2518 break;
2519 case OP_IA64_FCMP_GT_UN:
2520 ia64_fcmp_gt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2521 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2522 break;
2523 case OP_IA64_FCMP_LT_UN:
2524 ia64_fcmp_lt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2525 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2526 break;
2527 case OP_IA64_FCMP_GE_UN:
2528 ia64_fcmp_ge_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2529 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2530 break;
2531 case OP_IA64_FCMP_LE_UN:
2532 ia64_fcmp_le_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2533 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2534 break;
2536 case OP_COND_EXC_IOV:
2537 case OP_COND_EXC_OV:
2538 emit_cond_system_exception (cfg, code, "OverflowException", 6);
2539 break;
2540 case OP_COND_EXC_IC:
2541 case OP_COND_EXC_C:
2542 emit_cond_system_exception (cfg, code, "OverflowException", 7);
2543 break;
2544 case OP_IA64_COND_EXC:
2545 emit_cond_system_exception (cfg, code, ins->inst_p1, 6);
2546 break;
2547 case OP_IA64_CSET:
2548 ia64_mov_pred (code, 7, ins->dreg, IA64_R0);
2549 ia64_no_stop (code);
2550 ia64_add1_pred (code, 6, ins->dreg, IA64_R0, IA64_R0);
2551 break;
2552 case OP_ICONV_TO_I1:
2553 case OP_LCONV_TO_I1:
2554 /* FIXME: Is this needed? */
2555 ia64_sxt1 (code, ins->dreg, ins->sreg1);
2556 break;
2557 case OP_ICONV_TO_I2:
2558 case OP_LCONV_TO_I2:
2559 /* FIXME: Is this needed? */
2560 ia64_sxt2 (code, ins->dreg, ins->sreg1);
2561 break;
2562 case OP_LCONV_TO_I4:
2564 /* FIXME: Is this needed? */
2564 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2565 break;
2566 case OP_ICONV_TO_U1:
2567 case OP_LCONV_TO_U1:
2568 /* FIXME: Is this needed? */
2569 ia64_zxt1 (code, ins->dreg, ins->sreg1);
2570 break;
2571 case OP_ICONV_TO_U2:
2572 case OP_LCONV_TO_U2:
2573 /* FIXME: Is this needed? */
2574 ia64_zxt2 (code, ins->dreg, ins->sreg1);
2575 break;
2576 case OP_LCONV_TO_U4:
2577 /* FIXME: Is this needed? */
2578 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2579 break;
2580 case OP_ICONV_TO_I8:
2581 case OP_ICONV_TO_I:
2582 case OP_LCONV_TO_I8:
2583 case OP_LCONV_TO_I:
2584 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2585 break;
2586 case OP_LCONV_TO_U8:
2587 case OP_LCONV_TO_U:
2588 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2589 break;
2591 /*
2592 * FLOAT OPCODES
2593 */
2594 case OP_R8CONST: {
2595 double d = *(double *)ins->inst_p0;
2597 if ((d == 0.0) && (mono_signbit (d) == 0))
2598 ia64_fmov (code, ins->dreg, 0);
2599 else if (d == 1.0)
2600 ia64_fmov (code, ins->dreg, 1);
2601 else {
2602 add_patch_info (cfg, code, MONO_PATCH_INFO_R8, ins->inst_p0);
2603 ia64_movl (code, GP_SCRATCH_REG, 0);
2604 ia64_ldfd (code, ins->dreg, GP_SCRATCH_REG);
2606 break;
2608 case OP_R4CONST: {
2609 float f = *(float *)ins->inst_p0;
2611 if ((f == 0.0) && (mono_signbit (f) == 0))
2612 ia64_fmov (code, ins->dreg, 0);
2613 else if (f == 1.0)
2614 ia64_fmov (code, ins->dreg, 1);
2615 else {
2616 add_patch_info (cfg, code, MONO_PATCH_INFO_R4, ins->inst_p0);
2617 ia64_movl (code, GP_SCRATCH_REG, 0);
2618 ia64_ldfs (code, ins->dreg, GP_SCRATCH_REG);
2620 break;
2622 case OP_FMOVE:
2623 ia64_fmov (code, ins->dreg, ins->sreg1);
2624 break;
2625 case OP_STORER8_MEMBASE_REG:
2626 if (ins->inst_offset != 0) {
2627 /* This is generated by local regalloc */
2628 if (ia64_is_imm14 (ins->inst_offset)) {
2629 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
2630 } else {
2631 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2632 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
2634 ins->inst_destbasereg = GP_SCRATCH_REG;
2636 ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2637 break;
2638 case OP_STORER4_MEMBASE_REG:
2639 ia64_fnorm_s_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
2640 ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
2641 break;
2642 case OP_LOADR8_MEMBASE:
2643 if (ins->inst_offset != 0) {
2644 /* This is generated by local regalloc */
2645 if (ia64_is_imm14 (ins->inst_offset)) {
2646 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
2647 } else {
2648 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2649 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
2651 ins->inst_basereg = GP_SCRATCH_REG;
2653 ia64_ldfd (code, ins->dreg, ins->inst_basereg);
2654 break;
2655 case OP_LOADR4_MEMBASE:
2656 ia64_ldfs (code, ins->dreg, ins->inst_basereg);
2657 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
2658 break;
2659 case OP_ICONV_TO_R4:
2660 case OP_LCONV_TO_R4:
2661 ia64_setf_sig (code, ins->dreg, ins->sreg1);
2662 ia64_fcvt_xf (code, ins->dreg, ins->dreg);
2663 ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
2664 break;
2665 case OP_ICONV_TO_R8:
2666 case OP_LCONV_TO_R8:
2667 ia64_setf_sig (code, ins->dreg, ins->sreg1);
2668 ia64_fcvt_xf (code, ins->dreg, ins->dreg);
2669 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
2670 break;
2671 case OP_FCONV_TO_R4:
2672 ia64_fnorm_s_sf (code, ins->dreg, ins->sreg1, 0);
2673 break;
2674 case OP_FCONV_TO_I8:
2675 case OP_FCONV_TO_I:
2676 ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
2677 ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
2678 break;
2679 case OP_FADD:
2680 ia64_fma_d_sf (code, ins->dreg, ins->sreg1, 1, ins->sreg2, 0);
2681 break;
2682 case OP_FSUB:
2683 ia64_fms_d_sf (code, ins->dreg, ins->sreg1, 1, ins->sreg2, 0);
2684 break;
2685 case OP_FMUL:
2686 ia64_fma_d_sf (code, ins->dreg, ins->sreg1, ins->sreg2, 0, 0);
2687 break;
2688 case OP_FNEG:
2689 ia64_fmerge_ns (code, ins->dreg, ins->sreg1, ins->sreg1);
2690 break;
2691 case OP_CKFINITE:
2692 /* Quiet NaN */
2693 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x080);
2694 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2695 /* Signaling NaN */
2696 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x040);
2697 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2698 /* Positive infinity */
2699 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x021);
2700 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2701 /* Negative infinity */
2702 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x022);
2703 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2704 break;
2706 /* Calls */
2707 case OP_CHECK_THIS:
2708 /* ensure ins->sreg1 is not NULL */
2709 /* Can't use ld8 as this could be a vtype address */
2710 ia64_ld1 (code, GP_SCRATCH_REG, ins->sreg1);
2711 break;
2712 case OP_ARGLIST:
2713 ia64_adds_imm (code, GP_SCRATCH_REG, cfg->sig_cookie, cfg->frame_reg);
2714 ia64_st8 (code, ins->sreg1, GP_SCRATCH_REG);
2715 break;
2716 case OP_FCALL:
2717 case OP_LCALL:
2718 case OP_VCALL:
2719 case OP_VCALL2:
2720 case OP_VOIDCALL:
2721 case OP_CALL:
2722 call = (MonoCallInst*)ins;
2724 if (ins->flags & MONO_INST_HAS_METHOD)
2725 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2726 else
2727 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2729 code = emit_move_return_value (cfg, ins, code);
2730 break;
2732 case OP_CALL_REG:
2733 case OP_FCALL_REG:
2734 case OP_LCALL_REG:
2735 case OP_VCALL_REG:
2736 case OP_VCALL2_REG:
2737 case OP_VOIDCALL_REG: {
2738 MonoCallInst *call = (MonoCallInst*)ins;
2739 CallInfo *cinfo;
2740 int out_reg;
2742 /*
2743 * mono_arch_get_this_arg_from_call () needs to find the this argument in a global
2744 * register.
2745 */
2746 cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
2747 out_reg = cfg->arch.reg_out0;
2748 ia64_mov (code, IA64_R10, out_reg);
2750 /* Indirect call */
2751 ia64_mov (code, IA64_R8, ins->sreg1);
2752 ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, IA64_R8, 8);
2753 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
2754 ia64_ld8 (code, IA64_GP, IA64_R8);
2755 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2757 code = emit_move_return_value (cfg, ins, code);
2758 break;
2760 case OP_FCALL_MEMBASE:
2761 case OP_LCALL_MEMBASE:
2762 case OP_VCALL_MEMBASE:
2763 case OP_VCALL2_MEMBASE:
2764 case OP_VOIDCALL_MEMBASE:
2765 case OP_CALL_MEMBASE: {
2766 MonoCallInst *call = (MonoCallInst*)ins;
2767 CallInfo *cinfo;
2768 int out_reg;
2770 ia64_mov (code, IA64_R11, ins->sreg1);
2771 if (ia64_is_imm14 (ins->inst_offset))
2772 ia64_adds_imm (code, IA64_R8, ins->inst_offset, ins->sreg1);
2773 else {
2774 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2775 ia64_add (code, IA64_R8, GP_SCRATCH_REG, ins->sreg1);
2778 if (call->method && ins->inst_offset < 0) {
2779 /*
2780 * This is a possible IMT call so save the IMT method in a global
2781 * register where mono_arch_find_imt_method () and its friends can
2782 * access it.
2783 */
2784 ia64_movl (code, IA64_R9, call->method);
2785 }
2787 /*
2788 * mono_arch_find_this_arg () needs to find the this argument in a global
2789 * register.
2790 */
2791 cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
2792 out_reg = cfg->arch.reg_out0;
2793 ia64_mov (code, IA64_R10, out_reg);
2795 ia64_ld8 (code, GP_SCRATCH_REG, IA64_R8);
2797 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2799 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2801 code = emit_move_return_value (cfg, ins, code);
2802 break;
2804 case OP_JMP: {
2805 /*
2806 * Keep in sync with the code in emit_epilog.
2807 */
2809 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2810 NOT_IMPLEMENTED;
2812 g_assert (!cfg->method->save_lmf);
2814 /* Load arguments into their original registers */
2815 code = emit_load_volatile_arguments (cfg, code);
2817 if (cfg->arch.stack_alloc_size) {
2818 if (cfg->arch.omit_fp) {
2819 if (ia64_is_imm14 (cfg->arch.stack_alloc_size))
2820 ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
2821 else {
2822 ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
2823 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
2826 else
2827 ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
2829 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
2830 ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
2832 add_patch_info (cfg, code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2833 ia64_movl (code, GP_SCRATCH_REG, 0);
2834 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2835 ia64_br_cond_reg (code, IA64_B6);
2837 break;
2839 case OP_BREAK:
2840 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_break);
2841 break;
2843 case OP_LOCALLOC: {
2844 gint32 abi_offset;
2846 /* FIXME: Sigaltstack support */
2848 /* keep alignment */
2849 ia64_adds_imm (code, GP_SCRATCH_REG, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->sreg1);
2850 ia64_movl (code, GP_SCRATCH_REG2, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
2851 ia64_and (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2853 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
2855 ia64_mov (code, ins->dreg, IA64_SP);
2857 /* An area at sp is reserved by the ABI for parameter passing */
2858 abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_LOCALLOC_ALIGNMENT);
2859 if (ia64_is_adds_imm (abi_offset))
2860 ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
2861 else {
2862 ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
2863 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
2866 if (ins->flags & MONO_INST_INIT) {
2867 /* Upper limit */
2868 ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
2870 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
2872 /* Init loop */
2873 ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
2874 ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
2875 ia64_br_cond_pred (code, 8, -2);
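/*
 * One-ins-per-bundle is in effect, so each of the three instructions above
 * is its own bundle and the -2 bundle displacement branches back to the
 * st8 at the top of the init loop.
 */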
2877 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2879 ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
2882 break;
2884 case OP_LOCALLOC_IMM: {
2885 gint32 abi_offset;
2887 /* FIXME: Sigaltstack support */
2889 gssize size = ins->inst_imm;
2890 size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
2892 if (ia64_is_adds_imm (size))
2893 ia64_adds_imm (code, GP_SCRATCH_REG, size, IA64_R0);
2894 else
2895 ia64_movl (code, GP_SCRATCH_REG, size);
2897 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
2898 ia64_mov (code, ins->dreg, IA64_SP);
2900 /* An area at sp is reserved by the ABI for parameter passing */
2901 abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_FRAME_ALIGNMENT);
2902 if (ia64_is_adds_imm (abi_offset))
2903 ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
2904 else {
2905 ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
2906 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
2909 if (ins->flags & MONO_INST_INIT) {
2910 /* Upper limit */
2911 ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
2913 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
2915 /* Init loop */
2916 ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
2917 ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
2918 ia64_br_cond_pred (code, 8, -2);
2920 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2922 ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
2925 break;
2927 case OP_TLS_GET:
2928 ia64_adds_imm (code, ins->dreg, ins->inst_offset, IA64_TP);
2929 ia64_ld8 (code, ins->dreg, ins->dreg);
2930 break;
2932 /* Synchronization */
2933 case OP_MEMORY_BARRIER:
2934 ia64_mf (code);
2935 break;
2936 case OP_ATOMIC_ADD_IMM_NEW_I4:
2937 g_assert (ins->inst_offset == 0);
2938 ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
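/* fetchadd returns the value before the addition, so add inst_imm again to produce the new value */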
2939 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
2940 break;
2941 case OP_ATOMIC_ADD_IMM_NEW_I8:
2942 g_assert (ins->inst_offset == 0);
2943 ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
2944 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
2945 break;
2946 case OP_ATOMIC_EXCHANGE_I4:
2947 ia64_xchg4_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
2948 ia64_sxt4 (code, ins->dreg, ins->dreg);
2949 break;
2950 case OP_ATOMIC_EXCHANGE_I8:
2951 ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
2952 break;
2953 case OP_ATOMIC_ADD_NEW_I4: {
2954 guint8 *label, *buf;
2956 /* From libatomic_ops */
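/*
 * Compare-and-swap loop: load-acquire the old value, compute old + sreg2,
 * put the old value in ar.ccv and cmpxchg; p7 is taken (retry) if another
 * thread modified the location in between.
 */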
2957 ia64_mf (code);
2959 ia64_begin_bundle (code);
2960 label = code.buf + code.nins;
2961 ia64_ld4_acq (code, GP_SCRATCH_REG, ins->sreg1);
2962 ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
2963 ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
2964 ia64_cmpxchg4_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
2965 ia64_cmp4_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2966 buf = code.buf + code.nins;
2967 ia64_br_cond_pred (code, 7, 0);
2968 ia64_begin_bundle (code);
2969 ia64_patch (buf, label);
2970 ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2971 break;
2973 case OP_ATOMIC_ADD_NEW_I8: {
2974 guint8 *label, *buf;
2976 /* From libatomic_ops */
2977 ia64_mf (code);
2979 ia64_begin_bundle (code);
2980 label = code.buf + code.nins;
2981 ia64_ld8_acq (code, GP_SCRATCH_REG, ins->sreg1);
2982 ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
2983 ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
2984 ia64_cmpxchg8_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
2985 ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2986 buf = code.buf + code.nins;
2987 ia64_br_cond_pred (code, 7, 0);
2988 ia64_begin_bundle (code);
2989 ia64_patch (buf, label);
2990 ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2991 break;
2994 /* Exception handling */
2995 case OP_CALL_HANDLER:
2996 /*
2997 * Using a call instruction would mess up the register stack, so
2998 * save the return address to a register and use a
2999 * branch.
3000 */
3001 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
3002 ia64_mov (code, IA64_R15, IA64_R0);
3003 ia64_mov_from_ip (code, GP_SCRATCH_REG);
3004 /* Add the length of OP_CALL_HANDLER */
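/*
 * mov_from_ip reads the address of its own bundle; with one instruction
 * per bundle the branch below is 4 bundles later, so adding 5 * 16 bytes
 * yields the address of the bundle following the branch, i.e. the point
 * the handler returns to.
 */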
3005 ia64_adds_imm (code, GP_SCRATCH_REG, 5 * 16, GP_SCRATCH_REG);
3006 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3007 ia64_movl (code, GP_SCRATCH_REG2, 0);
3008 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
3009 ia64_br_cond_reg (code, IA64_B6);
3010 // FIXME:
3011 //mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3012 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
3013 break;
3014 case OP_START_HANDLER: {
3015 /*
3016 * We receive the return address in GP_SCRATCH_REG.
3017 */
3018 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3020 /*
3021 * R15 determines our caller. It is used since it is writable using
3022 * libunwind.
3023 * R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context ()
3024 * R15 != 0 means we are called by call_filter ().
3025 */
3026 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
3027 ia64_cmp_eq (code, 6, 7, IA64_R15, IA64_R0);
3029 ia64_br_cond_pred (code, 6, 6);
3031 /*
3032 * Called by call_filter:
3033 * Allocate a new stack frame, and set the fp register from the
3034 * value passed in by the caller.
3035 * We allocate a similar frame as is done by the prolog, so
3036 * if an exception is thrown while executing the filter, the
3037 * unwinder can unwind through the filter frame using the unwind
3038 * info for the prolog.
3039 */
3040 ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
3041 ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
3042 ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
3043 ia64_mov (code, cfg->frame_reg, IA64_R15);
3044 /* Signal to endfilter that we are called by call_filter */
3045 ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
3047 /* Branch target: */
3048 if (ia64_is_imm14 (spvar->inst_offset))
3049 ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
3050 else {
3051 ia64_movl (code, GP_SCRATCH_REG2, spvar->inst_offset);
3052 ia64_add (code, GP_SCRATCH_REG2, cfg->frame_reg, GP_SCRATCH_REG2);
3055 /* Save the return address */
3056 ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
3057 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
3059 break;
3061 case OP_ENDFINALLY:
3062 case OP_ENDFILTER: {
3063 /* FIXME: Return the value in ENDFILTER */
3064 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3066 /* Load the return address */
3067 if (ia64_is_imm14 (spvar->inst_offset)) {
3068 ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
3069 } else {
3070 ia64_movl (code, GP_SCRATCH_REG, spvar->inst_offset);
3071 ia64_add (code, GP_SCRATCH_REG, cfg->frame_reg, GP_SCRATCH_REG);
3073 ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
3075 /* Test caller */
3076 ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, IA64_R0);
3077 ia64_br_cond_pred (code, 7, 4);
3079 /* Called by call_filter */
3080 /* Pop frame */
3081 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
3082 ia64_mov_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
3083 ia64_br_ret_reg (code, IA64_B0);
3085 /* Called by CALL_HANDLER */
3086 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
3087 ia64_br_cond_reg (code, IA64_B6);
3088 break;
3090 case OP_THROW:
3091 ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
3092 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
3093 (gpointer)"mono_arch_throw_exception");
3095 /*
3096 * This might be the last instruction in the method, so add a dummy
3097 * instruction so the unwinder will work.
3098 */
3099 ia64_break_i (code, 0);
3100 break;
3101 case OP_RETHROW:
3102 ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
3103 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
3104 (gpointer)"mono_arch_rethrow_exception");
3106 ia64_break_i (code, 0);
3107 break;
3109 default:
3110 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3111 g_assert_not_reached ();
3112 }
3114 if ((code.buf - cfg->native_code - offset) > max_len) {
3115 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
3116 mono_inst_name (ins->opcode), max_len, code.buf - cfg->native_code - offset);
3117 g_assert_not_reached ();
3118 }
3120 cpos += max_len;
3122 last_ins = ins;
3123 last_offset = offset;
3124 }
3126 ia64_codegen_close (code);
3128 cfg->code_len = code.buf - cfg->native_code;
3129 }
3131 void
3132 mono_arch_register_lowlevel_calls (void)
3133 {
3134 }
3136 static Ia64InsType ins_types_in_template [32][3] = {
3137 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3138 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3139 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3140 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3141 {IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
3142 {IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
3143 {0, 0, 0},
3144 {0, 0, 0},
3145 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3146 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3147 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3148 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3149 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
3150 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
3151 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
3152 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
3153 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
3154 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
3155 {IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3156 {IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3157 {0, 0, 0},
3158 {0, 0, 0},
3159 {IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3160 {IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3161 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
3162 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
3163 {0, 0, 0},
3164 {0, 0, 0},
3165 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
3166 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
3167 {0, 0, 0},
3168 {0, 0, 0}
3169 };
3171 static gboolean stops_in_template [32][3] = {
3172 { FALSE, FALSE, FALSE },
3173 { FALSE, FALSE, TRUE },
3174 { FALSE, TRUE, FALSE },
3175 { FALSE, TRUE, TRUE },
3176 { FALSE, FALSE, FALSE },
3177 { FALSE, FALSE, TRUE },
3178 { FALSE, FALSE, FALSE },
3179 { FALSE, FALSE, FALSE },
3181 { FALSE, FALSE, FALSE },
3182 { FALSE, FALSE, TRUE },
3183 { TRUE, FALSE, FALSE },
3184 { TRUE, FALSE, TRUE },
3185 { FALSE, FALSE, FALSE },
3186 { FALSE, FALSE, TRUE },
3187 { FALSE, FALSE, FALSE },
3188 { FALSE, FALSE, TRUE },
3190 { FALSE, FALSE, FALSE },
3191 { FALSE, FALSE, TRUE },
3192 { FALSE, FALSE, FALSE },
3193 { FALSE, FALSE, TRUE },
3194 { FALSE, FALSE, FALSE },
3195 { FALSE, FALSE, FALSE },
3196 { FALSE, FALSE, FALSE },
3197 { FALSE, FALSE, TRUE },
3199 { FALSE, FALSE, FALSE },
3200 { FALSE, FALSE, TRUE },
3201 { FALSE, FALSE, FALSE },
3202 { FALSE, FALSE, FALSE },
3203 { FALSE, FALSE, FALSE },
3204 { FALSE, FALSE, TRUE },
3205 { FALSE, FALSE, FALSE },
3206 { FALSE, FALSE, FALSE }
3207 };
3209 static int last_stop_in_template [32] = {
3210 -1, 2, 1, 2, -1, 2, -1, -1,
3211 -1, 2, 0, 2, -1, 2, -1, 2,
3212 -1, 2, -1, 2, -1, -1, -1, 2,
3213 -1, 2, -1, -1, -1, 2, -1, -1
3214 };
3216 static guint64 nops_for_ins_types [6] = {
3217 IA64_NOP_I,
3218 IA64_NOP_I,
3219 IA64_NOP_M,
3220 IA64_NOP_F,
3221 IA64_NOP_B,
3222 IA64_NOP_X
3223 };
3225 #define ITYPE_MATCH(itype1, itype2) (((itype1) == (itype2)) || (((itype2) == IA64_INS_TYPE_A) && (((itype1) == IA64_INS_TYPE_I) || ((itype1) == IA64_INS_TYPE_M))))
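/* A-unit (ALU) instructions can execute in either an I or an M slot, which is what ITYPE_MATCH encodes */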
3227 /*
3228 * Debugging support
3229 */
3231 #if 0
3232 #define DEBUG_INS_SCHED(a) do { a; } while (0)
3233 #else
3234 #define DEBUG_INS_SCHED(a)
3235 #endif
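/*
 * ia64_analyze_deps:
 *
 *   Walk the (event, register) byte pairs accumulated in code->dep_info and
 * compute where stop bits are needed between the queued instructions.
 * deps_start [i] receives the offset of instruction i's dependency records,
 * and stops [i] is set if a stop must follow instruction i.
 */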
3237 static void
3238 ia64_analyze_deps (Ia64CodegenState *code, int *deps_start, int *stops)
3239 {
3240 int i, pos, ins_index, current_deps_start, current_ins_start, reg;
3241 guint8 *deps = code->dep_info;
3242 gboolean need_stop, no_stop;
3244 for (i = 0; i < code->nins; ++i)
3245 stops [i] = FALSE;
3247 ins_index = 0;
3248 current_deps_start = 0;
3249 current_ins_start = 0;
3250 deps_start [ins_index] = current_ins_start;
3251 pos = 0;
3252 no_stop = FALSE;
3253 DEBUG_INS_SCHED (printf ("BEGIN.\n"));
3254 while (pos < code->dep_info_pos) {
3255 need_stop = FALSE;
3256 switch (deps [pos]) {
3257 case IA64_END_OF_INS:
3258 ins_index ++;
3259 current_ins_start = pos + 2;
3260 deps_start [ins_index] = current_ins_start;
3261 no_stop = FALSE;
3262 DEBUG_INS_SCHED (printf ("(%d) END INS.\n", ins_index - 1));
3263 break;
3264 case IA64_NONE:
3265 break;
3266 case IA64_READ_GR:
3267 reg = deps [pos + 1];
3269 DEBUG_INS_SCHED (printf ("READ GR: %d\n", reg));
3270 for (i = current_deps_start; i < current_ins_start; i += 2)
3271 if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
3272 need_stop = TRUE;
3273 break;
3274 case IA64_WRITE_GR:
3275 reg = code->dep_info [pos + 1];
3277 DEBUG_INS_SCHED (printf ("WRITE GR: %d\n", reg));
3278 for (i = current_deps_start; i < current_ins_start; i += 2)
3279 if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
3280 need_stop = TRUE;
3281 break;
3282 case IA64_READ_PR:
3283 reg = deps [pos + 1];
3285 DEBUG_INS_SCHED (printf ("READ PR: %d\n", reg));
3286 for (i = current_deps_start; i < current_ins_start; i += 2)
3287 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3288 need_stop = TRUE;
3289 break;
3290 case IA64_READ_PR_BRANCH:
3291 reg = deps [pos + 1];
3293 /* Writes to prs by non-float instructions are visible to branches */
3294 DEBUG_INS_SCHED (printf ("READ PR BRANCH: %d\n", reg));
3295 for (i = current_deps_start; i < current_ins_start; i += 2)
3296 if (deps [i] == IA64_WRITE_PR_FLOAT && deps [i + 1] == reg)
3297 need_stop = TRUE;
3298 break;
3299 case IA64_WRITE_PR:
3300 reg = code->dep_info [pos + 1];
3302 DEBUG_INS_SCHED (printf ("WRITE PR: %d\n", reg));
3303 for (i = current_deps_start; i < current_ins_start; i += 2)
3304 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3305 need_stop = TRUE;
3306 break;
3307 case IA64_WRITE_PR_FLOAT:
3308 reg = code->dep_info [pos + 1];
3310 DEBUG_INS_SCHED (printf ("WRITE PR FP: %d\n", reg));
3311 for (i = current_deps_start; i < current_ins_start; i += 2)
3312 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3313 need_stop = TRUE;
3314 break;
3315 case IA64_READ_BR:
3316 reg = deps [pos + 1];
3318 DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg));
3319 for (i = current_deps_start; i < current_ins_start; i += 2)
3320 if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
3321 need_stop = TRUE;
3322 break;
3323 case IA64_WRITE_BR:
3324 reg = code->dep_info [pos + 1];
3326 DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg));
3327 for (i = current_deps_start; i < current_ins_start; i += 2)
3328 if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
3329 need_stop = TRUE;
3330 break;
3331 case IA64_READ_BR_BRANCH:
3332 reg = deps [pos + 1];
3334 /* Writes to brs are visible to branches */
3335 DEBUG_INS_SCHED (printf ("READ BR BRACH: %d\n", reg));
3336 break;
3337 case IA64_READ_FR:
3338 reg = deps [pos + 1];
3340 DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg));
3341 for (i = current_deps_start; i < current_ins_start; i += 2)
3342 if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
3343 need_stop = TRUE;
3344 break;
3345 case IA64_WRITE_FR:
3346 reg = code->dep_info [pos + 1];
3348 DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg));
3349 for (i = current_deps_start; i < current_ins_start; i += 2)
3350 if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
3351 need_stop = TRUE;
3352 break;
3353 case IA64_READ_AR:
3354 reg = deps [pos + 1];
3356 DEBUG_INS_SCHED (printf ("READ AR: %d\n", reg));
3357 for (i = current_deps_start; i < current_ins_start; i += 2)
3358 if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
3359 need_stop = TRUE;
3360 break;
3361 case IA64_WRITE_AR:
3362 reg = code->dep_info [pos + 1];
3364 DEBUG_INS_SCHED (printf ("WRITE AR: %d\n", reg));
3365 for (i = current_deps_start; i < current_ins_start; i += 2)
3366 if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
3367 need_stop = TRUE;
3368 break;
3369 case IA64_NO_STOP:
3370 /*
3371 * Explicitly indicate that a stop is not required. Useful for
3372 * example when two predicated instructions with negated predicates
3373 * write the same registers.
3374 */
3375 no_stop = TRUE;
3376 break;
3377 default:
3378 g_assert_not_reached ();
3380 pos += 2;
3382 if (need_stop && !no_stop) {
3383 g_assert (ins_index > 0);
3384 stops [ins_index - 1] = 1;
3386 DEBUG_INS_SCHED (printf ("STOP\n"));
3387 current_deps_start = current_ins_start;
3389 /* Skip remaining deps for this instruction */
3390 while (deps [pos] != IA64_END_OF_INS)
3391 pos += 2;
3395 if (code->nins > 0) {
3396 /* No dependency info for the last instruction */
3397 stops [code->nins - 1] = 1;
3400 deps_start [code->nins] = code->dep_info_pos;
3401 }
3403 static void
3404 ia64_real_emit_bundle (Ia64CodegenState *code, int *deps_start, int *stops, int n, guint64 template, guint64 ins1, guint64 ins2, guint64 ins3, guint8 nops)
3405 {
3406 int stop_pos, i, deps_to_shift, dep_shift;
3408 g_assert (n <= code->nins);
3410 // if (n > 1) printf ("FOUND: %ld.\n", template);
3412 ia64_emit_bundle_template (code, template, ins1, ins2, ins3);
3414 stop_pos = last_stop_in_template [template] + 1;
3415 if (stop_pos > n)
3416 stop_pos = n;
3418 /* Compute the number of 'real' instructions before the stop */
3419 deps_to_shift = stop_pos;
3420 if (stop_pos >= 3 && (nops & (1 << 2)))
3421 deps_to_shift --;
3422 if (stop_pos >= 2 && (nops & (1 << 1)))
3423 deps_to_shift --;
3424 if (stop_pos >= 1 && (nops & (1 << 0)))
3425 deps_to_shift --;
3427 /*
3428 * We have to keep some dependencies whose instructions have been shifted
3429 * out of the buffer. So nullify the end_of_ins markers in the dependency
3430 * array.
3431 */
3432 for (i = deps_start [deps_to_shift]; i < deps_start [n]; i += 2)
3433 if (code->dep_info [i] == IA64_END_OF_INS)
3434 code->dep_info [i] = IA64_NONE;
3436 g_assert (deps_start [deps_to_shift] <= code->dep_info_pos);
3437 memcpy (code->dep_info, &code->dep_info [deps_start [deps_to_shift]], code->dep_info_pos - deps_start [deps_to_shift]);
3438 code->dep_info_pos = code->dep_info_pos - deps_start [deps_to_shift];
3440 dep_shift = deps_start [deps_to_shift];
3441 for (i = 0; i < code->nins + 1 - n; ++i)
3442 deps_start [i] = deps_start [n + i] - dep_shift;
3444 /* Determine the exact positions of instructions with unwind ops */
3445 if (code->unw_op_count) {
3446 int ins_pos [16];
3447 int curr_ins, curr_ins_pos;
3449 curr_ins = 0;
3450 curr_ins_pos = ((code->buf - code->region_start - 16) / 16) * 3;
3451 for (i = 0; i < 3; ++i) {
3452 if (! (nops & (1 << i))) {
3453 ins_pos [curr_ins] = curr_ins_pos + i;
3454 curr_ins ++;
3458 for (i = code->unw_op_pos; i < code->unw_op_count; ++i) {
3459 if (code->unw_ops_pos [i] < n) {
3460 code->unw_ops [i].when = ins_pos [code->unw_ops_pos [i]];
3461 //printf ("UNW-OP: %d -> %d\n", code->unw_ops_pos [i], code->unw_ops [i].when);
3464 if (code->unw_op_pos < code->unw_op_count)
3465 code->unw_op_pos += n;
3468 if (n == code->nins) {
3469 code->template = 0;
3470 code->nins = 0;
3472 else {
3473 memcpy (&code->instructions [0], &code->instructions [n], (code->nins - n) * sizeof (guint64));
3474 memcpy (&code->itypes [0], &code->itypes [n], (code->nins - n) * sizeof (int));
3475 memcpy (&stops [0], &stops [n], (code->nins - n) * sizeof (int));
3476 code->nins -= n;
3477 }
3478 }
3480 void
3481 ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
3482 {
3483 int i, ins_type, template, nins_to_emit;
3484 int deps_start [16];
3485 int stops [16];
3486 gboolean found;
3488 /*
3489 * We implement a simple scheduler which tries to put three instructions
3490 * per bundle, then two, then one.
3491 */
3492 ia64_analyze_deps (code, deps_start, stops);
3494 if ((code->nins >= 3) && !code->one_ins_per_bundle) {
3495 /* Find a suitable template */
3496 for (template = 0; template < 32; ++template) {
3497 if (stops_in_template [template][0] != stops [0] ||
3498 stops_in_template [template][1] != stops [1] ||
3499 stops_in_template [template][2] != stops [2])
3500 continue;
3502 found = TRUE;
3503 for (i = 0; i < 3; ++i) {
3504 ins_type = ins_types_in_template [template][i];
3505 switch (code->itypes [i]) {
3506 case IA64_INS_TYPE_A:
3507 found &= (ins_type == IA64_INS_TYPE_I) || (ins_type == IA64_INS_TYPE_M);
3508 break;
3509 default:
3510 found &= (ins_type == code->itypes [i]);
3511 break;
3515 if (found)
3516 found = debug_ins_sched ();
3518 if (found) {
3519 ia64_real_emit_bundle (code, deps_start, stops, 3, template, code->instructions [0], code->instructions [1], code->instructions [2], 0);
3520 break;
3525 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3526 /* Wait for more instructions */
3527 return;
3529 /* If it didn't work out, try putting two instructions into one bundle */
3530 if ((code->nins >= 2) && !code->one_ins_per_bundle) {
3531 /* Try a nop at the end */
3532 for (template = 0; template < 32; ++template) {
3533 if (stops_in_template [template][0] != stops [0] ||
3534 ((stops_in_template [template][1] != stops [1]) &&
3535 (stops_in_template [template][2] != stops [1])))
3537 continue;
3539 if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
3540 !ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [1]))
3541 continue;
3543 if (!debug_ins_sched ())
3544 continue;
3546 ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], code->instructions [1], nops_for_ins_types [ins_types_in_template [template][2]], 1 << 2);
3547 break;
3551 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3552 /* Wait for more instructions */
3553 return;
3555 if ((code->nins >= 2) && !code->one_ins_per_bundle) {
3556 /* Try a nop in the middle */
3557 for (template = 0; template < 32; ++template) {
3558 if (((stops_in_template [template][0] != stops [0]) &&
3559 (stops_in_template [template][1] != stops [0])) ||
3560 stops_in_template [template][2] != stops [1])
3561 continue;
3563 if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
3564 !ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))
3565 continue;
3567 if (!debug_ins_sched ())
3568 continue;
3570 ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], nops_for_ins_types [ins_types_in_template [template][1]], code->instructions [1], 1 << 1);
3571 break;
3575 if ((code->nins >= 2) && flush && !code->one_ins_per_bundle) {
3576 /* Try a nop at the beginning */
3577 for (template = 0; template < 32; ++template) {
3578 if ((stops_in_template [template][1] != stops [0]) ||
3579 (stops_in_template [template][2] != stops [1]))
3580 continue;
3582 if (!ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [0]) ||
3583 !ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))
3584 continue;
3586 if (!debug_ins_sched ())
3587 continue;
3589 ia64_real_emit_bundle (code, deps_start, stops, 2, template, nops_for_ins_types [ins_types_in_template [template][0]], code->instructions [0], code->instructions [1], 1 << 0);
3590 break;
3594 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3595 /* Wait for more instructions */
3596 return;
3598 if (flush)
3599 nins_to_emit = code->nins;
3600 else
3601 nins_to_emit = 1;
3603 while (nins_to_emit > 0) {
3604 if (!debug_ins_sched ())
3605 stops [0] = 1;
3606 switch (code->itypes [0]) {
3607 case IA64_INS_TYPE_A:
3608 if (stops [0])
3609 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3610 else
3611 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3612 break;
3613 case IA64_INS_TYPE_I:
3614 if (stops [0])
3615 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3616 else
3617 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3618 break;
3619 case IA64_INS_TYPE_M:
3620 if (stops [0])
3621 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3622 else
3623 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3624 break;
3625 case IA64_INS_TYPE_B:
3626 if (stops [0])
3627 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
3628 else
3629 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIB, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
3630 break;
3631 case IA64_INS_TYPE_F:
3632 if (stops [0])
3633 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3634 else
3635 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFI, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3636 break;
3637 case IA64_INS_TYPE_LX:
3638 if (stops [0] || stops [1])
3639 ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
3640 else
3641 ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLX, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
3642 nins_to_emit --;
3643 break;
3644 default:
3645 g_assert_not_reached ();
3646 }
3647 nins_to_emit --;
3648 }
3649 }
3651 unw_dyn_region_info_t*
3652 mono_ia64_create_unwind_region (Ia64CodegenState *code)
3653 {
3654 unw_dyn_region_info_t *r;
3656 g_assert (code->nins == 0);
3657 r = g_malloc0 (_U_dyn_region_info_size (code->unw_op_count));
3658 memcpy (&r->op, &code->unw_ops, sizeof (unw_dyn_op_t) * code->unw_op_count);
3659 r->op_count = code->unw_op_count;
3660 r->insn_count = ((code->buf - code->region_start) >> 4) * 3;
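/* 3 instruction slots per 16-byte bundle */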
3661 code->unw_op_count = 0;
3662 code->unw_op_pos = 0;
3663 code->region_start = code->buf;
3665 return r;
3666 }
3668 static void
3669 ia64_patch (unsigned char* code, gpointer target)
3670 {
3671 int template, i;
3672 guint64 instructions [3];
3673 guint8 gen_buf [16];
3674 Ia64CodegenState gen;
3675 int ins_to_skip;
3676 gboolean found;
3678 /*
3679 * code encodes both the position inside the buffer and code.nins when
3680 * the instruction was emitted.
3681 */
3682 ins_to_skip = (guint64)code % 16;
3683 code = (unsigned char*)((guint64)code & ~15);
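/* the low 4 bits carry code.nins at emission time (the index of the instruction inside the bundle, see add_patch_info); the rest is the bundle address */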
3685 /*
3686 * Search for the first instruction which is 'patchable', skipping
3687 * ins_to_skip instructions.
3688 */
3690 while (TRUE) {
3692 template = ia64_bundle_template (code);
3693 instructions [0] = ia64_bundle_ins1 (code);
3694 instructions [1] = ia64_bundle_ins2 (code);
3695 instructions [2] = ia64_bundle_ins3 (code);
3697 ia64_codegen_init (gen, gen_buf);
3699 found = FALSE;
3700 for (i = 0; i < 3; ++i) {
3701 guint64 ins = instructions [i];
3702 int opcode = ia64_ins_opcode (ins);
3704 if (ins == nops_for_ins_types [ins_types_in_template [template][i]])
3705 continue;
3707 if (ins_to_skip) {
3708 ins_to_skip --;
3709 continue;
3712 switch (ins_types_in_template [template][i]) {
3713 case IA64_INS_TYPE_A:
3714 case IA64_INS_TYPE_M:
3715 if ((opcode == 8) && (ia64_ins_x2a (ins) == 2) && (ia64_ins_ve (ins) == 0)) {
3716 /* adds */
3717 ia64_adds_imm_pred (gen, ia64_ins_qp (ins), ia64_ins_r1 (ins), (guint64)target, ia64_ins_r3 (ins));
3718 instructions [i] = gen.instructions [0];
3719 found = TRUE;
3721 else
3722 NOT_IMPLEMENTED;
3723 break;
3724 case IA64_INS_TYPE_B:
3725 if ((opcode == 4) && (ia64_ins_btype (ins) == 0)) {
3726 /* br.cond */
3727 gint64 disp = ((guint8*)target - code) >> 4;
3729 /* FIXME: hints */
3730 ia64_br_cond_hint_pred (gen, ia64_ins_qp (ins), disp, 0, 0, 0);
3732 instructions [i] = gen.instructions [0];
3733 found = TRUE;
3735 else if (opcode == 5) {
3736 /* br.call */
3737 gint64 disp = ((guint8*)target - code) >> 4;
3739 /* FIXME: hints */
3740 ia64_br_call_hint_pred (gen, ia64_ins_qp (ins), ia64_ins_b1 (ins), disp, 0, 0, 0);
3741 instructions [i] = gen.instructions [0];
3742 found = TRUE;
3744 else
3745 NOT_IMPLEMENTED;
3746 break;
3747 case IA64_INS_TYPE_LX:
3748 if (i == 1)
3749 break;
3751 if ((opcode == 6) && (ia64_ins_vc (ins) == 0)) {
3752 /* movl */
3753 ia64_movl_pred (gen, ia64_ins_qp (ins), ia64_ins_r1 (ins), target);
3754 instructions [1] = gen.instructions [0];
3755 instructions [2] = gen.instructions [1];
3756 found = TRUE;
3758 else
3759 NOT_IMPLEMENTED;
3761 break;
3762 default:
3763 NOT_IMPLEMENTED;
3764 }
3765 }
3766 if (found) {
3767 /* Rewrite code */
3768 ia64_codegen_init (gen, code);
3769 ia64_emit_bundle_template (&gen, template, instructions [0], instructions [1], instructions [2]);
3770 return;
3771 }
3774 code += 16;
3775 }
3776 }
3778 void
3779 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
3780 {
3781 MonoJumpInfo *patch_info;
3783 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3784 unsigned char *ip = patch_info->ip.i + code;
3785 const unsigned char *target;
3787 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3789 if (patch_info->type == MONO_PATCH_INFO_NONE)
3790 continue;
3791 if (mono_compile_aot) {
3792 NOT_IMPLEMENTED;
3793 }
3795 ia64_patch (ip, (gpointer)target);
3796 }
3797 }
3799 guint8 *
3800 mono_arch_emit_prolog (MonoCompile *cfg)
3801 {
3802 MonoMethod *method = cfg->method;
3803 MonoMethodSignature *sig;
3804 MonoInst *inst;
3805 int alloc_size, pos, i;
3806 Ia64CodegenState code;
3807 CallInfo *cinfo;
3809 sig = mono_method_signature (method);
3810 pos = 0;
3812 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
3814 cfg->code_size = MAX (cfg->header->code_size * 4, 512);
3816 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3817 cfg->code_size += 1024;
3818 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3819 cfg->code_size += 1024;
3821 cfg->native_code = g_malloc (cfg->code_size);
3823 ia64_codegen_init (code, cfg->native_code);
3825 alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
3826 if (cfg->param_area)
3827 alloc_size += cfg->param_area;
3828 if (alloc_size)
3829 /* scratch area */
3830 alloc_size += 16;
3831 alloc_size = ALIGN_TO (alloc_size, MONO_ARCH_FRAME_ALIGNMENT);
3833 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
3834 /* Force sp to be saved/restored */
3835 alloc_size += MONO_ARCH_FRAME_ALIGNMENT;
3837 cfg->arch.stack_alloc_size = alloc_size;
3839 pos = 0;
3841 if (method->save_lmf) {
3842 /* No LMF on IA64 */
3845 alloc_size -= pos;
3847 ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + cfg->arch.reg_saved_ar_pfs);
3848 ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
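/*
 * alloc sets up the register stack frame: (reg_local0 - reg_in0) inputs,
 * (reg_out0 - reg_local0) locals and n_out_regs outputs, saving the
 * previous ar.pfs into reg_saved_ar_pfs.
 */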
3849 ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + cfg->arch.reg_saved_b0);
3850 ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
3852 if ((alloc_size || cinfo->stack_usage) && !cfg->arch.omit_fp) {
3853 ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + cfg->arch.reg_saved_sp);
3854 ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
3855 if (cfg->frame_reg != cfg->arch.reg_saved_sp)
3856 ia64_mov (code, cfg->frame_reg, IA64_SP);
3857 }
3859 if (alloc_size) {
3860 #if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
3861 int pagesize = getpagesize ();
3863 if (alloc_size >= pagesize) {
3864 gint32 remaining_size = alloc_size;
3866 /* Generate stack touching code */
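/* Touch one word per page of the new frame so the guard page cannot
be skipped; a large frame would otherwise jump straight past it. */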
3867 ia64_mov (code, GP_SCRATCH_REG, IA64_SP);
3868 while (remaining_size >= pagesize) {
3869 ia64_movl (code, GP_SCRATCH_REG2, pagesize);
3870 ia64_sub (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
3871 ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
3872 remaining_size -= pagesize;
3873 }
3874 }
3875 #endif
3876 if (ia64_is_imm14 (-alloc_size)) {
3877 if (cfg->arch.omit_fp)
3878 ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
3879 ia64_adds_imm (code, IA64_SP, (-alloc_size), IA64_SP);
3880 }
3881 else {
3882 ia64_movl (code, GP_SCRATCH_REG, -alloc_size);
3883 if (cfg->arch.omit_fp)
3884 ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
3885 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
3886 }
3887 }
3889 ia64_begin_bundle (code);
3891 /* Initialize unwind info */
3892 cfg->arch.r_pro = mono_ia64_create_unwind_region (&code);
3894 if (sig->ret->type != MONO_TYPE_VOID) {
3895 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
3896 /* Save volatile arguments to the stack */
3897 NOT_IMPLEMENTED;
3898 }
3899 }
3901 /* Keep this in sync with emit_load_volatile_arguments */
3902 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3903 ArgInfo *ainfo = cinfo->args + i;
3904 gint32 stack_offset;
3905 MonoType *arg_type;
3907 inst = cfg->args [i];
3909 if (sig->hasthis && (i == 0))
3910 arg_type = &mono_defaults.object_class->byval_arg;
3911 else
3912 arg_type = sig->params [i - sig->hasthis];
3914 arg_type = mono_type_get_underlying_type (arg_type);
3916 stack_offset = ainfo->offset + ARGS_OFFSET;
3918 /*
3919 * FIXME: Native code might pass non-register-sized integers
3920 * without initializing the upper bits.
3921 */
3922 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED && !arg_type->byref && ainfo->storage == ArgInIReg) {
3923 int reg = cfg->arch.reg_in0 + ainfo->reg;
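/* sxt/zxt sign- or zero-extend the narrow argument in place, so
managed code always sees a properly extended register value. */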
3925 switch (mono_type_to_load_membase (cfg, arg_type)) {
3926 case OP_LOADI1_MEMBASE:
3927 ia64_sxt1 (code, reg, reg);
3928 break;
3929 case OP_LOADU1_MEMBASE:
3930 ia64_zxt1 (code, reg, reg);
3931 break;
3932 case OP_LOADI2_MEMBASE:
3933 ia64_sxt2 (code, reg, reg);
3934 break;
3935 case OP_LOADU2_MEMBASE:
3936 ia64_zxt2 (code, reg, reg);
3937 break;
3938 default:
3939 break;
3940 }
3941 }
3943 /* Save volatile arguments to the stack */
3944 if (inst->opcode != OP_REGVAR) {
3945 switch (ainfo->storage) {
3946 case ArgInIReg:
3947 case ArgInFloatReg:
3948 case ArgInFloatRegR4:
3949 g_assert (inst->opcode == OP_REGOFFSET);
3950 if (ia64_is_adds_imm (inst->inst_offset))
3951 ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
3952 else {
3953 ia64_movl (code, GP_SCRATCH_REG2, inst->inst_offset);
3954 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
3955 }
3956 if (arg_type->byref)
3957 ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
3958 else {
3959 switch (arg_type->type) {
3960 case MONO_TYPE_R4:
3961 ia64_stfs_hint (code, GP_SCRATCH_REG, ainfo->reg, 0);
3962 break;
3963 case MONO_TYPE_R8:
3964 ia64_stfd_hint (code, GP_SCRATCH_REG, ainfo->reg, 0);
3965 break;
3966 default:
3967 ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
3968 break;
3969 }
3970 }
3971 break;
3972 case ArgOnStack:
3973 break;
3974 case ArgAggregate:
3975 if (ainfo->nslots != ainfo->nregs)
3976 NOT_IMPLEMENTED;
3978 g_assert (inst->opcode == OP_REGOFFSET);
3979 ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
/* Use a separate index here: reusing i would clobber the enclosing
argument loop's counter. */
3980 for (j = 0; j < ainfo->nregs; ++j) {
3981 switch (ainfo->atype) {
3982 case AggregateNormal:
3983 ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg + j, sizeof (gpointer), 0);
3984 break;
3985 case AggregateSingleHFA:
3986 ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, ainfo->reg + j, 4, 0);
3987 break;
3988 case AggregateDoubleHFA:
3989 ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, ainfo->reg + j, sizeof (gpointer), 0);
3990 break;
3991 default:
3992 NOT_IMPLEMENTED;
3993 }
3994 }
3995 break;
3996 default:
3997 g_assert_not_reached ();
3998 }
3999 }
4001 if (inst->opcode == OP_REGVAR) {
4002 /* Argument allocated to (non-volatile) register */
4003 switch (ainfo->storage) {
4004 case ArgInIReg:
4005 if (inst->dreg != cfg->arch.reg_in0 + ainfo->reg)
4006 ia64_mov (code, inst->dreg, cfg->arch.reg_in0 + ainfo->reg);
4007 break;
4008 case ArgOnStack:
4009 ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
4010 ia64_ld8 (code, inst->dreg, GP_SCRATCH_REG);
4011 break;
4012 default:
4013 NOT_IMPLEMENTED;
4014 }
4015 }
4016 }
4018 if (method->save_lmf) {
4019 /* No LMF on IA64 */
4020 }
4022 ia64_codegen_close (code);
4024 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4025 code.buf = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code.buf, TRUE);
4027 cfg->code_len = code.buf - cfg->native_code;
4029 g_assert (cfg->code_len < cfg->code_size);
4031 cfg->arch.prolog_end_offset = cfg->code_len;
4033 return code.buf;
4036 void
4037 mono_arch_emit_epilog (MonoCompile *cfg)
4039 MonoMethod *method = cfg->method;
4040 int i, pos;
4041 int max_epilog_size = 16 * 4;
4042 Ia64CodegenState code;
4043 guint8 *buf;
4044 CallInfo *cinfo;
4045 ArgInfo *ainfo;
4047 if (mono_jit_trace_calls != NULL)
4048 max_epilog_size += 1024;
4050 cfg->arch.epilog_begin_offset = cfg->code_len;
4052 while (cfg->code_len + max_epilog_size > cfg->code_size) {
4053 cfg->code_size *= 2;
4054 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4055 cfg->stat_code_reallocs++;
4056 }
4058 /* FIXME: Emit unwind info */
4060 buf = cfg->native_code + cfg->code_len;
4062 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4063 buf = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, buf, TRUE);
4065 ia64_codegen_init (code, buf);
4067 /* the code restoring the registers must be kept in sync with OP_JMP */
4068 pos = 0;
4070 if (method->save_lmf) {
4071 /* No LMF on IA64 */
4072 }
4074 /* Load returned vtypes into registers if needed */
4075 cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
4076 ainfo = &cinfo->ret;
4077 switch (ainfo->storage) {
4078 case ArgAggregate:
4079 if (ainfo->nslots != ainfo->nregs)
4080 NOT_IMPLEMENTED;
4082 g_assert (cfg->ret->opcode == OP_REGOFFSET);
4083 ia64_adds_imm (code, GP_SCRATCH_REG, cfg->ret->inst_offset, cfg->ret->inst_basereg);
4084 for (i = 0; i < ainfo->nregs; ++i) {
4085 switch (ainfo->atype) {
4086 case AggregateNormal:
4087 ia64_ld8_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, sizeof (gpointer), 0);
4088 break;
4089 case AggregateSingleHFA:
4090 ia64_ldfs_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, 4, 0);
4091 break;
4092 case AggregateDoubleHFA:
4093 ia64_ldfd_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, sizeof (gpointer), 0);
4094 break;
4095 default:
4096 g_assert_not_reached ();
4097 }
4098 }
4099 break;
4100 default:
4101 break;
4102 }
4104 ia64_begin_bundle (code);
4106 code.region_start = cfg->native_code;
4108 /* Label the unwind state at the start of the exception throwing region */
4109 //ia64_unw_label_state (code, 1234);
4111 if (cfg->arch.stack_alloc_size) {
4112 if (cfg->arch.omit_fp) {
4113 if (ia64_is_imm14 (cfg->arch.stack_alloc_size)) {
4114 ia64_unw_pop_frames (code, 1);
4115 ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
4116 } else {
4117 ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
4118 ia64_unw_pop_frames (code, 1);
4119 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
4120 }
4121 }
4122 else {
4123 ia64_unw_pop_frames (code, 1);
4124 ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
4125 }
4126 }
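/* Restore ar.pfs and the return address saved in the prolog, then
return through b0. */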
4127 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
4128 ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
4129 ia64_br_ret_reg (code, IA64_B0);
4131 ia64_codegen_close (code);
4133 cfg->arch.r_epilog = mono_ia64_create_unwind_region (&code);
4134 cfg->arch.r_pro->next = cfg->arch.r_epilog;
4136 cfg->code_len = code.buf - cfg->native_code;
4138 g_assert (cfg->code_len < cfg->code_size);
4141 void
4142 mono_arch_emit_exceptions (MonoCompile *cfg)
4144 MonoJumpInfo *patch_info;
4145 int i, nthrows;
4146 Ia64CodegenState code;
4147 gboolean empty = TRUE;
4148 //unw_dyn_region_info_t *r_exceptions;
4149 MonoClass *exc_classes [16];
4150 guint8 *exc_throw_start [16], *exc_throw_end [16];
4151 guint32 code_size = 0;
4153 /* Compute needed space */
4154 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4155 if (patch_info->type == MONO_PATCH_INFO_EXC)
4156 code_size += 256;
4157 if (patch_info->type == MONO_PATCH_INFO_R8)
4158 code_size += 8 + 7; /* sizeof (double) + alignment */
4159 if (patch_info->type == MONO_PATCH_INFO_R4)
4160 code_size += 4 + 7; /* sizeof (float) + alignment */
4161 }
4163 if (code_size == 0)
4164 return;
4166 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4167 cfg->code_size *= 2;
4168 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4169 cfg->stat_code_reallocs++;
4170 }
4172 ia64_codegen_init (code, cfg->native_code + cfg->code_len);
4174 /* The unwind state here is the same as before the epilog */
4175 //ia64_unw_copy_state (code, 1234);
4177 /* add code to raise exceptions */
4178 /* FIXME: Optimize this */
4179 nthrows = 0;
4180 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4181 switch (patch_info->type) {
4182 case MONO_PATCH_INFO_EXC: {
4183 MonoClass *exc_class;
4184 guint8* throw_ip;
4185 guint8* buf;
4186 guint64 exc_token_index;
4188 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4189 g_assert (exc_class);
4190 exc_token_index = mono_metadata_token_index (exc_class->type_token);
4191 throw_ip = cfg->native_code + patch_info->ip.i;
4193 ia64_begin_bundle (code);
4195 ia64_patch (cfg->native_code + patch_info->ip.i, code.buf);
4197 /* Find a throw sequence for the same exception class */
4198 for (i = 0; i < nthrows; ++i)
4199 if (exc_classes [i] == exc_class)
4200 break;
4202 if (i < nthrows) {
4203 gint64 offset = exc_throw_end [i] - 16 - throw_ip;
4205 if (ia64_is_adds_imm (offset))
4206 ia64_adds_imm (code, cfg->arch.reg_out0 + 1, offset, IA64_R0);
4207 else
4208 ia64_movl (code, cfg->arch.reg_out0 + 1, offset);
4210 buf = code.buf + code.nins;
4211 ia64_br_cond_pred (code, 0, 0);
4212 ia64_begin_bundle (code);
4213 ia64_patch (buf, exc_throw_start [i]);
4215 patch_info->type = MONO_PATCH_INFO_NONE;
4216 }
4217 else {
4218 /* Arg1 */
4219 buf = code.buf;
4220 ia64_movl (code, cfg->arch.reg_out0 + 1, 0);
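/* The 0 is a placeholder for the throw-site offset; it is patched
further down once the end of this call sequence is known. */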
4222 ia64_begin_bundle (code);
4224 if (nthrows < 16) {
4225 exc_classes [nthrows] = exc_class;
4226 exc_throw_start [nthrows] = code.buf;
4227 }
4229 /* Arg2 */
4230 if (ia64_is_adds_imm (exc_token_index))
4231 ia64_adds_imm (code, cfg->arch.reg_out0 + 0, exc_token_index, IA64_R0);
4232 else
4233 ia64_movl (code, cfg->arch.reg_out0 + 0, exc_token_index);
4235 patch_info->data.name = "mono_arch_throw_corlib_exception";
4236 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4237 patch_info->ip.i = code.buf + code.nins - cfg->native_code;
4239 /* Indirect call */
4240 ia64_movl (code, GP_SCRATCH_REG, 0);
4241 ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
4242 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
4243 ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
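/* An ia64 function pointer is a two-word descriptor: the first word
(loaded into b6 above) is the entry point, the second is the gp. */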
4245 ia64_br_call_reg (code, IA64_B0, IA64_B6);
4247 /* Patch up the throw offset */
4248 ia64_begin_bundle (code);
4250 ia64_patch (buf, (gpointer)(code.buf - 16 - throw_ip));
4252 if (nthrows < 16) {
4253 exc_throw_end [nthrows] = code.buf;
4254 nthrows ++;
4255 }
4256 }
4258 empty = FALSE;
4259 break;
4260 }
4261 default:
4262 break;
4263 }
4264 }
4266 if (!empty)
4267 /* The unwinder needs this to work */
4268 ia64_break_i (code, 0);
4270 ia64_codegen_close (code);
4272 /* FIXME: */
4273 //r_exceptions = mono_ia64_create_unwind_region (&code);
4274 //cfg->arch.r_epilog = r_exceptions;
4276 cfg->code_len = code.buf - cfg->native_code;
4278 g_assert (cfg->code_len < cfg->code_size);
4281 void*
4282 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4284 Ia64CodegenState code;
4285 CallInfo *cinfo = NULL;
4286 MonoMethodSignature *sig;
4287 MonoInst *ins;
4288 int i, n, stack_area = 0;
4290 ia64_codegen_init (code, p);
4292 /* Keep this in sync with mono_arch_get_argument_info */
4294 if (enable_arguments) {
4295 /* Allocate a new area on the stack and save arguments there */
4296 sig = mono_method_signature (cfg->method);
4298 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
4300 n = sig->param_count + sig->hasthis;
4302 stack_area = ALIGN_TO (n * 8, 16);
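/* One 8-byte slot per argument, rounded up to the 16-byte stack
alignment required by the ABI. */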
4304 if (n) {
4305 ia64_movl (code, GP_SCRATCH_REG, stack_area);
4307 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
4309 /* FIXME: Allocate out registers */
4311 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_SP);
4313 /* Required by the ABI */
4314 ia64_adds_imm (code, IA64_SP, -16, IA64_SP);
4316 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
4317 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4319 /* Save arguments to the stack */
4320 for (i = 0; i < n; ++i) {
4321 ins = cfg->args [i];
4323 if (ins->opcode == OP_REGVAR) {
4324 ia64_movl (code, GP_SCRATCH_REG, (i * 8));
4325 ia64_add (code, GP_SCRATCH_REG, cfg->arch.reg_out0 + 1, GP_SCRATCH_REG);
4326 ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
4327 }
4328 else {
4329 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
4330 ia64_add (code, GP_SCRATCH_REG, ins->inst_basereg, GP_SCRATCH_REG);
4331 ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
4332 ia64_movl (code, GP_SCRATCH_REG, (i * 8));
4333 ia64_add (code, GP_SCRATCH_REG, cfg->arch.reg_out0 + 1, GP_SCRATCH_REG);
4334 ia64_st8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG2);
4335 }
4336 }
4337 }
4338 else
4339 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_R0);
4340 }
4341 else
4342 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_R0);
4344 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
4345 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4347 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
4349 if (enable_arguments && stack_area) {
4350 ia64_movl (code, GP_SCRATCH_REG, stack_area);
4352 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
4354 ia64_adds_imm (code, IA64_SP, 16, IA64_SP);
4355 }
4357 ia64_codegen_close (code);
4359 return code.buf;
4362 void*
4363 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
4365 Ia64CodegenState code;
4366 CallInfo *cinfo = NULL;
4367 MonoMethod *method = cfg->method;
4368 MonoMethodSignature *sig = mono_method_signature (cfg->method);
4370 ia64_codegen_init (code, p);
4372 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
4374 /* Save return value + pass it to func */
4375 switch (cinfo->ret.storage) {
4376 case ArgNone:
4377 break;
4378 case ArgInIReg:
4379 ia64_mov (code, cfg->arch.reg_saved_return_val, cinfo->ret.reg);
4380 ia64_mov (code, cfg->arch.reg_out0 + 1, cinfo->ret.reg);
4381 break;
4382 case ArgInFloatReg:
4383 ia64_adds_imm (code, IA64_SP, -16, IA64_SP);
4384 ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
4385 ia64_stfd_hint (code, GP_SCRATCH_REG, cinfo->ret.reg, 0);
4386 ia64_fmov (code, 8 + 1, cinfo->ret.reg);
4387 break;
4388 case ArgValuetypeAddrInIReg:
4389 ia64_mov (code, cfg->arch.reg_out0 + 1, cfg->arch.reg_in0 + cinfo->ret.reg);
4390 break;
4391 case ArgAggregate:
4392 NOT_IMPLEMENTED;
4393 break;
4394 default:
4395 break;
4396 }
4398 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, method);
4399 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4400 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
4402 /* Restore return value */
4403 switch (cinfo->ret.storage) {
4404 case ArgNone:
4405 break;
4406 case ArgInIReg:
4407 ia64_mov (code, cinfo->ret.reg, cfg->arch.reg_saved_return_val);
4408 break;
4409 case ArgInFloatReg:
4410 ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
4411 ia64_ldfd (code, cinfo->ret.reg, GP_SCRATCH_REG);
4412 break;
4413 case ArgValuetypeAddrInIReg:
4414 break;
4415 case ArgAggregate:
4416 break;
4417 default:
4418 break;
4419 }
4421 ia64_codegen_close (code);
4423 return code.buf;
4426 void
4427 mono_arch_save_unwind_info (MonoCompile *cfg)
4429 unw_dyn_info_t *di;
4431 /* FIXME: Unregister this for dynamic methods */
4433 di = g_malloc0 (sizeof (unw_dyn_info_t));
4434 di->start_ip = (unw_word_t) cfg->native_code;
4435 di->end_ip = (unw_word_t) cfg->native_code + cfg->code_len;
4436 di->gp = 0;
4437 di->format = UNW_INFO_FORMAT_DYNAMIC;
4438 di->u.pi.name_ptr = (unw_word_t)mono_method_full_name (cfg->method, TRUE);
4439 di->u.pi.regions = cfg->arch.r_pro;
4441 _U_dyn_register (di);
4445 unw_dyn_region_info_t *region = di->u.pi.regions;
4447 printf ("Unwind info for method %s:\n", mono_method_full_name (cfg->method, TRUE));
4448 while (region) {
4449 printf (" [Region: %d]\n", region->insn_count);
4450 region = region->next;
4456 void
4457 mono_arch_flush_icache (guint8 *code, gint size)
4459 guint8* p = (guint8*)((guint64)code & ~(0x3f));
4460 guint8* end = (guint8*)((guint64)code + size);
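/* Start on a 64-byte boundary so whole cache lines are flushed;
fc/fc.i operate at cache-line granularity. */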
4462 #ifdef __INTEL_COMPILER
4463 /* icc doesn't define an fc.i intrinsic, but fc==fc.i on Itanium 2 */
4464 while (p < end) {
4465 __fc ((guint64)p);
4466 p += 32;
4467 }
4468 #else
4469 while (p < end) {
4470 __asm__ __volatile__ ("fc.i %0"::"r"(p));
4471 /* FIXME: This could be increased to 128 on some CPUs */
4472 p += 32;
4473 }
4474 #endif
4477 void
4478 mono_arch_flush_register_windows (void)
4480 /* Not needed because of libunwind */
4483 gboolean
4484 mono_arch_is_inst_imm (gint64 imm)
4486 /* The lowering pass will take care of it */
4488 return TRUE;
4491 /*
4492 * Determine whether the trap whose info is in SIGINFO is caused by
4493 * integer overflow.
4494 */
4495 gboolean
4496 mono_arch_is_int_overflow (void *sigctx, void *info)
4498 /* Division is emulated with explicit overflow checks */
4499 return FALSE;
4502 guint32
4503 mono_arch_get_patch_offset (guint8 *code)
4505 NOT_IMPLEMENTED;
4507 return 0;
4510 gpointer*
4511 mono_arch_get_delegate_method_ptr_addr (guint8* code, mgreg_t *regs)
4513 NOT_IMPLEMENTED;
4515 return NULL;
4518 void
4519 mono_arch_finish_init (void)
4523 void
4524 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4528 #ifdef MONO_ARCH_HAVE_IMT
4530 /*
4531 * LOCKING: called with the domain lock held
4532 */
4533 gpointer
4534 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4535 gpointer fail_tramp)
4537 int i;
4538 int size = 0;
4539 guint8 *start, *buf;
4540 Ia64CodegenState code;
4542 size = count * 256;
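/* Generous upper bound of 256 bytes of code per IMT entry; the
g_assert after code generation checks that it was enough. */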
4543 buf = g_malloc0 (size);
4544 ia64_codegen_init (code, buf);
4546 /* IA64_R9 contains the IMT method */
4548 for (i = 0; i < count; ++i) {
4549 MonoIMTCheckItem *item = imt_entries [i];
4550 ia64_begin_bundle (code);
4551 item->code_target = (guint8*)code.buf + code.nins;
4552 if (item->is_equals) {
4553 gboolean fail_case = !item->check_target_idx && fail_tramp;
4555 if (item->check_target_idx || fail_case) {
4556 if (!item->compare_done || fail_case) {
4557 ia64_movl (code, GP_SCRATCH_REG, item->key);
4558 ia64_cmp_eq (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
4559 }
4560 item->jmp_code = (guint8*)code.buf + code.nins;
4561 ia64_br_cond_pred (code, 7, 0);
4563 if (item->has_target_code) {
4564 ia64_movl (code, GP_SCRATCH_REG, item->value.target_code);
4565 } else {
4566 ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
4567 ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
4568 }
4569 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
4570 ia64_br_cond_reg (code, IA64_B6);
4572 if (fail_case) {
4573 ia64_begin_bundle (code);
4574 ia64_patch (item->jmp_code, (guint8*)code.buf + code.nins);
4575 ia64_movl (code, GP_SCRATCH_REG, fail_tramp);
4576 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
4577 ia64_br_cond_reg (code, IA64_B6);
4578 item->jmp_code = NULL;
4579 }
4580 } else {
4581 /* enable the commented code to assert on wrong method */
4582 #if ENABLE_WRONG_METHOD_CHECK
4583 g_assert_not_reached ();
4584 #endif
4585 ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
4586 ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
4587 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
4588 ia64_br_cond_reg (code, IA64_B6);
4589 #if ENABLE_WRONG_METHOD_CHECK
4590 g_assert_not_reached ();
4591 #endif
4592 }
4593 } else {
4594 ia64_movl (code, GP_SCRATCH_REG, item->key);
4595 ia64_cmp_geu (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
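/* Non-leaf entry: branch when the IMT method in IA64_R9 is >= this
key, giving a binary-search style walk over the sorted entries. */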
4596 item->jmp_code = (guint8*)code.buf + code.nins;
4597 ia64_br_cond_pred (code, 6, 0);
4598 }
4599 }
4600 /* patch the branches to get to the target items */
4601 for (i = 0; i < count; ++i) {
4602 MonoIMTCheckItem *item = imt_entries [i];
4603 if (item->jmp_code) {
4604 if (item->check_target_idx) {
4605 ia64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4606 }
4607 }
4608 }
4610 ia64_codegen_close (code);
4611 g_assert (code.buf - buf <= size);
4613 size = code.buf - buf;
4614 if (fail_tramp) {
4615 start = mono_method_alloc_generic_virtual_thunk (domain, size + 16);
4616 start = (gpointer)ALIGN_TO (start, 16);
4617 } else {
4618 start = mono_domain_code_reserve (domain, size);
4619 }
4620 memcpy (start, buf, size);
4622 mono_arch_flush_icache (start, size);
4624 mono_stats.imt_thunks_size += size;
4626 return start;
4629 MonoMethod*
4630 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
4632 return (MonoMethod*)regs [IA64_R9];
4635 void
4636 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
4638 /* Done by the implementation of the CALL_MEMBASE opcodes */
4640 #endif
4642 gpointer
4643 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
4645 return (gpointer)regs [IA64_R10];
4648 gpointer
4649 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
4651 return NULL;
4654 MonoInst*
4655 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4657 MonoInst *ins = NULL;
4659 if (cmethod->klass->image == mono_defaults.corlib &&
4660 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4661 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4663 /*
4664 * We don't use the generic version in mini_emit_inst_for_method (), since
4665 * ia64 has atomic_add_imm opcodes.
4666 */
4667 if (strcmp (cmethod->name, "Increment") == 0) {
4668 guint32 opcode;
4670 if (fsig->params [0]->type == MONO_TYPE_I4)
4671 opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
4672 else if (fsig->params [0]->type == MONO_TYPE_I8)
4673 opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
4674 else
4675 g_assert_not_reached ();
4676 MONO_INST_NEW (cfg, ins, opcode);
4677 ins->dreg = mono_alloc_preg (cfg);
4678 ins->inst_imm = 1;
4679 ins->inst_basereg = args [0]->dreg;
4680 ins->inst_offset = 0;
4681 MONO_ADD_INS (cfg->cbb, ins);
4682 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4683 guint32 opcode;
4685 if (fsig->params [0]->type == MONO_TYPE_I4)
4686 opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
4687 else if (fsig->params [0]->type == MONO_TYPE_I8)
4688 opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
4689 else
4690 g_assert_not_reached ();
4691 MONO_INST_NEW (cfg, ins, opcode);
4692 ins->dreg = mono_alloc_preg (cfg);
4693 ins->inst_imm = -1;
4694 ins->inst_basereg = args [0]->dreg;
4695 ins->inst_offset = 0;
4696 MONO_ADD_INS (cfg->cbb, ins);
4697 } else if (strcmp (cmethod->name, "Add") == 0) {
4698 guint32 opcode;
4699 gboolean is_imm = FALSE;
4700 gint64 imm = 0;
4702 if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
4703 imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;
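/* These are exactly the addends accepted by the ia64 fetchadd
instruction; anything else must use the non-immediate opcode. */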
4705 is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);
4706 }
4708 if (is_imm) {
4709 if (fsig->params [0]->type == MONO_TYPE_I4)
4710 opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
4711 else if (fsig->params [0]->type == MONO_TYPE_I8)
4712 opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
4713 else
4714 g_assert_not_reached ();
4716 MONO_INST_NEW (cfg, ins, opcode);
4717 ins->dreg = mono_alloc_ireg (cfg);
4718 ins->inst_basereg = args [0]->dreg;
4719 ins->inst_offset = 0;
4720 ins->inst_imm = imm;
4721 ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
4722 } else {
4723 if (fsig->params [0]->type == MONO_TYPE_I4)
4724 opcode = OP_ATOMIC_ADD_NEW_I4;
4725 else if (fsig->params [0]->type == MONO_TYPE_I8)
4726 opcode = OP_ATOMIC_ADD_NEW_I8;
4727 else
4728 g_assert_not_reached ();
4730 MONO_INST_NEW (cfg, ins, opcode);
4731 ins->dreg = mono_alloc_ireg (cfg);
4732 ins->inst_basereg = args [0]->dreg;
4733 ins->inst_offset = 0;
4734 ins->sreg2 = args [1]->dreg;
4735 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4736 }
4737 MONO_ADD_INS (cfg->cbb, ins);
4738 }
4739 }
4741 return ins;
4744 gboolean
4745 mono_arch_print_tree (MonoInst *tree, int arity)
4747 return 0;
4750 MonoInst*
4751 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4753 return mono_get_domain_intrinsic (cfg);
4756 mgreg_t
4757 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4759 /* FIXME: implement */
4760 g_assert_not_reached ();