/*
 * mini-ia64.c: IA64 backend for the Mono code generator
 *
 * Authors:
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <sys/mman.h>

#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>

#include "trace.h"
#include "mini-ia64.h"
#include "cpu-ia64.h"
#include "jit-icalls.h"
#include "ir-emit.h"

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
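
/*
 * Illustration (editor's addition, not part of the original source): how the
 * two helper macros above behave for a few sample values.
 *
 *   ALIGN_TO (13, 8)          == 16  -- rounds up to the next multiple of 8
 *   ALIGN_TO (16, 8)          == 16  -- already-aligned values are unchanged
 *   IS_IMM32 (0xffffffffULL)  == 1   -- fits in the low 32 bits
 *   IS_IMM32 (0x100000000ULL) == 0   -- bit 32 is set, so it does not fit
 */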

/*
 * IA64 register usage:
 * - local registers are used for global register allocation
 * - r8..r11, r14..r30 are used for local register allocation
 * - r31 is a scratch register used within opcode implementations
 * - FIXME: Use out registers as well
 * - the first three locals are used for saving ar.pfst, b0, and sp
 * - compare instructions always set p6 and p7
 */

/*
 * There are a lot of places where generated code is disassembled/patched.
 * The automatic bundling of instructions done by the code generation macros
 * could complicate things, so it is best to call
 * ia64_codegen_set_one_ins_per_bundle () at those places.
 */

#define ARGS_OFFSET 16

#define GP_SCRATCH_REG 31
#define GP_SCRATCH_REG2 30
#define FP_SCRATCH_REG 32
#define FP_SCRATCH_REG2 33

#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)

static const char* gregs [] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
	"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
	"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29",
	"r30", "r31", "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
	"r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", "r48", "r49",
	"r50", "r51", "r52", "r53", "r54", "r55", "r56", "r57", "r58", "r59",
	"r60", "r61", "r62", "r63", "r64", "r65", "r66", "r67", "r68", "r69",
	"r70", "r71", "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
	"r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87", "r88", "r89",
	"r90", "r91", "r92", "r93", "r94", "r95", "r96", "r97", "r98", "r99",
	"r100", "r101", "r102", "r103", "r104", "r105", "r106", "r107", "r108", "r109",
	"r110", "r111", "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
	"r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127"
};

const char*
mono_arch_regname (int reg)
{
	if (reg < 128)
		return gregs [reg];
	else
		return "unknown";
}

static const char* fregs [] = {
	"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9",
	"f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19",
	"f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29",
	"f30", "f31", "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
	"f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47", "f48", "f49",
	"f50", "f51", "f52", "f53", "f54", "f55", "f56", "f57", "f58", "f59",
	"f60", "f61", "f62", "f63", "f64", "f65", "f66", "f67", "f68", "f69",
	"f70", "f71", "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
	"f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87", "f88", "f89",
	"f90", "f91", "f92", "f93", "f94", "f95", "f96", "f97", "f98", "f99",
	"f100", "f101", "f102", "f103", "f104", "f105", "f106", "f107", "f108", "f109",
	"f110", "f111", "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
	"f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127"
};

const char*
mono_arch_fregname (int reg)
{
	if (reg < 128)
		return fregs [reg];
	else
		return "unknown";
}

G_GNUC_UNUSED static void
break_count (void)
{
}

G_GNUC_UNUSED static gboolean
debug_count (void)
{
	static int count = 0;
	count ++;

	if (count == atoi (getenv ("COUNT"))) {
		break_count ();
	}

	if (count > atoi (getenv ("COUNT"))) {
		return FALSE;
	}

	return TRUE;
}

static gboolean
debug_ins_sched (void)
{
#if 0
	return debug_count ();
#else
	return TRUE;
#endif
}

static gboolean
debug_omit_fp (void)
{
#if 0
	return debug_count ();
#else
	return TRUE;
#endif
}

static void
ia64_patch (unsigned char* code, gpointer target);

typedef enum {
	ArgInIReg,
	ArgInFloatReg,
	ArgInFloatRegR4,
	ArgOnStack,
	ArgValuetypeAddrInIReg,
	ArgAggregate,
	ArgSingleHFA,
	ArgDoubleHFA,
	ArgNone
} ArgStorage;

typedef enum {
	AggregateNormal,
	AggregateSingleHFA,
	AggregateDoubleHFA
} AggregateType;

typedef struct {
	gint16 offset;
	gint8  reg;
	ArgStorage storage;

	/* Only if storage == ArgAggregate */
	int nregs, nslots;
	AggregateType atype;
} ArgInfo;

typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 reg_usage;
	guint32 freg_usage;
	gboolean need_stack_align;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;
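
/*
 * Illustration (editor's addition, not part of the original source): CallInfo
 * uses the classic C89 trailing-array idiom: args is declared with one
 * element and the struct is over-allocated so that args can hold one ArgInfo
 * per actual argument:
 *
 *   int n = sig->hasthis + sig->param_count;
 *   CallInfo *ci = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
 *
 * get_call_info () below allocates it in exactly this way.
 */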

#define DEBUG(a) if (cfg->verbose_level > 1) a

#define PARAM_REGS 8

static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = ArgInIReg;
		ainfo->reg = *gr;
		*(gr) += 1;
	}
}

#define FLOAT_PARAM_REGS 8

static void inline
add_float (guint32 *gr, guint32 *fr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = is_double ? ArgInFloatReg : ArgInFloatRegR4;
		ainfo->reg = 8 + *fr;
		(*fr) += 1;
		(*gr) += 1;
	}
}
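
/*
 * Illustration (editor's addition): for a hypothetical signature
 * void m (long a, double b, long c), the helpers above classify the
 * arguments as
 *
 *   add_general: a -> ArgInIReg,     reg = 0 (first out reg), gr = 1
 *   add_float:   b -> ArgInFloatReg, reg = f8,                fr = 1, gr = 2
 *   add_general: c -> ArgInIReg,     reg = 2,                 gr = 3
 *
 * A float argument still consumes a general parameter slot, which is why
 * add_float () tests *gr against PARAM_REGS rather than *fr.
 */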

static void
add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
	       gboolean is_return,
	       guint32 *gr, guint32 *fr, guint32 *stack_size)
{
	guint32 size, i;
	MonoClass *klass;
	MonoMarshalType *info;
	gboolean is_hfa = TRUE;
	guint32 hfa_type = 0;

	klass = mono_class_from_mono_type (type);
	if (type->type == MONO_TYPE_TYPEDBYREF)
		size = 3 * sizeof (gpointer);
	else if (sig->pinvoke)
		size = mono_type_native_stack_size (&klass->byval_arg, NULL);
	else
		size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);

	if (!sig->pinvoke || (size == 0)) {
		/* Always pass in memory */
		ainfo->offset = *stack_size;
		*stack_size += ALIGN_TO (size, 8);
		ainfo->storage = ArgOnStack;

		return;
	}

	/* Determine whether it is an HFA (Homogeneous Floating Point Aggregate) */
	info = mono_marshal_load_type_info (klass);
	g_assert (info);
	for (i = 0; i < info->num_fields; ++i) {
		guint32 ftype = info->fields [i].field->type->type;
		if (!(info->fields [i].field->type->byref) &&
		    ((ftype == MONO_TYPE_R4) || (ftype == MONO_TYPE_R8))) {
			if (hfa_type == 0)
				hfa_type = ftype;
			else if (hfa_type != ftype)
				is_hfa = FALSE;
		}
		else
			is_hfa = FALSE;
	}
	if (hfa_type == 0)
		is_hfa = FALSE;

	ainfo->storage = ArgAggregate;
	ainfo->atype = AggregateNormal;

	if (is_hfa) {
		ainfo->atype = hfa_type == MONO_TYPE_R4 ? AggregateSingleHFA : AggregateDoubleHFA;
		if (is_return) {
			if (info->num_fields <= 8) {
				ainfo->reg = 8;
				ainfo->nregs = info->num_fields;
				ainfo->nslots = ainfo->nregs;
				return;
			}
			/* Fall through */
		}
		else {
			if ((*fr) + info->num_fields > 8)
				NOT_IMPLEMENTED;

			ainfo->reg = 8 + (*fr);
			ainfo->nregs = info->num_fields;
			ainfo->nslots = ainfo->nregs;
			(*fr) += info->num_fields;
			if (ainfo->atype == AggregateSingleHFA) {
				/*
				 * FIXME: Have to keep track of the parameter slot number, which is
				 * not the same as *gr.
				 */
				(*gr) += ALIGN_TO (info->num_fields, 2) / 2;
			} else {
				(*gr) += info->num_fields;
			}
			return;
		}
	}

	/* This also handles returning of TypedByRef used by some icalls */
	if (is_return) {
		if (size <= 32) {
			ainfo->reg = IA64_R8;
			ainfo->nregs = (size + 7) / 8;
			ainfo->nslots = ainfo->nregs;
			return;
		}
		NOT_IMPLEMENTED;
	}

	ainfo->reg = (*gr);
	ainfo->offset = *stack_size;
	ainfo->nslots = (size + 7) / 8;

	if (((*gr) + ainfo->nslots) <= 8) {
		/* Fits entirely in registers */
		ainfo->nregs = ainfo->nslots;
		(*gr) += ainfo->nregs;
		return;
	}

	ainfo->nregs = 8 - (*gr);
	(*gr) = 8;
	(*stack_size) += (ainfo->nslots - ainfo->nregs) * 8;
}
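
/*
 * Illustration (editor's addition): under the Itanium ABI a Homogeneous
 * Floating Point Aggregate is a struct whose fields all have the same
 * floating point type, e.g.
 *
 *   struct Vec3 { float x, y, z; };    -- single-precision HFA, 3 float regs
 *   struct Pair { double a, b; };      -- double-precision HFA, 2 float regs
 *   struct Mix  { float x; long i; };  -- not an HFA, stays AggregateNormal
 *
 * which is exactly the property the field loop in add_valuetype () tests for.
 */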

/*
 * get_call_info:
 *
 * Obtain information about a call according to the calling convention.
 * For IA64, see the "Itanium Software Conventions and Runtime Architecture
 * Guide" document for more information.
 */
static CallInfo*
get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
	guint32 i, gr, fr;
	MonoType *ret_type;
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	CallInfo *cinfo;
	MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;

	if (mp)
		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	else
		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	gr = 0;
	fr = 0;

	/* return value */
	{
		ret_type = mono_type_get_underlying_type (sig->ret);
		ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
		switch (ret_type->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_STRING:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = IA64_R8;
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = IA64_R8;
			break;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			cinfo->ret.storage = ArgInFloatReg;
			cinfo->ret.reg = 8;
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ret_type)) {
				cinfo->ret.storage = ArgInIReg;
				cinfo->ret.reg = IA64_R8;
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_TYPEDBYREF: {
			guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

			if (sig->ret->byref) {
				/* This seems to happen with ldfld wrappers */
				cinfo->ret.storage = ArgInIReg;
			} else {
				add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
				if (cinfo->ret.storage == ArgOnStack)
					/* The caller passes the address where the value is stored */
					add_general (&gr, &stack_size, &cinfo->ret);
				if (cinfo->ret.storage == ArgInIReg)
					cinfo->ret.storage = ArgValuetypeAddrInIReg;
			}
			break;
		}
		case MONO_TYPE_VOID:
			cinfo->ret.storage = ArgNone;
			break;
		default:
			g_error ("Can't handle as return value 0x%x", sig->ret->type);
		}
	}

	/* this */
	if (sig->hasthis)
		add_general (&gr, &stack_size, cinfo->args + 0);

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	for (i = 0; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
		MonoType *ptype;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* We always pass the sig cookie on the stack for simplicity */
			/*
			 * Prevent implicit arguments + the sig cookie from being passed
			 * in registers.
			 */
			gr = PARAM_REGS;
			fr = FLOAT_PARAM_REGS;

			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie);
		}

		if (sig->params [i]->byref) {
			add_general (&gr, &stack_size, ainfo);
			continue;
		}
		ptype = mono_type_get_underlying_type (sig->params [i]);
		ptype = mini_get_basic_type_from_generic (gsctx, ptype);
		switch (ptype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ptype)) {
				add_general (&gr, &stack_size, ainfo);
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_TYPEDBYREF:
			/* FIXME: */
			/* We always pass valuetypes on the stack */
			add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_R4:
			add_float (&gr, &fr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_R8:
			add_float (&gr, &fr, &stack_size, ainfo, TRUE);
			break;
		default:
			g_assert_not_reached ();
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	cinfo->stack_usage = stack_size;
	cinfo->reg_usage = gr;
	cinfo->freg_usage = fr;
	return cinfo;
}
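
/*
 * Usage sketch (editor's addition): a typical caller does
 *
 *   CallInfo *cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
 *   if (cinfo->ret.storage == ArgInIReg)
 *           ... the return value arrives in r8 ...
 *
 * When a mempool is passed, the result is pool-allocated and must not be
 * freed; mono_arch_get_argument_info () below passes NULL and g_free ()s
 * the result instead.
 */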

/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k;
	CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
	guint32 args_size = cinfo->stack_usage;

	/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
	if (csig->hasthis) {
		arg_info [0].offset = 0;
	}

	for (k = 0; k < param_count; k++) {
		arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
		/* FIXME: */
		arg_info [k + 1].size = 0;
	}

	g_free (cinfo);

	return args_size;
}

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
}

/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
}

/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}

/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
	*exclude_mask = 0;

	return 0;
}

GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoInst *ins = cfg->args [i];

		ArgInfo *ainfo = &cinfo->args [i];

		if (ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT))
			continue;

		if (ainfo->storage == ArgInIReg) {
			/* The input registers are non-volatile */
			ins->opcode = OP_REGVAR;
			ins->dreg = 32 + ainfo->reg;
		}
	}

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
		    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = g_list_prepend (vars, vmv);
		}
	}

	vars = mono_varlist_sort (cfg, vars, 0);

	return vars;
}

static void
mono_ia64_alloc_stacked_registers (MonoCompile *cfg)
{
	CallInfo *cinfo;
	guint32 reserved_regs;
	MonoMethodHeader *header;

	if (cfg->arch.reg_local0 > 0)
		/* Already done */
		return;

	cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (cfg->method), FALSE);

	header = cfg->header;

	/* Some registers are reserved for use by the prolog/epilog */
	reserved_regs = header->num_clauses ? 4 : 3;

	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
	    (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
		/* One register is needed by instrument_epilog to save the return value */
		reserved_regs ++;
		if (cinfo->reg_usage < 2)
			/* Number of arguments passed to function call in instrument_prolog */
			cinfo->reg_usage = 2;
	}

	cfg->arch.reg_in0 = 32;
	cfg->arch.reg_local0 = cfg->arch.reg_in0 + cinfo->reg_usage + reserved_regs;
	cfg->arch.reg_out0 = cfg->arch.reg_local0 + 16;

	cfg->arch.reg_saved_ar_pfs = cfg->arch.reg_local0 - 1;
	cfg->arch.reg_saved_b0 = cfg->arch.reg_local0 - 2;
	cfg->arch.reg_fp = cfg->arch.reg_local0 - 3;

	/*
	 * Frames without handlers save sp to fp, frames with handlers save it into
	 * a dedicated register.
	 */
	if (header->num_clauses)
		cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 4;
	else
		cfg->arch.reg_saved_sp = cfg->arch.reg_fp;

	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
	    (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
		cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - reserved_regs;
	}

	/*
	 * Need to allocate at least 2 out registers for use by OP_THROW / the system
	 * exception throwing code.
	 */
	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, 2);
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;
	int i;

	mono_ia64_alloc_stacked_registers (cfg);

	for (i = cfg->arch.reg_local0; i < cfg->arch.reg_out0; ++i) {
		/* FIXME: regmask */
		g_assert (i < 64);
		regs = g_list_prepend (regs, (gpointer)(gssize)(i));
	}

	return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	/* FIXME: Increase costs linearly to avoid using all local registers */

	return 0;
}

void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *inst;
	int i, offset;
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/*
	 * Determine whether the frame pointer can be eliminated.
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;

	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;

	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == ArgOnStack) {
			/*
			 * The stack offset can only be determined when the frame
			 * size is known.
			 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	mono_ia64_alloc_stacked_registers (cfg);

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are never passed or returned in registers.
	 */

	if (cfg->arch.omit_fp) {
		cfg->flags |= MONO_CFG_HAS_SPILLUP;
		cfg->frame_reg = IA64_SP;
		offset = ARGS_OFFSET;
	}
	else {
		/* Locals are allocated backwards from %fp */
		cfg->frame_reg = cfg->arch.reg_fp;
		offset = 0;
	}

	if (cfg->method->save_lmf) {
		/* No LMF on IA64 */
	}

	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgInFloatReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgValuetypeAddrInIReg:
			cfg->vret_addr->opcode = OP_REGVAR;
			cfg->vret_addr->dreg = cfg->arch.reg_in0 + cinfo->ret.reg;
			break;
		case ArgAggregate:
			/* Allocate a local to hold the result, the epilog will copy it to the correct place */
			if (cfg->arch.omit_fp)
				g_assert_not_reached ();
			offset = ALIGN_TO (offset, 8);
			offset += cinfo->ret.nslots * 8;
			cfg->ret->opcode = OP_REGOFFSET;
			cfg->ret->inst_basereg = cfg->frame_reg;
			cfg->ret->inst_offset = - offset;
			break;
		default:
			g_assert_not_reached ();
		}
		cfg->ret->dreg = cfg->ret->inst_c0;
	}

	/* Allocate locals */
	offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
	if (locals_stack_align) {
		offset = ALIGN_TO (offset, locals_stack_align);
	}
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		if (offsets [i] != -1) {
			MonoInst *inst = cfg->varinfo [i];
			inst->opcode = OP_REGOFFSET;
			inst->inst_basereg = cfg->frame_reg;
			if (cfg->arch.omit_fp)
				inst->inst_offset = (offset + offsets [i]);
			else
				inst->inst_offset = - (offset + offsets [i]);
			// printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
		}
	}
	offset += locals_stack_size;

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
		if (cfg->arch.omit_fp)
			g_assert_not_reached ();
		g_assert (cinfo->sig_cookie.storage == ArgOnStack);
		cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = cfg->args [i];
		if (inst->opcode != OP_REGVAR) {
			ArgInfo *ainfo = &cinfo->args [i];
			gboolean inreg = TRUE;
			MonoType *arg_type;

			if (sig->hasthis && (i == 0))
				arg_type = &mono_defaults.object_class->byval_arg;
			else
				arg_type = sig->params [i - sig->hasthis];

			/* FIXME: VOLATILE is only set if the liveness pass runs */
			if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
				inreg = FALSE;

			inst->opcode = OP_REGOFFSET;

			switch (ainfo->storage) {
			case ArgInIReg:
				inst->opcode = OP_REGVAR;
				inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
				break;
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				/*
				 * Since float regs are volatile, we save the arguments to
				 * the stack in the prolog.
				 */
				inreg = FALSE;
				break;
			case ArgOnStack:
				if (cfg->arch.omit_fp)
					g_assert_not_reached ();
				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = cfg->frame_reg;
				inst->inst_offset = ARGS_OFFSET + ainfo->offset;
				break;
			case ArgAggregate:
				inreg = FALSE;
				break;
			default:
				NOT_IMPLEMENTED;
			}

			if (!inreg && (ainfo->storage != ArgOnStack)) {
				guint32 size = 0;

				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = cfg->frame_reg;
				/* These arguments are saved to the stack in the prolog */
				switch (ainfo->storage) {
				case ArgAggregate:
					if (ainfo->atype == AggregateSingleHFA)
						size = ainfo->nslots * 4;
					else
						size = ainfo->nslots * 8;
					break;
				default:
					size = sizeof (gpointer);
					break;
				}

				offset = ALIGN_TO (offset, sizeof (gpointer));

				if (cfg->arch.omit_fp) {
					inst->inst_offset = offset;
					offset += size;
				} else {
					offset += size;
					inst->inst_offset = - offset;
				}
			}
		}
	}

	/*
	 * FIXME: This doesn't work because some variables are allocated during local
	 * regalloc.
	 */
	/*
	if (cfg->arch.omit_fp && offset == 16)
		offset = 0;
	*/

	cfg->stack_offset = offset;
}

void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	if (cinfo->ret.storage == ArgAggregate)
		cfg->ret_var_is_local = TRUE;
	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}
}

static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
	MonoInst *arg;

	MONO_INST_NEW (cfg, arg, OP_NOP);
	arg->sreg1 = tree->dreg;

	switch (storage) {
	case ArgInIReg:
		arg->opcode = OP_MOVE;
		arg->dreg = mono_alloc_ireg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
		break;
	case ArgInFloatReg:
		arg->opcode = OP_FMOVE;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	case ArgInFloatRegR4:
		arg->opcode = OP_FCONV_TO_R4;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}

	MONO_ADD_INS (cfg->cbb, arg);
}

static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;

	/* Emit the signature cookie just before the implicit arguments */
	MonoInst *sig_arg;
	/* FIXME: Add support for signature tokens to AOT */
	cfg->disable_aot = TRUE;

	g_assert (cinfo->sig_cookie.storage == ArgOnStack);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
	sig_arg->dreg = mono_alloc_ireg (cfg);
	sig_arg->inst_p0 = tmp_sig;
	MONO_ADD_INS (cfg->cbb, sig_arg);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
}
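
/*
 * Illustration (editor's addition): for a vararg call whose fixed part ends
 * at sentinelpos, the duplicated signature stored at [sp + 16 + cookie
 * offset] describes only the variadic tail (param_count is reduced by
 * sentinelpos and the remaining params are shifted down), which is the
 * layout mono_ArgIterator_Setup expects to find.
 */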

void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in;
	MonoMethodSignature *sig;
	int i, n, stack_size;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	stack_size = 0;

	mono_ia64_alloc_stacked_registers (cfg);

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);

	if (cinfo->ret.storage == ArgAggregate) {
		MonoInst *vtarg;
		MonoInst *local;

		/*
		 * The valuetype is in registers after the call, and needs to be copied
		 * to the stack. Save the address to a local here, so the call
		 * instruction can access it.
		 */
		local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		local->flags |= MONO_INST_VOLATILE;
		cfg->arch.ret_var_addr_local = local;

		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
		vtarg->sreg1 = call->vret_var->dreg;
		vtarg->dreg = local->dreg;
		MONO_ADD_INS (cfg->cbb, vtarg);
	}

	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		add_outarg_reg (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
	}

	for (i = 0; i < n; ++i) {
		MonoType *arg_type;

		ainfo = cinfo->args + i;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
		}

		in = call->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
			guint32 align;
			guint32 size;

			if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
				size = sizeof (MonoTypedRef);
				align = sizeof (gpointer);
			}
			else if (sig->pinvoke)
				size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
			else {
				/*
				 * Other backends use mono_type_stack_size (), but that
				 * aligns the size to 8, which is larger than the size of
				 * the source, leading to reads of invalid memory if the
				 * source is at the end of address space.
				 */
				size = mono_class_value_size (in->klass, &align);
			}

			if (size > 0) {
				MonoInst *arg;

				MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
				arg->sreg1 = in->dreg;
				arg->klass = in->klass;
				arg->backend.size = size;
				arg->inst_p0 = call;
				arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
				memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));

				MONO_ADD_INS (cfg->cbb, arg);
			}
		}
		else {
			switch (ainfo->storage) {
			case ArgInIReg:
				add_outarg_reg (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
				break;
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
				break;
			case ArgOnStack:
				if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				else
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				break;
			default:
				g_assert_not_reached ();
			}
		}
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
		emit_sig_cookie (cfg, call, cinfo);
	}

	call->stack_usage = cinfo->stack_usage;
	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
}

void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int size = ins->backend.size;

	if (ainfo->storage == ArgAggregate) {
		MonoInst *load, *store;
		int i, slot;

		/*
		 * Part of the structure is passed in registers.
		 */
		for (i = 0; i < ainfo->nregs; ++i) {
			slot = ainfo->reg + i;

			if (ainfo->atype == AggregateSingleHFA) {
				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 4;
				load->dreg = mono_alloc_freg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
			} else if (ainfo->atype == AggregateDoubleHFA) {
				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 8;
				load->dreg = mono_alloc_freg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
			} else {
				MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 8;
				load->dreg = mono_alloc_ireg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
			}
			MONO_ADD_INS (cfg->cbb, load);
		}

		/*
		 * Part of the structure is passed on the stack.
		 */
		for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
			slot = ainfo->reg + i;

			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * sizeof (gpointer);
			load->dreg = mono_alloc_preg (cfg);
			MONO_ADD_INS (cfg->cbb, load);

			MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
			store->sreg1 = load->dreg;
			store->inst_destbasereg = IA64_SP;
			store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
			MONO_ADD_INS (cfg->cbb, store);
		}
	} else {
		mini_emit_memcpy (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 4);
	}
}

void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);

	switch (cinfo->ret.storage) {
	case ArgInIReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFloatReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
	}
}

void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;
	ins = bb->code;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_MOVE:
		case OP_FMOVE:
			/*
			 * Removes:
			 *
			 * OP_MOVE reg, reg
			 */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			/*
			 * Removes:
			 *
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			 */
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* remove unnecessary multiplication with 1 */
			if (ins->inst_imm == 1) {
				if (ins->dreg != ins->sreg1) {
					ins->opcode = OP_MOVE;
				} else {
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			}
			break;
		}

		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;
}

int cond_to_ia64_cmp [][3] = {
	{OP_IA64_CMP_EQ, OP_IA64_CMP4_EQ, OP_IA64_FCMP_EQ},
	{OP_IA64_CMP_NE, OP_IA64_CMP4_NE, OP_IA64_FCMP_NE},
	{OP_IA64_CMP_LE, OP_IA64_CMP4_LE, OP_IA64_FCMP_LE},
	{OP_IA64_CMP_GE, OP_IA64_CMP4_GE, OP_IA64_FCMP_GE},
	{OP_IA64_CMP_LT, OP_IA64_CMP4_LT, OP_IA64_FCMP_LT},
	{OP_IA64_CMP_GT, OP_IA64_CMP4_GT, OP_IA64_FCMP_GT},
	{OP_IA64_CMP_LE_UN, OP_IA64_CMP4_LE_UN, OP_IA64_FCMP_LE_UN},
	{OP_IA64_CMP_GE_UN, OP_IA64_CMP4_GE_UN, OP_IA64_FCMP_GE_UN},
	{OP_IA64_CMP_LT_UN, OP_IA64_CMP4_LT_UN, OP_IA64_FCMP_LT_UN},
	{OP_IA64_CMP_GT_UN, OP_IA64_CMP4_GT_UN, OP_IA64_FCMP_GT_UN}
};

static int
opcode_to_ia64_cmp (int opcode, int cmp_opcode)
{
	return cond_to_ia64_cmp [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}

int cond_to_ia64_cmp_imm [][3] = {
	{OP_IA64_CMP_EQ_IMM, OP_IA64_CMP4_EQ_IMM, 0},
	{OP_IA64_CMP_NE_IMM, OP_IA64_CMP4_NE_IMM, 0},
	{OP_IA64_CMP_GE_IMM, OP_IA64_CMP4_GE_IMM, 0},
	{OP_IA64_CMP_LE_IMM, OP_IA64_CMP4_LE_IMM, 0},
	{OP_IA64_CMP_GT_IMM, OP_IA64_CMP4_GT_IMM, 0},
	{OP_IA64_CMP_LT_IMM, OP_IA64_CMP4_LT_IMM, 0},
	{OP_IA64_CMP_GE_UN_IMM, OP_IA64_CMP4_GE_UN_IMM, 0},
	{OP_IA64_CMP_LE_UN_IMM, OP_IA64_CMP4_LE_UN_IMM, 0},
	{OP_IA64_CMP_GT_UN_IMM, OP_IA64_CMP4_GT_UN_IMM, 0},
	{OP_IA64_CMP_LT_UN_IMM, OP_IA64_CMP4_LT_UN_IMM, 0},
};

static int
opcode_to_ia64_cmp_imm (int opcode, int cmp_opcode)
{
	/* The condition needs to be reversed */
	return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}

#define NEW_INS(cfg,dest,op) do {	\
	(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));	\
	(dest)->opcode = (op);	\
	mono_bblock_insert_after_ins (bb, last_ins, (dest));	\
	last_ins = (dest);	\
} while (0)
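
/*
 * Usage sketch (editor's addition): materializing a too-large immediate into
 * a register with NEW_INS, as done repeatedly in mono_arch_lowering_pass ()
 * below:
 *
 *   NEW_INS (cfg, temp, OP_I8CONST);
 *   temp->inst_c0 = ins->inst_imm;
 *   temp->dreg = mono_alloc_ireg (cfg);
 *   ins->sreg2 = temp->dreg;
 *
 * The macro inserts the new instruction after last_ins, so the constant is
 * emitted before the instruction that consumes it.
 */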

/*
 * mono_arch_lowering_pass:
 *
 * Converts complex opcodes into simpler ones so that each IR instruction
 * corresponds to one machine instruction.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;
	ins = bb->code;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
		case OP_STOREI8_MEMBASE_IMM:
		case OP_STORE_MEMBASE_IMM:
			/* There are no store_membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				temp2 = NULL;
			} else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);

				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			switch (ins->opcode) {
			case OP_STOREI1_MEMBASE_IMM:
				ins->opcode = OP_STOREI1_MEMBASE_REG;
				break;
			case OP_STOREI2_MEMBASE_IMM:
				ins->opcode = OP_STOREI2_MEMBASE_REG;
				break;
			case OP_STOREI4_MEMBASE_IMM:
				ins->opcode = OP_STOREI4_MEMBASE_REG;
				break;
			case OP_STOREI8_MEMBASE_IMM:
			case OP_STORE_MEMBASE_IMM:
				ins->opcode = OP_STOREI8_MEMBASE_REG;
				break;
			default:
				g_assert_not_reached ();
			}

			if (ins->inst_imm == 0)
				ins->sreg1 = IA64_R0;
			else {
				NEW_INS (cfg, temp3, OP_I8CONST);
				temp3->inst_c0 = ins->inst_imm;
				temp3->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = temp3->dreg;
			}

			ins->inst_offset = 0;
			if (temp2)
				ins->inst_destbasereg = temp2->dreg;
			break;
		case OP_STOREI1_MEMBASE_REG:
		case OP_STOREI2_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI8_MEMBASE_REG:
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
		case OP_STORE_MEMBASE_REG:
			/* There are no store_membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				break;
			}
			else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			ins->inst_offset = 0;
			ins->inst_destbasereg = temp2->dreg;
			break;
		case OP_LOADI1_MEMBASE:
		case OP_LOADU1_MEMBASE:
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADI8_MEMBASE:
		case OP_LOAD_MEMBASE:
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
		case OP_ATOMIC_EXCHANGE_I4:
		case OP_ATOMIC_EXCHANGE_I8:
		case OP_ATOMIC_ADD_NEW_I4:
		case OP_ATOMIC_ADD_NEW_I8:
		case OP_ATOMIC_ADD_IMM_NEW_I4:
		case OP_ATOMIC_ADD_IMM_NEW_I8:
			/* There are no membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				break;
			}
			else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_basereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_basereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			ins->inst_offset = 0;
			ins->inst_basereg = temp2->dreg;
			break;
		case OP_ADD_IMM:
		case OP_IADD_IMM:
		case OP_LADD_IMM:
		case OP_ISUB_IMM:
		case OP_LSUB_IMM:
		case OP_AND_IMM:
		case OP_IAND_IMM:
		case OP_LAND_IMM:
		case OP_IOR_IMM:
		case OP_LOR_IMM:
		case OP_IXOR_IMM:
		case OP_LXOR_IMM:
		case OP_SHL_IMM:
		case OP_SHR_IMM:
		case OP_ISHL_IMM:
		case OP_LSHL_IMM:
		case OP_ISHR_IMM:
		case OP_LSHR_IMM:
		case OP_ISHR_UN_IMM:
		case OP_LSHR_UN_IMM: {
			gboolean is_imm = FALSE;
			gboolean switched = FALSE;

			if (ins->opcode == OP_AND_IMM && ins->inst_imm == 255) {
				ins->opcode = OP_ZEXT_I1;
				break;
			}

			switch (ins->opcode) {
			case OP_ADD_IMM:
			case OP_IADD_IMM:
			case OP_LADD_IMM:
				is_imm = ia64_is_imm14 (ins->inst_imm);
				switched = TRUE;
				break;
			case OP_ISUB_IMM:
			case OP_LSUB_IMM:
				is_imm = ia64_is_imm14 (- (ins->inst_imm));
				if (is_imm) {
					/* A = B - IMM -> A = B + (-IMM) */
					ins->inst_imm = - ins->inst_imm;
					ins->opcode = OP_IADD_IMM;
				}
				switched = TRUE;
				break;
			case OP_IAND_IMM:
			case OP_IOR_IMM:
			case OP_IXOR_IMM:
			case OP_AND_IMM:
			case OP_LAND_IMM:
			case OP_LOR_IMM:
			case OP_LXOR_IMM:
				is_imm = ia64_is_imm8 (ins->inst_imm);
				switched = TRUE;
				break;
			case OP_SHL_IMM:
			case OP_SHR_IMM:
			case OP_ISHL_IMM:
			case OP_LSHL_IMM:
			case OP_ISHR_IMM:
			case OP_LSHR_IMM:
			case OP_ISHR_UN_IMM:
			case OP_LSHR_UN_IMM:
				is_imm = (ins->inst_imm >= 0) && (ins->inst_imm < 64);
				break;
			default:
				break;
			}

			if (is_imm) {
				if (switched)
					ins->sreg2 = ins->sreg1;
				break;
			}

			ins->opcode = mono_op_imm_to_op (ins->opcode);

			if (ins->inst_imm == 0)
				ins->sreg2 = IA64_R0;
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
			}
			break;
		}
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		case OP_LCOMPARE_IMM: {
			/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
			gboolean imm;
			CompRelation cond;

			next = ins->next;

			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				NULLIFY_INS (ins);
				break;
			}

			/*
			 * The compare_imm instructions have switched up arguments, and
			 * some of them take an imm between -127 and 128.
			 */
			next = ins->next;
			cond = mono_opcode_to_cond (next->opcode);
			if ((cond == CMP_LT) || (cond == CMP_GE))
				imm = ia64_is_imm8 (ins->inst_imm - 1);
			else if ((cond == CMP_LT_UN) || (cond == CMP_GE_UN))
				imm = ia64_is_imm8 (ins->inst_imm - 1) && (ins->inst_imm > 0);
			else
				imm = ia64_is_imm8 (ins->inst_imm);

			if (imm) {
				ins->opcode = opcode_to_ia64_cmp_imm (next->opcode, ins->opcode);
				ins->sreg2 = ins->sreg1;
			}
			else {
				ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);

				if (ins->inst_imm == 0)
					ins->sreg2 = IA64_R0;
				else {
					NEW_INS (cfg, temp, OP_I8CONST);
					temp->inst_c0 = ins->inst_imm;
					temp->dreg = mono_alloc_ireg (cfg);
					ins->sreg2 = temp->dreg;
				}
			}

			if (MONO_IS_COND_BRANCH_OP (next)) {
				next->opcode = OP_IA64_BR_COND;
				next->inst_target_bb = next->inst_true_bb;
			} else if (MONO_IS_COND_EXC (next)) {
				next->opcode = OP_IA64_COND_EXC;
			} else if (MONO_IS_SETCC (next)) {
				next->opcode = OP_IA64_CSET;
			} else {
				printf ("%s\n", mono_inst_name (next->opcode));
				NOT_IMPLEMENTED;
			}

			break;
		}
		case OP_COMPARE:
		case OP_ICOMPARE:
		case OP_LCOMPARE:
		case OP_FCOMPARE: {
			/* Instead of compare+b<cond>, ia64 has compare<cond>+br */

			next = ins->next;

			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				NULLIFY_INS (ins);
				break;
			}

			ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);

			if (MONO_IS_COND_BRANCH_OP (next)) {
				next->opcode = OP_IA64_BR_COND;
				next->inst_target_bb = next->inst_true_bb;
			} else if (MONO_IS_COND_EXC (next)) {
				next->opcode = OP_IA64_COND_EXC;
			} else if (MONO_IS_SETCC (next)) {
				next->opcode = OP_IA64_CSET;
			} else {
				printf ("%s\n", mono_inst_name (next->opcode));
				NOT_IMPLEMENTED;
			}

			break;
		}
		case OP_FCEQ:
		case OP_FCGT:
		case OP_FCGT_UN:
		case OP_FCLT:
		case OP_FCLT_UN:
			/* The front end removes the fcompare, so introduce it again */
			NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = ins->sreg2;

			ins->opcode = OP_IA64_CSET;
			MONO_INST_NULLIFY_SREGS (ins);
			break;
		case OP_MUL_IMM:
		case OP_LMUL_IMM:
		case OP_IMUL_IMM: {
			int i, sum_reg;
			gboolean found = FALSE;
			int shl_op = ins->opcode == OP_IMUL_IMM ? OP_ISHL_IMM : OP_SHL_IMM;

			/* First the easy cases */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			for (i = 1; i < 64; ++i)
				if (ins->inst_imm == (((gint64)1) << i)) {
					ins->opcode = shl_op;
					ins->inst_imm = i;
					found = TRUE;
					break;
				}

			/* This could be optimized */
			if (!found) {
				sum_reg = 0;
				for (i = 0; i < 64; ++i) {
					if (ins->inst_imm & (((gint64)1) << i)) {
						NEW_INS (cfg, temp, shl_op);
						temp->dreg = mono_alloc_ireg (cfg);
						temp->sreg1 = ins->sreg1;
						temp->inst_imm = i;

						if (sum_reg == 0)
							sum_reg = temp->dreg;
						else {
							NEW_INS (cfg, temp2, OP_LADD);
							temp2->dreg = mono_alloc_ireg (cfg);
							temp2->sreg1 = sum_reg;
							temp2->sreg2 = temp->dreg;
							sum_reg = temp2->dreg;
						}
					}
				}
				ins->opcode = OP_MOVE;
				ins->sreg1 = sum_reg;
			}
			break;
		}
		case OP_LCONV_TO_OVF_U4:
			NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = IA64_R0;

			NEW_INS (cfg, temp, OP_IA64_COND_EXC);
			temp->inst_p1 = (char*)"OverflowException";

			ins->opcode = OP_MOVE;
			break;
		case OP_LCONV_TO_OVF_I4_UN:
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = 0x7fffffff;
			temp->dreg = mono_alloc_ireg (cfg);

			NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
			temp2->sreg1 = ins->sreg1;
			temp2->sreg2 = temp->dreg;

			NEW_INS (cfg, temp, OP_IA64_COND_EXC);
			temp->inst_p1 = (char*)"OverflowException";

			ins->opcode = OP_MOVE;
			break;
		case OP_FCONV_TO_I4:
		case OP_FCONV_TO_I2:
		case OP_FCONV_TO_U2:
		case OP_FCONV_TO_I1:
		case OP_FCONV_TO_U1:
			NEW_INS (cfg, temp, OP_FCONV_TO_I8);
			temp->sreg1 = ins->sreg1;
			temp->dreg = ins->dreg;

			switch (ins->opcode) {
			case OP_FCONV_TO_I4:
				ins->opcode = OP_SEXT_I4;
				break;
			case OP_FCONV_TO_I2:
				ins->opcode = OP_SEXT_I2;
				break;
			case OP_FCONV_TO_U2:
				ins->opcode = OP_ZEXT_I4;
				break;
			case OP_FCONV_TO_I1:
				ins->opcode = OP_SEXT_I1;
				break;
			case OP_FCONV_TO_U1:
				ins->opcode = OP_ZEXT_I1;
				break;
			default:
				g_assert_not_reached ();
			}
			ins->sreg1 = ins->dreg;
			break;
		default:
			break;
		}

		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;

	bb->max_vreg = cfg->next_vreg;
}

/*
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
 */
static Ia64CodegenState
emit_load_volatile_arguments (MonoCompile *cfg, Ia64CodegenState code)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *ins;
	CallInfo *cinfo;
	guint32 i;

	/* FIXME: Generate intermediate code instead */

	sig = mono_method_signature (method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/* This is the opposite of the code in emit_prolog */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		gint32 stack_offset;
		MonoType *arg_type;

		ins = cfg->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		arg_type = mono_type_get_underlying_type (arg_type);

		stack_offset = ainfo->offset + ARGS_OFFSET;

		/* Save volatile arguments to the stack */
		if (ins->opcode != OP_REGVAR) {
			switch (ainfo->storage) {
			case ArgInIReg:
			case ArgInFloatReg:
				/* FIXME: big offsets */
				g_assert (ins->opcode == OP_REGOFFSET);
				ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
				if (arg_type->byref)
					ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
				else {
					switch (arg_type->type) {
					case MONO_TYPE_R4:
						ia64_ldfs (code, ainfo->reg, GP_SCRATCH_REG);
						break;
					case MONO_TYPE_R8:
						ia64_ldfd (code, ainfo->reg, GP_SCRATCH_REG);
						break;
					default:
						ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
						break;
					}
				}
				break;
			case ArgOnStack:
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}

		if (ins->opcode == OP_REGVAR) {
			/* Argument allocated to (non-volatile) register */
			switch (ainfo->storage) {
			case ArgInIReg:
				if (ins->dreg != cfg->arch.reg_in0 + ainfo->reg)
					ia64_mov (code, cfg->arch.reg_in0 + ainfo->reg, ins->dreg);
				break;
			case ArgOnStack:
				ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
				ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}
	}

	return code;
}

static Ia64CodegenState
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, Ia64CodegenState code)
{
	CallInfo *cinfo;
	int i;

	/* Move return value to the target register */
	switch (ins->opcode) {
	case OP_VOIDCALL:
	case OP_VOIDCALL_REG:
	case OP_VOIDCALL_MEMBASE:
		break;
	case OP_CALL:
	case OP_CALL_REG:
	case OP_CALL_MEMBASE:
	case OP_LCALL:
	case OP_LCALL_REG:
	case OP_LCALL_MEMBASE:
		g_assert (ins->dreg == IA64_R8);
		break;
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
		g_assert (ins->dreg == 8);
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
			ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
		break;
	case OP_VCALL:
	case OP_VCALL_REG:
	case OP_VCALL_MEMBASE:
	case OP_VCALL2:
	case OP_VCALL2_REG:
	case OP_VCALL2_MEMBASE: {
		ArgStorage storage;

		cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
		storage = cinfo->ret.storage;

		if (storage == ArgAggregate) {
			MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;

			/* Load address of stack space allocated for the return value */
			ia64_movl (code, GP_SCRATCH_REG, local->inst_offset);
			ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, local->inst_basereg);
			ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);

			for (i = 0; i < cinfo->ret.nregs; ++i) {
				switch (cinfo->ret.atype) {
				case AggregateNormal:
					ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
					break;
				case AggregateSingleHFA:
					ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 4, 0);
					break;
				case AggregateDoubleHFA:
					ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
					break;
				default:
					g_assert_not_reached ();
				}
			}
		}
		break;
	}
	default:
		g_assert_not_reached ();
	}

	return code;
}

#define add_patch_info(cfg,code,patch_type,data) do { \
	mono_add_patch_info (cfg, code.buf + code.nins - cfg->native_code, patch_type, data); \
} while (0)

#define emit_cond_system_exception(cfg,code,exc_name,predicate) do { \
	MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
	if (tins == NULL) \
		add_patch_info (cfg, code, MONO_PATCH_INFO_EXC, exc_name); \
	else \
		add_patch_info (cfg, code, MONO_PATCH_INFO_BB, tins->inst_true_bb); \
	ia64_br_cond_pred (code, (predicate), 0); \
} while (0)

static Ia64CodegenState
emit_call (MonoCompile *cfg, Ia64CodegenState code, guint32 patch_type, gconstpointer data)
{
	add_patch_info (cfg, code, patch_type, data);

	if ((patch_type == MONO_PATCH_INFO_ABS) || (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD)) {
		/* Indirect call */
		/* mono_arch_patch_callsite will patch this */
		/* mono_arch_nullify_class_init_trampoline will patch this */
		ia64_movl (code, GP_SCRATCH_REG, 0);
		ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
		ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
		ia64_br_call_reg (code, IA64_B0, IA64_B6);
	}
	else {
		/* Can't use a direct call since the displacement might be too small */
		/* mono_arch_patch_callsite will patch this */
		ia64_movl (code, GP_SCRATCH_REG, 0);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
		ia64_br_call_reg (code, IA64_B0, IA64_B6);
	}

	return code;
}

void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	MonoCallInst *call;
	guint offset;
	Ia64CodegenState code;
	guint8 *code_start = cfg->native_code + cfg->code_len;
	MonoInst *last_ins = NULL;
	guint last_offset = 0;
	int max_len, cpos;

	if (cfg->opt & MONO_OPT_LOOP) {
		/* FIXME: */
	}

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	cpos = bb->max_offset;

	if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
		NOT_IMPLEMENTED;
	}

	offset = code_start - cfg->native_code;

	ia64_codegen_init (code, code_start);

#if 0
	if (strstr (cfg->method->name, "conv_ovf_i1") && (bb->block_num == 2))
		break_count ();
#endif

	MONO_BB_FOR_EACH_INS (bb, ins) {
		offset = code.buf - cfg->native_code;

		max_len = ((int)(((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN])) + 128;

		while (offset + max_len + 16 > cfg->code_size) {
			ia64_codegen_close (code);

			offset = code.buf - cfg->native_code;

			cfg->code_size *= 2;
			cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
			code_start = cfg->native_code + offset;
			mono_jit_stats.code_reallocs++;

			ia64_codegen_init (code, code_start);
		}

		mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
2051 switch (ins->opcode) {
2052 case OP_ICONST:
2053 case OP_I8CONST:
2054 if (ia64_is_imm14 (ins->inst_c0))
2055 ia64_adds_imm (code, ins->dreg, ins->inst_c0, IA64_R0);
2056 else
2057 ia64_movl (code, ins->dreg, ins->inst_c0);
2058 break;
2059 case OP_JUMP_TABLE:
2060 add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2061 ia64_movl (code, ins->dreg, 0);
2062 break;
2063 case OP_MOVE:
2064 ia64_mov (code, ins->dreg, ins->sreg1);
2065 break;
2066 case OP_BR:
2067 case OP_IA64_BR_COND: {
2068 int pred = 0;
2069 if (ins->opcode == OP_IA64_BR_COND)
2070 pred = 6;
2071 if (ins->inst_target_bb->native_offset) {
2072 guint8 *pos = code.buf + code.nins;
2074 ia64_br_cond_pred (code, pred, 0);
2075 ia64_begin_bundle (code);
2076 ia64_patch (pos, cfg->native_code + ins->inst_target_bb->native_offset);
2077 } else {
2078 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2079 ia64_br_cond_pred (code, pred, 0);
2081 break;
2083 case OP_LABEL:
2084 ia64_begin_bundle (code);
2085 ins->inst_c0 = code.buf - cfg->native_code;
2086 break;
2087 case OP_NOP:
2088 case OP_RELAXED_NOP:
2089 case OP_DUMMY_USE:
2090 case OP_DUMMY_STORE:
2091 case OP_NOT_REACHED:
2092 case OP_NOT_NULL:
2093 break;
2094 case OP_BR_REG:
2095 ia64_mov_to_br (code, IA64_B6, ins->sreg1);
2096 ia64_br_cond_reg (code, IA64_B6);
2097 break;
2098 case OP_IADD:
2099 case OP_LADD:
2100 ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
2101 break;
2102 case OP_ISUB:
2103 case OP_LSUB:
2104 ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
2105 break;
2106 case OP_IAND:
2107 case OP_LAND:
2108 ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
2109 break;
2110 case OP_IOR:
2111 case OP_LOR:
2112 ia64_or (code, ins->dreg, ins->sreg1, ins->sreg2);
2113 break;
2114 case OP_IXOR:
2115 case OP_LXOR:
2116 ia64_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
2117 break;
2118 case OP_INEG:
2119 case OP_LNEG:
2120 ia64_sub (code, ins->dreg, IA64_R0, ins->sreg1);
2121 break;
2122 case OP_INOT:
2123 case OP_LNOT:
2124 ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
2125 break;
2126 case OP_ISHL:
2127 case OP_LSHL:
2128 ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
2129 break;
2130 case OP_ISHR:
2131 ia64_sxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2132 ia64_shr (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2133 break;
2134 case OP_LSHR:
2135 ia64_shr (code, ins->dreg, ins->sreg1, ins->sreg2);
2136 break;
2137 case OP_ISHR_UN:
2138 ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2139 ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2140 break;
2141 case OP_LSHR_UN:
2142 ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
2143 break;
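/*
 * The 32-bit right shifts above have no direct ia64 equivalent: the
 * shifters are 64-bit only, so the source is first sign- or
 * zero-extended from 32 bits. A sketch of the semantics:
 */
#if 0
static gint32
ishr_semantics (gint32 a, int amount)
{
	return (gint32)(((gint64)a) >> amount);		/* sxt4 + shr */
}

static guint32
ishr_un_semantics (guint32 a, int amount)
{
	return (guint32)(((guint64)a) >> amount);	/* zxt4 + shr.u */
}
#endif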
2144 case OP_IADDCC:
2145 /* p6 and p7 are set if there is signed/unsigned overflow */
2147 /* Set p8-p9 == (sreg2 > 0) */
2148 ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
2150 ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2152 /* (sreg2 > 0) && (res < ins->sreg1) => signed overflow */
2153 ia64_cmp4_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2154 /* (sreg2 <= 0) && (res > ins->sreg1) => signed overflow */
2155 ia64_cmp4_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
2157 /* res <u sreg1 => unsigned overflow */
2158 ia64_cmp4_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
2160 /* FIXME: Predicate this since this is a side effect */
2161 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2162 break;
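/*
 * A sketch of the branch-free overflow detection encoded by the
 * predicated compares above; for res = a + b, signed overflow occurs
 * iff (b > 0 && res < a) || (b <= 0 && res > a), and a carry occurs
 * iff res < a as unsigned values:
 */
#if 0
static void
iaddcc_predicates (gint32 a, gint32 b, gboolean *p6, gboolean *p7)
{
	gint32 res = a + b;

	*p6 = (b > 0) ? (res < a) : (res > a);	/* signed overflow */
	*p7 = ((guint32)res < (guint32)a);	/* unsigned carry */
}
#endif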
2163 case OP_ISUBCC:
2164 /* p6 and p7 are set if there is signed/unsigned overflow */
2166 /* Set p8-p9 == (sreg2 > 0) */
2167 ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
2169 ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2171 /* (sreg2 > 0) && (res > ins->sreg1) => signed overflow */
2172 ia64_cmp4_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2173 /* (sreg2 <= 0) && (res < ins->sreg1) => signed overflow */
2174 ia64_cmp4_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2176 /* sreg1 <u sreg2 => unsigned overflow */
2177 ia64_cmp4_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
2179 /* FIXME: Predicate this since this is a side effect */
2180 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2181 break;
2182 case OP_ADDCC:
2183 /* Same as OP_IADDCC */
2184 ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
2186 ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2188 ia64_cmp_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2189 ia64_cmp_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
2191 ia64_cmp_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
2193 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2194 break;
2195 case OP_SUBCC:
2196 /* Same as OP_ISUBCC */
2198 ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
2200 ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2202 ia64_cmp_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2203 ia64_cmp_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2205 ia64_cmp_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
2207 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2208 break;
2209 case OP_ADD_IMM:
2210 case OP_IADD_IMM:
2211 case OP_LADD_IMM:
2212 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2213 break;
2214 case OP_IAND_IMM:
2215 case OP_AND_IMM:
2216 case OP_LAND_IMM:
2217 ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2218 break;
2219 case OP_IOR_IMM:
2220 case OP_LOR_IMM:
2221 ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2222 break;
2223 case OP_IXOR_IMM:
2224 case OP_LXOR_IMM:
2225 ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2226 break;
2227 case OP_SHL_IMM:
2228 case OP_ISHL_IMM:
2229 case OP_LSHL_IMM:
2230 ia64_shl_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2231 break;
2232 case OP_SHR_IMM:
2233 case OP_LSHR_IMM:
2234 ia64_shr_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2235 break;
2236 case OP_ISHR_IMM:
2237 g_assert (ins->inst_imm <= 64);
2238 ia64_extr (code, ins->dreg, ins->sreg1, ins->inst_imm, 32 - ins->inst_imm);
2239 break;
2240 case OP_ISHR_UN_IMM:
2241 ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2242 ia64_shr_u_imm (code, ins->dreg, GP_SCRATCH_REG, ins->inst_imm);
2243 break;
2244 case OP_LSHR_UN_IMM:
2245 ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2246 break;
2247 case OP_LMUL:
2248 /* Based on gcc code */
2249 ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
2250 ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
2251 ia64_xmpy_l (code, FP_SCRATCH_REG, FP_SCRATCH_REG, FP_SCRATCH_REG2);
2252 ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
2253 break;
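/*
 * ia64 has no integer multiply in the ALU: xmpy.l multiplies the
 * 64-bit significands of two FP registers, and setf.sig/getf.sig move
 * the raw bits in and out. The four instructions above therefore
 * compute the low 64 bits of the product, i.e. a sketch of the result:
 */
#if 0
static gint64
lmul_semantics (gint64 a, gint64 b)
{
	return (gint64)((guint64)a * (guint64)b);	/* low 64 bits only */
}
#endif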
2255 case OP_STOREI1_MEMBASE_REG:
2256 ia64_st1_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2257 break;
2258 case OP_STOREI2_MEMBASE_REG:
2259 ia64_st2_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2260 break;
2261 case OP_STOREI4_MEMBASE_REG:
2262 ia64_st4_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2263 break;
2264 case OP_STOREI8_MEMBASE_REG:
2265 case OP_STORE_MEMBASE_REG:
2266 if (ins->inst_offset != 0) {
2267 /* This is generated by local regalloc */
2268 if (ia64_is_imm14 (ins->inst_offset)) {
2269 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
2270 } else {
2271 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2272 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
2273 }
2274 ins->inst_destbasereg = GP_SCRATCH_REG;
2275 }
2276 ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2277 break;
2279 case OP_IA64_STOREI1_MEMBASE_INC_REG:
2280 ia64_st1_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 1, 0);
2281 break;
2282 case OP_IA64_STOREI2_MEMBASE_INC_REG:
2283 ia64_st2_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 2, 0);
2284 break;
2285 case OP_IA64_STOREI4_MEMBASE_INC_REG:
2286 ia64_st4_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 4, 0);
2287 break;
2288 case OP_IA64_STOREI8_MEMBASE_INC_REG:
2289 ia64_st8_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 8, 0);
2290 break;
2292 case OP_LOADU1_MEMBASE:
2293 ia64_ld1 (code, ins->dreg, ins->inst_basereg);
2294 break;
2295 case OP_LOADU2_MEMBASE:
2296 ia64_ld2 (code, ins->dreg, ins->inst_basereg);
2297 break;
2298 case OP_LOADU4_MEMBASE:
2299 ia64_ld4 (code, ins->dreg, ins->inst_basereg);
2300 break;
2301 case OP_LOADI1_MEMBASE:
2302 ia64_ld1 (code, ins->dreg, ins->inst_basereg);
2303 ia64_sxt1 (code, ins->dreg, ins->dreg);
2304 break;
2305 case OP_LOADI2_MEMBASE:
2306 ia64_ld2 (code, ins->dreg, ins->inst_basereg);
2307 ia64_sxt2 (code, ins->dreg, ins->dreg);
2308 break;
2309 case OP_LOADI4_MEMBASE:
2310 ia64_ld4 (code, ins->dreg, ins->inst_basereg);
2311 ia64_sxt4 (code, ins->dreg, ins->dreg);
2312 break;
2313 case OP_LOAD_MEMBASE:
2314 case OP_LOADI8_MEMBASE:
2315 if (ins->inst_offset != 0) {
2316 /* This is generated by local regalloc */
2317 if (ia64_is_imm14 (ins->inst_offset)) {
2318 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
2319 } else {
2320 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2321 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
2322 }
2323 ins->inst_basereg = GP_SCRATCH_REG;
2324 }
2325 ia64_ld8 (code, ins->dreg, ins->inst_basereg);
2326 break;
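/*
 * ia64 loads and stores have no base+displacement form, so the
 * nonzero offsets produced by local regalloc are folded into the
 * scratch register first; the ld8 above then behaves like this
 * sketch:
 */
#if 0
#include <string.h>
static guint64
load_membase_semantics (guint8 *base, gint64 offset)
{
	guint64 val;
	memcpy (&val, base + offset, 8);	/* adds/movl+add, then ld8 */
	return val;
}
#endif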
2328 case OP_IA64_LOADU1_MEMBASE_INC:
2329 ia64_ld1_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 1, 0);
2330 break;
2331 case OP_IA64_LOADU2_MEMBASE_INC:
2332 ia64_ld2_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 2, 0);
2333 break;
2334 case OP_IA64_LOADU4_MEMBASE_INC:
2335 ia64_ld4_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 4, 0);
2336 break;
2337 case OP_IA64_LOADI8_MEMBASE_INC:
2338 ia64_ld8_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 8, 0);
2339 break;
2341 case OP_SEXT_I1:
2342 ia64_sxt1 (code, ins->dreg, ins->sreg1);
2343 break;
2344 case OP_SEXT_I2:
2345 ia64_sxt2 (code, ins->dreg, ins->sreg1);
2346 break;
2347 case OP_SEXT_I4:
2348 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2349 break;
2350 case OP_ZEXT_I1:
2351 ia64_zxt1 (code, ins->dreg, ins->sreg1);
2352 break;
2353 case OP_ZEXT_I2:
2354 ia64_zxt2 (code, ins->dreg, ins->sreg1);
2355 break;
2356 case OP_ZEXT_I4:
2357 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2358 break;
2360 /* Compare opcodes */
2361 case OP_IA64_CMP4_EQ:
2362 ia64_cmp4_eq (code, 6, 7, ins->sreg1, ins->sreg2);
2363 break;
2364 case OP_IA64_CMP4_NE:
2365 ia64_cmp4_ne (code, 6, 7, ins->sreg1, ins->sreg2);
2366 break;
2367 case OP_IA64_CMP4_LE:
2368 ia64_cmp4_le (code, 6, 7, ins->sreg1, ins->sreg2);
2369 break;
2370 case OP_IA64_CMP4_LT:
2371 ia64_cmp4_lt (code, 6, 7, ins->sreg1, ins->sreg2);
2372 break;
2373 case OP_IA64_CMP4_GE:
2374 ia64_cmp4_ge (code, 6, 7, ins->sreg1, ins->sreg2);
2375 break;
2376 case OP_IA64_CMP4_GT:
2377 ia64_cmp4_gt (code, 6, 7, ins->sreg1, ins->sreg2);
2378 break;
2379 case OP_IA64_CMP4_LT_UN:
2380 ia64_cmp4_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
2381 break;
2382 case OP_IA64_CMP4_LE_UN:
2383 ia64_cmp4_leu (code, 6, 7, ins->sreg1, ins->sreg2);
2384 break;
2385 case OP_IA64_CMP4_GT_UN:
2386 ia64_cmp4_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
2387 break;
2388 case OP_IA64_CMP4_GE_UN:
2389 ia64_cmp4_geu (code, 6, 7, ins->sreg1, ins->sreg2);
2390 break;
2391 case OP_IA64_CMP_EQ:
2392 ia64_cmp_eq (code, 6, 7, ins->sreg1, ins->sreg2);
2393 break;
2394 case OP_IA64_CMP_NE:
2395 ia64_cmp_ne (code, 6, 7, ins->sreg1, ins->sreg2);
2396 break;
2397 case OP_IA64_CMP_LE:
2398 ia64_cmp_le (code, 6, 7, ins->sreg1, ins->sreg2);
2399 break;
2400 case OP_IA64_CMP_LT:
2401 ia64_cmp_lt (code, 6, 7, ins->sreg1, ins->sreg2);
2402 break;
2403 case OP_IA64_CMP_GE:
2404 ia64_cmp_ge (code, 6, 7, ins->sreg1, ins->sreg2);
2405 break;
2406 case OP_IA64_CMP_GT:
2407 ia64_cmp_gt (code, 6, 7, ins->sreg1, ins->sreg2);
2408 break;
2409 case OP_IA64_CMP_GT_UN:
2410 ia64_cmp_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
2411 break;
2412 case OP_IA64_CMP_LT_UN:
2413 ia64_cmp_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
2414 break;
2415 case OP_IA64_CMP_GE_UN:
2416 ia64_cmp_geu (code, 6, 7, ins->sreg1, ins->sreg2);
2417 break;
2418 case OP_IA64_CMP_LE_UN:
2419 ia64_cmp_leu (code, 6, 7, ins->sreg1, ins->sreg2);
2420 break;
2421 case OP_IA64_CMP4_EQ_IMM:
2422 ia64_cmp4_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2423 break;
2424 case OP_IA64_CMP4_NE_IMM:
2425 ia64_cmp4_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2426 break;
2427 case OP_IA64_CMP4_LE_IMM:
2428 ia64_cmp4_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2429 break;
2430 case OP_IA64_CMP4_LT_IMM:
2431 ia64_cmp4_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2432 break;
2433 case OP_IA64_CMP4_GE_IMM:
2434 ia64_cmp4_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2435 break;
2436 case OP_IA64_CMP4_GT_IMM:
2437 ia64_cmp4_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2438 break;
2439 case OP_IA64_CMP4_LT_UN_IMM:
2440 ia64_cmp4_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2441 break;
2442 case OP_IA64_CMP4_LE_UN_IMM:
2443 ia64_cmp4_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2444 break;
2445 case OP_IA64_CMP4_GT_UN_IMM:
2446 ia64_cmp4_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2447 break;
2448 case OP_IA64_CMP4_GE_UN_IMM:
2449 ia64_cmp4_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2450 break;
2451 case OP_IA64_CMP_EQ_IMM:
2452 ia64_cmp_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2453 break;
2454 case OP_IA64_CMP_NE_IMM:
2455 ia64_cmp_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2456 break;
2457 case OP_IA64_CMP_LE_IMM:
2458 ia64_cmp_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2459 break;
2460 case OP_IA64_CMP_LT_IMM:
2461 ia64_cmp_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2462 break;
2463 case OP_IA64_CMP_GE_IMM:
2464 ia64_cmp_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2465 break;
2466 case OP_IA64_CMP_GT_IMM:
2467 ia64_cmp_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2468 break;
2469 case OP_IA64_CMP_GT_UN_IMM:
2470 ia64_cmp_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2471 break;
2472 case OP_IA64_CMP_LT_UN_IMM:
2473 ia64_cmp_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2474 break;
2475 case OP_IA64_CMP_GE_UN_IMM:
2476 ia64_cmp_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2477 break;
2478 case OP_IA64_CMP_LE_UN_IMM:
2479 ia64_cmp_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2480 break;
2481 case OP_IA64_FCMP_EQ:
2482 ia64_fcmp_eq_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2483 break;
2484 case OP_IA64_FCMP_NE:
2485 ia64_fcmp_ne_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2486 break;
2487 case OP_IA64_FCMP_LT:
2488 ia64_fcmp_lt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2489 break;
2490 case OP_IA64_FCMP_GT:
2491 ia64_fcmp_gt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2492 break;
2493 case OP_IA64_FCMP_LE:
2494 ia64_fcmp_le_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2495 break;
2496 case OP_IA64_FCMP_GE:
2497 ia64_fcmp_ge_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2498 break;
2499 case OP_IA64_FCMP_GT_UN:
2500 ia64_fcmp_gt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2501 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2502 break;
2503 case OP_IA64_FCMP_LT_UN:
2504 ia64_fcmp_lt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2505 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2506 break;
2507 case OP_IA64_FCMP_GE_UN:
2508 ia64_fcmp_ge_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2509 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2510 break;
2511 case OP_IA64_FCMP_LE_UN:
2512 ia64_fcmp_le_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2513 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2514 break;
2516 case OP_COND_EXC_IOV:
2517 case OP_COND_EXC_OV:
2518 emit_cond_system_exception (cfg, code, "OverflowException", 6);
2519 break;
2520 case OP_COND_EXC_IC:
2521 case OP_COND_EXC_C:
2522 emit_cond_system_exception (cfg, code, "OverflowException", 7);
2523 break;
2524 case OP_IA64_COND_EXC:
2525 emit_cond_system_exception (cfg, code, ins->inst_p1, 6);
2526 break;
2527 case OP_IA64_CSET:
2528 ia64_mov_pred (code, 7, ins->dreg, IA64_R0);
2529 ia64_no_stop (code);
2530 ia64_add1_pred (code, 6, ins->dreg, IA64_R0, IA64_R0);
2531 break;
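/*
 * A sketch of OP_IA64_CSET: a preceding compare leaves a complementary
 * p6/p7 pair, and the two predicated instructions above materialize it
 * as 0 or 1 without a branch (ia64_no_stop marks that the two writes
 * to dreg cannot both execute, so no stop bit is needed):
 */
#if 0
static int
cset_semantics (gboolean p6, gboolean p7)
{
	int dreg = 0;
	if (p7)
		dreg = 0;	/* (p7) mov dreg = r0 */
	if (p6)
		dreg = 1;	/* (p6) dreg = r0 + r0 + 1 */
	return dreg;
}
#endif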
2532 case OP_ICONV_TO_I1:
2533 case OP_LCONV_TO_I1:
2534 /* FIXME: Is this needed ? */
2535 ia64_sxt1 (code, ins->dreg, ins->sreg1);
2536 break;
2537 case OP_ICONV_TO_I2:
2538 case OP_LCONV_TO_I2:
2539 /* FIXME: Is this needed ? */
2540 ia64_sxt2 (code, ins->dreg, ins->sreg1);
2541 break;
2542 case OP_LCONV_TO_I4:
2543 /* FIXME: Is this needed ? */
2544 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2545 break;
2546 case OP_ICONV_TO_U1:
2547 case OP_LCONV_TO_U1:
2548 /* FIXME: Is this needed */
2549 ia64_zxt1 (code, ins->dreg, ins->sreg1);
2550 break;
2551 case OP_ICONV_TO_U2:
2552 case OP_LCONV_TO_U2:
2553 /* FIXME: Is this needed */
2554 ia64_zxt2 (code, ins->dreg, ins->sreg1);
2555 break;
2556 case OP_LCONV_TO_U4:
2557 /* FIXME: Is this needed */
2558 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2559 break;
2560 case OP_ICONV_TO_I8:
2561 case OP_ICONV_TO_I:
2562 case OP_LCONV_TO_I8:
2563 case OP_LCONV_TO_I:
2564 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2565 break;
2566 case OP_LCONV_TO_U8:
2567 case OP_LCONV_TO_U:
2568 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2569 break;
2571 /*
2572 * FLOAT OPCODES
2573 */
2574 case OP_R8CONST: {
2575 double d = *(double *)ins->inst_p0;
2577 if ((d == 0.0) && (mono_signbit (d) == 0))
2578 ia64_fmov (code, ins->dreg, 0);
2579 else if (d == 1.0)
2580 ia64_fmov (code, ins->dreg, 1);
2581 else {
2582 add_patch_info (cfg, code, MONO_PATCH_INFO_R8, ins->inst_p0);
2583 ia64_movl (code, GP_SCRATCH_REG, 0);
2584 ia64_ldfd (code, ins->dreg, GP_SCRATCH_REG);
2585 }
2586 break;
2587 }
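/*
 * fr0 and fr1 are architecturally hardwired to +0.0 and +1.0, so those
 * two constants cost nothing to load. The signbit test matters because
 * -0.0 compares equal to 0.0 but cannot be taken from f0; a sketch:
 */
#if 0
static gboolean
can_use_f0 (double d)
{
	return (d == 0.0) && (mono_signbit (d) == 0);	/* +0.0 only */
}
#endif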
2588 case OP_R4CONST: {
2589 float f = *(float *)ins->inst_p0;
2591 if ((f == 0.0) && (mono_signbit (f) == 0))
2592 ia64_fmov (code, ins->dreg, 0);
2593 else if (f == 1.0)
2594 ia64_fmov (code, ins->dreg, 1);
2595 else {
2596 add_patch_info (cfg, code, MONO_PATCH_INFO_R4, ins->inst_p0);
2597 ia64_movl (code, GP_SCRATCH_REG, 0);
2598 ia64_ldfs (code, ins->dreg, GP_SCRATCH_REG);
2599 }
2600 break;
2601 }
2602 case OP_FMOVE:
2603 ia64_fmov (code, ins->dreg, ins->sreg1);
2604 break;
2605 case OP_STORER8_MEMBASE_REG:
2606 if (ins->inst_offset != 0) {
2607 /* This is generated by local regalloc */
2608 if (ia64_is_imm14 (ins->inst_offset)) {
2609 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
2610 } else {
2611 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2612 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
2613 }
2614 ins->inst_destbasereg = GP_SCRATCH_REG;
2615 }
2616 ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2617 break;
2618 case OP_STORER4_MEMBASE_REG:
2619 ia64_fnorm_s_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
2620 ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
2621 break;
2622 case OP_LOADR8_MEMBASE:
2623 if (ins->inst_offset != 0) {
2624 /* This is generated by local regalloc */
2625 if (ia64_is_imm14 (ins->inst_offset)) {
2626 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
2627 } else {
2628 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2629 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
2630 }
2631 ins->inst_basereg = GP_SCRATCH_REG;
2632 }
2633 ia64_ldfd (code, ins->dreg, ins->inst_basereg);
2634 break;
2635 case OP_LOADR4_MEMBASE:
2636 ia64_ldfs (code, ins->dreg, ins->inst_basereg);
2637 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
2638 break;
2639 case OP_ICONV_TO_R4:
2640 case OP_LCONV_TO_R4:
2641 ia64_setf_sig (code, ins->dreg, ins->sreg1);
2642 ia64_fcvt_xf (code, ins->dreg, ins->dreg);
2643 ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
2644 break;
2645 case OP_ICONV_TO_R8:
2646 case OP_LCONV_TO_R8:
2647 ia64_setf_sig (code, ins->dreg, ins->sreg1);
2648 ia64_fcvt_xf (code, ins->dreg, ins->dreg);
2649 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
2650 break;
2651 case OP_FCONV_TO_R4:
2652 ia64_fnorm_s_sf (code, ins->dreg, ins->sreg1, 0);
2653 break;
2654 case OP_FCONV_TO_I8:
2655 case OP_FCONV_TO_I:
2656 ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
2657 ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
2658 break;
2659 case OP_FADD:
2660 ia64_fma_d_sf (code, ins->dreg, ins->sreg1, 1, ins->sreg2, 0);
2661 break;
2662 case OP_FSUB:
2663 ia64_fms_d_sf (code, ins->dreg, ins->sreg1, 1, ins->sreg2, 0);
2664 break;
2665 case OP_FMUL:
2666 ia64_fma_d_sf (code, ins->dreg, ins->sreg1, ins->sreg2, 0, 0);
2667 break;
2668 case OP_FNEG:
2669 ia64_fmerge_ns (code, ins->dreg, ins->sreg1, ins->sreg1);
2670 break;
2671 case OP_CKFINITE:
2672 /* Quiet NaN */
2673 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x080);
2674 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2675 /* Signaling NaN */
2676 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x040);
2677 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2678 /* Positive infinity */
2679 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x021);
2680 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2681 /* Negative infinity */
2682 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x022);
2683 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2684 break;
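/*
 * The four fclass tests above together reject every non-finite value;
 * a sketch of the equivalent check in portable C:
 */
#if 0
#include <math.h>
static gboolean
ckfinite_semantics (double x)
{
	return !(isnan (x) || isinf (x));	/* else ArithmeticException */
}
#endif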
2686 /* Calls */
2687 case OP_CHECK_THIS:
2688 /* ensure ins->sreg1 is not NULL */
2689 ia64_ld8 (code, GP_SCRATCH_REG, ins->sreg1);
2690 break;
2691 case OP_ARGLIST:
2692 ia64_adds_imm (code, GP_SCRATCH_REG, cfg->sig_cookie, cfg->frame_reg);
2693 ia64_st8 (code, ins->sreg1, GP_SCRATCH_REG);
2694 break;
2695 case OP_FCALL:
2696 case OP_LCALL:
2697 case OP_VCALL:
2698 case OP_VCALL2:
2699 case OP_VOIDCALL:
2700 case OP_CALL:
2701 call = (MonoCallInst*)ins;
2703 if (ins->flags & MONO_INST_HAS_METHOD)
2704 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2705 else
2706 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2708 code = emit_move_return_value (cfg, ins, code);
2709 break;
2711 case OP_CALL_REG:
2712 case OP_FCALL_REG:
2713 case OP_LCALL_REG:
2714 case OP_VCALL_REG:
2715 case OP_VCALL2_REG:
2716 case OP_VOIDCALL_REG: {
2717 MonoCallInst *call = (MonoCallInst*)ins;
2718 CallInfo *cinfo;
2719 int out_reg;
2721 /*
2722 * mono_arch_find_this_arg () needs to find the this argument in a global
2723 * register.
2724 */
2725 cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
2726 out_reg = cfg->arch.reg_out0;
2727 if (cinfo->ret.storage == ArgValuetypeAddrInIReg)
2728 out_reg ++;
2729 ia64_mov (code, IA64_R10, out_reg);
2731 /* Indirect call */
2732 ia64_mov (code, IA64_R8, ins->sreg1);
2733 ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, IA64_R8, 8);
2734 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
2735 ia64_ld8 (code, IA64_GP, IA64_R8);
2736 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2738 code = emit_move_return_value (cfg, ins, code);
2739 break;
2740 }
2741 case OP_FCALL_MEMBASE:
2742 case OP_LCALL_MEMBASE:
2743 case OP_VCALL_MEMBASE:
2744 case OP_VCALL2_MEMBASE:
2745 case OP_VOIDCALL_MEMBASE:
2746 case OP_CALL_MEMBASE: {
2747 MonoCallInst *call = (MonoCallInst*)ins;
2748 CallInfo *cinfo;
2749 int out_reg;
2751 /*
2752 * There are no membase instructions on ia64, but we can't
2753 * lower this since get_vcall_slot_addr () needs to decode it.
2754 */
2756 /* Keep this in sync with get_vcall_slot_addr */
2757 ia64_mov (code, IA64_R11, ins->sreg1);
2758 if (ia64_is_imm14 (ins->inst_offset))
2759 ia64_adds_imm (code, IA64_R8, ins->inst_offset, ins->sreg1);
2760 else {
2761 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2762 ia64_add (code, IA64_R8, GP_SCRATCH_REG, ins->sreg1);
2763 }
2765 if (call->method && ins->inst_offset < 0) {
2766 /*
2767 * This is a possible IMT call so save the IMT method in a global
2768 * register where mono_arch_find_imt_method () and its friends can
2769 * access it.
2770 */
2771 ia64_movl (code, IA64_R9, call->method);
2772 }
2774 /*
2775 * mono_arch_find_this_arg () needs to find the this argument in a global
2776 * register.
2777 */
2778 cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
2779 out_reg = cfg->arch.reg_out0;
2780 if (cinfo->ret.storage == ArgValuetypeAddrInIReg)
2781 out_reg ++;
2782 ia64_mov (code, IA64_R10, out_reg);
2784 ia64_begin_bundle (code);
2785 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
2787 ia64_ld8 (code, GP_SCRATCH_REG, IA64_R8);
2789 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2791 /*
2792 * This nop will tell get_vcall_slot_addr that this is a virtual
2793 * call.
2794 */
2795 ia64_nop_i (code, 0x12345);
2797 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2799 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2801 code = emit_move_return_value (cfg, ins, code);
2802 break;
2803 }
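/*
 * A sketch of why the call site above is emitted one instruction per
 * bundle with a distinctive nop: the fixed shape lets code walking
 * back from the return address recognize a virtual call and recover
 * the slot address from r8/r11. The recognizer below is hypothetical;
 * the real decoding lives in get_vcall_slot_addr ():
 */
#if 0
static gboolean
is_vcall_marker (guint64 nop_imm)
{
	return nop_imm == 0x12345;
}
#endif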
2804 case OP_JMP: {
2805 /*
2806 * Keep in sync with the code in emit_epilog.
2807 */
2809 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2810 NOT_IMPLEMENTED;
2812 g_assert (!cfg->method->save_lmf);
2814 /* Load arguments into their original registers */
2815 code = emit_load_volatile_arguments (cfg, code);
2817 if (cfg->arch.stack_alloc_size) {
2818 if (cfg->arch.omit_fp) {
2819 if (ia64_is_imm14 (cfg->arch.stack_alloc_size))
2820 ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
2821 else {
2822 ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
2823 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
2824 }
2825 }
2826 else
2827 ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
2828 }
2829 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
2830 ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
2832 add_patch_info (cfg, code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2833 ia64_movl (code, GP_SCRATCH_REG, 0);
2834 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2835 ia64_br_cond_reg (code, IA64_B6);
2837 break;
2838 }
2839 case OP_BREAK:
2840 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_break);
2841 break;
2843 case OP_LOCALLOC: {
2844 gint32 abi_offset;
2846 /* FIXME: Sigaltstack support */
2848 /* keep alignment */
2849 ia64_adds_imm (code, GP_SCRATCH_REG, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->sreg1);
2850 ia64_movl (code, GP_SCRATCH_REG2, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
2851 ia64_and (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2853 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
2855 ia64_mov (code, ins->dreg, IA64_SP);
2857 /* An area at sp is reserved by the ABI for parameter passing */
2858 abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_LOCALLOC_ALIGNMENT);
2859 if (ia64_is_adds_imm (abi_offset))
2860 ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
2861 else {
2862 ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
2863 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
2864 }
2866 if (ins->flags & MONO_INST_INIT) {
2867 /* Upper limit */
2868 ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
2870 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
2872 /* Init loop */
2873 ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
2874 ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
2875 ia64_br_cond_pred (code, 8, -2);
2877 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2879 ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
2880 }
2882 break;
2883 }
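/*
 * A sketch of the size computation used by the localloc expansion
 * above: the request is rounded up to the localloc alignment, and an
 * extra scratch area (cfg->param_area plus 16 ABI-reserved bytes) is
 * kept below sp so the returned block sits above it:
 */
#if 0
static guint64
localloc_round (guint64 size, guint64 align)
{
	return (size + align - 1) & ~(align - 1);
}
#endif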
2884 case OP_LOCALLOC_IMM: {
2885 gint32 abi_offset;
2887 /* FIXME: Sigaltstack support */
2889 gssize size = ins->inst_imm;
2890 size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
2892 if (ia64_is_adds_imm (size))
2893 ia64_adds_imm (code, GP_SCRATCH_REG, size, IA64_R0);
2894 else
2895 ia64_movl (code, GP_SCRATCH_REG, size);
2897 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
2898 ia64_mov (code, ins->dreg, IA64_SP);
2900 /* An area at sp is reserved by the ABI for parameter passing */
2901 abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_FRAME_ALIGNMENT);
2902 if (ia64_is_adds_imm (abi_offset))
2903 ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
2904 else {
2905 ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
2906 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
2907 }
2909 if (ins->flags & MONO_INST_INIT) {
2910 /* Upper limit */
2911 ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
2913 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
2915 /* Init loop */
2916 ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
2917 ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
2918 ia64_br_cond_pred (code, 8, -2);
2920 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2922 ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
2923 }
2925 break;
2926 }
2927 case OP_TLS_GET:
2928 ia64_adds_imm (code, ins->dreg, ins->inst_offset, IA64_TP);
2929 ia64_ld8 (code, ins->dreg, ins->dreg);
2930 break;
2932 /* Synchronization */
2933 case OP_MEMORY_BARRIER:
2934 ia64_mf (code);
2935 break;
2936 case OP_ATOMIC_ADD_IMM_NEW_I4:
2937 g_assert (ins->inst_offset == 0);
2938 ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
2939 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
2940 break;
2941 case OP_ATOMIC_ADD_IMM_NEW_I8:
2942 g_assert (ins->inst_offset == 0);
2943 ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
2944 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
2945 break;
2946 case OP_ATOMIC_EXCHANGE_I4:
2947 ia64_xchg4_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
2948 ia64_sxt4 (code, ins->dreg, ins->dreg);
2949 break;
2950 case OP_ATOMIC_EXCHANGE_I8:
2951 ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
2952 break;
2953 case OP_ATOMIC_ADD_NEW_I4: {
2954 guint8 *label, *buf;
2956 /* From libatomic_ops */
2957 ia64_mf (code);
2959 ia64_begin_bundle (code);
2960 label = code.buf + code.nins;
2961 ia64_ld4_acq (code, GP_SCRATCH_REG, ins->sreg1);
2962 ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
2963 ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
2964 ia64_cmpxchg4_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
2965 ia64_cmp4_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2966 buf = code.buf + code.nins;
2967 ia64_br_cond_pred (code, 7, 0);
2968 ia64_begin_bundle (code);
2969 ia64_patch (buf, label);
2970 ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2971 break;
2972 }
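/*
 * The sequence above is the classic compare-and-swap loop (the comment
 * credits libatomic_ops): cmpxchg4.acq stores only if the location
 * still holds the value placed in ar.ccv. A sketch using the GCC
 * builtin instead of raw ia64 instructions:
 */
#if 0
static gint32
atomic_add_i4_semantics (volatile gint32 *dest, gint32 add)
{
	gint32 old;
	do {
		old = *dest;	/* ld4.acq, then mov ar.ccv = old */
	} while (!__sync_bool_compare_and_swap (dest, old, old + add));
	return old + add;	/* the freshly written value */
}
#endif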
2973 case OP_ATOMIC_ADD_NEW_I8: {
2974 guint8 *label, *buf;
2976 /* From libatomic_ops */
2977 ia64_mf (code);
2979 ia64_begin_bundle (code);
2980 label = code.buf + code.nins;
2981 ia64_ld8_acq (code, GP_SCRATCH_REG, ins->sreg1);
2982 ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
2983 ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
2984 ia64_cmpxchg8_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
2985 ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2986 buf = code.buf + code.nins;
2987 ia64_br_cond_pred (code, 7, 0);
2988 ia64_begin_bundle (code);
2989 ia64_patch (buf, label);
2990 ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2991 break;
2992 }
2994 /* Exception handling */
2995 case OP_CALL_HANDLER:
2996 /*
2997 * Using a call instruction would mess up the register stack, so
2998 * save the return address to a register and use a
2999 * branch.
3000 */
3001 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
3002 ia64_mov (code, IA64_R15, IA64_R0);
3003 ia64_mov_from_ip (code, GP_SCRATCH_REG);
3004 /* Add the length of OP_CALL_HANDLER */
3005 ia64_adds_imm (code, GP_SCRATCH_REG, 5 * 16, GP_SCRATCH_REG);
3006 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3007 ia64_movl (code, GP_SCRATCH_REG2, 0);
3008 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
3009 ia64_br_cond_reg (code, IA64_B6);
3010 // FIXME:
3011 //mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3012 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
3013 break;
3014 case OP_START_HANDLER: {
3015 /*
3016 * We receive the return address in GP_SCRATCH_REG.
3017 */
3018 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3020 /*
3021 * R15 determines our caller. It is used because it is writable using
3022 * libunwind.
3023 * R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context ()
3024 * R15 != 0 means we are called by call_filter ().
3025 */
3026 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
3027 ia64_cmp_eq (code, 6, 7, IA64_R15, IA64_R0);
3029 ia64_br_cond_pred (code, 6, 6);
3031 /*
3032 * Called by call_filter:
3033 * Allocate a new stack frame, and set the fp register from the
3034 * value passed in by the caller.
3035 * We allocate a frame similar to the one allocated by the prolog, so
3036 * if an exception is thrown while executing the filter, the
3037 * unwinder can unwind through the filter frame using the unwind
3038 * info for the prolog.
3039 */
3040 ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
3041 ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
3042 ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
3043 ia64_mov (code, cfg->frame_reg, IA64_R15);
3044 /* Signal to endfilter that we are called by call_filter */
3045 ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
3047 /* Branch target: */
3048 if (ia64_is_imm14 (spvar->inst_offset))
3049 ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
3050 else {
3051 ia64_movl (code, GP_SCRATCH_REG2, spvar->inst_offset);
3052 ia64_add (code, GP_SCRATCH_REG2, cfg->frame_reg, GP_SCRATCH_REG2);
3053 }
3055 /* Save the return address */
3056 ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
3057 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
3059 break;
3060 }
3061 case OP_ENDFINALLY:
3062 case OP_ENDFILTER: {
3063 /* FIXME: Return the value in ENDFILTER */
3064 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3066 /* Load the return address */
3067 if (ia64_is_imm14 (spvar->inst_offset)) {
3068 ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
3069 } else {
3070 ia64_movl (code, GP_SCRATCH_REG, spvar->inst_offset);
3071 ia64_add (code, GP_SCRATCH_REG, cfg->frame_reg, GP_SCRATCH_REG);
3072 }
3073 ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
3075 /* Test caller */
3076 ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, IA64_R0);
3077 ia64_br_cond_pred (code, 7, 4);
3079 /* Called by call_filter */
3080 /* Pop frame */
3081 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
3082 ia64_mov_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
3083 ia64_br_ret_reg (code, IA64_B0);
3085 /* Called by CALL_HANDLER */
3086 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
3087 ia64_br_cond_reg (code, IA64_B6);
3088 break;
3089 }
3090 case OP_THROW:
3091 ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
3092 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
3093 (gpointer)"mono_arch_throw_exception");
3095 /*
3096 * This might be the last instruction in the method, so add a dummy
3097 * instruction so the unwinder will work.
3098 */
3099 ia64_break_i (code, 0);
3100 break;
3101 case OP_RETHROW:
3102 ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
3103 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
3104 (gpointer)"mono_arch_rethrow_exception");
3106 ia64_break_i (code, 0);
3107 break;
3109 default:
3110 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3111 g_assert_not_reached ();
3112 }
3114 if ((code.buf - cfg->native_code - offset) > max_len) {
3115 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
3116 mono_inst_name (ins->opcode), max_len, code.buf - cfg->native_code - offset);
3117 g_assert_not_reached ();
3118 }
3120 cpos += max_len;
3122 last_ins = ins;
3123 last_offset = offset;
3124 }
3126 ia64_codegen_close (code);
3128 cfg->code_len = code.buf - cfg->native_code;
3129 }
3131 void
3132 mono_arch_register_lowlevel_calls (void)
3133 {
3134 }
3136 static Ia64InsType ins_types_in_template [32][3] = {
3137 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3138 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3139 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3140 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3141 {IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
3142 {IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
3143 {0, 0, 0},
3144 {0, 0, 0},
3145 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3146 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3147 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3148 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3149 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
3150 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
3151 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
3152 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
3153 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
3154 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
3155 {IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3156 {IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3157 {0, 0, 0},
3158 {0, 0, 0},
3159 {IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3160 {IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3161 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
3162 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
3163 {0, 0, 0},
3164 {0, 0, 0},
3165 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
3166 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
3167 {0, 0, 0},
3168 {0, 0, 0}
3171 static gboolean stops_in_template [32][3] = {
3172 { FALSE, FALSE, FALSE },
3173 { FALSE, FALSE, TRUE },
3174 { FALSE, TRUE, FALSE },
3175 { FALSE, TRUE, TRUE },
3176 { FALSE, FALSE, FALSE },
3177 { FALSE, FALSE, TRUE },
3178 { FALSE, FALSE, FALSE },
3179 { FALSE, FALSE, FALSE },
3181 { FALSE, FALSE, FALSE },
3182 { FALSE, FALSE, TRUE },
3183 { TRUE, FALSE, FALSE },
3184 { TRUE, FALSE, TRUE },
3185 { FALSE, FALSE, FALSE },
3186 { FALSE, FALSE, TRUE },
3187 { FALSE, FALSE, FALSE },
3188 { FALSE, FALSE, TRUE },
3190 { FALSE, FALSE, FALSE },
3191 { FALSE, FALSE, TRUE },
3192 { FALSE, FALSE, FALSE },
3193 { FALSE, FALSE, TRUE },
3194 { FALSE, FALSE, FALSE },
3195 { FALSE, FALSE, FALSE },
3196 { FALSE, FALSE, FALSE },
3197 { FALSE, FALSE, TRUE },
3199 { FALSE, FALSE, FALSE },
3200 { FALSE, FALSE, TRUE },
3201 { FALSE, FALSE, FALSE },
3202 { FALSE, FALSE, FALSE },
3203 { FALSE, FALSE, FALSE },
3204 { FALSE, FALSE, TRUE },
3205 { FALSE, FALSE, FALSE },
3206 { FALSE, FALSE, FALSE }
3209 static int last_stop_in_template [32] = {
3210 -1, 2, 1, 2, -1, 2, -1, -1,
3211 -1, 2, 0, 2, -1, 2, -1, 2,
3212 -1, 2, -1, 2, -1, -1, -1, 2,
3213 -1, 2, -1, -1, -1, 2, -1, -1
3216 static guint64 nops_for_ins_types [6] = {
3217 IA64_NOP_I,
3218 IA64_NOP_I,
3219 IA64_NOP_M,
3220 IA64_NOP_F,
3221 IA64_NOP_B,
3222 IA64_NOP_X
3225 #define ITYPE_MATCH(itype1, itype2) (((itype1) == (itype2)) || (((itype2) == IA64_INS_TYPE_A) && (((itype1) == IA64_INS_TYPE_I) || ((itype1) == IA64_INS_TYPE_M))))
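/*
 * ITYPE_MATCH accepts a template slot when the types are equal, plus
 * the one ia64 dispersal rule: A-type (ALU) instructions may occupy
 * either an I or an M slot. For example:
 */
#if 0
static void
itype_match_examples (void)
{
	g_assert (ITYPE_MATCH (IA64_INS_TYPE_I, IA64_INS_TYPE_A));	/* A in an I slot */
	g_assert (ITYPE_MATCH (IA64_INS_TYPE_M, IA64_INS_TYPE_A));	/* A in an M slot */
	g_assert (!ITYPE_MATCH (IA64_INS_TYPE_B, IA64_INS_TYPE_A));	/* never a B slot */
}
#endif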
3227 /*
3228 * Debugging support
3229 */
3231 #if 0
3232 #define DEBUG_INS_SCHED(a) do { a; } while (0)
3233 #else
3234 #define DEBUG_INS_SCHED(a)
3235 #endif
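/*
 * The dependency information consumed by ia64_analyze_deps () below
 * is, as its parsing loop implies, a flat stream of two-byte entries
 * per emitted instruction, terminated by IA64_END_OF_INS. A sketch of
 * one entry (struct name illustrative):
 */
#if 0
typedef struct {
	guint8 tag;	/* IA64_READ_GR, IA64_WRITE_PR, ..., IA64_END_OF_INS */
	guint8 reg;	/* register number the tag refers to */
} Ia64DepEntry;
#endif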
3237 static void
3238 ia64_analyze_deps (Ia64CodegenState *code, int *deps_start, int *stops)
3239 {
3240 int i, pos, ins_index, current_deps_start, current_ins_start, reg;
3241 guint8 *deps = code->dep_info;
3242 gboolean need_stop, no_stop;
3244 for (i = 0; i < code->nins; ++i)
3245 stops [i] = FALSE;
3247 ins_index = 0;
3248 current_deps_start = 0;
3249 current_ins_start = 0;
3250 deps_start [ins_index] = current_ins_start;
3251 pos = 0;
3252 no_stop = FALSE;
3253 DEBUG_INS_SCHED (printf ("BEGIN.\n"));
3254 while (pos < code->dep_info_pos) {
3255 need_stop = FALSE;
3256 switch (deps [pos]) {
3257 case IA64_END_OF_INS:
3258 ins_index ++;
3259 current_ins_start = pos + 2;
3260 deps_start [ins_index] = current_ins_start;
3261 no_stop = FALSE;
3262 DEBUG_INS_SCHED (printf ("(%d) END INS.\n", ins_index - 1));
3263 break;
3264 case IA64_NONE:
3265 break;
3266 case IA64_READ_GR:
3267 reg = deps [pos + 1];
3269 DEBUG_INS_SCHED (printf ("READ GR: %d\n", reg));
3270 for (i = current_deps_start; i < current_ins_start; i += 2)
3271 if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
3272 need_stop = TRUE;
3273 break;
3274 case IA64_WRITE_GR:
3275 reg = code->dep_info [pos + 1];
3277 DEBUG_INS_SCHED (printf ("WRITE GR: %d\n", reg));
3278 for (i = current_deps_start; i < current_ins_start; i += 2)
3279 if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
3280 need_stop = TRUE;
3281 break;
3282 case IA64_READ_PR:
3283 reg = deps [pos + 1];
3285 DEBUG_INS_SCHED (printf ("READ PR: %d\n", reg));
3286 for (i = current_deps_start; i < current_ins_start; i += 2)
3287 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3288 need_stop = TRUE;
3289 break;
3290 case IA64_READ_PR_BRANCH:
3291 reg = deps [pos + 1];
3293 /* Writes to prs by non-float instructions are visible to branches */
3294 DEBUG_INS_SCHED (printf ("READ PR BRANCH: %d\n", reg));
3295 for (i = current_deps_start; i < current_ins_start; i += 2)
3296 if (deps [i] == IA64_WRITE_PR_FLOAT && deps [i + 1] == reg)
3297 need_stop = TRUE;
3298 break;
3299 case IA64_WRITE_PR:
3300 reg = code->dep_info [pos + 1];
3302 DEBUG_INS_SCHED (printf ("WRITE PR: %d\n", reg));
3303 for (i = current_deps_start; i < current_ins_start; i += 2)
3304 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3305 need_stop = TRUE;
3306 break;
3307 case IA64_WRITE_PR_FLOAT:
3308 reg = code->dep_info [pos + 1];
3310 DEBUG_INS_SCHED (printf ("WRITE PR FP: %d\n", reg));
3311 for (i = current_deps_start; i < current_ins_start; i += 2)
3312 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3313 need_stop = TRUE;
3314 break;
3315 case IA64_READ_BR:
3316 reg = deps [pos + 1];
3318 DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg));
3319 for (i = current_deps_start; i < current_ins_start; i += 2)
3320 if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
3321 need_stop = TRUE;
3322 break;
3323 case IA64_WRITE_BR:
3324 reg = code->dep_info [pos + 1];
3326 DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg));
3327 for (i = current_deps_start; i < current_ins_start; i += 2)
3328 if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
3329 need_stop = TRUE;
3330 break;
3331 case IA64_READ_BR_BRANCH:
3332 reg = deps [pos + 1];
3334 /* Writes to brs are visible to branches */
3335 DEBUG_INS_SCHED (printf ("READ BR BRANCH: %d\n", reg));
3336 break;
3337 case IA64_READ_FR:
3338 reg = deps [pos + 1];
3340 DEBUG_INS_SCHED (printf ("READ FR: %d\n", reg));
3341 for (i = current_deps_start; i < current_ins_start; i += 2)
3342 if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
3343 need_stop = TRUE;
3344 break;
3345 case IA64_WRITE_FR:
3346 reg = code->dep_info [pos + 1];
3348 DEBUG_INS_SCHED (printf ("WRITE FR: %d\n", reg));
3349 for (i = current_deps_start; i < current_ins_start; i += 2)
3350 if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
3351 need_stop = TRUE;
3352 break;
3353 case IA64_READ_AR:
3354 reg = deps [pos + 1];
3356 DEBUG_INS_SCHED (printf ("READ AR: %d\n", reg));
3357 for (i = current_deps_start; i < current_ins_start; i += 2)
3358 if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
3359 need_stop = TRUE;
3360 break;
3361 case IA64_WRITE_AR:
3362 reg = code->dep_info [pos + 1];
3364 DEBUG_INS_SCHED (printf ("WRITE AR: %d\n", reg));
3365 for (i = current_deps_start; i < current_ins_start; i += 2)
3366 if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
3367 need_stop = TRUE;
3368 break;
3369 case IA64_NO_STOP:
3370 /*
3371 * Explicitly indicate that a stop is not required. Useful for
3372 * example when two predicated instructions with negated predicates
3373 * write the same registers.
3374 */
3375 no_stop = TRUE;
3376 break;
3377 default:
3378 g_assert_not_reached ();
3379 }
3380 pos += 2;
3382 if (need_stop && !no_stop) {
3383 g_assert (ins_index > 0);
3384 stops [ins_index - 1] = 1;
3386 DEBUG_INS_SCHED (printf ("STOP\n"));
3387 current_deps_start = current_ins_start;
3389 /* Skip remaining deps for this instruction */
3390 while (deps [pos] != IA64_END_OF_INS)
3391 pos += 2;
3392 }
3393 }
3395 if (code->nins > 0) {
3396 /* No dependency info for the last instruction */
3397 stops [code->nins - 1] = 1;
3398 }
3400 deps_start [code->nins] = code->dep_info_pos;
3401 }
3403 static void
3404 ia64_real_emit_bundle (Ia64CodegenState *code, int *deps_start, int *stops, int n, guint64 template, guint64 ins1, guint64 ins2, guint64 ins3, guint8 nops)
3405 {
3406 int stop_pos, i, deps_to_shift, dep_shift;
3408 g_assert (n <= code->nins);
3410 // if (n > 1) printf ("FOUND: %ld.\n", template);
3412 ia64_emit_bundle_template (code, template, ins1, ins2, ins3);
3414 stop_pos = last_stop_in_template [template] + 1;
3415 if (stop_pos > n)
3416 stop_pos = n;
3418 /* Compute the number of 'real' instructions before the stop */
3419 deps_to_shift = stop_pos;
3420 if (stop_pos >= 3 && (nops & (1 << 2)))
3421 deps_to_shift --;
3422 if (stop_pos >= 2 && (nops & (1 << 1)))
3423 deps_to_shift --;
3424 if (stop_pos >= 1 && (nops & (1 << 0)))
3425 deps_to_shift --;
3427 /*
3428 * We have to keep some dependencies whose instructions have been shifted
3429 * out of the buffer. So nullify the end_of_ins markers in the dependency
3430 * array.
3431 */
3432 for (i = deps_start [deps_to_shift]; i < deps_start [n]; i += 2)
3433 if (code->dep_info [i] == IA64_END_OF_INS)
3434 code->dep_info [i] = IA64_NONE;
3436 g_assert (deps_start [deps_to_shift] <= code->dep_info_pos);
3437 memcpy (code->dep_info, &code->dep_info [deps_start [deps_to_shift]], code->dep_info_pos - deps_start [deps_to_shift]);
3438 code->dep_info_pos = code->dep_info_pos - deps_start [deps_to_shift];
3440 dep_shift = deps_start [deps_to_shift];
3441 for (i = 0; i < code->nins + 1 - n; ++i)
3442 deps_start [i] = deps_start [n + i] - dep_shift;
3444 /* Determine the exact positions of instructions with unwind ops */
3445 if (code->unw_op_count) {
3446 int ins_pos [16];
3447 int curr_ins, curr_ins_pos;
3449 curr_ins = 0;
3450 curr_ins_pos = ((code->buf - code->region_start - 16) / 16) * 3;
3451 for (i = 0; i < 3; ++i) {
3452 if (! (nops & (1 << i))) {
3453 ins_pos [curr_ins] = curr_ins_pos + i;
3454 curr_ins ++;
3455 }
3456 }
3458 for (i = code->unw_op_pos; i < code->unw_op_count; ++i) {
3459 if (code->unw_ops_pos [i] < n) {
3460 code->unw_ops [i].when = ins_pos [code->unw_ops_pos [i]];
3461 //printf ("UNW-OP: %d -> %d\n", code->unw_ops_pos [i], code->unw_ops [i].when);
3462 }
3463 }
3464 if (code->unw_op_pos < code->unw_op_count)
3465 code->unw_op_pos += n;
3466 }
3468 if (n == code->nins) {
3469 code->template = 0;
3470 code->nins = 0;
3471 }
3472 else {
3473 memcpy (&code->instructions [0], &code->instructions [n], (code->nins - n) * sizeof (guint64));
3474 memcpy (&code->itypes [0], &code->itypes [n], (code->nins - n) * sizeof (int));
3475 memcpy (&stops [0], &stops [n], (code->nins - n) * sizeof (int));
3476 code->nins -= n;
3477 }
3478 }
3480 void
3481 ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
3482 {
3483 int i, ins_type, template, nins_to_emit;
3484 int deps_start [16];
3485 int stops [16];
3486 gboolean found;
3488 /*
3489 * We implement a simple scheduler which tries to put three instructions
3490 * per bundle, then two, then one.
3491 */
3492 ia64_analyze_deps (code, deps_start, stops);
3494 if ((code->nins >= 3) && !code->one_ins_per_bundle) {
3495 /* Find a suitable template */
3496 for (template = 0; template < 32; ++template) {
3497 if (stops_in_template [template][0] != stops [0] ||
3498 stops_in_template [template][1] != stops [1] ||
3499 stops_in_template [template][2] != stops [2])
3500 continue;
3502 found = TRUE;
3503 for (i = 0; i < 3; ++i) {
3504 ins_type = ins_types_in_template [template][i];
3505 switch (code->itypes [i]) {
3506 case IA64_INS_TYPE_A:
3507 found &= (ins_type == IA64_INS_TYPE_I) || (ins_type == IA64_INS_TYPE_M);
3508 break;
3509 default:
3510 found &= (ins_type == code->itypes [i]);
3511 break;
3512 }
3513 }
3515 if (found)
3516 found = debug_ins_sched ();
3518 if (found) {
3519 ia64_real_emit_bundle (code, deps_start, stops, 3, template, code->instructions [0], code->instructions [1], code->instructions [2], 0);
3520 break;
3521 }
3522 }
3523 }
3525 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3526 /* Wait for more instructions */
3527 return;
3529 /* If it didn't work out, try putting two instructions into one bundle */
3530 if ((code->nins >= 2) && !code->one_ins_per_bundle) {
3531 /* Try a nop at the end */
3532 for (template = 0; template < 32; ++template) {
3533 if (stops_in_template [template][0] != stops [0] ||
3534 ((stops_in_template [template][1] != stops [1]) &&
3535 (stops_in_template [template][2] != stops [1])))
3537 continue;
3539 if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
3540 !ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [1]))
3541 continue;
3543 if (!debug_ins_sched ())
3544 continue;
3546 ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], code->instructions [1], nops_for_ins_types [ins_types_in_template [template][2]], 1 << 2);
3547 break;
3548 }
3549 }
3551 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3552 /* Wait for more instructions */
3553 return;
3555 if ((code->nins >= 2) && !code->one_ins_per_bundle) {
3556 /* Try a nop in the middle */
3557 for (template = 0; template < 32; ++template) {
3558 if (((stops_in_template [template][0] != stops [0]) &&
3559 (stops_in_template [template][1] != stops [0])) ||
3560 stops_in_template [template][2] != stops [1])
3561 continue;
3563 if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
3564 !ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))
3565 continue;
3567 if (!debug_ins_sched ())
3568 continue;
3570 ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], nops_for_ins_types [ins_types_in_template [template][1]], code->instructions [1], 1 << 1);
3571 break;
3572 }
3573 }
3575 if ((code->nins >= 2) && flush && !code->one_ins_per_bundle) {
3576 /* Try a nop at the beginning */
3577 for (template = 0; template < 32; ++template) {
3578 if ((stops_in_template [template][1] != stops [0]) ||
3579 (stops_in_template [template][2] != stops [1]))
3580 continue;
3582 if (!ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [0]) ||
3583 !ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))
3584 continue;
3586 if (!debug_ins_sched ())
3587 continue;
3589 ia64_real_emit_bundle (code, deps_start, stops, 2, template, nops_for_ins_types [ins_types_in_template [template][0]], code->instructions [0], code->instructions [1], 1 << 0);
3590 break;
3591 }
3592 }
3594 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3595 /* Wait for more instructions */
3596 return;
3598 if (flush)
3599 nins_to_emit = code->nins;
3600 else
3601 nins_to_emit = 1;
3603 while (nins_to_emit > 0) {
3604 if (!debug_ins_sched ())
3605 stops [0] = 1;
3606 switch (code->itypes [0]) {
3607 case IA64_INS_TYPE_A:
3608 if (stops [0])
3609 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3610 else
3611 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3612 break;
3613 case IA64_INS_TYPE_I:
3614 if (stops [0])
3615 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3616 else
3617 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3618 break;
3619 case IA64_INS_TYPE_M:
3620 if (stops [0])
3621 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3622 else
3623 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3624 break;
3625 case IA64_INS_TYPE_B:
3626 if (stops [0])
3627 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
3628 else
3629 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIB, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
3630 break;
3631 case IA64_INS_TYPE_F:
3632 if (stops [0])
3633 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3634 else
3635 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFI, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3636 break;
3637 case IA64_INS_TYPE_LX:
3638 if (stops [0] || stops [1])
3639 ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
3640 else
3641 ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLX, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
3642 nins_to_emit --;
3643 break;
3644 default:
3645 g_assert_not_reached ();
3646 }
3647 nins_to_emit --;
3648 }
3649 }
3651 unw_dyn_region_info_t*
3652 mono_ia64_create_unwind_region (Ia64CodegenState *code)
3653 {
3654 unw_dyn_region_info_t *r;
3656 g_assert (code->nins == 0);
3657 r = g_malloc0 (_U_dyn_region_info_size (code->unw_op_count));
3658 memcpy (&r->op, &code->unw_ops, sizeof (unw_dyn_op_t) * code->unw_op_count);
3659 r->op_count = code->unw_op_count;
3660 r->insn_count = ((code->buf - code->region_start) >> 4) * 3;
3661 code->unw_op_count = 0;
3662 code->unw_op_pos = 0;
3663 code->region_start = code->buf;
3665 return r;
3666 }
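/*
 * A sketch of the insn_count computation above: libunwind counts in
 * instruction slots, and every 16-byte ia64 bundle holds exactly
 * three of them:
 */
#if 0
static int
slots_in_region (guint8 *start, guint8 *end)
{
	return ((end - start) / 16) * 3;	/* same as (len >> 4) * 3 */
}
#endif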
3668 static void
3669 ia64_patch (unsigned char* code, gpointer target)
3670 {
3671 int template, i;
3672 guint64 instructions [3];
3673 guint8 gen_buf [16];
3674 Ia64CodegenState gen;
3675 int ins_to_skip;
3676 gboolean found;
3678 /*
3679 * code encodes both the position inside the buffer and code.nins when
3680 * the instruction was emitted.
3681 */
3682 ins_to_skip = (guint64)code % 16;
3683 code = (unsigned char*)((guint64)code & ~15);
3685 /*
3686 * Search for the first instruction which is 'patchable', skipping
3687 * ins_to_skip instructions.
3688 */
3690 while (TRUE) {
3692 template = ia64_bundle_template (code);
3693 instructions [0] = ia64_bundle_ins1 (code);
3694 instructions [1] = ia64_bundle_ins2 (code);
3695 instructions [2] = ia64_bundle_ins3 (code);
3697 ia64_codegen_init (gen, gen_buf);
3699 found = FALSE;
3700 for (i = 0; i < 3; ++i) {
3701 guint64 ins = instructions [i];
3702 int opcode = ia64_ins_opcode (ins);
3704 if (ins == nops_for_ins_types [ins_types_in_template [template][i]])
3705 continue;
3707 if (ins_to_skip) {
3708 ins_to_skip --;
3709 continue;
3712 switch (ins_types_in_template [template][i]) {
3713 case IA64_INS_TYPE_A:
3714 case IA64_INS_TYPE_M:
3715 if ((opcode == 8) && (ia64_ins_x2a (ins) == 2) && (ia64_ins_ve (ins) == 0)) {
3716 /* adds */
3717 ia64_adds_imm_pred (gen, ia64_ins_qp (ins), ia64_ins_r1 (ins), (guint64)target, ia64_ins_r3 (ins));
3718 instructions [i] = gen.instructions [0];
3719 found = TRUE;
3720 }
3721 else
3722 NOT_IMPLEMENTED;
3723 break;
3724 case IA64_INS_TYPE_B:
3725 if ((opcode == 4) && (ia64_ins_btype (ins) == 0)) {
3726 /* br.cond */
3727 gint64 disp = ((guint8*)target - code) >> 4;
3729 /* FIXME: hints */
3730 ia64_br_cond_hint_pred (gen, ia64_ins_qp (ins), disp, 0, 0, 0);
3732 instructions [i] = gen.instructions [0];
3733 found = TRUE;
3734 }
3735 else if (opcode == 5) {
3736 /* br.call */
3737 gint64 disp = ((guint8*)target - code) >> 4;
3739 /* FIXME: hints */
3740 ia64_br_call_hint_pred (gen, ia64_ins_qp (ins), ia64_ins_b1 (ins), disp, 0, 0, 0);
3741 instructions [i] = gen.instructions [0];
3742 found = TRUE;
3743 }
3744 else
3745 NOT_IMPLEMENTED;
3746 break;
3747 case IA64_INS_TYPE_LX:
3748 if (i == 1)
3749 break;
3751 if ((opcode == 6) && (ia64_ins_vc (ins) == 0)) {
3752 /* movl */
3753 ia64_movl_pred (gen, ia64_ins_qp (ins), ia64_ins_r1 (ins), target);
3754 instructions [1] = gen.instructions [0];
3755 instructions [2] = gen.instructions [1];
3756 found = TRUE;
3757 }
3758 else
3759 NOT_IMPLEMENTED;
3761 break;
3762 default:
3763 NOT_IMPLEMENTED;
3764 }
3766 if (found) {
3767 /* Rewrite code */
3768 ia64_codegen_init (gen, code);
3769 ia64_emit_bundle_template (&gen, template, instructions [0], instructions [1], instructions [2]);
3770 return;
3771 }
3773 }
3774 code += 16;
3775 }
3776 }
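/*
 * A sketch of the address encoding undone at the top of ia64_patch ():
 * bundles are 16-byte aligned, so the low four bits of a patch-site
 * pointer are free to carry code.nins at emission time, i.e. how many
 * real (non-nop) instructions to skip inside the bundle:
 */
#if 0
static unsigned char *
encode_patch_site (unsigned char *bundle, int ins_to_skip)
{
	g_assert (((guint64)bundle & 15) == 0 && ins_to_skip < 16);
	return bundle + ins_to_skip;
}
#endif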
3778 void
3779 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3780 {
3781 MonoJumpInfo *patch_info;
3783 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3784 unsigned char *ip = patch_info->ip.i + code;
3785 const unsigned char *target;
3787 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3789 if (patch_info->type == MONO_PATCH_INFO_NONE)
3790 continue;
3791 if (mono_compile_aot) {
3792 NOT_IMPLEMENTED;
3793 }
3795 ia64_patch (ip, (gpointer)target);
3796 }
3797 }
3799 guint8 *
3800 mono_arch_emit_prolog (MonoCompile *cfg)
3801 {
3802 MonoMethod *method = cfg->method;
3803 MonoMethodSignature *sig;
3804 MonoInst *inst;
3805 int alloc_size, pos, i;
3806 Ia64CodegenState code;
3807 CallInfo *cinfo;
3809 sig = mono_method_signature (method);
3810 pos = 0;
3812 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
3814 cfg->code_size = MAX (cfg->header->code_size * 4, 512);
3816 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3817 cfg->code_size += 1024;
3818 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3819 cfg->code_size += 1024;
3821 cfg->native_code = g_malloc (cfg->code_size);
3823 ia64_codegen_init (code, cfg->native_code);
3825 alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
3826 if (cfg->param_area)
3827 alloc_size += cfg->param_area;
3828 if (alloc_size)
3829 /* scratch area */
3830 alloc_size += 16;
3831 alloc_size = ALIGN_TO (alloc_size, MONO_ARCH_FRAME_ALIGNMENT);
3833 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
3834 /* Force sp to be saved/restored */
3835 alloc_size += MONO_ARCH_FRAME_ALIGNMENT;
3837 cfg->arch.stack_alloc_size = alloc_size;
3839 pos = 0;
3841 if (method->save_lmf) {
3842 /* No LMF on IA64 */
3843 }
3845 alloc_size -= pos;
3847 ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + cfg->arch.reg_saved_ar_pfs);
3848 ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
3849 ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + cfg->arch.reg_saved_b0);
3850 ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
3852 if ((alloc_size || cinfo->stack_usage) && !cfg->arch.omit_fp) {
3853 ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + cfg->arch.reg_saved_sp);
3854 ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
3855 if (cfg->frame_reg != cfg->arch.reg_saved_sp)
3856 ia64_mov (code, cfg->frame_reg, IA64_SP);
3857 }
3859 if (alloc_size) {
3860 #if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
3861 int pagesize = getpagesize ();
3863 if (alloc_size >= pagesize) {
3864 gint32 remaining_size = alloc_size;
3866 /* Generate stack touching code */
3867 ia64_mov (code, GP_SCRATCH_REG, IA64_SP);
3868 while (remaining_size >= pagesize) {
3869 ia64_movl (code, GP_SCRATCH_REG2, pagesize);
3870 ia64_sub (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
3871 ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
3872 remaining_size -= pagesize;
3873 }
3874 }
3875 #endif
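/*
 * Sketch of the probe sequence emitted by the loop above for a frame
 * spanning several pages (illustrative):
 *
 *   mov  r31 = sp
 *   movl r30 = pagesize ;;
 *   sub  r31 = r31, r30 ;;
 *   ld8  r30 = [r31]       // touch the next page
 *   ...one load per page...
 *
 * Touching every page in order guarantees the guard page faults before
 * sp is moved below it.
 */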
3876 if (ia64_is_imm14 (-alloc_size)) {
3877 if (cfg->arch.omit_fp)
3878 ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
3879 ia64_adds_imm (code, IA64_SP, (-alloc_size), IA64_SP);
3880 }
3881 else {
3882 ia64_movl (code, GP_SCRATCH_REG, -alloc_size);
3883 if (cfg->arch.omit_fp)
3884 ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
3885 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
3886 }
3887 }
3889 ia64_begin_bundle (code);
3891 /* Initialize unwind info */
3892 cfg->arch.r_pro = mono_ia64_create_unwind_region (&code);
3894 if (sig->ret->type != MONO_TYPE_VOID) {
3895 if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
3896 /* Save volatile arguments to the stack */
3897 NOT_IMPLEMENTED;
3898 }
3899 }
3901 /* Keep this in sync with emit_load_volatile_arguments */
3902 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3903 ArgInfo *ainfo = cinfo->args + i;
3904 gint32 stack_offset;
3905 MonoType *arg_type;
3907 inst = cfg->args [i];
3909 if (sig->hasthis && (i == 0))
3910 arg_type = &mono_defaults.object_class->byval_arg;
3911 else
3912 arg_type = sig->params [i - sig->hasthis];
3914 arg_type = mono_type_get_underlying_type (arg_type);
3916 stack_offset = ainfo->offset + ARGS_OFFSET;
3918 /*
3919 * FIXME: Native code might pass non register sized integers
3920 * without initializing the upper bits.
3921 */
3922 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED && !arg_type->byref && ainfo->storage == ArgInIReg) {
3923 int reg = cfg->arch.reg_in0 + ainfo->reg;
3925 switch (mono_type_to_load_membase (cfg, arg_type)) {
3926 case OP_LOADI1_MEMBASE:
3927 ia64_sxt1 (code, reg, reg);
3928 break;
3929 case OP_LOADU1_MEMBASE:
3930 ia64_zxt1 (code, reg, reg);
3931 break;
3932 case OP_LOADI2_MEMBASE:
3933 ia64_sxt2 (code, reg, reg);
3934 break;
3935 case OP_LOADU2_MEMBASE:
3936 ia64_zxt2 (code, reg, reg);
3937 break;
3938 default:
3939 break;
3940 }
3941 }
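/*
 * Example of the case handled above (illustrative): a native caller may
 * pass a 16 bit integer in an input register with garbage in bits
 * 16..63, so the NATIVE_TO_MANAGED wrapper re-runs sxt2/zxt2 on the
 * register before managed code sees the value.
 */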
3943 /* Save volatile arguments to the stack */
3944 if (inst->opcode != OP_REGVAR) {
3945 switch (ainfo->storage) {
3946 case ArgInIReg:
3947 case ArgInFloatReg:
3948 case ArgInFloatRegR4:
3949 g_assert (inst->opcode == OP_REGOFFSET);
3950 if (ia64_is_adds_imm (inst->inst_offset))
3951 ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
3952 else {
3953 ia64_movl (code, GP_SCRATCH_REG2, inst->inst_offset);
3954 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
3955 }
3956 if (arg_type->byref)
3957 ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
3958 else {
3959 switch (arg_type->type) {
3960 case MONO_TYPE_R4:
3961 ia64_stfs_hint (code, GP_SCRATCH_REG, ainfo->reg, 0);
3962 break;
3963 case MONO_TYPE_R8:
3964 ia64_stfd_hint (code, GP_SCRATCH_REG, ainfo->reg, 0);
3965 break;
3966 default:
3967 ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
3968 break;
3969 }
3970 }
3971 break;
3972 case ArgOnStack:
3973 break;
3974 case ArgAggregate:
3975 if (ainfo->nslots != ainfo->nregs)
3976 NOT_IMPLEMENTED;
3978 g_assert (inst->opcode == OP_REGOFFSET);
3979 ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
3980 for (i = 0; i < ainfo->nregs; ++i) {
3981 switch (ainfo->atype) {
3982 case AggregateNormal:
3983 ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg + i, sizeof (gpointer), 0);
3984 break;
3985 case AggregateSingleHFA:
3986 ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, ainfo->reg + i, 4, 0);
3987 break;
3988 case AggregateDoubleHFA:
3989 ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, ainfo->reg + i, sizeof (gpointer), 0);
3990 break;
3991 default:
3992 NOT_IMPLEMENTED;
3993 }
3994 }
3995 break;
3996 default:
3997 g_assert_not_reached ();
3998 }
3999 }
4001 if (inst->opcode == OP_REGVAR) {
4002 /* Argument allocated to (non-volatile) register */
4003 switch (ainfo->storage) {
4004 case ArgInIReg:
4005 if (inst->dreg != cfg->arch.reg_in0 + ainfo->reg)
4006 ia64_mov (code, inst->dreg, cfg->arch.reg_in0 + ainfo->reg);
4007 break;
4008 case ArgOnStack:
4009 ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
4010 ia64_ld8 (code, inst->dreg, GP_SCRATCH_REG);
4011 break;
4012 default:
4013 NOT_IMPLEMENTED;
4014 }
4015 }
4016 }
4018 if (method->save_lmf) {
4019 /* No LMF on IA64 */
4020 }
4022 ia64_codegen_close (code);
4024 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4025 code.buf = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code.buf, TRUE);
4027 cfg->code_len = code.buf - cfg->native_code;
4029 g_assert (cfg->code_len < cfg->code_size);
4031 cfg->arch.prolog_end_offset = cfg->code_len;
4033 return code.buf;
4034 }
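/*
 * mono_arch_emit_epilog () undoes the work of the prolog: it reloads
 * aggregate return values into their ABI registers, pops the stack
 * allocation, restores ar.pfs and b0 from the registers the prolog
 * saved them in, and returns through b0.
 */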
4036 void
4037 mono_arch_emit_epilog (MonoCompile *cfg)
4038 {
4039 MonoMethod *method = cfg->method;
4040 int i, pos;
4041 int max_epilog_size = 16 * 4;
4042 Ia64CodegenState code;
4043 guint8 *buf;
4044 CallInfo *cinfo;
4045 ArgInfo *ainfo;
4047 if (mono_jit_trace_calls != NULL)
4048 max_epilog_size += 1024;
4050 cfg->arch.epilog_begin_offset = cfg->code_len;
4052 while (cfg->code_len + max_epilog_size > cfg->code_size) {
4053 cfg->code_size *= 2;
4054 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4055 mono_jit_stats.code_reallocs++;
4056 }
4058 /* FIXME: Emit unwind info */
4060 buf = cfg->native_code + cfg->code_len;
4062 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4063 buf = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, buf, TRUE);
4065 ia64_codegen_init (code, buf);
4067 /* the code restoring the registers must be kept in sync with OP_JMP */
4068 pos = 0;
4070 if (method->save_lmf) {
4071 /* No LMF on IA64 */
4072 }
4074 /* Load returned vtypes into registers if needed */
4075 cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
4076 ainfo = &cinfo->ret;
4077 switch (ainfo->storage) {
4078 case ArgAggregate:
4079 if (ainfo->nslots != ainfo->nregs)
4080 NOT_IMPLEMENTED;
4082 g_assert (cfg->ret->opcode == OP_REGOFFSET);
4083 ia64_adds_imm (code, GP_SCRATCH_REG, cfg->ret->inst_offset, cfg->ret->inst_basereg);
4084 for (i = 0; i < ainfo->nregs; ++i) {
4085 switch (ainfo->atype) {
4086 case AggregateNormal:
4087 ia64_ld8_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, sizeof (gpointer), 0);
4088 break;
4089 case AggregateSingleHFA:
4090 ia64_ldfs_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, 4, 0);
4091 break;
4092 case AggregateDoubleHFA:
4093 ia64_ldfd_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, sizeof (gpointer), 0);
4094 break;
4095 default:
4096 g_assert_not_reached ();
4097 }
4098 }
4099 break;
4100 default:
4101 break;
4102 }
4104 ia64_begin_bundle (code);
4106 code.region_start = cfg->native_code;
4108 /* Label the unwind state at the start of the exception throwing region */
4109 //ia64_unw_label_state (code, 1234);
4111 if (cfg->arch.stack_alloc_size) {
4112 if (cfg->arch.omit_fp) {
4113 if (ia64_is_imm14 (cfg->arch.stack_alloc_size)) {
4114 ia64_unw_pop_frames (code, 1);
4115 ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
4116 } else {
4117 ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
4118 ia64_unw_pop_frames (code, 1);
4119 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
4120 }
4121 }
4122 else {
4123 ia64_unw_pop_frames (code, 1);
4124 ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
4125 }
4126 }
4127 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
4128 ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
4129 ia64_br_ret_reg (code, IA64_B0);
4131 ia64_codegen_close (code);
4133 cfg->arch.r_epilog = mono_ia64_create_unwind_region (&code);
4134 cfg->arch.r_pro->next = cfg->arch.r_epilog;
4136 cfg->code_len = code.buf - cfg->native_code;
4138 g_assert (cfg->code_len < cfg->code_size);
4139 }
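/*
 * mono_arch_emit_exceptions () appends the out-of-line exception
 * throwing code. Each MONO_PATCH_INFO_EXC site branches here; the code
 * loads the offset of the throw site and the exception token index into
 * out registers, then calls mono_arch_throw_corlib_exception, sharing
 * one throw sequence per exception class.
 */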
4141 void
4142 mono_arch_emit_exceptions (MonoCompile *cfg)
4143 {
4144 MonoJumpInfo *patch_info;
4145 int i, nthrows;
4146 Ia64CodegenState code;
4147 gboolean empty = TRUE;
4148 //unw_dyn_region_info_t *r_exceptions;
4149 MonoClass *exc_classes [16];
4150 guint8 *exc_throw_start [16], *exc_throw_end [16];
4151 guint32 code_size = 0;
4153 /* Compute needed space */
4154 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4155 if (patch_info->type == MONO_PATCH_INFO_EXC)
4156 code_size += 256;
4157 if (patch_info->type == MONO_PATCH_INFO_R8)
4158 code_size += 8 + 7; /* sizeof (double) + alignment */
4159 if (patch_info->type == MONO_PATCH_INFO_R4)
4160 code_size += 4 + 7; /* sizeof (float) + alignment */
4161 }
4163 if (code_size == 0)
4164 return;
4166 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4167 cfg->code_size *= 2;
4168 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4169 mono_jit_stats.code_reallocs++;
4170 }
4172 ia64_codegen_init (code, cfg->native_code + cfg->code_len);
4174 /* The unwind state here is the same as before the epilog */
4175 //ia64_unw_copy_state (code, 1234);
4177 /* add code to raise exceptions */
4178 /* FIXME: Optimize this */
4179 nthrows = 0;
4180 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4181 switch (patch_info->type) {
4182 case MONO_PATCH_INFO_EXC: {
4183 MonoClass *exc_class;
4184 guint8* throw_ip;
4185 guint8* buf;
4186 guint64 exc_token_index;
4188 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4189 g_assert (exc_class);
4190 exc_token_index = mono_metadata_token_index (exc_class->type_token);
4191 throw_ip = cfg->native_code + patch_info->ip.i;
4193 ia64_begin_bundle (code);
4195 ia64_patch (cfg->native_code + patch_info->ip.i, code.buf);
4197 /* Find a throw sequence for the same exception class */
4198 for (i = 0; i < nthrows; ++i)
4199 if (exc_classes [i] == exc_class)
4200 break;
4202 if (i < nthrows) {
4203 gint64 offset = exc_throw_end [i] - 16 - throw_ip;
4205 if (ia64_is_adds_imm (offset))
4206 ia64_adds_imm (code, cfg->arch.reg_out0 + 1, offset, IA64_R0);
4207 else
4208 ia64_movl (code, cfg->arch.reg_out0 + 1, offset);
4210 buf = code.buf + code.nins;
4211 ia64_br_cond_pred (code, 0, 0);
4212 ia64_begin_bundle (code);
4213 ia64_patch (buf, exc_throw_start [i]);
4215 patch_info->type = MONO_PATCH_INFO_NONE;
4216 }
4217 else {
4218 /* Arg1 */
4219 buf = code.buf;
4220 ia64_movl (code, cfg->arch.reg_out0 + 1, 0);
4222 ia64_begin_bundle (code);
4224 if (nthrows < 16) {
4225 exc_classes [nthrows] = exc_class;
4226 exc_throw_start [nthrows] = code.buf;
4227 }
4229 /* Arg2 */
4230 if (ia64_is_adds_imm (exc_token_index))
4231 ia64_adds_imm (code, cfg->arch.reg_out0 + 0, exc_token_index, IA64_R0);
4232 else
4233 ia64_movl (code, cfg->arch.reg_out0 + 0, exc_token_index);
4235 patch_info->data.name = "mono_arch_throw_corlib_exception";
4236 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4237 patch_info->ip.i = code.buf + code.nins - cfg->native_code;
4239 /* Indirect call */
4240 ia64_movl (code, GP_SCRATCH_REG, 0);
4241 ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
4242 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
4243 ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
4245 ia64_br_call_reg (code, IA64_B0, IA64_B6);
4247 /* Patch up the throw offset */
4248 ia64_begin_bundle (code);
4250 ia64_patch (buf, (gpointer)(code.buf - 16 - throw_ip));
4252 if (nthrows < 16) {
4253 exc_throw_end [nthrows] = code.buf;
4254 nthrows ++;
4255 }
4256 }
4258 empty = FALSE;
4259 break;
4260 }
4261 default:
4262 break;
4263 }
4264 }
4266 if (!empty)
4267 /* The unwinder needs this to work */
4268 ia64_break_i (code, 0);
4270 ia64_codegen_close (code);
4272 /* FIXME: */
4273 //r_exceptions = mono_ia64_create_unwind_region (&code);
4274 //cfg->arch.r_epilog = r_exceptions;
4276 cfg->code_len = code.buf - cfg->native_code;
4278 g_assert (cfg->code_len < cfg->code_size);
4279 }
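/*
 * mono_arch_instrument_prolog () injects a call to FUNC (typically
 * mono_trace_enter_method) at method entry. With enable_arguments set
 * it also spills the incoming arguments to a temporary stack area so
 * the trace function can inspect them.
 */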
4281 void*
4282 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4283 {
4284 Ia64CodegenState code;
4285 CallInfo *cinfo = NULL;
4286 MonoMethodSignature *sig;
4287 MonoInst *ins;
4288 int i, n, stack_area = 0;
4290 ia64_codegen_init (code, p);
4292 /* Keep this in sync with mono_arch_get_argument_info */
4294 if (enable_arguments) {
4295 /* Allocate a new area on the stack and save arguments there */
4296 sig = mono_method_signature (cfg->method);
4298 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
4300 n = sig->param_count + sig->hasthis;
4302 stack_area = ALIGN_TO (n * 8, 16);
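/* Example (illustrative): an instance method with two parameters has
 * n = 3, so stack_area = ALIGN_TO (3 * 8, 16) = 32 bytes. */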
4304 if (n) {
4305 ia64_movl (code, GP_SCRATCH_REG, stack_area);
4307 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
4309 /* FIXME: Allocate out registers */
4311 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_SP);
4313 /* Required by the ABI */
4314 ia64_adds_imm (code, IA64_SP, -16, IA64_SP);
4316 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
4317 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4319 /* Save arguments to the stack */
4320 for (i = 0; i < n; ++i) {
4321 ins = cfg->args [i];
4323 if (ins->opcode == OP_REGVAR) {
4324 ia64_movl (code, GP_SCRATCH_REG, (i * 8));
4325 ia64_add (code, GP_SCRATCH_REG, cfg->arch.reg_out0 + 1, GP_SCRATCH_REG);
4326 ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
4327 }
4328 else {
4329 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
4330 ia64_add (code, GP_SCRATCH_REG, ins->inst_basereg, GP_SCRATCH_REG);
4331 ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
4332 ia64_movl (code, GP_SCRATCH_REG, (i * 8));
4333 ia64_add (code, GP_SCRATCH_REG, cfg->arch.reg_out0 + 1, GP_SCRATCH_REG);
4334 ia64_st8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG2);
4335 }
4336 }
4337 }
4338 else
4339 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_R0);
4340 }
4341 else
4342 ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_R0);
4344 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
4345 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4347 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
4349 if (enable_arguments && stack_area) {
4350 ia64_movl (code, GP_SCRATCH_REG, stack_area);
4352 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
4354 ia64_adds_imm (code, IA64_SP, 16, IA64_SP);
4355 }
4357 ia64_codegen_close (code);
4359 return code.buf;
4360 }
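/*
 * mono_arch_instrument_epilog_full () mirrors the prolog
 * instrumentation: it saves the return value, calls FUNC (typically
 * mono_trace_leave_method) with the method and the value, then
 * restores the return value.
 */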
4362 void*
4363 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
4364 {
4365 Ia64CodegenState code;
4366 CallInfo *cinfo = NULL;
4367 MonoMethod *method = cfg->method;
4368 MonoMethodSignature *sig = mono_method_signature (cfg->method);
4370 ia64_codegen_init (code, p);
4372 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
4374 /* Save return value + pass it to func */
4375 switch (cinfo->ret.storage) {
4376 case ArgNone:
4377 break;
4378 case ArgInIReg:
4379 ia64_mov (code, cfg->arch.reg_saved_return_val, cinfo->ret.reg);
4380 ia64_mov (code, cfg->arch.reg_out0 + 1, cinfo->ret.reg);
4381 break;
4382 case ArgInFloatReg:
4383 ia64_adds_imm (code, IA64_SP, -16, IA64_SP);
4384 ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
4385 ia64_stfd_hint (code, GP_SCRATCH_REG, cinfo->ret.reg, 0);
4386 ia64_fmov (code, 8 + 1, cinfo->ret.reg);
4387 break;
4388 case ArgValuetypeAddrInIReg:
4389 ia64_mov (code, cfg->arch.reg_out0 + 1, cfg->arch.reg_in0 + cinfo->ret.reg);
4390 break;
4391 case ArgAggregate:
4392 NOT_IMPLEMENTED;
4393 break;
4394 default:
4395 break;
4396 }
4398 add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, method);
4399 ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
4400 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);
4402 /* Restore return value */
4403 switch (cinfo->ret.storage) {
4404 case ArgNone:
4405 break;
4406 case ArgInIReg:
4407 ia64_mov (code, cinfo->ret.reg, cfg->arch.reg_saved_return_val);
4408 break;
4409 case ArgInFloatReg:
4410 ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
4411 ia64_ldfd (code, cinfo->ret.reg, GP_SCRATCH_REG);
4412 break;
4413 case ArgValuetypeAddrInIReg:
4414 break;
4415 case ArgAggregate:
4416 break;
4417 default:
4418 break;
4419 }
4421 ia64_codegen_close (code);
4423 return code.buf;
4424 }
4426 void
4427 mono_arch_save_unwind_info (MonoCompile *cfg)
4428 {
4429 unw_dyn_info_t *di;
4431 /* FIXME: Unregister this for dynamic methods */
4433 di = g_malloc0 (sizeof (unw_dyn_info_t));
4434 di->start_ip = (unw_word_t) cfg->native_code;
4435 di->end_ip = (unw_word_t) cfg->native_code + cfg->code_len;
4436 di->gp = 0;
4437 di->format = UNW_INFO_FORMAT_DYNAMIC;
4438 di->u.pi.name_ptr = (unw_word_t)mono_method_full_name (cfg->method, TRUE);
4439 di->u.pi.regions = cfg->arch.r_pro;
4441 _U_dyn_register (di);
4443 /*
4444 {
4445 unw_dyn_region_info_t *region = di->u.pi.regions;
4447 printf ("Unwind info for method %s:\n", mono_method_full_name (cfg->method, TRUE));
4448 while (region) {
4449 printf (" [Region: %d]\n", region->insn_count);
4450 region = region->next;
4451 }
4452 }
4453 */
4454 }
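/*
 * mono_arch_flush_icache () below flushes the instruction cache for a
 * freshly written code range with fc.i, one 32 byte line at a time,
 * starting from a 64 byte aligned base.
 */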
4456 void
4457 mono_arch_flush_icache (guint8 *code, gint size)
4458 {
4459 guint8* p = (guint8*)((guint64)code & ~(0x3f));
4460 guint8* end = (guint8*)((guint64)code + size);
4462 #ifdef __INTEL_COMPILER
4463 /* icc doesn't define an fc.i intrinsic, but fc==fc.i on Itanium 2 */
4464 while (p < end) {
4465 __fc ((guint64)p);
4466 p += 32;
4467 }
4468 #else
4469 while (p < end) {
4470 __asm__ __volatile__ ("fc.i %0"::"r"(p));
4471 /* FIXME: This could be increased to 128 on some cpus */
4472 p += 32;
4473 }
4474 #endif
4475 }
4477 void
4478 mono_arch_flush_register_windows (void)
4479 {
4480 /* Not needed because of libunwind */
4481 }
4483 gboolean
4484 mono_arch_is_inst_imm (gint64 imm)
4485 {
4486 /* The lowering pass will take care of it */
4488 return TRUE;
4489 }
4491 /*
4492 * Determine whether the trap whose info is in SIGINFO is caused by
4493 * integer overflow.
4494 */
4495 gboolean
4496 mono_arch_is_int_overflow (void *sigctx, void *info)
4497 {
4498 /* Division is emulated with explicit overflow checks */
4499 return FALSE;
4500 }
4502 guint32
4503 mono_arch_get_patch_offset (guint8 *code)
4504 {
4505 NOT_IMPLEMENTED;
4507 return 0;
4508 }
4510 gpointer
4511 mono_arch_get_vcall_slot (guint8* code, mgreg_t *regs, int *displacement)
4512 {
4513 guint8 *bundle2 = code - 48;
4514 guint8 *bundle3 = code - 32;
4515 guint8 *bundle4 = code - 16;
4516 guint64 ins21 = ia64_bundle_ins1 (bundle2);
4517 guint64 ins22 = ia64_bundle_ins2 (bundle2);
4518 guint64 ins23 = ia64_bundle_ins3 (bundle2);
4519 guint64 ins31 = ia64_bundle_ins1 (bundle3);
4520 guint64 ins32 = ia64_bundle_ins2 (bundle3);
4521 guint64 ins33 = ia64_bundle_ins3 (bundle3);
4522 guint64 ins41 = ia64_bundle_ins1 (bundle4);
4523 guint64 ins42 = ia64_bundle_ins2 (bundle4);
4524 guint64 ins43 = ia64_bundle_ins3 (bundle4);
4526 /*
4527 * Virtual calls are made with:
4529 * [MII] ld8 r31=[r8]
4530 * nop.i 0x0
4531 * nop.i 0x0;;
4532 * [MII] nop.m 0x0
4533 * mov.sptk b6=r31,0x2000000000f32a80
4534 * nop.i 0x0
4535 * [MII] nop.m 0x0
4536 * nop.i 0x123456
4537 * nop.i 0x0
4538 * [MIB] nop.m 0x0
4539 * nop.i 0x0
4540 * br.call.sptk.few b0=b6;;
4541 */
4543 if (((ia64_bundle_template (bundle3) == IA64_TEMPLATE_MII) ||
4544 (ia64_bundle_template (bundle3) == IA64_TEMPLATE_MIIS)) &&
4545 (ia64_bundle_template (bundle4) == IA64_TEMPLATE_MIBS) &&
4546 (ins31 == IA64_NOP_M) &&
4547 (ia64_ins_opcode (ins32) == 0) && (ia64_ins_x3 (ins32) == 0) && (ia64_ins_x6 (ins32) == 0x1) && (ia64_ins_y (ins32) == 0) &&
4548 (ins33 == IA64_NOP_I) &&
4549 (ins41 == IA64_NOP_M) &&
4550 (ins42 == IA64_NOP_I) &&
4551 (ia64_ins_opcode (ins43) == 1) && (ia64_ins_b1 (ins43) == 0) && (ia64_ins_b2 (ins43) == 6) &&
4552 ((ins32 >> 6) & 0xfffff) == 0x12345) {
4553 g_assert (ins21 == IA64_NOP_M);
4554 g_assert (ins23 == IA64_NOP_I);
4555 g_assert (ia64_ins_opcode (ins22) == 0);
4556 g_assert (ia64_ins_x3 (ins22) == 7);
4557 g_assert (ia64_ins_x (ins22) == 0);
4558 g_assert (ia64_ins_b1 (ins22) == IA64_B6);
4560 *displacement = (gssize)regs [IA64_R8] - (gssize)regs [IA64_R11];
4562 return (gpointer)regs [IA64_R11];
4563 }
4565 return NULL;
4566 }
4568 gpointer*
4569 mono_arch_get_delegate_method_ptr_addr (guint8* code, mgreg_t *regs)
4570 {
4571 NOT_IMPLEMENTED;
4573 return NULL;
4574 }
4576 void
4577 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4578 {
4579 }
4581 void
4582 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4583 {
4584 }
4586 #ifdef MONO_ARCH_HAVE_IMT
4588 /*
4589 * LOCKING: called with the domain lock held
4590 */
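/*
 * The thunk built below is a chain of compare-and-branch checks against
 * the IMT method in r9. Sketch of one 'is_equals' entry (illustrative
 * only, r31 is GP_SCRATCH_REG):
 *
 *   movl   r31 = <item->key> ;;
 *   cmp.eq p6, p7 = r9, r31
 *   (p7)   br.cond <next entry>
 *   movl   r31 = <&vtable slot> ;;
 *   ld8    r31 = [r31] ;;
 *   mov    b6 = r31
 *   br     b6
 */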
4591 gpointer
4592 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4593 gpointer fail_tramp)
4594 {
4595 int i;
4596 int size = 0;
4597 guint8 *start, *buf;
4598 Ia64CodegenState code;
4600 size = count * 256;
4601 buf = g_malloc0 (size);
4602 ia64_codegen_init (code, buf);
4604 /* IA64_R9 contains the IMT method */
4606 for (i = 0; i < count; ++i) {
4607 MonoIMTCheckItem *item = imt_entries [i];
4608 ia64_begin_bundle (code);
4609 item->code_target = (guint8*)code.buf + code.nins;
4610 if (item->is_equals) {
4611 gboolean fail_case = !item->check_target_idx && fail_tramp;
4613 if (item->check_target_idx || fail_case) {
4614 if (!item->compare_done || fail_case) {
4615 ia64_movl (code, GP_SCRATCH_REG, item->key);
4616 ia64_cmp_eq (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
4617 }
4618 item->jmp_code = (guint8*)code.buf + code.nins;
4619 ia64_br_cond_pred (code, 7, 0);
4621 ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
4622 ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
4623 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
4624 ia64_br_cond_reg (code, IA64_B6);
4626 if (fail_case) {
4627 ia64_patch (item->jmp_code, (guint8*)code.buf + code.nins);
4628 ia64_movl (code, GP_SCRATCH_REG, fail_tramp);
4629 ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
4630 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
4631 ia64_br_cond_reg (code, IA64_B6);
4632 item->jmp_code = NULL;
4633 }
4634 } else {
4635 /* enable the commented code to assert on wrong method */
4636 #if ENABLE_WRONG_METHOD_CHECK
4637 g_assert_not_reached ();
4638 #endif
4639 ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
4640 ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
4641 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
4642 ia64_br_cond_reg (code, IA64_B6);
4643 #if ENABLE_WRONG_METHOD_CHECK
4644 g_assert_not_reached ();
4645 #endif
4647 } else {
4648 ia64_movl (code, GP_SCRATCH_REG, item->key);
4649 ia64_cmp_geu (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
4650 item->jmp_code = (guint8*)code.buf + code.nins;
4651 ia64_br_cond_pred (code, 6, 0);
4652 }
4653 }
4654 /* patch the branches to get to the target items */
4655 for (i = 0; i < count; ++i) {
4656 MonoIMTCheckItem *item = imt_entries [i];
4657 if (item->jmp_code) {
4658 if (item->check_target_idx) {
4659 ia64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4660 }
4661 }
4662 }
4664 ia64_codegen_close (code);
4665 g_assert (code.buf - buf <= size);
4667 size = code.buf - buf;
4668 if (fail_tramp) {
4669 start = mono_method_alloc_generic_virtual_thunk (domain, size + 16);
4670 start = (gpointer)ALIGN_TO (start, 16);
4671 } else {
4672 start = mono_domain_code_reserve (domain, size);
4673 }
4674 memcpy (start, buf, size);
4676 mono_arch_flush_icache (start, size);
4678 mono_stats.imt_thunks_size += size;
4680 return start;
4681 }
4683 MonoMethod*
4684 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
4685 {
4686 return (MonoMethod*)regs [IA64_R9];
4687 }
4689 void
4690 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
4691 {
4692 /* Done by the implementation of the CALL_MEMBASE opcodes */
4693 }
4694 #endif
4696 gpointer
4697 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
4698 {
4699 return (gpointer)regs [IA64_R10];
4700 }
4702 gpointer
4703 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
4704 {
4705 return NULL;
4706 }
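/*
 * mono_arch_emit_inst_for_method () below maps Interlocked.Increment,
 * Decrement and Add onto atomic add opcodes. Deltas of +-1, +-4, +-8 or
 * +-16 use the immediate form (OP_ATOMIC_ADD_IMM_NEW_*), which can
 * later be lowered to ia64's fetchadd instruction; e.g.
 * Interlocked.Increment on an int becomes OP_ATOMIC_ADD_IMM_NEW_I4 with
 * inst_imm == 1 (illustrative summary of the code below).
 */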
4708 MonoInst*
4709 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4710 {
4711 MonoInst *ins = NULL;
4713 if (cmethod->klass->image == mono_defaults.corlib &&
4714 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4715 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4717 /*
4718 * We don't use the generic version in mini_emit_inst_for_method ()
4719 * since ia64 has atomic_add_imm opcodes.
4720 */
4721 if (strcmp (cmethod->name, "Increment") == 0) {
4722 guint32 opcode;
4724 if (fsig->params [0]->type == MONO_TYPE_I4)
4725 opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
4726 else if (fsig->params [0]->type == MONO_TYPE_I8)
4727 opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
4728 else
4729 g_assert_not_reached ();
4730 MONO_INST_NEW (cfg, ins, opcode);
4731 ins->dreg = mono_alloc_preg (cfg);
4732 ins->inst_imm = 1;
4733 ins->inst_basereg = args [0]->dreg;
4734 ins->inst_offset = 0;
4735 MONO_ADD_INS (cfg->cbb, ins);
4736 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4737 guint32 opcode;
4739 if (fsig->params [0]->type == MONO_TYPE_I4)
4740 opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
4741 else if (fsig->params [0]->type == MONO_TYPE_I8)
4742 opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
4743 else
4744 g_assert_not_reached ();
4745 MONO_INST_NEW (cfg, ins, opcode);
4746 ins->dreg = mono_alloc_preg (cfg);
4747 ins->inst_imm = -1;
4748 ins->inst_basereg = args [0]->dreg;
4749 ins->inst_offset = 0;
4750 MONO_ADD_INS (cfg->cbb, ins);
4751 } else if (strcmp (cmethod->name, "Add") == 0) {
4752 guint32 opcode;
4753 gboolean is_imm = FALSE;
4754 gint64 imm = 0;
4756 if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
4757 imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;
4759 is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);
4760 }
4762 if (is_imm) {
4763 if (fsig->params [0]->type == MONO_TYPE_I4)
4764 opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
4765 else if (fsig->params [0]->type == MONO_TYPE_I8)
4766 opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
4767 else
4768 g_assert_not_reached ();
4770 MONO_INST_NEW (cfg, ins, opcode);
4771 ins->dreg = mono_alloc_ireg (cfg);
4772 ins->inst_basereg = args [0]->dreg;
4773 ins->inst_offset = 0;
4774 ins->inst_imm = imm;
4775 ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
4776 } else {
4777 if (fsig->params [0]->type == MONO_TYPE_I4)
4778 opcode = OP_ATOMIC_ADD_NEW_I4;
4779 else if (fsig->params [0]->type == MONO_TYPE_I8)
4780 opcode = OP_ATOMIC_ADD_NEW_I8;
4781 else
4782 g_assert_not_reached ();
4784 MONO_INST_NEW (cfg, ins, opcode);
4785 ins->dreg = mono_alloc_ireg (cfg);
4786 ins->inst_basereg = args [0]->dreg;
4787 ins->inst_offset = 0;
4788 ins->sreg2 = args [1]->dreg;
4789 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4790 }
4791 MONO_ADD_INS (cfg->cbb, ins);
4792 }
4793 }
4795 return ins;
4796 }
4798 gboolean
4799 mono_arch_print_tree (MonoInst *tree, int arity)
4800 {
4801 return 0;
4802 }
4804 MonoInst*
4805 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4806 {
4807 return mono_get_domain_intrinsic (cfg);
4808 }
4810 gpointer
4811 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4812 {
4813 /* FIXME: implement */
4814 g_assert_not_reached ();