Revert "Merge pull request #321 from RAOF/aot-cpu-safety"
[mono-project.git] / mono / mini / mini-ia64.c
blobbd3bbdedca2d8c2805f41f055ad3919da9b901b1
/*
 * mini-ia64.c: IA64 backend for the Mono code generator
 *
 * Authors:
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2003 Ximian, Inc.
 */
#include "mini.h"
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <sys/mman.h>

#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>

#include "trace.h"
#include "mini-ia64.h"
#include "cpu-ia64.h"
#include "jit-icalls.h"
#include "ir-emit.h"

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)
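
/*
 * For example, ALIGN_TO (13, 8) rounds 13 up to the next multiple of 8,
 * yielding 16, and leaves already-aligned values unchanged. IS_IMM32 is
 * TRUE exactly when the value fits in the low 32 bits: IS_IMM32 (0xffffffff)
 * is TRUE, while IS_IMM32 (0x100000000) is FALSE.
 */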

/*
 * IA64 register usage:
 * - local registers are used for global register allocation
 * - r8..r11, r14..r30 are used for local register allocation
 * - r31 is a scratch register used within opcode implementations
 * - FIXME: Use out registers as well
 * - the first three locals are used for saving ar.pfs, b0, and sp
 * - compare instructions always set p6 and p7
 */

/*
 * There are a lot of places where generated code is disassembled/patched.
 * The automatic bundling of instructions done by the code generation macros
 * could complicate things, so it is best to call
 * ia64_codegen_set_one_ins_per_bundle () at those places.
 */

#define ARGS_OFFSET 16

#define GP_SCRATCH_REG 31
#define GP_SCRATCH_REG2 30
#define FP_SCRATCH_REG 32
#define FP_SCRATCH_REG2 33

#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)

static const char* gregs [] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
	"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
	"r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29",
	"r30", "r31", "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
	"r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", "r48", "r49",
	"r50", "r51", "r52", "r53", "r54", "r55", "r56", "r57", "r58", "r59",
	"r60", "r61", "r62", "r63", "r64", "r65", "r66", "r67", "r68", "r69",
	"r70", "r71", "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
	"r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87", "r88", "r89",
	"r90", "r91", "r92", "r93", "r94", "r95", "r96", "r97", "r98", "r99",
	"r100", "r101", "r102", "r103", "r104", "r105", "r106", "r107", "r108", "r109",
	"r110", "r111", "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
	"r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127"
};

const char*
mono_arch_regname (int reg)
{
	if (reg < 128)
		return gregs [reg];
	else
		return "unknown";
}

static const char* fregs [] = {
	"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9",
	"f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19",
	"f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29",
	"f30", "f31", "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
	"f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47", "f48", "f49",
	"f50", "f51", "f52", "f53", "f54", "f55", "f56", "f57", "f58", "f59",
	"f60", "f61", "f62", "f63", "f64", "f65", "f66", "f67", "f68", "f69",
	"f70", "f71", "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
	"f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87", "f88", "f89",
	"f90", "f91", "f92", "f93", "f94", "f95", "f96", "f97", "f98", "f99",
	"f100", "f101", "f102", "f103", "f104", "f105", "f106", "f107", "f108", "f109",
	"f110", "f111", "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
	"f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127"
};

const char*
mono_arch_fregname (int reg)
{
	if (reg < 128)
		return fregs [reg];
	else
		return "unknown";
}

G_GNUC_UNUSED static void
break_count (void)
{
}

G_GNUC_UNUSED static gboolean
debug_count (void)
{
	static int count = 0;
	count ++;

	if (count == atoi (getenv ("COUNT"))) {
		break_count ();
	}

	if (count > atoi (getenv ("COUNT"))) {
		return FALSE;
	}

	return TRUE;
}

static gboolean
debug_ins_sched (void)
{
#if 0
	return debug_count ();
#else
	return TRUE;
#endif
}

static gboolean
debug_omit_fp (void)
{
#if 0
	return debug_count ();
#else
	return TRUE;
#endif
}

static void
ia64_patch (unsigned char* code, gpointer target);

typedef enum {
	ArgInIReg,
	ArgInFloatReg,
	ArgInFloatRegR4,
	ArgOnStack,
	ArgValuetypeAddrInIReg,
	ArgAggregate,
	ArgSingleHFA,
	ArgDoubleHFA,
	ArgNone
} ArgStorage;

typedef enum {
	AggregateNormal,
	AggregateSingleHFA,
	AggregateDoubleHFA
} AggregateType;

typedef struct {
	gint16 offset;
	gint8  reg;
	ArgStorage storage;

	/* Only if storage == ArgAggregate */
	int nregs, nslots;
	AggregateType atype;
} ArgInfo;

typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 reg_usage;
	guint32 freg_usage;
	gboolean need_stack_align;
	gboolean vtype_retaddr;
	/* The index of the vret arg in the argument list */
	int vret_arg_index;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;

#define DEBUG(a) if (cfg->verbose_level > 1) a

#define PARAM_REGS 8

static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = ArgInIReg;
		ainfo->reg = *gr;
		*(gr) += 1;
	}
}

#define FLOAT_PARAM_REGS 8

static void inline
add_float (guint32 *gr, guint32 *fr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = is_double ? ArgInFloatReg : ArgInFloatRegR4;
		ainfo->reg = 8 + *fr;
		(*fr) += 1;
		(*gr) += 1;
	}
}

static void
add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
	       gboolean is_return,
	       guint32 *gr, guint32 *fr, guint32 *stack_size)
{
	guint32 size, i;
	MonoClass *klass;
	MonoMarshalType *info;
	gboolean is_hfa = TRUE;
	guint32 hfa_type = 0;

	klass = mono_class_from_mono_type (type);
	if (type->type == MONO_TYPE_TYPEDBYREF)
		size = 3 * sizeof (gpointer);
	else if (sig->pinvoke)
		size = mono_type_native_stack_size (&klass->byval_arg, NULL);
	else
		size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);

	if (!sig->pinvoke || (size == 0)) {
		/* Always pass in memory */
		ainfo->offset = *stack_size;
		*stack_size += ALIGN_TO (size, 8);
		ainfo->storage = ArgOnStack;

		return;
	}

	/* Determine whether it is a HFA (Homogeneous Floating Point Aggregate) */
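	/*
	 * An HFA is a struct whose fields are all of the same floating point
	 * type, e.g. struct { float x, y; }; mixing float and double fields,
	 * or including a non floating point field, disqualifies it.
	 */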
	info = mono_marshal_load_type_info (klass);
	g_assert (info);
	for (i = 0; i < info->num_fields; ++i) {
		guint32 ftype = info->fields [i].field->type->type;
		if (!(info->fields [i].field->type->byref) &&
		    ((ftype == MONO_TYPE_R4) || (ftype == MONO_TYPE_R8))) {
			if (hfa_type == 0)
				hfa_type = ftype;
			else if (hfa_type != ftype)
				is_hfa = FALSE;
		}
		else
			is_hfa = FALSE;
	}
	if (hfa_type == 0)
		is_hfa = FALSE;

	ainfo->storage = ArgAggregate;
	ainfo->atype = AggregateNormal;

	if (is_hfa) {
		ainfo->atype = hfa_type == MONO_TYPE_R4 ? AggregateSingleHFA : AggregateDoubleHFA;
		if (is_return) {
			if (info->num_fields <= 8) {
				ainfo->reg = 8;
				ainfo->nregs = info->num_fields;
				ainfo->nslots = ainfo->nregs;
				return;
			}
			/* Fall through */
		}
		else {
			if ((*fr) + info->num_fields > 8)
				NOT_IMPLEMENTED;

			ainfo->reg = 8 + (*fr);
			ainfo->nregs = info->num_fields;
			ainfo->nslots = ainfo->nregs;
			(*fr) += info->num_fields;
			if (ainfo->atype == AggregateSingleHFA) {
				/*
				 * FIXME: Have to keep track of the parameter slot number, which is
				 * not the same as *gr.
				 */
				(*gr) += ALIGN_TO (info->num_fields, 2) / 2;
			} else {
				(*gr) += info->num_fields;
			}
			return;
		}
	}

	/* This also handles returning of TypedByRef used by some icalls */
	if (is_return) {
		if (size <= 32) {
			ainfo->reg = IA64_R8;
			ainfo->nregs = (size + 7) / 8;
			ainfo->nslots = ainfo->nregs;
			return;
		}
		NOT_IMPLEMENTED;
	}

	ainfo->reg = (*gr);
	ainfo->offset = *stack_size;
	ainfo->nslots = (size + 7) / 8;
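
	/*
	 * E.g. a 20 byte struct starting at parameter slot (*gr) == 6:
	 * nslots = 3, so two slots are passed in registers (nregs = 2) and
	 * the remaining 8 byte slot goes on the stack.
	 */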
	if (((*gr) + ainfo->nslots) <= 8) {
		/* Fits entirely in registers */
		ainfo->nregs = ainfo->nslots;
		(*gr) += ainfo->nregs;
		return;
	}

	ainfo->nregs = 8 - (*gr);
	(*gr) = 8;
	(*stack_size) += (ainfo->nslots - ainfo->nregs) * 8;
}

/*
 * get_call_info:
 *
 * Obtain information about a call according to the calling convention.
 * For IA64, see the "Itanium Software Conventions and Runtime Architecture
 * Guide" document for more information.
 */
static CallInfo*
get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
	guint32 i, gr, fr, pstart;
	MonoType *ret_type;
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	CallInfo *cinfo;
	MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;

	if (mp)
		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	else
		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	gr = 0;
	fr = 0;

	/* return value */
	{
		ret_type = mono_type_get_underlying_type (sig->ret);
		ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
		switch (ret_type->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_STRING:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = IA64_R8;
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = IA64_R8;
			break;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			cinfo->ret.storage = ArgInFloatReg;
			cinfo->ret.reg = 8;
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ret_type)) {
				cinfo->ret.storage = ArgInIReg;
				cinfo->ret.reg = IA64_R8;
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_TYPEDBYREF: {
			guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

			if (sig->ret->byref) {
				/* This seems to happen with ldfld wrappers */
				cinfo->ret.storage = ArgInIReg;
			} else {
				add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
				if (cinfo->ret.storage == ArgOnStack) {
					/* The caller passes the address where the value is stored */
					cinfo->vtype_retaddr = TRUE;
				}
			}
			break;
		}
		case MONO_TYPE_VOID:
			cinfo->ret.storage = ArgNone;
			break;
		default:
			g_error ("Can't handle as return value 0x%x", sig->ret->type);
		}
	}

	pstart = 0;
	/*
	 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
	 * the first argument, allowing 'this' to be always passed in the first arg reg.
	 * Also do this if the first argument is a reference type, since virtual calls
	 * are sometimes made using calli without sig->hasthis set, like in the delegate
	 * invoke wrappers.
	 */
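	/*
	 * E.g. for an instance method returning a large valuetype, the out
	 * args are ordered as: 'this', the vret address, then the remaining
	 * parameters, which is why vret_arg_index is set to 1 below.
	 */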
	if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
		if (sig->hasthis) {
			add_general (&gr, &stack_size, cinfo->args + 0);
		} else {
			add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
			pstart = 1;
		}
		add_general (&gr, &stack_size, &cinfo->ret);
		if (cinfo->ret.storage == ArgInIReg)
			cinfo->ret.storage = ArgValuetypeAddrInIReg;
		cinfo->vret_arg_index = 1;
	} else {
		/* this */
		if (sig->hasthis)
			add_general (&gr, &stack_size, cinfo->args + 0);

		if (cinfo->vtype_retaddr) {
			add_general (&gr, &stack_size, &cinfo->ret);
			if (cinfo->ret.storage == ArgInIReg)
				cinfo->ret.storage = ArgValuetypeAddrInIReg;
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	for (i = pstart; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
		MonoType *ptype;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* We always pass the sig cookie on the stack for simplicity */
			/*
			 * Prevent implicit arguments + the sig cookie from being passed
			 * in registers.
			 */
			gr = PARAM_REGS;
			fr = FLOAT_PARAM_REGS;

			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie);
		}

		if (sig->params [i]->byref) {
			add_general (&gr, &stack_size, ainfo);
			continue;
		}
		ptype = mono_type_get_underlying_type (sig->params [i]);
		ptype = mini_get_basic_type_from_generic (gsctx, ptype);
		switch (ptype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ptype)) {
				add_general (&gr, &stack_size, ainfo);
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
		case MONO_TYPE_TYPEDBYREF:
			/* FIXME: */
			/* We always pass valuetypes on the stack */
			add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_R4:
			add_float (&gr, &fr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_R8:
			add_float (&gr, &fr, &stack_size, ainfo, TRUE);
			break;
		default:
			g_assert_not_reached ();
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	cinfo->stack_usage = stack_size;
	cinfo->reg_usage = gr;
	cinfo->freg_usage = fr;
	return cinfo;
}

/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k;
	CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
	guint32 args_size = cinfo->stack_usage;

	/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
	if (csig->hasthis) {
		arg_info [0].offset = 0;
	}

	for (k = 0; k < param_count; k++) {
		arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
		/* FIXME: */
		arg_info [k + 1].size = 0;
	}

	g_free (cinfo);

	return args_size;
}

/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
}

/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
}

/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}

/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
	*exclude_mask = 0;

	return 0;
}

GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoInst *ins = cfg->args [i];

		ArgInfo *ainfo = &cinfo->args [i];

		if (ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT))
			continue;

		if (ainfo->storage == ArgInIReg) {
			/* The input registers are non-volatile */
			ins->opcode = OP_REGVAR;
			ins->dreg = 32 + ainfo->reg;
		}
	}

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
		    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = g_list_prepend (vars, vmv);
		}
	}

	vars = mono_varlist_sort (cfg, vars, 0);

	return vars;
}

static void
mono_ia64_alloc_stacked_registers (MonoCompile *cfg)
{
	CallInfo *cinfo;
	guint32 reserved_regs;
	MonoMethodHeader *header;

	if (cfg->arch.reg_local0 > 0)
		/* Already done */
		return;

	cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (cfg->method), FALSE);

	header = cfg->header;

	/* Some registers are reserved for use by the prolog/epilog */
	reserved_regs = header->num_clauses ? 4 : 3;

	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
	    (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
		/* One register is needed by instrument_epilog to save the return value */
		reserved_regs ++;
		if (cinfo->reg_usage < 2)
			/* Number of arguments passed to function call in instrument_prolog */
			cinfo->reg_usage = 2;
	}

	cfg->arch.reg_in0 = 32;
	cfg->arch.reg_local0 = cfg->arch.reg_in0 + cinfo->reg_usage + reserved_regs;
	cfg->arch.reg_out0 = cfg->arch.reg_local0 + 16;

	cfg->arch.reg_saved_ar_pfs = cfg->arch.reg_local0 - 1;
	cfg->arch.reg_saved_b0 = cfg->arch.reg_local0 - 2;
	cfg->arch.reg_fp = cfg->arch.reg_local0 - 3;
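
	/*
	 * E.g. with reg_usage == 2 and no exception clauses: the in args are
	 * r32-r33, the three reserved regs are r34 (fp), r35 (saved b0) and
	 * r36 (saved ar.pfs), locals start at r37 and the out args at r53.
	 */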

	/*
	 * Frames without handlers save sp to fp, frames with handlers save it into
	 * a dedicated register.
	 */
	if (header->num_clauses)
		cfg->arch.reg_saved_sp = cfg->arch.reg_local0 - 4;
	else
		cfg->arch.reg_saved_sp = cfg->arch.reg_fp;

	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
	    (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)) {
		cfg->arch.reg_saved_return_val = cfg->arch.reg_local0 - reserved_regs;
	}

	/*
	 * Need to allocate at least 2 out registers for use by OP_THROW / the system
	 * exception throwing code.
	 */
	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, 2);
}

GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;
	int i;

	mono_ia64_alloc_stacked_registers (cfg);

	for (i = cfg->arch.reg_local0; i < cfg->arch.reg_out0; ++i) {
		/* FIXME: regmask */
		g_assert (i < 64);
		regs = g_list_prepend (regs, (gpointer)(gssize)(i));
	}

	return regs;
}

/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	/* FIXME: Increase costs linearly to avoid using all local registers */

	return 0;
}

void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *inst;
	int i, offset;
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/*
	 * Determine whether the frame pointer can be eliminated.
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;

	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;

	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == ArgOnStack) {
			/*
			 * The stack offset can only be determined when the frame
			 * size is known.
			 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	mono_ia64_alloc_stacked_registers (cfg);

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are never passed or returned in registers.
	 */

	if (cfg->arch.omit_fp) {
		cfg->flags |= MONO_CFG_HAS_SPILLUP;
		cfg->frame_reg = IA64_SP;
		offset = ARGS_OFFSET;
	}
	else {
		/* Locals are allocated backwards from %fp */
		cfg->frame_reg = cfg->arch.reg_fp;
		offset = 0;
	}

	if (cfg->method->save_lmf) {
		/* No LMF on IA64 */
	}

	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgInFloatReg:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = cinfo->ret.reg;
			break;
		case ArgValuetypeAddrInIReg:
			cfg->vret_addr->opcode = OP_REGVAR;
			cfg->vret_addr->dreg = cfg->arch.reg_in0 + cinfo->ret.reg;
			break;
		case ArgAggregate:
			/* Allocate a local to hold the result, the epilog will copy it to the correct place */
			if (cfg->arch.omit_fp)
				g_assert_not_reached ();
			offset = ALIGN_TO (offset, 8);
			offset += cinfo->ret.nslots * 8;
			cfg->ret->opcode = OP_REGOFFSET;
			cfg->ret->inst_basereg = cfg->frame_reg;
			cfg->ret->inst_offset = - offset;
			break;
		default:
			g_assert_not_reached ();
		}
		cfg->ret->dreg = cfg->ret->inst_c0;
	}

	/* Allocate locals */
	offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
	if (locals_stack_align) {
		offset = ALIGN_TO (offset, locals_stack_align);
	}
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		if (offsets [i] != -1) {
			MonoInst *inst = cfg->varinfo [i];
			inst->opcode = OP_REGOFFSET;
			inst->inst_basereg = cfg->frame_reg;
			if (cfg->arch.omit_fp)
				inst->inst_offset = (offset + offsets [i]);
			else
				inst->inst_offset = - (offset + offsets [i]);
			// printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
		}
	}
	offset += locals_stack_size;

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
		if (cfg->arch.omit_fp)
			g_assert_not_reached ();
		g_assert (cinfo->sig_cookie.storage == ArgOnStack);
		cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = cfg->args [i];
		if (inst->opcode != OP_REGVAR) {
			ArgInfo *ainfo = &cinfo->args [i];
			gboolean inreg = TRUE;
			MonoType *arg_type;

			if (sig->hasthis && (i == 0))
				arg_type = &mono_defaults.object_class->byval_arg;
			else
				arg_type = sig->params [i - sig->hasthis];

			/* FIXME: VOLATILE is only set if the liveness pass runs */
			if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
				inreg = FALSE;

			inst->opcode = OP_REGOFFSET;

			switch (ainfo->storage) {
			case ArgInIReg:
				inst->opcode = OP_REGVAR;
				inst->dreg = cfg->arch.reg_in0 + ainfo->reg;
				break;
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				/*
				 * Since float regs are volatile, we save the arguments to
				 * the stack in the prolog.
				 */
				inreg = FALSE;
				break;
			case ArgOnStack:
				if (cfg->arch.omit_fp)
					g_assert_not_reached ();
				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = cfg->frame_reg;
				inst->inst_offset = ARGS_OFFSET + ainfo->offset;
				break;
			case ArgAggregate:
				inreg = FALSE;
				break;
			default:
				NOT_IMPLEMENTED;
			}

			if (!inreg && (ainfo->storage != ArgOnStack)) {
				guint32 size = 0;

				inst->opcode = OP_REGOFFSET;
				inst->inst_basereg = cfg->frame_reg;
				/* These arguments are saved to the stack in the prolog */
				switch (ainfo->storage) {
				case ArgAggregate:
					if (ainfo->atype == AggregateSingleHFA)
						size = ainfo->nslots * 4;
					else
						size = ainfo->nslots * 8;
					break;
				default:
					size = sizeof (gpointer);
					break;
				}

				offset = ALIGN_TO (offset, sizeof (gpointer));

				if (cfg->arch.omit_fp) {
					inst->inst_offset = offset;
					offset += size;
				} else {
					offset += size;
					inst->inst_offset = - offset;
				}
			}
		}
	}

	/*
	 * FIXME: This doesn't work because some variables are allocated during local
	 * regalloc.
	 */
	/*
	if (cfg->arch.omit_fp && offset == 16)
		offset = 0;
	*/

	cfg->stack_offset = offset;
}

void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	if (cinfo->ret.storage == ArgAggregate)
		cfg->ret_var_is_local = TRUE;
	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}
}

static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
	MonoInst *arg;

	MONO_INST_NEW (cfg, arg, OP_NOP);
	arg->sreg1 = tree->dreg;

	switch (storage) {
	case ArgInIReg:
		arg->opcode = OP_MOVE;
		arg->dreg = mono_alloc_ireg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
		break;
	case ArgInFloatReg:
		arg->opcode = OP_FMOVE;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	case ArgInFloatRegR4:
		arg->opcode = OP_FCONV_TO_R4;
		arg->dreg = mono_alloc_freg (cfg);

		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
		break;
	default:
		g_assert_not_reached ();
	}

	MONO_ADD_INS (cfg->cbb, arg);
}

static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;

	/* Emit the signature cookie just before the implicit arguments */
	MonoInst *sig_arg;
	/* FIXME: Add support for signature tokens to AOT */
	cfg->disable_aot = TRUE;

	g_assert (cinfo->sig_cookie.storage == ArgOnStack);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
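	/*
	 * E.g. for a vararg call whose fixed part is (int) and which passes
	 * two extra arguments, tmp_sig below describes only the two trailing
	 * arguments: param_count is reduced by sentinelpos and the params
	 * array starts at the first vararg.
	 */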
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
	sig_arg->dreg = mono_alloc_ireg (cfg);
	sig_arg->inst_p0 = tmp_sig;
	MONO_ADD_INS (cfg->cbb, sig_arg);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
}

void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in;
	MonoMethodSignature *sig;
	int i, n, stack_size;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	stack_size = 0;

	mono_ia64_alloc_stacked_registers (cfg);

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);

	if (cinfo->ret.storage == ArgAggregate) {
		MonoInst *vtarg;
		MonoInst *local;

		/*
		 * The valuetype is in registers after the call and needs to be
		 * copied to the stack. Save the address to a local here, so the
		 * call instruction can access it.
		 */
		local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		local->flags |= MONO_INST_VOLATILE;
		cfg->arch.ret_var_addr_local = local;

		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
		vtarg->sreg1 = call->vret_var->dreg;
		vtarg->dreg = local->dreg;
		MONO_ADD_INS (cfg->cbb, vtarg);
	}

	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
		add_outarg_reg (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
	}

	for (i = 0; i < n; ++i) {
		MonoType *arg_type;

		ainfo = cinfo->args + i;

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
		}

		in = call->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
			guint32 align;
			guint32 size;

			if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
				size = sizeof (MonoTypedRef);
				align = sizeof (gpointer);
			}
			else if (sig->pinvoke)
				size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
			else {
				/*
				 * Other backends use mono_type_stack_size (), but that
				 * aligns the size to 8, which is larger than the size of
				 * the source, leading to reads of invalid memory if the
				 * source is at the end of address space.
				 */
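				/*
				 * E.g. a 2 byte struct whose storage ends exactly at the
				 * last mapped page: copying 8 bytes instead of 2 could
				 * fault.
				 */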
				size = mono_class_value_size (in->klass, &align);
			}

			if (size > 0) {
				MonoInst *arg;

				MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
				arg->sreg1 = in->dreg;
				arg->klass = in->klass;
				arg->backend.size = size;
				arg->inst_p0 = call;
				arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
				memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));

				MONO_ADD_INS (cfg->cbb, arg);
			}
		}
		else {
			switch (ainfo->storage) {
			case ArgInIReg:
				add_outarg_reg (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
				break;
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
				break;
			case ArgOnStack:
				if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				else
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
				break;
			default:
				g_assert_not_reached ();
			}
		}
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
		emit_sig_cookie (cfg, call, cinfo);
	}

	call->stack_usage = cinfo->stack_usage;
	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
}

void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int size = ins->backend.size;

	if (ainfo->storage == ArgAggregate) {
		MonoInst *load, *store;
		int i, slot;

		/*
		 * Part of the structure is passed in registers.
		 */
		for (i = 0; i < ainfo->nregs; ++i) {
			slot = ainfo->reg + i;

			if (ainfo->atype == AggregateSingleHFA) {
				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 4;
				load->dreg = mono_alloc_freg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
			} else if (ainfo->atype == AggregateDoubleHFA) {
				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 8;
				load->dreg = mono_alloc_freg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
			} else {
				MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
				load->inst_basereg = src->dreg;
				load->inst_offset = i * 8;
				load->dreg = mono_alloc_ireg (cfg);

				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
			}
			MONO_ADD_INS (cfg->cbb, load);
		}

		/*
		 * Part of the structure is passed on the stack.
		 */
		for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
			slot = ainfo->reg + i;

			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
			load->inst_basereg = src->dreg;
			load->inst_offset = i * sizeof (gpointer);
			load->dreg = mono_alloc_preg (cfg);
			MONO_ADD_INS (cfg->cbb, load);

			MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
			store->sreg1 = load->dreg;
			store->inst_destbasereg = IA64_SP;
			store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
			MONO_ADD_INS (cfg->cbb, store);
		}
	} else {
		mini_emit_memcpy (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 4);
	}
}

void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);

	switch (cinfo->ret.storage) {
	case ArgInIReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
		break;
	case ArgInFloatReg:
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		break;
	default:
		g_assert_not_reached ();
	}
}

void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}

void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;
	ins = bb->code;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_MOVE:
		case OP_FMOVE:
			/*
			 * Removes:
			 *
			 * OP_MOVE reg, reg
			 */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			/*
			 * Removes:
			 *
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			 */
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* remove unnecessary multiplication with 1 */
			if (ins->inst_imm == 1) {
				if (ins->dreg != ins->sreg1) {
					ins->opcode = OP_MOVE;
				} else {
					MONO_DELETE_INS (bb, ins);
					continue;
				}
			}
			break;
		}

		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;
}

int cond_to_ia64_cmp [][3] = {
	{OP_IA64_CMP_EQ, OP_IA64_CMP4_EQ, OP_IA64_FCMP_EQ},
	{OP_IA64_CMP_NE, OP_IA64_CMP4_NE, OP_IA64_FCMP_NE},
	{OP_IA64_CMP_LE, OP_IA64_CMP4_LE, OP_IA64_FCMP_LE},
	{OP_IA64_CMP_GE, OP_IA64_CMP4_GE, OP_IA64_FCMP_GE},
	{OP_IA64_CMP_LT, OP_IA64_CMP4_LT, OP_IA64_FCMP_LT},
	{OP_IA64_CMP_GT, OP_IA64_CMP4_GT, OP_IA64_FCMP_GT},
	{OP_IA64_CMP_LE_UN, OP_IA64_CMP4_LE_UN, OP_IA64_FCMP_LE_UN},
	{OP_IA64_CMP_GE_UN, OP_IA64_CMP4_GE_UN, OP_IA64_FCMP_GE_UN},
	{OP_IA64_CMP_LT_UN, OP_IA64_CMP4_LT_UN, OP_IA64_FCMP_LT_UN},
	{OP_IA64_CMP_GT_UN, OP_IA64_CMP4_GT_UN, OP_IA64_FCMP_GT_UN}
};

static int
opcode_to_ia64_cmp (int opcode, int cmp_opcode)
{
	return cond_to_ia64_cmp [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}

int cond_to_ia64_cmp_imm [][3] = {
	{OP_IA64_CMP_EQ_IMM, OP_IA64_CMP4_EQ_IMM, 0},
	{OP_IA64_CMP_NE_IMM, OP_IA64_CMP4_NE_IMM, 0},
	{OP_IA64_CMP_GE_IMM, OP_IA64_CMP4_GE_IMM, 0},
	{OP_IA64_CMP_LE_IMM, OP_IA64_CMP4_LE_IMM, 0},
	{OP_IA64_CMP_GT_IMM, OP_IA64_CMP4_GT_IMM, 0},
	{OP_IA64_CMP_LT_IMM, OP_IA64_CMP4_LT_IMM, 0},
	{OP_IA64_CMP_GE_UN_IMM, OP_IA64_CMP4_GE_UN_IMM, 0},
	{OP_IA64_CMP_LE_UN_IMM, OP_IA64_CMP4_LE_UN_IMM, 0},
	{OP_IA64_CMP_GT_UN_IMM, OP_IA64_CMP4_GT_UN_IMM, 0},
	{OP_IA64_CMP_LT_UN_IMM, OP_IA64_CMP4_LT_UN_IMM, 0},
};

static int
opcode_to_ia64_cmp_imm (int opcode, int cmp_opcode)
{
	/* The condition needs to be reversed */
	return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}

#define NEW_INS(cfg,dest,op) do {	\
	(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));	\
	(dest)->opcode = (op);	\
	mono_bblock_insert_after_ins (bb, last_ins, (dest));	\
	last_ins = (dest);	\
} while (0)

/*
 * mono_arch_lowering_pass:
 *
 * Converts complex opcodes into simpler ones so that each IR instruction
 * corresponds to one machine instruction.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;
	ins = bb->code;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
		case OP_STOREI8_MEMBASE_IMM:
		case OP_STORE_MEMBASE_IMM:
			/* There are no store_membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				temp2 = NULL;
			} else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);

				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			switch (ins->opcode) {
			case OP_STOREI1_MEMBASE_IMM:
				ins->opcode = OP_STOREI1_MEMBASE_REG;
				break;
			case OP_STOREI2_MEMBASE_IMM:
				ins->opcode = OP_STOREI2_MEMBASE_REG;
				break;
			case OP_STOREI4_MEMBASE_IMM:
				ins->opcode = OP_STOREI4_MEMBASE_REG;
				break;
			case OP_STOREI8_MEMBASE_IMM:
			case OP_STORE_MEMBASE_IMM:
				ins->opcode = OP_STOREI8_MEMBASE_REG;
				break;
			default:
				g_assert_not_reached ();
			}

			if (ins->inst_imm == 0)
				ins->sreg1 = IA64_R0;
			else {
				NEW_INS (cfg, temp3, OP_I8CONST);
				temp3->inst_c0 = ins->inst_imm;
				temp3->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = temp3->dreg;
			}

			ins->inst_offset = 0;
			if (temp2)
				ins->inst_destbasereg = temp2->dreg;
			break;
		case OP_STOREI1_MEMBASE_REG:
		case OP_STOREI2_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI8_MEMBASE_REG:
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
		case OP_STORE_MEMBASE_REG:
			/* There are no store_membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				break;
			}
			else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_destbasereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			ins->inst_offset = 0;
			ins->inst_destbasereg = temp2->dreg;
			break;
		case OP_LOADI1_MEMBASE:
		case OP_LOADU1_MEMBASE:
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADI8_MEMBASE:
		case OP_LOAD_MEMBASE:
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
		case OP_ATOMIC_EXCHANGE_I4:
		case OP_ATOMIC_EXCHANGE_I8:
		case OP_ATOMIC_ADD_NEW_I4:
		case OP_ATOMIC_ADD_NEW_I8:
		case OP_ATOMIC_ADD_IMM_NEW_I4:
		case OP_ATOMIC_ADD_IMM_NEW_I8:
			/* There are no membase instructions on ia64 */
			if (ins->inst_offset == 0) {
				break;
			}
			else if (ia64_is_imm14 (ins->inst_offset)) {
				NEW_INS (cfg, temp2, OP_ADD_IMM);
				temp2->sreg1 = ins->inst_basereg;
				temp2->inst_imm = ins->inst_offset;
				temp2->dreg = mono_alloc_ireg (cfg);
			}
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				NEW_INS (cfg, temp2, OP_LADD);
				temp2->sreg1 = ins->inst_basereg;
				temp2->sreg2 = temp->dreg;
				temp2->dreg = mono_alloc_ireg (cfg);
			}

			ins->inst_offset = 0;
			ins->inst_basereg = temp2->dreg;
			break;
		case OP_ADD_IMM:
		case OP_IADD_IMM:
		case OP_LADD_IMM:
		case OP_ISUB_IMM:
		case OP_LSUB_IMM:
		case OP_AND_IMM:
		case OP_IAND_IMM:
		case OP_LAND_IMM:
		case OP_IOR_IMM:
		case OP_LOR_IMM:
		case OP_IXOR_IMM:
		case OP_LXOR_IMM:
		case OP_SHL_IMM:
		case OP_SHR_IMM:
		case OP_ISHL_IMM:
		case OP_LSHL_IMM:
		case OP_ISHR_IMM:
		case OP_LSHR_IMM:
		case OP_ISHR_UN_IMM:
		case OP_LSHR_UN_IMM: {
			gboolean is_imm = FALSE;
			gboolean switched = FALSE;

			if (ins->opcode == OP_AND_IMM && ins->inst_imm == 255) {
				ins->opcode = OP_ZEXT_I1;
				break;
			}

			switch (ins->opcode) {
			case OP_ADD_IMM:
			case OP_IADD_IMM:
			case OP_LADD_IMM:
				is_imm = ia64_is_imm14 (ins->inst_imm);
				switched = TRUE;
				break;
			case OP_ISUB_IMM:
			case OP_LSUB_IMM:
				is_imm = ia64_is_imm14 (- (ins->inst_imm));
				if (is_imm) {
					/* A = B - IMM -> A = B + (-IMM) */
					ins->inst_imm = - ins->inst_imm;
					ins->opcode = OP_IADD_IMM;
				}
				switched = TRUE;
				break;
			case OP_IAND_IMM:
			case OP_IOR_IMM:
			case OP_IXOR_IMM:
			case OP_AND_IMM:
			case OP_LAND_IMM:
			case OP_LOR_IMM:
			case OP_LXOR_IMM:
				is_imm = ia64_is_imm8 (ins->inst_imm);
				switched = TRUE;
				break;
			case OP_SHL_IMM:
			case OP_SHR_IMM:
			case OP_ISHL_IMM:
			case OP_LSHL_IMM:
			case OP_ISHR_IMM:
			case OP_LSHR_IMM:
			case OP_ISHR_UN_IMM:
			case OP_LSHR_UN_IMM:
				is_imm = (ins->inst_imm >= 0) && (ins->inst_imm < 64);
				break;
			default:
				break;
			}

			if (is_imm) {
				if (switched)
					ins->sreg2 = ins->sreg1;
				break;
			}

			ins->opcode = mono_op_imm_to_op (ins->opcode);

			if (ins->inst_imm == 0)
				ins->sreg2 = IA64_R0;
			else {
				NEW_INS (cfg, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
			}
			break;
		}
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		case OP_LCOMPARE_IMM: {
			/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
			gboolean imm;
			CompRelation cond;

			next = ins->next;

			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				NULLIFY_INS (ins);
				break;
			}

			/*
			 * The compare_imm instructions have switched up arguments, and
			 * some of them take an imm between -127 and 128.
			 */
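			/*
			 * E.g. an icompare_imm of r1 against 10 followed by a
			 * branch-if-less-than becomes OP_IA64_CMP4_GT_IMM: the
			 * immediate is the first operand ("10 > r1"), which is why
			 * the imm table above is reversed and sreg1 is moved to
			 * sreg2 below.
			 */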
			next = ins->next;
			cond = mono_opcode_to_cond (next->opcode);
			if ((cond == CMP_LT) || (cond == CMP_GE))
				imm = ia64_is_imm8 (ins->inst_imm - 1);
			else if ((cond == CMP_LT_UN) || (cond == CMP_GE_UN))
				imm = ia64_is_imm8 (ins->inst_imm - 1) && (ins->inst_imm > 0);
			else
				imm = ia64_is_imm8 (ins->inst_imm);

			if (imm) {
				ins->opcode = opcode_to_ia64_cmp_imm (next->opcode, ins->opcode);
				ins->sreg2 = ins->sreg1;
			}
			else {
				ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);

				if (ins->inst_imm == 0)
					ins->sreg2 = IA64_R0;
				else {
					NEW_INS (cfg, temp, OP_I8CONST);
					temp->inst_c0 = ins->inst_imm;
					temp->dreg = mono_alloc_ireg (cfg);
					ins->sreg2 = temp->dreg;
				}
			}

			if (MONO_IS_COND_BRANCH_OP (next)) {
				next->opcode = OP_IA64_BR_COND;
				next->inst_target_bb = next->inst_true_bb;
			} else if (MONO_IS_COND_EXC (next)) {
				next->opcode = OP_IA64_COND_EXC;
			} else if (MONO_IS_SETCC (next)) {
				next->opcode = OP_IA64_CSET;
			} else {
				printf ("%s\n", mono_inst_name (next->opcode));
				NOT_IMPLEMENTED;
			}

			break;
		}
		case OP_COMPARE:
		case OP_ICOMPARE:
		case OP_LCOMPARE:
		case OP_FCOMPARE: {
			/* Instead of compare+b<cond>, ia64 has compare<cond>+br */

			next = ins->next;

			/* Branch opts can eliminate the branch */
			if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
				NULLIFY_INS (ins);
				break;
			}

			ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);

			if (MONO_IS_COND_BRANCH_OP (next)) {
				next->opcode = OP_IA64_BR_COND;
				next->inst_target_bb = next->inst_true_bb;
			} else if (MONO_IS_COND_EXC (next)) {
				next->opcode = OP_IA64_COND_EXC;
			} else if (MONO_IS_SETCC (next)) {
				next->opcode = OP_IA64_CSET;
			} else {
				printf ("%s\n", mono_inst_name (next->opcode));
				NOT_IMPLEMENTED;
			}

			break;
		}
		case OP_FCEQ:
		case OP_FCGT:
		case OP_FCGT_UN:
		case OP_FCLT:
		case OP_FCLT_UN:
			/* The front end removes the fcompare, so introduce it again */
			NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = ins->sreg2;

			ins->opcode = OP_IA64_CSET;
			MONO_INST_NULLIFY_SREGS (ins);
			break;
		case OP_MUL_IMM:
		case OP_LMUL_IMM:
		case OP_IMUL_IMM: {
			int i, sum_reg;
			gboolean found = FALSE;
			int shl_op = ins->opcode == OP_IMUL_IMM ? OP_ISHL_IMM : OP_SHL_IMM;

			/* First the easy cases */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			for (i = 1; i < 64; ++i)
				if (ins->inst_imm == (((gint64)1) << i)) {
					ins->opcode = shl_op;
					ins->inst_imm = i;
					found = TRUE;
					break;
				}

			/* This could be optimized */
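			/*
			 * E.g. sreg1 * 10 is decomposed below into
			 * (sreg1 << 1) + (sreg1 << 3): one shift per set bit of the
			 * immediate, combined with adds.
			 */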
			if (!found) {
				sum_reg = 0;
				for (i = 0; i < 64; ++i) {
					if (ins->inst_imm & (((gint64)1) << i)) {
						NEW_INS (cfg, temp, shl_op);
						temp->dreg = mono_alloc_ireg (cfg);
						temp->sreg1 = ins->sreg1;
						temp->inst_imm = i;

						if (sum_reg == 0)
							sum_reg = temp->dreg;
						else {
							NEW_INS (cfg, temp2, OP_LADD);
							temp2->dreg = mono_alloc_ireg (cfg);
							temp2->sreg1 = sum_reg;
							temp2->sreg2 = temp->dreg;
							sum_reg = temp2->dreg;
						}
					}
				}
				ins->opcode = OP_MOVE;
				ins->sreg1 = sum_reg;
			}
			break;
		}
		case OP_LCONV_TO_OVF_U4:
			NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
			temp->sreg1 = ins->sreg1;
			temp->sreg2 = IA64_R0;

			NEW_INS (cfg, temp, OP_IA64_COND_EXC);
			temp->inst_p1 = (char*)"OverflowException";

			ins->opcode = OP_MOVE;
			break;
		case OP_LCONV_TO_OVF_I4_UN:
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = 0x7fffffff;
			temp->dreg = mono_alloc_ireg (cfg);

			NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
			temp2->sreg1 = ins->sreg1;
			temp2->sreg2 = temp->dreg;

			NEW_INS (cfg, temp, OP_IA64_COND_EXC);
			temp->inst_p1 = (char*)"OverflowException";

			ins->opcode = OP_MOVE;
			break;
		case OP_FCONV_TO_I4:
		case OP_FCONV_TO_I2:
		case OP_FCONV_TO_U2:
		case OP_FCONV_TO_I1:
		case OP_FCONV_TO_U1:
			NEW_INS (cfg, temp, OP_FCONV_TO_I8);
			temp->sreg1 = ins->sreg1;
			temp->dreg = ins->dreg;

			switch (ins->opcode) {
			case OP_FCONV_TO_I4:
				ins->opcode = OP_SEXT_I4;
				break;
			case OP_FCONV_TO_I2:
				ins->opcode = OP_SEXT_I2;
				break;
			case OP_FCONV_TO_U2:
				ins->opcode = OP_ZEXT_I2;
				break;
			case OP_FCONV_TO_I1:
				ins->opcode = OP_SEXT_I1;
				break;
			case OP_FCONV_TO_U1:
				ins->opcode = OP_ZEXT_I1;
				break;
			default:
				g_assert_not_reached ();
			}

			ins->sreg1 = ins->dreg;
			break;
		default:
			break;
		}

		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;

	bb->max_vreg = cfg->next_vreg;
}

/*
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
 */
static Ia64CodegenState
emit_load_volatile_arguments (MonoCompile *cfg, Ia64CodegenState code)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *ins;
	CallInfo *cinfo;
	guint32 i;

	/* FIXME: Generate intermediate code instead */

	sig = mono_method_signature (method);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/* This is the opposite of the code in emit_prolog */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		gint32 stack_offset;
		MonoType *arg_type;

		ins = cfg->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		arg_type = mono_type_get_underlying_type (arg_type);

		stack_offset = ainfo->offset + ARGS_OFFSET;

		/* Load volatile arguments back from the stack */
		if (ins->opcode != OP_REGVAR) {
			switch (ainfo->storage) {
			case ArgInIReg:
			case ArgInFloatReg:
				/* FIXME: big offsets */
				g_assert (ins->opcode == OP_REGOFFSET);
				ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
				if (arg_type->byref)
					ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
				else {
					switch (arg_type->type) {
					case MONO_TYPE_R4:
						ia64_ldfs (code, ainfo->reg, GP_SCRATCH_REG);
						break;
					case MONO_TYPE_R8:
						ia64_ldfd (code, ainfo->reg, GP_SCRATCH_REG);
						break;
					default:
						ia64_ld8 (code, cfg->arch.reg_in0 + ainfo->reg, GP_SCRATCH_REG);
						break;
					}
				}
				break;
			case ArgOnStack:
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}

		if (ins->opcode == OP_REGVAR) {
			/* Argument allocated to (non-volatile) register */
			switch (ainfo->storage) {
			case ArgInIReg:
				if (ins->dreg != cfg->arch.reg_in0 + ainfo->reg)
					ia64_mov (code, cfg->arch.reg_in0 + ainfo->reg, ins->dreg);
				break;
			case ArgOnStack:
				ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
				ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}
	}

	return code;
}

static Ia64CodegenState
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, Ia64CodegenState code)
{
	CallInfo *cinfo;
	int i;

	/* Move return value to the target register */
	switch (ins->opcode) {
	case OP_VOIDCALL:
	case OP_VOIDCALL_REG:
	case OP_VOIDCALL_MEMBASE:
		break;
	case OP_CALL:
	case OP_CALL_REG:
	case OP_CALL_MEMBASE:
	case OP_LCALL:
	case OP_LCALL_REG:
	case OP_LCALL_MEMBASE:
		g_assert (ins->dreg == IA64_R8);
		break;
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
		g_assert (ins->dreg == 8);
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
			ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
		break;
	case OP_VCALL:
	case OP_VCALL_REG:
	case OP_VCALL_MEMBASE:
	case OP_VCALL2:
	case OP_VCALL2_REG:
	case OP_VCALL2_MEMBASE: {
		ArgStorage storage;

		cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
		storage = cinfo->ret.storage;

		if (storage == ArgAggregate) {
			MonoInst *local = (MonoInst*)cfg->arch.ret_var_addr_local;

			/* Load address of stack space allocated for the return value */
			ia64_movl (code, GP_SCRATCH_REG, local->inst_offset);
			ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, local->inst_basereg);
			ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);

			for (i = 0; i < cinfo->ret.nregs; ++i) {
				switch (cinfo->ret.atype) {
				case AggregateNormal:
					ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
					break;
				case AggregateSingleHFA:
					ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 4, 0);
					break;
				case AggregateDoubleHFA:
					ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, cinfo->ret.reg + i, 8, 0);
					break;
				default:
					g_assert_not_reached ();
				}
			}
		}
		break;
	}
	default:
		g_assert_not_reached ();
	}

	return code;
}

#define add_patch_info(cfg,code,patch_type,data) do { \
	mono_add_patch_info (cfg, code.buf + code.nins - cfg->native_code, patch_type, data); \
} while (0)

#define emit_cond_system_exception(cfg,code,exc_name,predicate) do { \
	MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
	if (tins == NULL) \
		add_patch_info (cfg, code, MONO_PATCH_INFO_EXC, exc_name); \
	else \
		add_patch_info (cfg, code, MONO_PATCH_INFO_BB, tins->inst_true_bb); \
	ia64_br_cond_pred (code, (predicate), 0); \
} while (0)
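
/*
 * Typical use: after a compare that sets p6/p7, e.g.
 * emit_cond_system_exception (cfg, code, "OverflowException", 6) emits a
 * branch on p6 to the exception throwing code patched in later.
 */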

static Ia64CodegenState
emit_call (MonoCompile *cfg, Ia64CodegenState code, guint32 patch_type, gconstpointer data)
{
	add_patch_info (cfg, code, patch_type, data);

	if ((patch_type == MONO_PATCH_INFO_ABS) || (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD)) {
		/* Indirect call */
		/* mono_arch_patch_callsite will patch this */
		/* mono_arch_nullify_class_init_trampoline will patch this */
		ia64_movl (code, GP_SCRATCH_REG, 0);
		ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
		ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
		ia64_br_call_reg (code, IA64_B0, IA64_B6);
	}
	else {
		/* Can't use a direct call since the displacement might be too small */
		/* mono_arch_patch_callsite will patch this */
		ia64_movl (code, GP_SCRATCH_REG, 0);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
		ia64_br_call_reg (code, IA64_B0, IA64_B6);
	}

	return code;
}
2026 void
2027 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2029 MonoInst *ins;
2030 MonoCallInst *call;
2031 guint offset;
2032 Ia64CodegenState code;
2033 guint8 *code_start = cfg->native_code + cfg->code_len;
2034 MonoInst *last_ins = NULL;
2035 guint last_offset = 0;
2036 int max_len, cpos;
2038 if (cfg->opt & MONO_OPT_LOOP) {
2039 /* FIXME: */
2042 if (cfg->verbose_level > 2)
2043 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2045 cpos = bb->max_offset;
2047 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2048 NOT_IMPLEMENTED;
2051 offset = code_start - cfg->native_code;
2053 ia64_codegen_init (code, code_start);
2055 #if 0
2056 if (strstr (cfg->method->name, "conv_ovf_i1") && (bb->block_num == 2))
2057 break_count ();
2058 #endif
2060 MONO_BB_FOR_EACH_INS (bb, ins) {
2061 offset = code.buf - cfg->native_code;
2063 max_len = ((int)(((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN])) + 128;
2065 while (offset + max_len + 16 > cfg->code_size) {
2066 ia64_codegen_close (code);
2068 offset = code.buf - cfg->native_code;
2070 cfg->code_size *= 2;
2071 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2072 code_start = cfg->native_code + offset;
2073 cfg->stat_code_reallocs++;
2075 ia64_codegen_init (code, code_start);
2078 mono_debug_record_line_number (cfg, ins, offset);
2080 switch (ins->opcode) {
2081 case OP_ICONST:
2082 case OP_I8CONST:
2083 if (ia64_is_imm14 (ins->inst_c0))
2084 ia64_adds_imm (code, ins->dreg, ins->inst_c0, IA64_R0);
2085 else
2086 ia64_movl (code, ins->dreg, ins->inst_c0);
2087 break;
2088 case OP_JUMP_TABLE:
2089 add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2090 ia64_movl (code, ins->dreg, 0);
2091 break;
2092 case OP_MOVE:
2093 ia64_mov (code, ins->dreg, ins->sreg1);
2094 break;
2095 case OP_BR:
2096 case OP_IA64_BR_COND: {
2097 int pred = 0;
2098 if (ins->opcode == OP_IA64_BR_COND)
2099 pred = 6;
2100 if (ins->inst_target_bb->native_offset) {
2101 guint8 *pos = code.buf + code.nins;
2103 ia64_br_cond_pred (code, pred, 0);
2104 ia64_begin_bundle (code);
2105 ia64_patch (pos, cfg->native_code + ins->inst_target_bb->native_offset);
2106 } else {
2107 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2108 ia64_br_cond_pred (code, pred, 0);
}
2110 break;
}
2112 case OP_LABEL:
2113 ia64_begin_bundle (code);
2114 ins->inst_c0 = code.buf - cfg->native_code;
2115 break;
2116 case OP_NOP:
2117 case OP_RELAXED_NOP:
2118 case OP_DUMMY_USE:
2119 case OP_DUMMY_STORE:
2120 case OP_NOT_REACHED:
2121 case OP_NOT_NULL:
2122 break;
2123 case OP_BR_REG:
2124 ia64_mov_to_br (code, IA64_B6, ins->sreg1);
2125 ia64_br_cond_reg (code, IA64_B6);
2126 break;
2127 case OP_IADD:
2128 case OP_LADD:
2129 ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
2130 break;
2131 case OP_ISUB:
2132 case OP_LSUB:
2133 ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
2134 break;
2135 case OP_IAND:
2136 case OP_LAND:
2137 ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
2138 break;
2139 case OP_IOR:
2140 case OP_LOR:
2141 ia64_or (code, ins->dreg, ins->sreg1, ins->sreg2);
2142 break;
2143 case OP_IXOR:
2144 case OP_LXOR:
2145 ia64_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
2146 break;
2147 case OP_INEG:
2148 case OP_LNEG:
2149 ia64_sub (code, ins->dreg, IA64_R0, ins->sreg1);
2150 break;
2151 case OP_INOT:
2152 case OP_LNOT:
2153 ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
2154 break;
2155 case OP_ISHL:
2156 case OP_LSHL:
2157 ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
2158 break;
2159 case OP_ISHR:
2160 ia64_sxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2161 ia64_shr (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2162 break;
2163 case OP_LSHR:
2164 ia64_shr (code, ins->dreg, ins->sreg1, ins->sreg2);
2165 break;
2166 case OP_ISHR_UN:
2167 ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2168 ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2169 break;
2170 case OP_LSHR_UN:
2171 ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
2172 break;
2173 case OP_IADDCC:
2174 /* p6 and p7 are set if there is signed/unsigned overflow */
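/*
 * Worked example (informal): sreg1 = 0x7fffffff, sreg2 = 1. p8 is set
 * since sreg2 > 0; the result 0x80000000 is less than sreg1 as a signed
 * 32-bit value, so p6 (signed overflow) is set and a following
 * OP_COND_EXC_IOV throws OverflowException.
 */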
2176 /* Set p8-p9 == (sreg2 > 0) */
2177 ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
2179 ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2181 /* (sreg2 > 0) && (res < ins->sreg1) => signed overflow */
2182 ia64_cmp4_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2183 /* (sreg2 <= 0) && (res > ins->sreg1) => signed overflow */
2184 ia64_cmp4_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
2186 /* res <u sreg1 => unsigned overflow */
2187 ia64_cmp4_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
2189 /* FIXME: Predicate this since this is a side effect */
2190 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2191 break;
2192 case OP_ISUBCC:
2193 /* p6 and p7 are set if there is signed/unsigned overflow */
2195 /* Set p8-p9 == (sreg2 > 0) */
2196 ia64_cmp4_lt (code, 8, 9, IA64_R0, ins->sreg2);
2198 ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2200 /* (sreg2 > 0) && (res > ins->sreg1) => signed overflow */
2201 ia64_cmp4_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2202 /* (sreg2 <= 0) && (res < ins->sreg1) => signed overflow */
2203 ia64_cmp4_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2205 /* sreg1 <u sreg2 => unsigned overflow */
2206 ia64_cmp4_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
2208 /* FIXME: Predicate this since this is a side effect */
2209 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2210 break;
2211 case OP_ADDCC:
2212 /* Same as OP_IADDCC */
2213 ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
2215 ia64_add (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2217 ia64_cmp_lt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2218 ia64_cmp_lt_pred (code, 9, 6, 10, ins->sreg1, GP_SCRATCH_REG);
2220 ia64_cmp_ltu (code, 7, 10, GP_SCRATCH_REG, ins->sreg1);
2222 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2223 break;
2224 case OP_SUBCC:
2225 /* Same as OP_ISUBCC */
2227 ia64_cmp_lt (code, 8, 9, IA64_R0, ins->sreg2);
2229 ia64_sub (code, GP_SCRATCH_REG, ins->sreg1, ins->sreg2);
2231 ia64_cmp_gt_pred (code, 8, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2232 ia64_cmp_lt_pred (code, 9, 6, 10, GP_SCRATCH_REG, ins->sreg1);
2234 ia64_cmp_ltu (code, 7, 10, ins->sreg1, ins->sreg2);
2236 ia64_mov (code, ins->dreg, GP_SCRATCH_REG);
2237 break;
2238 case OP_ADD_IMM:
2239 case OP_IADD_IMM:
2240 case OP_LADD_IMM:
2241 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2242 break;
2243 case OP_IAND_IMM:
2244 case OP_AND_IMM:
2245 case OP_LAND_IMM:
2246 ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2247 break;
2248 case OP_IOR_IMM:
2249 case OP_LOR_IMM:
2250 ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2251 break;
2252 case OP_IXOR_IMM:
2253 case OP_LXOR_IMM:
2254 ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
2255 break;
2256 case OP_SHL_IMM:
2257 case OP_ISHL_IMM:
2258 case OP_LSHL_IMM:
2259 ia64_shl_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2260 break;
2261 case OP_SHR_IMM:
2262 case OP_LSHR_IMM:
2263 ia64_shr_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2264 break;
2265 case OP_ISHR_IMM:
2266 g_assert (ins->inst_imm <= 64);
2267 ia64_extr (code, ins->dreg, ins->sreg1, ins->inst_imm, 32 - ins->inst_imm);
2268 break;
2269 case OP_ISHR_UN_IMM:
2270 ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
2271 ia64_shr_u_imm (code, ins->dreg, GP_SCRATCH_REG, ins->inst_imm);
2272 break;
2273 case OP_LSHR_UN_IMM:
2274 ia64_shr_u_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
2275 break;
2276 case OP_LMUL:
2277 /* Based on gcc code */
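/*
 * Itanium does general integer multiplies in the FP unit: the operands
 * are moved into FP significands (setf.sig), xmpy.l keeps the low 64
 * bits of the 128-bit product, and getf.sig moves the result back.
 */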
2278 ia64_setf_sig (code, FP_SCRATCH_REG, ins->sreg1);
2279 ia64_setf_sig (code, FP_SCRATCH_REG2, ins->sreg2);
2280 ia64_xmpy_l (code, FP_SCRATCH_REG, FP_SCRATCH_REG, FP_SCRATCH_REG2);
2281 ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
2282 break;
2284 case OP_STOREI1_MEMBASE_REG:
2285 ia64_st1_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2286 break;
2287 case OP_STOREI2_MEMBASE_REG:
2288 ia64_st2_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2289 break;
2290 case OP_STOREI4_MEMBASE_REG:
2291 ia64_st4_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2292 break;
2293 case OP_STOREI8_MEMBASE_REG:
2294 case OP_STORE_MEMBASE_REG:
2295 if (ins->inst_offset != 0) {
2296 /* This is generated by local regalloc */
2297 if (ia64_is_imm14 (ins->inst_offset)) {
2298 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
2299 } else {
2300 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2301 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
}
2303 ins->inst_destbasereg = GP_SCRATCH_REG;
}
2305 ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2306 break;
2308 case OP_IA64_STOREI1_MEMBASE_INC_REG:
2309 ia64_st1_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 1, 0);
2310 break;
2311 case OP_IA64_STOREI2_MEMBASE_INC_REG:
2312 ia64_st2_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 2, 0);
2313 break;
2314 case OP_IA64_STOREI4_MEMBASE_INC_REG:
2315 ia64_st4_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 4, 0);
2316 break;
2317 case OP_IA64_STOREI8_MEMBASE_INC_REG:
2318 ia64_st8_inc_imm_hint (code, ins->inst_destbasereg, ins->sreg1, 8, 0);
2319 break;
2321 case OP_LOADU1_MEMBASE:
2322 ia64_ld1 (code, ins->dreg, ins->inst_basereg);
2323 break;
2324 case OP_LOADU2_MEMBASE:
2325 ia64_ld2 (code, ins->dreg, ins->inst_basereg);
2326 break;
2327 case OP_LOADU4_MEMBASE:
2328 ia64_ld4 (code, ins->dreg, ins->inst_basereg);
2329 break;
2330 case OP_LOADI1_MEMBASE:
2331 ia64_ld1 (code, ins->dreg, ins->inst_basereg);
2332 ia64_sxt1 (code, ins->dreg, ins->dreg);
2333 break;
2334 case OP_LOADI2_MEMBASE:
2335 ia64_ld2 (code, ins->dreg, ins->inst_basereg);
2336 ia64_sxt2 (code, ins->dreg, ins->dreg);
2337 break;
2338 case OP_LOADI4_MEMBASE:
2339 ia64_ld4 (code, ins->dreg, ins->inst_basereg);
2340 ia64_sxt4 (code, ins->dreg, ins->dreg);
2341 break;
2342 case OP_LOAD_MEMBASE:
2343 case OP_LOADI8_MEMBASE:
2344 if (ins->inst_offset != 0) {
2345 /* This is generated by local regalloc */
2346 if (ia64_is_imm14 (ins->inst_offset)) {
2347 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
2348 } else {
2349 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2350 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
}
2352 ins->inst_basereg = GP_SCRATCH_REG;
}
2354 ia64_ld8 (code, ins->dreg, ins->inst_basereg);
2355 break;
2357 case OP_IA64_LOADU1_MEMBASE_INC:
2358 ia64_ld1_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 1, 0);
2359 break;
2360 case OP_IA64_LOADU2_MEMBASE_INC:
2361 ia64_ld2_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 2, 0);
2362 break;
2363 case OP_IA64_LOADU4_MEMBASE_INC:
2364 ia64_ld4_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 4, 0);
2365 break;
2366 case OP_IA64_LOADI8_MEMBASE_INC:
2367 ia64_ld8_inc_imm_hint (code, ins->dreg, ins->inst_basereg, 8, 0);
2368 break;
2370 case OP_SEXT_I1:
2371 ia64_sxt1 (code, ins->dreg, ins->sreg1);
2372 break;
2373 case OP_SEXT_I2:
2374 ia64_sxt2 (code, ins->dreg, ins->sreg1);
2375 break;
2376 case OP_SEXT_I4:
2377 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2378 break;
2379 case OP_ZEXT_I1:
2380 ia64_zxt1 (code, ins->dreg, ins->sreg1);
2381 break;
2382 case OP_ZEXT_I2:
2383 ia64_zxt2 (code, ins->dreg, ins->sreg1);
2384 break;
2385 case OP_ZEXT_I4:
2386 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2387 break;
2389 /* Compare opcodes */
2390 case OP_IA64_CMP4_EQ:
2391 ia64_cmp4_eq (code, 6, 7, ins->sreg1, ins->sreg2);
2392 break;
2393 case OP_IA64_CMP4_NE:
2394 ia64_cmp4_ne (code, 6, 7, ins->sreg1, ins->sreg2);
2395 break;
2396 case OP_IA64_CMP4_LE:
2397 ia64_cmp4_le (code, 6, 7, ins->sreg1, ins->sreg2);
2398 break;
2399 case OP_IA64_CMP4_LT:
2400 ia64_cmp4_lt (code, 6, 7, ins->sreg1, ins->sreg2);
2401 break;
2402 case OP_IA64_CMP4_GE:
2403 ia64_cmp4_ge (code, 6, 7, ins->sreg1, ins->sreg2);
2404 break;
2405 case OP_IA64_CMP4_GT:
2406 ia64_cmp4_gt (code, 6, 7, ins->sreg1, ins->sreg2);
2407 break;
2408 case OP_IA64_CMP4_LT_UN:
2409 ia64_cmp4_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
2410 break;
2411 case OP_IA64_CMP4_LE_UN:
2412 ia64_cmp4_leu (code, 6, 7, ins->sreg1, ins->sreg2);
2413 break;
2414 case OP_IA64_CMP4_GT_UN:
2415 ia64_cmp4_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
2416 break;
2417 case OP_IA64_CMP4_GE_UN:
2418 ia64_cmp4_geu (code, 6, 7, ins->sreg1, ins->sreg2);
2419 break;
2420 case OP_IA64_CMP_EQ:
2421 ia64_cmp_eq (code, 6, 7, ins->sreg1, ins->sreg2);
2422 break;
2423 case OP_IA64_CMP_NE:
2424 ia64_cmp_ne (code, 6, 7, ins->sreg1, ins->sreg2);
2425 break;
2426 case OP_IA64_CMP_LE:
2427 ia64_cmp_le (code, 6, 7, ins->sreg1, ins->sreg2);
2428 break;
2429 case OP_IA64_CMP_LT:
2430 ia64_cmp_lt (code, 6, 7, ins->sreg1, ins->sreg2);
2431 break;
2432 case OP_IA64_CMP_GE:
2433 ia64_cmp_ge (code, 6, 7, ins->sreg1, ins->sreg2);
2434 break;
2435 case OP_IA64_CMP_GT:
2436 ia64_cmp_gt (code, 6, 7, ins->sreg1, ins->sreg2);
2437 break;
2438 case OP_IA64_CMP_GT_UN:
2439 ia64_cmp_gtu (code, 6, 7, ins->sreg1, ins->sreg2);
2440 break;
2441 case OP_IA64_CMP_LT_UN:
2442 ia64_cmp_ltu (code, 6, 7, ins->sreg1, ins->sreg2);
2443 break;
2444 case OP_IA64_CMP_GE_UN:
2445 ia64_cmp_geu (code, 6, 7, ins->sreg1, ins->sreg2);
2446 break;
2447 case OP_IA64_CMP_LE_UN:
2448 ia64_cmp_leu (code, 6, 7, ins->sreg1, ins->sreg2);
2449 break;
2450 case OP_IA64_CMP4_EQ_IMM:
2451 ia64_cmp4_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2452 break;
2453 case OP_IA64_CMP4_NE_IMM:
2454 ia64_cmp4_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2455 break;
2456 case OP_IA64_CMP4_LE_IMM:
2457 ia64_cmp4_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2458 break;
2459 case OP_IA64_CMP4_LT_IMM:
2460 ia64_cmp4_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2461 break;
2462 case OP_IA64_CMP4_GE_IMM:
2463 ia64_cmp4_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2464 break;
2465 case OP_IA64_CMP4_GT_IMM:
2466 ia64_cmp4_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2467 break;
2468 case OP_IA64_CMP4_LT_UN_IMM:
2469 ia64_cmp4_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2470 break;
2471 case OP_IA64_CMP4_LE_UN_IMM:
2472 ia64_cmp4_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2473 break;
2474 case OP_IA64_CMP4_GT_UN_IMM:
2475 ia64_cmp4_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2476 break;
2477 case OP_IA64_CMP4_GE_UN_IMM:
2478 ia64_cmp4_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2479 break;
2480 case OP_IA64_CMP_EQ_IMM:
2481 ia64_cmp_eq_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2482 break;
2483 case OP_IA64_CMP_NE_IMM:
2484 ia64_cmp_ne_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2485 break;
2486 case OP_IA64_CMP_LE_IMM:
2487 ia64_cmp_le_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2488 break;
2489 case OP_IA64_CMP_LT_IMM:
2490 ia64_cmp_lt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2491 break;
2492 case OP_IA64_CMP_GE_IMM:
2493 ia64_cmp_ge_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2494 break;
2495 case OP_IA64_CMP_GT_IMM:
2496 ia64_cmp_gt_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2497 break;
2498 case OP_IA64_CMP_GT_UN_IMM:
2499 ia64_cmp_gtu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2500 break;
2501 case OP_IA64_CMP_LT_UN_IMM:
2502 ia64_cmp_ltu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2503 break;
2504 case OP_IA64_CMP_GE_UN_IMM:
2505 ia64_cmp_geu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2506 break;
2507 case OP_IA64_CMP_LE_UN_IMM:
2508 ia64_cmp_leu_imm (code, 6, 7, ins->inst_imm, ins->sreg2);
2509 break;
2510 case OP_IA64_FCMP_EQ:
2511 ia64_fcmp_eq_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2512 break;
2513 case OP_IA64_FCMP_NE:
2514 ia64_fcmp_ne_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2515 break;
2516 case OP_IA64_FCMP_LT:
2517 ia64_fcmp_lt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2518 break;
2519 case OP_IA64_FCMP_GT:
2520 ia64_fcmp_gt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2521 break;
2522 case OP_IA64_FCMP_LE:
2523 ia64_fcmp_le_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2524 break;
2525 case OP_IA64_FCMP_GE:
2526 ia64_fcmp_ge_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2527 break;
2528 case OP_IA64_FCMP_GT_UN:
2529 ia64_fcmp_gt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2530 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2531 break;
2532 case OP_IA64_FCMP_LT_UN:
2533 ia64_fcmp_lt_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2534 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2535 break;
2536 case OP_IA64_FCMP_GE_UN:
2537 ia64_fcmp_ge_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2538 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2539 break;
2540 case OP_IA64_FCMP_LE_UN:
2541 ia64_fcmp_le_sf (code, 6, 7, ins->sreg1, ins->sreg2, 0);
2542 ia64_fcmp_unord_sf_pred (code, 7, 6, 7, ins->sreg1, ins->sreg2, 0);
2543 break;
2545 case OP_COND_EXC_IOV:
2546 case OP_COND_EXC_OV:
2547 emit_cond_system_exception (cfg, code, "OverflowException", 6);
2548 break;
2549 case OP_COND_EXC_IC:
2550 case OP_COND_EXC_C:
2551 emit_cond_system_exception (cfg, code, "OverflowException", 7);
2552 break;
2553 case OP_IA64_COND_EXC:
2554 emit_cond_system_exception (cfg, code, ins->inst_p1, 6);
2555 break;
2556 case OP_IA64_CSET:
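/*
 * Materializes the preceding compare as 0/1: dreg = 0 under p7,
 * dreg = 0 + 0 + 1 under p6. The two writes are guarded by
 * complementary predicates, so no stop bit is needed between them
 * (hence the ia64_no_stop marker).
 */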
2557 ia64_mov_pred (code, 7, ins->dreg, IA64_R0);
2558 ia64_no_stop (code);
2559 ia64_add1_pred (code, 6, ins->dreg, IA64_R0, IA64_R0);
2560 break;
2561 case OP_ICONV_TO_I1:
2562 case OP_LCONV_TO_I1:
2563 /* FIXME: Is this needed ? */
2564 ia64_sxt1 (code, ins->dreg, ins->sreg1);
2565 break;
2566 case OP_ICONV_TO_I2:
2567 case OP_LCONV_TO_I2:
2568 /* FIXME: Is this needed ? */
2569 ia64_sxt2 (code, ins->dreg, ins->sreg1);
2570 break;
2571 case OP_LCONV_TO_I4:
2572 /* FIXME: Is this needed ? */
2573 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2574 break;
2575 case OP_ICONV_TO_U1:
2576 case OP_LCONV_TO_U1:
2577 /* FIXME: Is this needed */
2578 ia64_zxt1 (code, ins->dreg, ins->sreg1);
2579 break;
2580 case OP_ICONV_TO_U2:
2581 case OP_LCONV_TO_U2:
2582 /* FIXME: Is this needed */
2583 ia64_zxt2 (code, ins->dreg, ins->sreg1);
2584 break;
2585 case OP_LCONV_TO_U4:
2586 /* FIXME: Is this needed */
2587 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2588 break;
2589 case OP_ICONV_TO_I8:
2590 case OP_ICONV_TO_I:
2591 case OP_LCONV_TO_I8:
2592 case OP_LCONV_TO_I:
2593 ia64_sxt4 (code, ins->dreg, ins->sreg1);
2594 break;
2595 case OP_LCONV_TO_U8:
2596 case OP_LCONV_TO_U:
2597 ia64_zxt4 (code, ins->dreg, ins->sreg1);
2598 break;
/*
2601 * FLOAT OPCODES
*/
2603 case OP_R8CONST: {
2604 double d = *(double *)ins->inst_p0;
2606 if ((d == 0.0) && (mono_signbit (d) == 0))
2607 ia64_fmov (code, ins->dreg, 0);
2608 else if (d == 1.0)
2609 ia64_fmov (code, ins->dreg, 1);
2610 else {
2611 add_patch_info (cfg, code, MONO_PATCH_INFO_R8, ins->inst_p0);
2612 ia64_movl (code, GP_SCRATCH_REG, 0);
2613 ia64_ldfd (code, ins->dreg, GP_SCRATCH_REG);
}
2615 break;
}
2617 case OP_R4CONST: {
2618 float f = *(float *)ins->inst_p0;
2620 if ((f == 0.0) && (mono_signbit (f) == 0))
2621 ia64_fmov (code, ins->dreg, 0);
2622 else if (f == 1.0)
2623 ia64_fmov (code, ins->dreg, 1);
2624 else {
2625 add_patch_info (cfg, code, MONO_PATCH_INFO_R4, ins->inst_p0);
2626 ia64_movl (code, GP_SCRATCH_REG, 0);
2627 ia64_ldfs (code, ins->dreg, GP_SCRATCH_REG);
}
2629 break;
}
2631 case OP_FMOVE:
2632 ia64_fmov (code, ins->dreg, ins->sreg1);
2633 break;
2634 case OP_STORER8_MEMBASE_REG:
2635 if (ins->inst_offset != 0) {
2636 /* This is generated by local regalloc */
2637 if (ia64_is_imm14 (ins->inst_offset)) {
2638 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
2639 } else {
2640 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2641 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
}
2643 ins->inst_destbasereg = GP_SCRATCH_REG;
}
2645 ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
2646 break;
2647 case OP_STORER4_MEMBASE_REG:
2648 ia64_fnorm_s_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
2649 ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
2650 break;
2651 case OP_LOADR8_MEMBASE:
2652 if (ins->inst_offset != 0) {
2653 /* This is generated by local regalloc */
2654 if (ia64_is_imm14 (ins->inst_offset)) {
2655 ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
2656 } else {
2657 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2658 ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
}
2660 ins->inst_basereg = GP_SCRATCH_REG;
}
2662 ia64_ldfd (code, ins->dreg, ins->inst_basereg);
2663 break;
2664 case OP_LOADR4_MEMBASE:
2665 ia64_ldfs (code, ins->dreg, ins->inst_basereg);
2666 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
2667 break;
2668 case OP_ICONV_TO_R4:
2669 case OP_LCONV_TO_R4:
2670 ia64_setf_sig (code, ins->dreg, ins->sreg1);
2671 ia64_fcvt_xf (code, ins->dreg, ins->dreg);
2672 ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
2673 break;
2674 case OP_ICONV_TO_R8:
2675 case OP_LCONV_TO_R8:
2676 ia64_setf_sig (code, ins->dreg, ins->sreg1);
2677 ia64_fcvt_xf (code, ins->dreg, ins->dreg);
2678 ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
2679 break;
2680 case OP_FCONV_TO_R4:
2681 ia64_fnorm_s_sf (code, ins->dreg, ins->sreg1, 0);
2682 break;
2683 case OP_FCONV_TO_I8:
2684 case OP_FCONV_TO_I:
2685 ia64_fcvt_fx_trunc_sf (code, FP_SCRATCH_REG, ins->sreg1, 0);
2686 ia64_getf_sig (code, ins->dreg, FP_SCRATCH_REG);
2687 break;
2688 case OP_FADD:
2689 ia64_fma_d_sf (code, ins->dreg, ins->sreg1, 1, ins->sreg2, 0);
2690 break;
2691 case OP_FSUB:
2692 ia64_fms_d_sf (code, ins->dreg, ins->sreg1, 1, ins->sreg2, 0);
2693 break;
2694 case OP_FMUL:
2695 ia64_fma_d_sf (code, ins->dreg, ins->sreg1, ins->sreg2, 0, 0);
2696 break;
2697 case OP_FNEG:
2698 ia64_fmerge_ns (code, ins->dreg, ins->sreg1, ins->sreg1);
2699 break;
2700 case OP_CKFINITE:
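/*
 * The fclass immediates below are class masks (per the fclass encoding:
 * 0x001/0x002 select positive/negative, 0x020 infinity, 0x040 signaling
 * NaN, 0x080 quiet NaN), so 0x021/0x022 test +inf/-inf.
 */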
2701 /* Quiet NaN */
2702 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x080);
2703 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2704 /* Signaling NaN */
2705 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x040);
2706 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2707 /* Positive infinity */
2708 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x021);
2709 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2710 /* Negative infinity */
2711 ia64_fclass_m (code, 6, 7, ins->sreg1, 0x022);
2712 emit_cond_system_exception (cfg, code, "ArithmeticException", 6);
2713 break;
2715 /* Calls */
2716 case OP_CHECK_THIS:
2717 /* ensure ins->sreg1 is not NULL */
2718 /* Can't use ld8 as this could be a vtype address */
2719 ia64_ld1 (code, GP_SCRATCH_REG, ins->sreg1);
2720 break;
2721 case OP_ARGLIST:
2722 ia64_adds_imm (code, GP_SCRATCH_REG, cfg->sig_cookie, cfg->frame_reg);
2723 ia64_st8 (code, ins->sreg1, GP_SCRATCH_REG);
2724 break;
2725 case OP_FCALL:
2726 case OP_LCALL:
2727 case OP_VCALL:
2728 case OP_VCALL2:
2729 case OP_VOIDCALL:
2730 case OP_CALL:
2731 call = (MonoCallInst*)ins;
2733 if (ins->flags & MONO_INST_HAS_METHOD)
2734 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2735 else
2736 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2738 code = emit_move_return_value (cfg, ins, code);
2739 break;
2741 case OP_CALL_REG:
2742 case OP_FCALL_REG:
2743 case OP_LCALL_REG:
2744 case OP_VCALL_REG:
2745 case OP_VCALL2_REG:
2746 case OP_VOIDCALL_REG: {
2747 MonoCallInst *call = (MonoCallInst*)ins;
2748 CallInfo *cinfo;
2749 int out_reg;
/*
2752 * mono_arch_get_this_arg_from_call () needs to find the this argument in a global
2753 * register.
*/
2755 cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
2756 out_reg = cfg->arch.reg_out0;
2757 ia64_mov (code, IA64_R10, out_reg);
2759 /* Indirect call */
2760 ia64_mov (code, IA64_R8, ins->sreg1);
2761 ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, IA64_R8, 8);
2762 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
2763 ia64_ld8 (code, IA64_GP, IA64_R8);
2764 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2766 code = emit_move_return_value (cfg, ins, code);
2767 break;
}
2769 case OP_FCALL_MEMBASE:
2770 case OP_LCALL_MEMBASE:
2771 case OP_VCALL_MEMBASE:
2772 case OP_VCALL2_MEMBASE:
2773 case OP_VOIDCALL_MEMBASE:
2774 case OP_CALL_MEMBASE: {
2775 MonoCallInst *call = (MonoCallInst*)ins;
2776 CallInfo *cinfo;
2777 int out_reg;
2779 ia64_mov (code, IA64_R11, ins->sreg1);
2780 if (ia64_is_imm14 (ins->inst_offset))
2781 ia64_adds_imm (code, IA64_R8, ins->inst_offset, ins->sreg1);
2782 else {
2783 ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
2784 ia64_add (code, IA64_R8, GP_SCRATCH_REG, ins->sreg1);
}
2787 if (call->method && ins->inst_offset < 0) {
/*
2789 * This is a possible IMT call so save the IMT method in a global
2790 * register where mono_arch_find_imt_method () and its friends can
2791 * access it.
*/
2793 ia64_movl (code, IA64_R9, call->method);
}
/*
2797 * mono_arch_find_this_arg () needs to find the this argument in a global
2798 * register.
*/
2800 cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
2801 out_reg = cfg->arch.reg_out0;
2802 ia64_mov (code, IA64_R10, out_reg);
2804 ia64_ld8 (code, GP_SCRATCH_REG, IA64_R8);
2806 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2808 ia64_br_call_reg (code, IA64_B0, IA64_B6);
2810 code = emit_move_return_value (cfg, ins, code);
2811 break;
}
2813 case OP_JMP: {
/*
2815 * Keep in sync with the code in emit_epilog.
*/
2818 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2819 NOT_IMPLEMENTED;
2821 g_assert (!cfg->method->save_lmf);
2823 /* Load arguments into their original registers */
2824 code = emit_load_volatile_arguments (cfg, code);
2826 if (cfg->arch.stack_alloc_size) {
2827 if (cfg->arch.omit_fp) {
2828 if (ia64_is_imm14 (cfg->arch.stack_alloc_size))
2829 ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
2830 else {
2831 ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
2832 ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
}
}
2835 else
2836 ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
}
2838 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
2839 ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
2841 add_patch_info (cfg, code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2842 ia64_movl (code, GP_SCRATCH_REG, 0);
2843 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
2844 ia64_br_cond_reg (code, IA64_B6);
2846 break;
}
2848 case OP_BREAK:
2849 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, mono_break);
2850 break;
2852 case OP_LOCALLOC: {
2853 gint32 abi_offset;
2855 /* FIXME: Sigaltstack support */
2857 /* keep alignment */
2858 ia64_adds_imm (code, GP_SCRATCH_REG, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->sreg1);
2859 ia64_movl (code, GP_SCRATCH_REG2, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
2860 ia64_and (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2862 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
2864 ia64_mov (code, ins->dreg, IA64_SP);
2866 /* An area at sp is reserved by the ABI for parameter passing */
2867 abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_LOCALLOC_ALIGNMENT);
2868 if (ia64_is_adds_imm (abi_offset))
2869 ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
2870 else {
2871 ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
2872 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
}
2875 if (ins->flags & MONO_INST_INIT) {
2876 /* Upper limit */
2877 ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
2879 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
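/*
 * One instruction per bundle makes the init loop exactly three bundles,
 * so the -2 bundle displacement of the backward branch below reliably
 * targets the st8.
 */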
2881 /* Init loop */
2882 ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
2883 ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
2884 ia64_br_cond_pred (code, 8, -2);
2886 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2888 ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
}

2891 break;
}
2893 case OP_LOCALLOC_IMM: {
2894 gint32 abi_offset;
2896 /* FIXME: Sigaltstack support */
2898 gssize size = ins->inst_imm;
2899 size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
2901 if (ia64_is_adds_imm (size))
2902 ia64_adds_imm (code, GP_SCRATCH_REG, size, IA64_R0);
2903 else
2904 ia64_movl (code, GP_SCRATCH_REG, size);
2906 ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
2907 ia64_mov (code, ins->dreg, IA64_SP);
2909 /* An area at sp is reserved by the ABI for parameter passing */
2910 abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_FRAME_ALIGNMENT);
2911 if (ia64_is_adds_imm (abi_offset))
2912 ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
2913 else {
2914 ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
2915 ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
}
2918 if (ins->flags & MONO_INST_INIT) {
2919 /* Upper limit */
2920 ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
2922 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
2924 /* Init loop */
2925 ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
2926 ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
2927 ia64_br_cond_pred (code, 8, -2);
2929 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
2931 ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
}

2934 break;
}
2936 case OP_TLS_GET:
2937 ia64_adds_imm (code, ins->dreg, ins->inst_offset, IA64_TP);
2938 ia64_ld8 (code, ins->dreg, ins->dreg);
2939 break;
2941 /* Synchronization */
2942 case OP_MEMORY_BARRIER:
2943 ia64_mf (code);
2944 break;
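/*
 * fetchadd returns the value the memory location held *before* the add,
 * so the adds below recomputes the post-add value expected by the
 * ATOMIC_ADD_IMM_NEW opcodes.
 */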
2945 case OP_ATOMIC_ADD_IMM_NEW_I4:
2946 g_assert (ins->inst_offset == 0);
2947 ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
2948 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
2949 break;
2950 case OP_ATOMIC_ADD_IMM_NEW_I8:
2951 g_assert (ins->inst_offset == 0);
2952 ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
2953 ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
2954 break;
2955 case OP_ATOMIC_EXCHANGE_I4:
2956 ia64_xchg4_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
2957 ia64_sxt4 (code, ins->dreg, ins->dreg);
2958 break;
2959 case OP_ATOMIC_EXCHANGE_I8:
2960 ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
2961 break;
2962 case OP_ATOMIC_ADD_NEW_I4: {
2963 guint8 *label, *buf;
2965 /* From libatomic_ops */
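/*
 * In effect (informal C sketch):
 *   do { old = *sreg1; } while (!CAS (sreg1, old, old + sreg2));
 *   dreg = old + sreg2;
 * ar.ccv holds the comparand for cmpxchg4.acq; on a mismatch p7 is set
 * and the loop is retried.
 */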
2966 ia64_mf (code);
2968 ia64_begin_bundle (code);
2969 label = code.buf + code.nins;
2970 ia64_ld4_acq (code, GP_SCRATCH_REG, ins->sreg1);
2971 ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
2972 ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
2973 ia64_cmpxchg4_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
2974 ia64_cmp4_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2975 buf = code.buf + code.nins;
2976 ia64_br_cond_pred (code, 7, 0);
2977 ia64_begin_bundle (code);
2978 ia64_patch (buf, label);
2979 ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
2980 break;
}
2982 case OP_ATOMIC_ADD_NEW_I8: {
2983 guint8 *label, *buf;
2985 /* From libatomic_ops */
2986 ia64_mf (code);
2988 ia64_begin_bundle (code);
2989 label = code.buf + code.nins;
2990 ia64_ld8_acq (code, GP_SCRATCH_REG, ins->sreg1);
2991 ia64_add (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, ins->sreg2);
2992 ia64_mov_to_ar_m (code, IA64_CCV, GP_SCRATCH_REG);
2993 ia64_cmpxchg8_acq_hint (code, GP_SCRATCH_REG2, ins->sreg1, GP_SCRATCH_REG2, 0);
2994 ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, GP_SCRATCH_REG2);
2995 buf = code.buf + code.nins;
2996 ia64_br_cond_pred (code, 7, 0);
2997 ia64_begin_bundle (code);
2998 ia64_patch (buf, label);
2999 ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
3000 break;
}
3003 /* Exception handling */
3004 case OP_CALL_HANDLER:
/*
3006 * Using a call instruction would mess up the register stack, so
3007 * save the return address to a register and use a
3008 * branch.
*/
3010 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
3011 ia64_mov (code, IA64_R15, IA64_R0);
3012 ia64_mov_from_ip (code, GP_SCRATCH_REG);
3013 /* Add the length of OP_CALL_HANDLER */
3014 ia64_adds_imm (code, GP_SCRATCH_REG, 5 * 16, GP_SCRATCH_REG);
3015 add_patch_info (cfg, code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3016 ia64_movl (code, GP_SCRATCH_REG2, 0);
3017 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
3018 ia64_br_cond_reg (code, IA64_B6);
3019 // FIXME:
3020 //mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3021 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
3022 break;
3023 case OP_START_HANDLER: {
/*
3025 * We receive the return address in GP_SCRATCH_REG.
*/
3027 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/*
3030 * R15 determines our caller. It is used since it is writable using
3031 * libunwind.
3032 * R15 == 0 means we are called by OP_CALL_HANDLER or via resume_context ()
3033 * R15 != 0 means we are called by call_filter ().
*/
3035 ia64_codegen_set_one_ins_per_bundle (code, TRUE);
3036 ia64_cmp_eq (code, 6, 7, IA64_R15, IA64_R0);
3038 ia64_br_cond_pred (code, 6, 6);
/*
3041 * Called by call_filter:
3042 * Allocate a new stack frame, and set the fp register from the
3043 * value passed in by the caller.
3044 * We allocate a similar frame as is done by the prolog, so
3045 * if an exception is thrown while executing the filter, the
3046 * unwinder can unwind through the filter frame using the unwind
3047 * info for the prolog.
*/
3049 ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
3050 ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
3051 ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
3052 ia64_mov (code, cfg->frame_reg, IA64_R15);
3053 /* Signal to endfilter that we are called by call_filter */
3054 ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
3056 /* Branch target: */
3057 if (ia64_is_imm14 (spvar->inst_offset))
3058 ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
3059 else {
3060 ia64_movl (code, GP_SCRATCH_REG2, spvar->inst_offset);
3061 ia64_add (code, GP_SCRATCH_REG2, cfg->frame_reg, GP_SCRATCH_REG2);
}
3064 /* Save the return address */
3065 ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
3066 ia64_codegen_set_one_ins_per_bundle (code, FALSE);
3068 break;
}
3070 case OP_ENDFINALLY:
3071 case OP_ENDFILTER: {
3072 /* FIXME: Return the value in ENDFILTER */
3073 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3075 /* Load the return address */
3076 if (ia64_is_imm14 (spvar->inst_offset)) {
3077 ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
3078 } else {
3079 ia64_movl (code, GP_SCRATCH_REG, spvar->inst_offset);
3080 ia64_add (code, GP_SCRATCH_REG, cfg->frame_reg, GP_SCRATCH_REG);
}
3082 ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
3084 /* Test caller */
3085 ia64_cmp_eq (code, 6, 7, GP_SCRATCH_REG, IA64_R0);
3086 ia64_br_cond_pred (code, 7, 4);
3088 /* Called by call_filter */
3089 /* Pop frame */
3090 ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
3091 ia64_mov_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
3092 ia64_br_ret_reg (code, IA64_B0);
3094 /* Called by CALL_HANDLER */
3095 ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
3096 ia64_br_cond_reg (code, IA64_B6);
3097 break;
}
3099 case OP_THROW:
3100 ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
3101 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
3102 (gpointer)"mono_arch_throw_exception");
/*
3105 * This might be the last instruction in the method, so add a dummy
3106 * instruction so the unwinder will work.
*/
3108 ia64_break_i (code, 0);
3109 break;
3110 case OP_RETHROW:
3111 ia64_mov (code, cfg->arch.reg_out0, ins->sreg1);
3112 code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
3113 (gpointer)"mono_arch_rethrow_exception");
3115 ia64_break_i (code, 0);
3116 break;
3118 default:
3119 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3120 g_assert_not_reached ();
}
3123 if ((code.buf - cfg->native_code - offset) > max_len) {
3124 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
3125 mono_inst_name (ins->opcode), max_len, code.buf - cfg->native_code - offset);
3126 g_assert_not_reached ();
}
3129 cpos += max_len;
3131 last_ins = ins;
3132 last_offset = offset;
}
3135 ia64_codegen_close (code);
3137 cfg->code_len = code.buf - cfg->native_code;
}
3140 void
3141 mono_arch_register_lowlevel_calls (void)
{
}
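/*
 * The tables below are indexed by the 5-bit template field of a bundle,
 * following the Itanium template encoding: 0x00 MII, 0x02 MI;I, 0x04 MLX,
 * 0x08 MMI, 0x0A M;MI, 0x0C MFI, 0x0E MMF, 0x10 MIB, 0x12 MBB, 0x16 BBB,
 * 0x18 MMB, 0x1C MFB. Odd templates add a trailing stop; reserved
 * encodings are zero-filled.
 */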
3145 static Ia64InsType ins_types_in_template [32][3] = {
3146 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3147 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3148 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3149 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_I},
3150 {IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
3151 {IA64_INS_TYPE_M, IA64_INS_TYPE_LX, IA64_INS_TYPE_LX},
3152 {0, 0, 0},
3153 {0, 0, 0},
3154 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3155 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3156 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3157 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_I},
3158 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
3159 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_I},
3160 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
3161 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_F},
3162 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
3163 {IA64_INS_TYPE_M, IA64_INS_TYPE_I, IA64_INS_TYPE_B},
3164 {IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3165 {IA64_INS_TYPE_M, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3166 {0, 0, 0},
3167 {0, 0, 0},
3168 {IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3169 {IA64_INS_TYPE_B, IA64_INS_TYPE_B, IA64_INS_TYPE_B},
3170 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
3171 {IA64_INS_TYPE_M, IA64_INS_TYPE_M, IA64_INS_TYPE_B},
3172 {0, 0, 0},
3173 {0, 0, 0},
3174 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
3175 {IA64_INS_TYPE_M, IA64_INS_TYPE_F, IA64_INS_TYPE_B},
3176 {0, 0, 0},
3177 {0, 0, 0}
};
3180 static gboolean stops_in_template [32][3] = {
3181 { FALSE, FALSE, FALSE },
3182 { FALSE, FALSE, TRUE },
3183 { FALSE, TRUE, FALSE },
3184 { FALSE, TRUE, TRUE },
3185 { FALSE, FALSE, FALSE },
3186 { FALSE, FALSE, TRUE },
3187 { FALSE, FALSE, FALSE },
3188 { FALSE, FALSE, FALSE },
3190 { FALSE, FALSE, FALSE },
3191 { FALSE, FALSE, TRUE },
3192 { TRUE, FALSE, FALSE },
3193 { TRUE, FALSE, TRUE },
3194 { FALSE, FALSE, FALSE },
3195 { FALSE, FALSE, TRUE },
3196 { FALSE, FALSE, FALSE },
3197 { FALSE, FALSE, TRUE },
3199 { FALSE, FALSE, FALSE },
3200 { FALSE, FALSE, TRUE },
3201 { FALSE, FALSE, FALSE },
3202 { FALSE, FALSE, TRUE },
3203 { FALSE, FALSE, FALSE },
3204 { FALSE, FALSE, FALSE },
3205 { FALSE, FALSE, FALSE },
3206 { FALSE, FALSE, TRUE },
3208 { FALSE, FALSE, FALSE },
3209 { FALSE, FALSE, TRUE },
3210 { FALSE, FALSE, FALSE },
3211 { FALSE, FALSE, FALSE },
3212 { FALSE, FALSE, FALSE },
3213 { FALSE, FALSE, TRUE },
3214 { FALSE, FALSE, FALSE },
3215 { FALSE, FALSE, FALSE }
};
3218 static int last_stop_in_template [32] = {
3219 -1, 2, 1, 2, -1, 2, -1, -1,
3220 -1, 2, 0, 2, -1, 2, -1, 2,
3221 -1, 2, -1, 2, -1, -1, -1, 2,
3222 -1, 2, -1, -1, -1, 2, -1, -1
};
3225 static guint64 nops_for_ins_types [6] = {
3226 IA64_NOP_I,
3227 IA64_NOP_I,
3228 IA64_NOP_M,
3229 IA64_NOP_F,
3230 IA64_NOP_B,
3231 IA64_NOP_X
};
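/*
 * Indexed by Ia64InsType; presumably the A type comes first in the enum
 * and maps to an I-unit nop, which is why IA64_NOP_I appears twice.
 */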
3234 #define ITYPE_MATCH(itype1, itype2) (((itype1) == (itype2)) || (((itype2) == IA64_INS_TYPE_A) && (((itype1) == IA64_INS_TYPE_I) || ((itype1) == IA64_INS_TYPE_M))))
/*
3237 * Debugging support
*/
3240 #if 0
3241 #define DEBUG_INS_SCHED(a) do { a; } while (0)
3242 #else
3243 #define DEBUG_INS_SCHED(a)
3244 #endif
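/*
 * code->dep_info is a flat stream of (kind, register) byte pairs, one
 * group per emitted instruction, separated by IA64_END_OF_INS markers.
 * ia64_analyze_deps () scans it and sets stops [i] wherever a stop bit
 * is needed after instruction i to break a RAW/WAW hazard inside the
 * current instruction group.
 */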
3246 static void
3247 ia64_analyze_deps (Ia64CodegenState *code, int *deps_start, int *stops)
{
3249 int i, pos, ins_index, current_deps_start, current_ins_start, reg;
3250 guint8 *deps = code->dep_info;
3251 gboolean need_stop, no_stop;
3253 for (i = 0; i < code->nins; ++i)
3254 stops [i] = FALSE;
3256 ins_index = 0;
3257 current_deps_start = 0;
3258 current_ins_start = 0;
3259 deps_start [ins_index] = current_ins_start;
3260 pos = 0;
3261 no_stop = FALSE;
3262 DEBUG_INS_SCHED (printf ("BEGIN.\n"));
3263 while (pos < code->dep_info_pos) {
3264 need_stop = FALSE;
3265 switch (deps [pos]) {
3266 case IA64_END_OF_INS:
3267 ins_index ++;
3268 current_ins_start = pos + 2;
3269 deps_start [ins_index] = current_ins_start;
3270 no_stop = FALSE;
3271 DEBUG_INS_SCHED (printf ("(%d) END INS.\n", ins_index - 1));
3272 break;
3273 case IA64_NONE:
3274 break;
3275 case IA64_READ_GR:
3276 reg = deps [pos + 1];
3278 DEBUG_INS_SCHED (printf ("READ GR: %d\n", reg));
3279 for (i = current_deps_start; i < current_ins_start; i += 2)
3280 if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
3281 need_stop = TRUE;
3282 break;
3283 case IA64_WRITE_GR:
3284 reg = code->dep_info [pos + 1];
3286 DEBUG_INS_SCHED (printf ("WRITE GR: %d\n", reg));
3287 for (i = current_deps_start; i < current_ins_start; i += 2)
3288 if (deps [i] == IA64_WRITE_GR && deps [i + 1] == reg)
3289 need_stop = TRUE;
3290 break;
3291 case IA64_READ_PR:
3292 reg = deps [pos + 1];
3294 DEBUG_INS_SCHED (printf ("READ PR: %d\n", reg));
3295 for (i = current_deps_start; i < current_ins_start; i += 2)
3296 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3297 need_stop = TRUE;
3298 break;
3299 case IA64_READ_PR_BRANCH:
3300 reg = deps [pos + 1];
3302 /* Writes to prs by non-float instructions are visible to branches */
3303 DEBUG_INS_SCHED (printf ("READ PR BRANCH: %d\n", reg));
3304 for (i = current_deps_start; i < current_ins_start; i += 2)
3305 if (deps [i] == IA64_WRITE_PR_FLOAT && deps [i + 1] == reg)
3306 need_stop = TRUE;
3307 break;
3308 case IA64_WRITE_PR:
3309 reg = code->dep_info [pos + 1];
3311 DEBUG_INS_SCHED (printf ("WRITE PR: %d\n", reg));
3312 for (i = current_deps_start; i < current_ins_start; i += 2)
3313 if (((deps [i] == IA64_WRITE_PR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3314 need_stop = TRUE;
3315 break;
3316 case IA64_WRITE_PR_FLOAT:
3317 reg = code->dep_info [pos + 1];
3319 DEBUG_INS_SCHED (printf ("WRITE PR FP: %d\n", reg));
3320 for (i = current_deps_start; i < current_ins_start; i += 2)
3321 if (((deps [i] == IA64_WRITE_GR) || (deps [i] == IA64_WRITE_PR_FLOAT)) && deps [i + 1] == reg)
3322 need_stop = TRUE;
3323 break;
3324 case IA64_READ_BR:
3325 reg = deps [pos + 1];
3327 DEBUG_INS_SCHED (printf ("READ BR: %d\n", reg));
3328 for (i = current_deps_start; i < current_ins_start; i += 2)
3329 if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
3330 need_stop = TRUE;
3331 break;
3332 case IA64_WRITE_BR:
3333 reg = code->dep_info [pos + 1];
3335 DEBUG_INS_SCHED (printf ("WRITE BR: %d\n", reg));
3336 for (i = current_deps_start; i < current_ins_start; i += 2)
3337 if (deps [i] == IA64_WRITE_BR && deps [i + 1] == reg)
3338 need_stop = TRUE;
3339 break;
3340 case IA64_READ_BR_BRANCH:
3341 reg = deps [pos + 1];
3343 /* Writes to brs are visible to branches */
3344 DEBUG_INS_SCHED (printf ("READ BR BRANCH: %d\n", reg));
3345 break;
3346 case IA64_READ_FR:
3347 reg = deps [pos + 1];
3349 DEBUG_INS_SCHED (printf ("READ FR: %d\n", reg));
3350 for (i = current_deps_start; i < current_ins_start; i += 2)
3351 if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
3352 need_stop = TRUE;
3353 break;
3354 case IA64_WRITE_FR:
3355 reg = code->dep_info [pos + 1];
3357 DEBUG_INS_SCHED (printf ("WRITE FR: %d\n", reg));
3358 for (i = current_deps_start; i < current_ins_start; i += 2)
3359 if (deps [i] == IA64_WRITE_FR && deps [i + 1] == reg)
3360 need_stop = TRUE;
3361 break;
3362 case IA64_READ_AR:
3363 reg = deps [pos + 1];
3365 DEBUG_INS_SCHED (printf ("READ AR: %d\n", reg));
3366 for (i = current_deps_start; i < current_ins_start; i += 2)
3367 if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
3368 need_stop = TRUE;
3369 break;
3370 case IA64_WRITE_AR:
3371 reg = code->dep_info [pos + 1];
3373 DEBUG_INS_SCHED (printf ("WRITE AR: %d\n", reg));
3374 for (i = current_deps_start; i < current_ins_start; i += 2)
3375 if (deps [i] == IA64_WRITE_AR && deps [i + 1] == reg)
3376 need_stop = TRUE;
3377 break;
3378 case IA64_NO_STOP:
/*
3380 * Explicitly indicate that a stop is not required. Useful for
3381 * example when two predicated instructions with negated predicates
3382 * write the same registers.
*/
3384 no_stop = TRUE;
3385 break;
3386 default:
3387 g_assert_not_reached ();
}
3389 pos += 2;
3391 if (need_stop && !no_stop) {
3392 g_assert (ins_index > 0);
3393 stops [ins_index - 1] = 1;
3395 DEBUG_INS_SCHED (printf ("STOP\n"));
3396 current_deps_start = current_ins_start;
3398 /* Skip remaining deps for this instruction */
3399 while (deps [pos] != IA64_END_OF_INS)
3400 pos += 2;
}
}
3404 if (code->nins > 0) {
3405 /* No dependency info for the last instruction */
3406 stops [code->nins - 1] = 1;
}
3409 deps_start [code->nins] = code->dep_info_pos;
}
3412 static void
3413 ia64_real_emit_bundle (Ia64CodegenState *code, int *deps_start, int *stops, int n, guint64 template, guint64 ins1, guint64 ins2, guint64 ins3, guint8 nops)
{
3415 int stop_pos, i, deps_to_shift, dep_shift;
3417 g_assert (n <= code->nins);
3419 // if (n > 1) printf ("FOUND: %ld.\n", template);
3421 ia64_emit_bundle_template (code, template, ins1, ins2, ins3);
3423 stop_pos = last_stop_in_template [template] + 1;
3424 if (stop_pos > n)
3425 stop_pos = n;
3427 /* Compute the number of 'real' instructions before the stop */
3428 deps_to_shift = stop_pos;
3429 if (stop_pos >= 3 && (nops & (1 << 2)))
3430 deps_to_shift --;
3431 if (stop_pos >= 2 && (nops & (1 << 1)))
3432 deps_to_shift --;
3433 if (stop_pos >= 1 && (nops & (1 << 0)))
3434 deps_to_shift --;
/*
3437 * We have to keep some dependencies whose instructions have been shifted
3438 * out of the buffer. So nullify the end_of_ins markers in the dependency
3439 * array.
*/
3441 for (i = deps_start [deps_to_shift]; i < deps_start [n]; i += 2)
3442 if (code->dep_info [i] == IA64_END_OF_INS)
3443 code->dep_info [i] = IA64_NONE;
3445 g_assert (deps_start [deps_to_shift] <= code->dep_info_pos);
3446 memcpy (code->dep_info, &code->dep_info [deps_start [deps_to_shift]], code->dep_info_pos - deps_start [deps_to_shift]);
3447 code->dep_info_pos = code->dep_info_pos - deps_start [deps_to_shift];
3449 dep_shift = deps_start [deps_to_shift];
3450 for (i = 0; i < code->nins + 1 - n; ++i)
3451 deps_start [i] = deps_start [n + i] - dep_shift;
3453 /* Determine the exact positions of instructions with unwind ops */
3454 if (code->unw_op_count) {
3455 int ins_pos [16];
3456 int curr_ins, curr_ins_pos;
3458 curr_ins = 0;
3459 curr_ins_pos = ((code->buf - code->region_start - 16) / 16) * 3;
3460 for (i = 0; i < 3; ++i) {
3461 if (! (nops & (1 << i))) {
3462 ins_pos [curr_ins] = curr_ins_pos + i;
3463 curr_ins ++;
}
}
3467 for (i = code->unw_op_pos; i < code->unw_op_count; ++i) {
3468 if (code->unw_ops_pos [i] < n) {
3469 code->unw_ops [i].when = ins_pos [code->unw_ops_pos [i]];
3470 //printf ("UNW-OP: %d -> %d\n", code->unw_ops_pos [i], code->unw_ops [i].when);
}
}
3473 if (code->unw_op_pos < code->unw_op_count)
3474 code->unw_op_pos += n;
}
3477 if (n == code->nins) {
3478 code->template = 0;
3479 code->nins = 0;
}
3481 else {
3482 memcpy (&code->instructions [0], &code->instructions [n], (code->nins - n) * sizeof (guint64));
3483 memcpy (&code->itypes [0], &code->itypes [n], (code->nins - n) * sizeof (int));
3484 memcpy (&stops [0], &stops [n], (code->nins - n) * sizeof (int));
3485 code->nins -= n;
}
}
3489 void
3490 ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
{
3492 int i, ins_type, template, nins_to_emit;
3493 int deps_start [16];
3494 int stops [16];
3495 gboolean found;
/*
3498 * We implement a simple scheduler which tries to put three instructions
3499 * per bundle, then two, then one.
*/
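/*
 * Example (informal): three A-type instructions with a stop only after
 * the last one match template 0x01 (MII;), since an A-type instruction
 * fits either an M or an I slot.
 */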
3501 ia64_analyze_deps (code, deps_start, stops);
3503 if ((code->nins >= 3) && !code->one_ins_per_bundle) {
3504 /* Find a suitable template */
3505 for (template = 0; template < 32; ++template) {
3506 if (stops_in_template [template][0] != stops [0] ||
3507 stops_in_template [template][1] != stops [1] ||
3508 stops_in_template [template][2] != stops [2])
3509 continue;
3511 found = TRUE;
3512 for (i = 0; i < 3; ++i) {
3513 ins_type = ins_types_in_template [template][i];
3514 switch (code->itypes [i]) {
3515 case IA64_INS_TYPE_A:
3516 found &= (ins_type == IA64_INS_TYPE_I) || (ins_type == IA64_INS_TYPE_M);
3517 break;
3518 default:
3519 found &= (ins_type == code->itypes [i]);
3520 break;
}
}
3524 if (found)
3525 found = debug_ins_sched ();
3527 if (found) {
3528 ia64_real_emit_bundle (code, deps_start, stops, 3, template, code->instructions [0], code->instructions [1], code->instructions [2], 0);
3529 break;
}
}
}
3534 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3535 /* Wait for more instructions */
3536 return;
3538 /* If it didn't work out, try putting two instructions into one bundle */
3539 if ((code->nins >= 2) && !code->one_ins_per_bundle) {
3540 /* Try a nop at the end */
3541 for (template = 0; template < 32; ++template) {
3542 if (stops_in_template [template][0] != stops [0] ||
3543 ((stops_in_template [template][1] != stops [1]) &&
3544 (stops_in_template [template][2] != stops [1])))
3546 continue;
3548 if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
3549 !ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [1]))
3550 continue;
3552 if (!debug_ins_sched ())
3553 continue;
3555 ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], code->instructions [1], nops_for_ins_types [ins_types_in_template [template][2]], 1 << 2);
3556 break;
}
}
3560 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3561 /* Wait for more instructions */
3562 return;
3564 if ((code->nins >= 2) && !code->one_ins_per_bundle) {
3565 /* Try a nop in the middle */
3566 for (template = 0; template < 32; ++template) {
3567 if (((stops_in_template [template][0] != stops [0]) &&
3568 (stops_in_template [template][1] != stops [0])) ||
3569 stops_in_template [template][2] != stops [1])
3570 continue;
3572 if (!ITYPE_MATCH (ins_types_in_template [template][0], code->itypes [0]) ||
3573 !ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))
3574 continue;
3576 if (!debug_ins_sched ())
3577 continue;
3579 ia64_real_emit_bundle (code, deps_start, stops, 2, template, code->instructions [0], nops_for_ins_types [ins_types_in_template [template][1]], code->instructions [1], 1 << 1);
3580 break;
}
}
3584 if ((code->nins >= 2) && flush && !code->one_ins_per_bundle) {
3585 /* Try a nop at the beginning */
3586 for (template = 0; template < 32; ++template) {
3587 if ((stops_in_template [template][1] != stops [0]) ||
3588 (stops_in_template [template][2] != stops [1]))
3589 continue;
3591 if (!ITYPE_MATCH (ins_types_in_template [template][1], code->itypes [0]) ||
3592 !ITYPE_MATCH (ins_types_in_template [template][2], code->itypes [1]))
3593 continue;
3595 if (!debug_ins_sched ())
3596 continue;
3598 ia64_real_emit_bundle (code, deps_start, stops, 2, template, nops_for_ins_types [ins_types_in_template [template][0]], code->instructions [0], code->instructions [1], 1 << 0);
3599 break;
}
}
3603 if (code->nins < IA64_INS_BUFFER_SIZE && !flush)
3604 /* Wait for more instructions */
3605 return;
3607 if (flush)
3608 nins_to_emit = code->nins;
3609 else
3610 nins_to_emit = 1;
3612 while (nins_to_emit > 0) {
3613 if (!debug_ins_sched ())
3614 stops [0] = 1;
3615 switch (code->itypes [0]) {
3616 case IA64_INS_TYPE_A:
3617 if (stops [0])
3618 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3619 else
3620 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3621 break;
3622 case IA64_INS_TYPE_I:
3623 if (stops [0])
3624 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3625 else
3626 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3627 break;
3628 case IA64_INS_TYPE_M:
3629 if (stops [0])
3630 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIIS, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3631 else
3632 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MII, code->instructions [0], IA64_NOP_I, IA64_NOP_I, 0);
3633 break;
3634 case IA64_INS_TYPE_B:
3635 if (stops [0])
3636 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
3637 else
3638 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MIB, IA64_NOP_M, IA64_NOP_I, code->instructions [0], 0);
3639 break;
3640 case IA64_INS_TYPE_F:
3641 if (stops [0])
3642 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3643 else
3644 ia64_real_emit_bundle (code, deps_start, stops, 1, IA64_TEMPLATE_MFI, IA64_NOP_M, code->instructions [0], IA64_NOP_I, 0);
3645 break;
3646 case IA64_INS_TYPE_LX:
3647 if (stops [0] || stops [1])
3648 ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
3649 else
3650 ia64_real_emit_bundle (code, deps_start, stops, 2, IA64_TEMPLATE_MLX, IA64_NOP_M, code->instructions [0], code->instructions [1], 0);
3651 nins_to_emit --;
3652 break;
3653 default:
3654 g_assert_not_reached ();
}
3656 nins_to_emit --;
}
}
3660 unw_dyn_region_info_t*
3661 mono_ia64_create_unwind_region (Ia64CodegenState *code)
{
3663 unw_dyn_region_info_t *r;
3665 g_assert (code->nins == 0);
3666 r = g_malloc0 (_U_dyn_region_info_size (code->unw_op_count));
3667 memcpy (&r->op, &code->unw_ops, sizeof (unw_dyn_op_t) * code->unw_op_count);
3668 r->op_count = code->unw_op_count;
3669 r->insn_count = ((code->buf - code->region_start) >> 4) * 3;
3670 code->unw_op_count = 0;
3671 code->unw_op_pos = 0;
3672 code->region_start = code->buf;
3674 return r;
}
3677 static void
3678 ia64_patch (unsigned char* code, gpointer target)
{
3680 int template, i;
3681 guint64 instructions [3];
3682 guint8 gen_buf [16];
3683 Ia64CodegenState gen;
3684 int ins_to_skip;
3685 gboolean found;
/*
3688 * code encodes both the position inside the buffer and code.nins when
3689 * the instruction was emitted.
*/
3691 ins_to_skip = (guint64)code % 16;
3692 code = (unsigned char*)((guint64)code & ~15);
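/*
 * E.g. a patch site recorded as bundle_address + 2 means: skip the
 * first two patchable (non-nop) instructions from that bundle onwards,
 * then rewrite the next one.
 */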
/*
3695 * Search for the first instruction which is 'patchable', skipping
3696 * ins_to_skip instructions.
*/
3699 while (TRUE) {
3701 template = ia64_bundle_template (code);
3702 instructions [0] = ia64_bundle_ins1 (code);
3703 instructions [1] = ia64_bundle_ins2 (code);
3704 instructions [2] = ia64_bundle_ins3 (code);
3706 ia64_codegen_init (gen, gen_buf);
3708 found = FALSE;
3709 for (i = 0; i < 3; ++i) {
3710 guint64 ins = instructions [i];
3711 int opcode = ia64_ins_opcode (ins);
3713 if (ins == nops_for_ins_types [ins_types_in_template [template][i]])
3714 continue;
3716 if (ins_to_skip) {
3717 ins_to_skip --;
3718 continue;
3721 switch (ins_types_in_template [template][i]) {
3722 case IA64_INS_TYPE_A:
3723 case IA64_INS_TYPE_M:
3724 if ((opcode == 8) && (ia64_ins_x2a (ins) == 2) && (ia64_ins_ve (ins) == 0)) {
3725 /* adds */
3726 ia64_adds_imm_pred (gen, ia64_ins_qp (ins), ia64_ins_r1 (ins), (guint64)target, ia64_ins_r3 (ins));
3727 instructions [i] = gen.instructions [0];
3728 found = TRUE;
}
3730 else
3731 NOT_IMPLEMENTED;
3732 break;
3733 case IA64_INS_TYPE_B:
3734 if ((opcode == 4) && (ia64_ins_btype (ins) == 0)) {
3735 /* br.cond */
3736 gint64 disp = ((guint8*)target - code) >> 4;
3738 /* FIXME: hints */
3739 ia64_br_cond_hint_pred (gen, ia64_ins_qp (ins), disp, 0, 0, 0);
3741 instructions [i] = gen.instructions [0];
3742 found = TRUE;
}
3744 else if (opcode == 5) {
3745 /* br.call */
3746 gint64 disp = ((guint8*)target - code) >> 4;
3748 /* FIXME: hints */
3749 ia64_br_call_hint_pred (gen, ia64_ins_qp (ins), ia64_ins_b1 (ins), disp, 0, 0, 0);
3750 instructions [i] = gen.instructions [0];
3751 found = TRUE;
}
3753 else
3754 NOT_IMPLEMENTED;
3755 break;
3756 case IA64_INS_TYPE_LX:
3757 if (i == 1)
3758 break;
3760 if ((opcode == 6) && (ia64_ins_vc (ins) == 0)) {
3761 /* movl */
3762 ia64_movl_pred (gen, ia64_ins_qp (ins), ia64_ins_r1 (ins), target);
3763 instructions [1] = gen.instructions [0];
3764 instructions [2] = gen.instructions [1];
3765 found = TRUE;
}
3767 else
3768 NOT_IMPLEMENTED;
3770 break;
3771 default:
3772 NOT_IMPLEMENTED;
}
3775 if (found) {
3776 /* Rewrite code */
3777 ia64_codegen_init (gen, code);
3778 ia64_emit_bundle_template (&gen, template, instructions [0], instructions [1], instructions [2]);
3779 return;
}
}
3783 code += 16;
}
}
3787 void
3788 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
{
3790 MonoJumpInfo *patch_info;
3792 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3793 unsigned char *ip = patch_info->ip.i + code;
3794 const unsigned char *target;
3796 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3798 if (patch_info->type == MONO_PATCH_INFO_NONE)
3799 continue;
3800 if (mono_compile_aot) {
3801 NOT_IMPLEMENTED;
}
3804 ia64_patch (ip, (gpointer)target);
}
}
3808 guint8 *
3809 mono_arch_emit_prolog (MonoCompile *cfg)
{
3811 MonoMethod *method = cfg->method;
3812 MonoMethodSignature *sig;
3813 MonoInst *inst;
3814 int alloc_size, pos, i;
3815 Ia64CodegenState code;
3816 CallInfo *cinfo;
3818 sig = mono_method_signature (method);
3819 pos = 0;
3821 cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
3823 cfg->code_size = MAX (cfg->header->code_size * 4, 512);
3825 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3826 cfg->code_size += 1024;
3827 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3828 cfg->code_size += 1024;
3830 cfg->native_code = g_malloc (cfg->code_size);
3832 ia64_codegen_init (code, cfg->native_code);
3834 alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
3835 if (cfg->param_area)
3836 alloc_size += cfg->param_area;
3837 if (alloc_size)
3838 /* scratch area */
3839 alloc_size += 16;
3840 alloc_size = ALIGN_TO (alloc_size, MONO_ARCH_FRAME_ALIGNMENT);
3842 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
3843 /* Force sp to be saved/restored */
3844 alloc_size += MONO_ARCH_FRAME_ALIGNMENT;
3846 cfg->arch.stack_alloc_size = alloc_size;
3848 pos = 0;
3850 if (method->save_lmf) {
3851 /* No LMF on IA64 */
}
3854 alloc_size -= pos;
3856 ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + cfg->arch.reg_saved_ar_pfs);
3857 ia64_alloc (code, cfg->arch.reg_saved_ar_pfs, cfg->arch.reg_local0 - cfg->arch.reg_in0, cfg->arch.reg_out0 - cfg->arch.reg_local0, cfg->arch.n_out_regs, 0);
3858 ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + cfg->arch.reg_saved_b0);
3859 ia64_mov_from_br (code, cfg->arch.reg_saved_b0, IA64_B0);
3861 if ((alloc_size || cinfo->stack_usage) && !cfg->arch.omit_fp) {
3862 ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + cfg->arch.reg_saved_sp);
3863 ia64_mov (code, cfg->arch.reg_saved_sp, IA64_SP);
3864 if (cfg->frame_reg != cfg->arch.reg_saved_sp)
3865 ia64_mov (code, cfg->frame_reg, IA64_SP);
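
	/*
	 * For frames larger than a page (and with MONO_ARCH_SIGSEGV_ON_ALTSTACK
	 * defined) the code below touches the new stack area one page at a time
	 * before sp is adjusted. Probing every page in order guarantees that a
	 * stack overflow faults on the guard page instead of skipping past it,
	 * which lets the SIGSEGV handler running on the alternate signal stack
	 * turn the fault into a StackOverflowException.
	 */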
	if (alloc_size) {
#if defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
		int pagesize = getpagesize ();

		if (alloc_size >= pagesize) {
			gint32 remaining_size = alloc_size;

			/* Generate stack touching code */
			ia64_mov (code, GP_SCRATCH_REG, IA64_SP);
			while (remaining_size >= pagesize) {
				ia64_movl (code, GP_SCRATCH_REG2, pagesize);
				ia64_sub (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
				ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
				remaining_size -= pagesize;
			}
		}
#endif
		if (ia64_is_imm14 (-alloc_size)) {
			if (cfg->arch.omit_fp)
				ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
			ia64_adds_imm (code, IA64_SP, (-alloc_size), IA64_SP);
		}
		else {
			ia64_movl (code, GP_SCRATCH_REG, -alloc_size);
			if (cfg->arch.omit_fp)
				ia64_unw_add (code, UNW_IA64_SP, (-alloc_size));
			ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
		}
	}

	ia64_begin_bundle (code);

	/* Initialize unwind info */
	cfg->arch.r_pro = mono_ia64_create_unwind_region (&code);

	if (sig->ret->type != MONO_TYPE_VOID) {
		if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
			/* Save volatile arguments to the stack */
			NOT_IMPLEMENTED;
		}
	}

	/* Keep this in sync with emit_load_volatile_arguments */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		gint32 stack_offset;
		MonoType *arg_type;

		inst = cfg->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		arg_type = mono_type_get_underlying_type (arg_type);

		stack_offset = ainfo->offset + ARGS_OFFSET;

		/*
		 * FIXME: Native code might pass non register sized integers
		 * without initializing the upper bits.
		 */
		if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED && !arg_type->byref && ainfo->storage == ArgInIReg) {
			int reg = cfg->arch.reg_in0 + ainfo->reg;

			switch (mono_type_to_load_membase (cfg, arg_type)) {
			case OP_LOADI1_MEMBASE:
				ia64_sxt1 (code, reg, reg);
				break;
			case OP_LOADU1_MEMBASE:
				ia64_zxt1 (code, reg, reg);
				break;
			case OP_LOADI2_MEMBASE:
				ia64_sxt2 (code, reg, reg);
				break;
			case OP_LOADU2_MEMBASE:
				ia64_zxt2 (code, reg, reg);
				break;
			default:
				break;
			}
		}

		/* Save volatile arguments to the stack */
		if (inst->opcode != OP_REGVAR) {
			switch (ainfo->storage) {
			case ArgInIReg:
			case ArgInFloatReg:
			case ArgInFloatRegR4:
				g_assert (inst->opcode == OP_REGOFFSET);
				if (ia64_is_adds_imm (inst->inst_offset))
					ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
				else {
					ia64_movl (code, GP_SCRATCH_REG2, inst->inst_offset);
					ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, GP_SCRATCH_REG2);
				}
				if (arg_type->byref)
					ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
				else {
					switch (arg_type->type) {
					case MONO_TYPE_R4:
						ia64_stfs_hint (code, GP_SCRATCH_REG, ainfo->reg, 0);
						break;
					case MONO_TYPE_R8:
						ia64_stfd_hint (code, GP_SCRATCH_REG, ainfo->reg, 0);
						break;
					default:
						ia64_st8_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg, 0);
						break;
					}
				}
				break;
			case ArgOnStack:
				break;
			case ArgAggregate:
				if (ainfo->nslots != ainfo->nregs)
					NOT_IMPLEMENTED;

				g_assert (inst->opcode == OP_REGOFFSET);
				ia64_adds_imm (code, GP_SCRATCH_REG, inst->inst_offset, inst->inst_basereg);
				/* Use j here: i is the argument index of the enclosing loop and must not be clobbered */
				for (j = 0; j < ainfo->nregs; ++j) {
					switch (ainfo->atype) {
					case AggregateNormal:
						ia64_st8_inc_imm_hint (code, GP_SCRATCH_REG, cfg->arch.reg_in0 + ainfo->reg + j, sizeof (gpointer), 0);
						break;
					case AggregateSingleHFA:
						ia64_stfs_inc_imm_hint (code, GP_SCRATCH_REG, ainfo->reg + j, 4, 0);
						break;
					case AggregateDoubleHFA:
						ia64_stfd_inc_imm_hint (code, GP_SCRATCH_REG, ainfo->reg + j, sizeof (gpointer), 0);
						break;
					default:
						NOT_IMPLEMENTED;
					}
				}
				break;
			default:
				g_assert_not_reached ();
			}
		}

		if (inst->opcode == OP_REGVAR) {
			/* Argument allocated to (non-volatile) register */
			switch (ainfo->storage) {
			case ArgInIReg:
				if (inst->dreg != cfg->arch.reg_in0 + ainfo->reg)
					ia64_mov (code, inst->dreg, cfg->arch.reg_in0 + ainfo->reg);
				break;
			case ArgOnStack:
				ia64_adds_imm (code, GP_SCRATCH_REG, 16 + ainfo->offset, cfg->frame_reg);
				ia64_ld8 (code, inst->dreg, GP_SCRATCH_REG);
				break;
			default:
				NOT_IMPLEMENTED;
			}
		}
	}

	if (method->save_lmf) {
		/* No LMF on IA64 */
	}

	ia64_codegen_close (code);

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		code.buf = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code.buf, TRUE);

	cfg->code_len = code.buf - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);

	cfg->arch.prolog_end_offset = cfg->code_len;

	return code.buf;
}
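
/*
 * Sketch of the prolog emitted above (register numbers are assigned per
 * method in cfg->arch, so this is only illustrative):
 *
 *   alloc rP = ar.pfs, #in, #local, #out, 0
 *   mov   rB = b0
 *   mov   rS = sp          ; only when the frame pointer is not omitted
 *   adds  sp = -alloc_size, sp
 */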

void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	int i, pos;
	int max_epilog_size = 16 * 4;
	Ia64CodegenState code;
	guint8 *buf;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 1024;

	cfg->arch.epilog_begin_offset = cfg->code_len;

	while (cfg->code_len + max_epilog_size > cfg->code_size) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;
	}

	/* FIXME: Emit unwind info */

	buf = cfg->native_code + cfg->code_len;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		buf = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, buf, TRUE);

	ia64_codegen_init (code, buf);

	/* The code restoring the registers must be kept in sync with OP_JMP */
	pos = 0;

	if (method->save_lmf) {
		/* No LMF on IA64 */
	}

	/* Load returned vtypes into registers if needed */
	cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
	ainfo = &cinfo->ret;
	switch (ainfo->storage) {
	case ArgAggregate:
		if (ainfo->nslots != ainfo->nregs)
			NOT_IMPLEMENTED;

		g_assert (cfg->ret->opcode == OP_REGOFFSET);
		ia64_adds_imm (code, GP_SCRATCH_REG, cfg->ret->inst_offset, cfg->ret->inst_basereg);
		for (i = 0; i < ainfo->nregs; ++i) {
			switch (ainfo->atype) {
			case AggregateNormal:
				ia64_ld8_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, sizeof (gpointer), 0);
				break;
			case AggregateSingleHFA:
				ia64_ldfs_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, 4, 0);
				break;
			case AggregateDoubleHFA:
				ia64_ldfd_inc_imm_hint (code, ainfo->reg + i, GP_SCRATCH_REG, sizeof (gpointer), 0);
				break;
			default:
				g_assert_not_reached ();
			}
		}
		break;
	default:
		break;
	}

	ia64_begin_bundle (code);

	code.region_start = cfg->native_code;

	/* Label the unwind state at the start of the exception throwing region */
	//ia64_unw_label_state (code, 1234);

	if (cfg->arch.stack_alloc_size) {
		if (cfg->arch.omit_fp) {
			if (ia64_is_imm14 (cfg->arch.stack_alloc_size)) {
				ia64_unw_pop_frames (code, 1);
				ia64_adds_imm (code, IA64_SP, (cfg->arch.stack_alloc_size), IA64_SP);
			} else {
				ia64_movl (code, GP_SCRATCH_REG, cfg->arch.stack_alloc_size);
				ia64_unw_pop_frames (code, 1);
				ia64_add (code, IA64_SP, GP_SCRATCH_REG, IA64_SP);
			}
		}
		else {
			ia64_unw_pop_frames (code, 1);
			ia64_mov (code, IA64_SP, cfg->arch.reg_saved_sp);
		}
	}
	ia64_mov_to_ar_i (code, IA64_PFS, cfg->arch.reg_saved_ar_pfs);
	ia64_mov_ret_to_br (code, IA64_B0, cfg->arch.reg_saved_b0);
	ia64_br_ret_reg (code, IA64_B0);

	ia64_codegen_close (code);

	cfg->arch.r_epilog = mono_ia64_create_unwind_region (&code);
	cfg->arch.r_pro->next = cfg->arch.r_epilog;

	cfg->code_len = code.buf - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
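
/*
 * The epilog gets a libunwind region of its own (r_epilog), chained after the
 * prolog region, so the dynamic unwind info registered by
 * mono_arch_save_unwind_info () below covers both ends of the method.
 */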

void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i, nthrows;
	Ia64CodegenState code;
	gboolean empty = TRUE;
	//unw_dyn_region_info_t *r_exceptions;
	MonoClass *exc_classes [16];
	guint8 *exc_throw_start [16], *exc_throw_end [16];
	guint32 code_size = 0;

	/* Compute needed space */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC)
			code_size += 256;
		if (patch_info->type == MONO_PATCH_INFO_R8)
			code_size += 8 + 7; /* sizeof (double) + alignment */
		if (patch_info->type == MONO_PATCH_INFO_R4)
			code_size += 4 + 7; /* sizeof (float) + alignment */
	}

	if (code_size == 0)
		return;

	while (cfg->code_len + code_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;
	}

	ia64_codegen_init (code, cfg->native_code + cfg->code_len);

	/* The unwind state here is the same as before the epilog */
	//ia64_unw_copy_state (code, 1234);

	/* add code to raise exceptions */
	/* FIXME: Optimize this */
	nthrows = 0;
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			guint8* throw_ip;
			guint8* buf;
			guint64 exc_token_index;

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);
			exc_token_index = mono_metadata_token_index (exc_class->type_token);
			throw_ip = cfg->native_code + patch_info->ip.i;

			ia64_begin_bundle (code);

			ia64_patch (cfg->native_code + patch_info->ip.i, code.buf);

			/* Find a throw sequence for the same exception class */
			for (i = 0; i < nthrows; ++i)
				if (exc_classes [i] == exc_class)
					break;

			if (i < nthrows) {
				gint64 offset = exc_throw_end [i] - 16 - throw_ip;

				if (ia64_is_adds_imm (offset))
					ia64_adds_imm (code, cfg->arch.reg_out0 + 1, offset, IA64_R0);
				else
					ia64_movl (code, cfg->arch.reg_out0 + 1, offset);

				buf = code.buf + code.nins;
				ia64_br_cond_pred (code, 0, 0);
				ia64_begin_bundle (code);
				ia64_patch (buf, exc_throw_start [i]);

				patch_info->type = MONO_PATCH_INFO_NONE;
			}
			else {
				/* Arg1 */
				buf = code.buf;
				ia64_movl (code, cfg->arch.reg_out0 + 1, 0);

				ia64_begin_bundle (code);

				if (nthrows < 16) {
					exc_classes [nthrows] = exc_class;
					exc_throw_start [nthrows] = code.buf;
				}

				/* Arg2 */
				if (ia64_is_adds_imm (exc_token_index))
					ia64_adds_imm (code, cfg->arch.reg_out0 + 0, exc_token_index, IA64_R0);
				else
					ia64_movl (code, cfg->arch.reg_out0 + 0, exc_token_index);

				patch_info->data.name = "mono_arch_throw_corlib_exception";
				patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
				patch_info->ip.i = code.buf + code.nins - cfg->native_code;

				/* Indirect call */
				ia64_movl (code, GP_SCRATCH_REG, 0);
				ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
				ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);

				ia64_br_call_reg (code, IA64_B0, IA64_B6);

				/* Patch up the throw offset */
				ia64_begin_bundle (code);

				ia64_patch (buf, (gpointer)(code.buf - 16 - throw_ip));

				if (nthrows < 16) {
					exc_throw_end [nthrows] = code.buf;
					nthrows ++;
				}
			}

			empty = FALSE;
			break;
		}
		default:
			break;
		}
	}

	if (!empty)
		/* The unwinder needs this to work */
		ia64_break_i (code, 0);

	ia64_codegen_close (code);

	/* FIXME: */
	//r_exceptions = mono_ia64_create_unwind_region (&code);
	//cfg->arch.r_epilog = r_exceptions;

	cfg->code_len = code.buf - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
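
/*
 * Throw sequences emitted above are shared between throw sites raising the
 * same exception class: a later site only loads the distance from its own IP
 * into out1 and branches to the call sequence generated for the first site,
 * which keeps the out-of-line exception code compact when a method contains
 * many identical range or null checks.
 */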

void*
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
	Ia64CodegenState code;
	CallInfo *cinfo = NULL;
	MonoMethodSignature *sig;
	MonoInst *ins;
	int i, n, stack_area = 0;

	ia64_codegen_init (code, p);

	/* Keep this in sync with mono_arch_get_argument_info */

	if (enable_arguments) {
		/* Allocate a new area on the stack and save arguments there */
		sig = mono_method_signature (cfg->method);

		cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

		n = sig->param_count + sig->hasthis;

		stack_area = ALIGN_TO (n * 8, 16);

		if (n) {
			ia64_movl (code, GP_SCRATCH_REG, stack_area);

			ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);

			/* FIXME: Allocate out registers */

			ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_SP);

			/* Required by the ABI */
			ia64_adds_imm (code, IA64_SP, -16, IA64_SP);

			add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
			ia64_movl (code, cfg->arch.reg_out0 + 0, 0);

			/* Save arguments to the stack */
			for (i = 0; i < n; ++i) {
				ins = cfg->args [i];

				if (ins->opcode == OP_REGVAR) {
					ia64_movl (code, GP_SCRATCH_REG, (i * 8));
					ia64_add (code, GP_SCRATCH_REG, cfg->arch.reg_out0 + 1, GP_SCRATCH_REG);
					ia64_st8 (code, GP_SCRATCH_REG, ins->dreg);
				}
				else {
					ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
					ia64_add (code, GP_SCRATCH_REG, ins->inst_basereg, GP_SCRATCH_REG);
					ia64_ld8 (code, GP_SCRATCH_REG2, GP_SCRATCH_REG);
					ia64_movl (code, GP_SCRATCH_REG, (i * 8));
					ia64_add (code, GP_SCRATCH_REG, cfg->arch.reg_out0 + 1, GP_SCRATCH_REG);
					ia64_st8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG2);
				}
			}
		}
		else
			ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_R0);
	}
	else
		ia64_mov (code, cfg->arch.reg_out0 + 1, IA64_R0);

	add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, cfg->method);
	ia64_movl (code, cfg->arch.reg_out0 + 0, 0);

	code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);

	if (enable_arguments && stack_area) {
		ia64_movl (code, GP_SCRATCH_REG, stack_area);

		ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);

		ia64_adds_imm (code, IA64_SP, 16, IA64_SP);
	}

	ia64_codegen_close (code);

	return code.buf;
}
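
/*
 * The tracing callback installed above receives the MonoMethod in out0
 * (the movl is fixed up later through the METHODCONST patch) and, in out1,
 * either a pointer to the freshly saved argument area or r0 when argument
 * tracing is disabled.
 */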

void*
mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
{
	Ia64CodegenState code;
	CallInfo *cinfo = NULL;
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig = mono_method_signature (cfg->method);

	ia64_codegen_init (code, p);

	cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);

	/* Save return value + pass it to func */
	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
		ia64_mov (code, cfg->arch.reg_saved_return_val, cinfo->ret.reg);
		ia64_mov (code, cfg->arch.reg_out0 + 1, cinfo->ret.reg);
		break;
	case ArgInFloatReg:
		ia64_adds_imm (code, IA64_SP, -16, IA64_SP);
		ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
		ia64_stfd_hint (code, GP_SCRATCH_REG, cinfo->ret.reg, 0);
		ia64_fmov (code, 8 + 1, cinfo->ret.reg);
		break;
	case ArgValuetypeAddrInIReg:
		ia64_mov (code, cfg->arch.reg_out0 + 1, cfg->arch.reg_in0 + cinfo->ret.reg);
		break;
	case ArgAggregate:
		NOT_IMPLEMENTED;
		break;
	default:
		break;
	}

	add_patch_info (cfg, code, MONO_PATCH_INFO_METHODCONST, method);
	ia64_movl (code, cfg->arch.reg_out0 + 0, 0);
	code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func);

	/* Restore return value */
	switch (cinfo->ret.storage) {
	case ArgNone:
		break;
	case ArgInIReg:
		ia64_mov (code, cinfo->ret.reg, cfg->arch.reg_saved_return_val);
		break;
	case ArgInFloatReg:
		ia64_adds_imm (code, GP_SCRATCH_REG, 16, IA64_SP);
		ia64_ldfd (code, cinfo->ret.reg, GP_SCRATCH_REG);
		break;
	case ArgValuetypeAddrInIReg:
		break;
	case ArgAggregate:
		break;
	default:
		break;
	}

	ia64_codegen_close (code);

	return code.buf;
}

void
mono_arch_save_unwind_info (MonoCompile *cfg)
{
	unw_dyn_info_t *di;

	/* FIXME: Unregister this for dynamic methods */

	di = g_malloc0 (sizeof (unw_dyn_info_t));
	di->start_ip = (unw_word_t) cfg->native_code;
	di->end_ip = (unw_word_t) cfg->native_code + cfg->code_len;
	di->gp = 0;
	di->format = UNW_INFO_FORMAT_DYNAMIC;
	di->u.pi.name_ptr = (unw_word_t)mono_method_full_name (cfg->method, TRUE);
	di->u.pi.regions = cfg->arch.r_pro;

	_U_dyn_register (di);

	/* Debug dump of the registered regions (disabled; the guard is a reconstruction) */
	if (FALSE) {
		unw_dyn_region_info_t *region = di->u.pi.regions;

		printf ("Unwind info for method %s:\n", mono_method_full_name (cfg->method, TRUE));
		while (region) {
			printf ("    [Region: %d]\n", region->insn_count);
			region = region->next;
		}
	}
}

void
mono_arch_flush_icache (guint8 *code, gint size)
{
	guint8* p = (guint8*)((guint64)code & ~(0x3f));
	guint8* end = (guint8*)((guint64)code + size);

#ifdef __INTEL_COMPILER
	/* icc doesn't define an fc.i intrinsic, but fc==fc.i on Itanium 2 */
	while (p < end) {
		__fc ((guint64)p);
		p += 32;
	}
#else
	while (p < end) {
		__asm__ __volatile__ ("fc.i %0"::"r"(p));
		/* FIXME: This could be increased to 128 on some cpus */
		p += 32;
	}
#endif
}
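
/*
 * Any freshly generated code buffer must be flushed like this before it is
 * executed; e.g. mono_arch_build_imt_thunk () below does:
 *
 *   memcpy (start, buf, size);
 *   mono_arch_flush_icache (start, size);
 *
 * fc.i flushes the whole cache line containing the given address, so the
 * loops above step through the range using the minimum line size as stride.
 */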

void
mono_arch_flush_register_windows (void)
{
	/* Not needed because of libunwind */
}

gboolean
mono_arch_is_inst_imm (gint64 imm)
{
	/* The lowering pass will take care of it */

	return TRUE;
}

/*
 * Determine whether the trap whose info is in SIGINFO is caused by
 * integer overflow.
 */
gboolean
mono_arch_is_int_overflow (void *sigctx, void *info)
{
	/* Division is emulated with explicit overflow checks */
	return FALSE;
}

guint32
mono_arch_get_patch_offset (guint8 *code)
{
	NOT_IMPLEMENTED;

	return 0;
}

gpointer*
mono_arch_get_delegate_method_ptr_addr (guint8* code, mgreg_t *regs)
{
	NOT_IMPLEMENTED;

	return NULL;
}

void
mono_arch_finish_init (void)
{
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}

#ifdef MONO_ARCH_HAVE_IMT

/*
 * LOCKING: called with the domain lock held
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
						   gpointer fail_tramp)
{
	int i;
	int size = 0;
	guint8 *start, *buf;
	Ia64CodegenState code;

	size = count * 256;
	buf = g_malloc0 (size);
	ia64_codegen_init (code, buf);

	/* IA64_R9 contains the IMT method */

	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		ia64_begin_bundle (code);
		item->code_target = (guint8*)code.buf + code.nins;
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
					ia64_movl (code, GP_SCRATCH_REG, item->key);
					ia64_cmp_eq (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
				}
				item->jmp_code = (guint8*)code.buf + code.nins;
				ia64_br_cond_pred (code, 7, 0);

				if (item->has_target_code) {
					ia64_movl (code, GP_SCRATCH_REG, item->value.target_code);
				} else {
					ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
					ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
				}
				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
				ia64_br_cond_reg (code, IA64_B6);

				if (fail_case) {
					ia64_begin_bundle (code);
					ia64_patch (item->jmp_code, (guint8*)code.buf + code.nins);
					ia64_movl (code, GP_SCRATCH_REG, fail_tramp);
					ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
					ia64_br_cond_reg (code, IA64_B6);
					item->jmp_code = NULL;
				}
			} else {
				/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
				ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
				ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
				ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
				ia64_br_cond_reg (code, IA64_B6);
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
			}
		} else {
			ia64_movl (code, GP_SCRATCH_REG, item->key);
			ia64_cmp_geu (code, 6, 7, IA64_R9, GP_SCRATCH_REG);
			item->jmp_code = (guint8*)code.buf + code.nins;
			ia64_br_cond_pred (code, 6, 0);
		}
	}

	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				ia64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	ia64_codegen_close (code);
	g_assert (code.buf - buf <= size);

	size = code.buf - buf;
	if (fail_tramp) {
		start = mono_method_alloc_generic_virtual_thunk (domain, size + 16);
		start = (gpointer)ALIGN_TO (start, 16);
	} else {
		start = mono_domain_code_reserve (domain, size);
	}
	memcpy (start, buf, size);

	mono_arch_flush_icache (start, size);

	mono_stats.imt_thunks_size += size;

	return start;
}
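
/*
 * The thunk built above is a straight-line sequence of (compare key, branch)
 * blocks: is_equals entries load and branch to the target method, while the
 * cmp.geu entries split the key range of the sorted IMT slots, giving a small
 * search tree. The inter-block branches are patched in the second loop once
 * every code_target is known.
 */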

MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
	return (MonoMethod*)regs [IA64_R9];
}

void
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	/* Done by the implementation of the CALL_MEMBASE opcodes */
}
#endif

gpointer
mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
	return (gpointer)regs [IA64_R10];
}

gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	return NULL;
}

MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	if (cmethod->klass->image == mono_defaults.corlib &&
		(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
		(strcmp (cmethod->klass->name, "Interlocked") == 0)) {

		/*
		 * We don't use the generic version in mini_emit_inst_for_method ()
		 * since ia64 has atomic_add_imm opcodes.
		 */
		if (strcmp (cmethod->name, "Increment") == 0) {
			guint32 opcode;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
			else
				g_assert_not_reached ();
			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_imm = 1;
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			guint32 opcode;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
			else
				g_assert_not_reached ();
			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_imm = -1;
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode;
			gboolean is_imm = FALSE;
			gint64 imm = 0;

			if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
				imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;

				is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);
			}
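
			/*
			 * The immediates accepted above are exactly the addends the ia64
			 * fetchadd4/fetchadd8 instructions support (-16, -8, -4, -1, 1,
			 * 4, 8, 16); any other addend has to go through the register
			 * based OP_ATOMIC_ADD_NEW_* opcodes below.
			 */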
			if (is_imm) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
				else
					g_assert_not_reached ();

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->inst_imm = imm;
				ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
			} else {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_ATOMIC_ADD_NEW_I4;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_ATOMIC_ADD_NEW_I8;
				else
					g_assert_not_reached ();

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
			}
			MONO_ADD_INS (cfg->cbb, ins);
		}
	}

	return ins;
}

gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}

MonoInst*
mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	return mono_get_domain_intrinsic (cfg);
}

mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	/* FIXME: implement */
	g_assert_not_reached ();