1 /*
2 * mini-hppa.c: HPPA backend for the Mono code generator
4 * Copyright (c) 2007 Randolph Chung
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
25 #include "mini.h"
26 #include <string.h>
27 #include <pthread.h>
28 #include <unistd.h>
30 #include <unistd.h>
31 #include <sys/mman.h>
33 #include <mono/metadata/appdomain.h>
34 #include <mono/metadata/debug-helpers.h>
35 #include <mono/metadata/tokentype.h>
36 #include <mono/utils/mono-math.h>
38 #include "mini-hppa.h"
39 #include "trace.h"
40 #include "cpu-hppa.h"
42 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
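/* Quick illustration (added, not in the original): ALIGN_TO rounds val up to
 * the next multiple of a power-of-two alignment, e.g. ALIGN_TO (13, 8) == 16
 * and ALIGN_TO (16, 8) == 16. It is used below for stack and struct layout. */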
43 #define SIGNAL_STACK_SIZE (64 * 1024)
45 #define DEBUG(a) // a
46 #define DEBUG_FUNC_ENTER() // printf("Entering %s\n", __FUNCTION__)
47 #define DEBUG_FUNC_EXIT() // printf("Exiting %s\n", __FUNCTION__)
49 static const guchar
50 branch_b0_table [] = {
51 TRUE, /* OP_HPPA_BEQ */
52 FALSE, /* OP_HPPA_BGE */
53 FALSE, /* OP_HPPA_BGT */
54 TRUE, /* OP_HPPA_BLE */
55 TRUE, /* OP_HPPA_BLT */
56 FALSE, /* OP_HPPA_BNE */
57 FALSE, /* OP_HPPA_BGE_UN */
58 FALSE, /* OP_HPPA_BGT_UN */
59 TRUE, /* OP_HPPA_BLE_UN */
60 TRUE, /* OP_HPPA_BLT_UN */
61 };
63 static const guchar
64 branch_b1_table [] = {
65 HPPA_CMP_COND_EQ, /* OP_HPPA_BEQ */
66 HPPA_CMP_COND_SLT, /* OP_HPPA_BGE */
67 HPPA_CMP_COND_SLE, /* OP_HPPA_BGT */
68 HPPA_CMP_COND_SLE, /* OP_HPPA_BLE */
69 HPPA_CMP_COND_SLT, /* OP_HPPA_BLT */
70 HPPA_CMP_COND_EQ, /* OP_HPPA_BNE_UN */
71 HPPA_CMP_COND_ULT, /* OP_HPPA_BGE_UN */
72 HPPA_CMP_COND_ULE, /* OP_HPPA_BGT_UN */
73 HPPA_CMP_COND_ULE, /* OP_HPPA_BLE_UN */
74 HPPA_CMP_COND_ULT, /* OP_HPPA_BLT_UN */
75 };
77 /* Note that these are inverted from the OP_xxx, because we nullify
78 * the branch if the condition is met
79 */
80 static const guchar
81 float_branch_table [] = {
82 26, /* OP_FBEQ */
83 11, /* OP_FBGE */
84 15, /* OP_FBGT */
85 19, /* OP_FBLE */
86 23, /* OP_FBLT */
87 4, /* OP_FBNE_UN */
88 8, /* OP_FBGE_UN */
89 13, /* OP_FBGT_UN */
90 17, /* OP_FBLE_UN */
91 20, /* OP_FBLT_UN */
92 };
94 static const guchar
95 float_ceq_table [] = {
96 26, /* OP_FCEQ */
97 15, /* OP_FCGT */
98 13, /* OP_FCGT_UN */
99 23, /* OP_FCLT */
100 21, /* OP_FCLT_UN */
101 };
103 /*
104 * Branches have short (14 or 17 bit) targets on HPPA. To make longer jumps,
105 * we will need to rely on stubs - basically we create stub structures in
106 * the epilogue that uses a long branch to the destination, and any short
107 * jumps inside a method that cannot reach the destination directly will
108 * branch first to the stub.
109 */
110 typedef struct MonoOvfJump {
111 union {
112 MonoBasicBlock *bb;
113 const char *exception;
114 } data;
115 guint32 ip_offset;
116 } MonoOvfJump;
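/*
 * Illustrative sketch (added, not part of the original file): the kind of
 * stub the comment above describes. A short conditional branch inside the
 * method jumps to the stub, and the stub performs the patchable long branch
 * to the real destination, mirroring the ldil/ldo/bv sequence mentioned in
 * the OP_BR case below. The helper name emit_long_branch_stub is
 * hypothetical; the hppa_* emitter macros are the ones used in this file.
 *
 *	static guint32*
 *	emit_long_branch_stub (guint32 *code, guint32 target)
 *	{
 *		hppa_ldil (code, hppa_lsel (target), hppa_r1);
 *		hppa_ldo (code, hppa_rsel (target), hppa_r1, hppa_r1);
 *		hppa_bv (code, hppa_r0, hppa_r1);
 *		hppa_nop (code);	// branch delay slot
 *		return code;
 *	}
 */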
118 /* Create a literal 0.0 double for FNEG */
119 double hppa_zero = 0;
121 const char*
122 mono_arch_regname (int reg)
123 {
124 static const char * rnames[] = {
125 "hppa_r0", "hppa_r1", "hppa_rp", "hppa_r3", "hppa_r4",
126 "hppa_r5", "hppa_r6", "hppa_r7", "hppa_r8", "hppa_r9",
127 "hppa_r10", "hppa_r11", "hppa_r12", "hppa_r13", "hppa_r14",
128 "hppa_r15", "hppa_r16", "hppa_r17", "hppa_r18", "hppa_r19",
129 "hppa_r20", "hppa_r21", "hppa_r22", "hppa_r23", "hppa_r24",
130 "hppa_r25", "hppa_r26", "hppa_r27", "hppa_r28", "hppa_r29",
131 "hppa_sp", "hppa_r31"
133 if (reg >= 0 && reg < MONO_MAX_IREGS)
134 return rnames [reg];
135 return "unknown";
138 const char*
139 mono_arch_fregname (int reg)
140 {
141 static const char *rnames [] = {
142 "hppa_fr0", "hppa_fr1", "hppa_fr2", "hppa_fr3", "hppa_fr4",
143 "hppa_fr5", "hppa_fr6", "hppa_fr7", "hppa_fr8", "hppa_fr9",
144 "hppa_fr10", "hppa_fr11", "hppa_fr12", "hppa_fr13", "hppa_fr14",
145 "hppa_fr15", "hppa_fr16", "hppa_fr17", "hppa_fr18", "hppa_fr19",
146 "hppa_fr20", "hppa_fr21", "hppa_fr22", "hppa_fr23", "hppa_fr24",
147 "hppa_fr25", "hppa_fr26", "hppa_fr27", "hppa_fr28", "hppa_fr29",
148 "hppa_fr30", "hppa_fr31",
151 if (reg >= 0 && reg < MONO_MAX_FREGS)
152 return rnames [reg];
153 else
154 return "unknown";
158 * Initialize the cpu to execute managed code.
160 void
161 mono_arch_cpu_init (void)
162 {
163 guint32 dummy;
164 mono_arch_cpu_optimizations(&dummy);
168 * Initialize architecture specific code.
170 void
171 mono_arch_init (void)
172 {
173 }
175 /*
176 * Cleanup architecture specific code.
177 */
178 void
179 mono_arch_cleanup (void)
180 {
181 }
183 /*
184 * This function returns the optimizations supported on this cpu.
185 */
186 guint32
187 mono_arch_cpu_optimizations (guint32 *exclude_mask)
188 {
189 guint32 opts = 0;
190 *exclude_mask = 0;
191 return opts;
192 }
194 /*
195 * This function tests for all the SIMD functions supported.
196 *
197 * Returns a bitmask corresponding to all supported versions.
198 */
200 guint32
201 mono_arch_cpu_enumerate_simd_versions (void)
202 {
203 /* SIMD is currently unimplemented */
204 return 0;
205 }
207 void
208 mono_arch_flush_icache (guint8 *code, gint size)
209 {
210 guint8* p = (guint8*)((guint32)code & ~(0x3f));
211 guint8* end = (guint8*)((guint32)code + size);
212 while (p < end) {
213 __asm__ __volatile__ ("fdc %%r0(%%sr3, %0)\n"
214 "sync\n"
215 "fic %%r0(%%sr3, %0)\n"
216 "sync\n"
217 : : "r"(p));
218 p += 32; /* can be 64 on pa20 cpus */
219 }
220 }
222 void
223 mono_arch_flush_register_windows (void)
224 {
225 /* No register windows on hppa */
226 }
228 typedef enum {
229 ArgInIReg,
230 ArgInIRegPair,
231 ArgInFReg,
232 ArgInDReg,
233 ArgOnStack,
234 } ArgStorage;
236 typedef struct {
237 gint16 offset;
238 gint16 size;
239 guint8 type;
240 gint8 reg;
241 ArgStorage storage;
242 } ArgInfo;
244 typedef struct {
245 int nargs;
246 guint32 stack_usage;
247 int struct_return;
248 ArgInfo ret;
249 ArgInfo sig_cookie;
250 ArgInfo args [1];
251 } CallInfo;
253 #define PARAM_REGS 4
254 #define ARGS_OFFSET 36
256 static void
257 add_parameter (CallInfo *cinfo, ArgInfo *ainfo, MonoType *type)
258 {
259 int is_fp = (type->type == MONO_TYPE_R4 || type->type == MONO_TYPE_R8);
260 int ofs, align;
262 DEBUG_FUNC_ENTER ();
263 ainfo->reg = -1;
264 ainfo->size = mono_type_size (type, &align);
265 ainfo->type = type->type;
267 if (ainfo->size <= 4) {
268 cinfo->stack_usage += 4;
269 ainfo->offset = cinfo->stack_usage - (4 - ainfo->size);
270 }
271 else if (ainfo->size <= 8)
272 {
273 cinfo->stack_usage += 8;
274 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8);
275 ainfo->offset = cinfo->stack_usage - (8 - ainfo->size);
276 }
277 else
278 {
279 cinfo->stack_usage += ainfo->size;
280 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
281 ainfo->offset = cinfo->stack_usage;
282 }
284 ofs = (ALIGN_TO (ainfo->offset, 4) - ARGS_OFFSET) / 4;
285 if (ofs < PARAM_REGS) {
286 if (!is_fp) {
287 if (ainfo->size <= 4)
288 ainfo->storage = ArgInIReg;
289 else
290 ainfo->storage = ArgInIRegPair;
291 ainfo->reg = hppa_r26 - ofs;
292 } else if (type->type == MONO_TYPE_R4) {
293 ainfo->storage = ArgInFReg;
294 ainfo->reg = hppa_fr4 + ofs;
295 } else { /* type->type == MONO_TYPE_R8 */
296 ainfo->storage = ArgInDReg;
297 ainfo->reg = hppa_fr4 + ofs;
298 }
299 }
300 else {
301 /* frame pointer based offset */
302 ainfo->reg = hppa_r3;
303 ainfo->storage = ArgOnStack;
304 }
306 /* All offsets are negative relative to the frame pointer */
307 ainfo->offset = -ainfo->offset;
309 DEBUG_FUNC_EXIT ();
310 }
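/*
 * Worked example (added for illustration, not in the original): for a
 * signature like void foo (int a, int b, double d), get_call_info () below
 * starts with stack_usage = ARGS_OFFSET - 4 = 32 and add_parameter ()
 * assigns:
 *
 *	a: stack_usage 36, ofs 0 -> ArgInIReg  in hppa_r26, offset -36
 *	b: stack_usage 40, ofs 1 -> ArgInIReg  in hppa_r25, offset -40
 *	d: stack_usage 48, ofs 3 -> ArgInDReg  in hppa_fr7, offset -48
 *
 * i.e. the first four argument words land in r26..r23 (or fr4..fr7 for
 * floating point), and every argument also gets a caller-frame slot at a
 * negative offset from the frame pointer.
 */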
312 static void
313 analyze_return (CallInfo *cinfo, MonoMethodSignature *sig)
315 MonoType *type;
316 int align;
317 int size;
319 type = sig->ret;
320 size = mono_type_size (type, &align);
322 /* ref: mono_type_to_stind */
323 cinfo->ret.type = type->type;
324 if (type->byref) {
325 cinfo->ret.storage = ArgInIReg;
326 cinfo->ret.reg = hppa_r28;
327 } else {
328 handle_enum:
329 switch (type->type) {
330 case MONO_TYPE_VOID:
331 break;
332 case MONO_TYPE_BOOLEAN:
333 case MONO_TYPE_I1:
334 case MONO_TYPE_U1:
335 case MONO_TYPE_I2:
336 case MONO_TYPE_U2:
337 case MONO_TYPE_CHAR:
338 case MONO_TYPE_I4:
339 case MONO_TYPE_U4:
340 case MONO_TYPE_I:
341 case MONO_TYPE_U:
342 case MONO_TYPE_PTR:
343 case MONO_TYPE_FNPTR:
344 case MONO_TYPE_CLASS:
345 case MONO_TYPE_STRING:
346 case MONO_TYPE_OBJECT:
347 case MONO_TYPE_SZARRAY:
348 case MONO_TYPE_ARRAY:
349 cinfo->ret.storage = ArgInIReg;
350 cinfo->ret.reg = hppa_r28;
351 break;
352 case MONO_TYPE_U8:
353 case MONO_TYPE_I8:
354 cinfo->ret.storage = ArgInIRegPair;
355 cinfo->ret.reg = hppa_r28;
356 break;
357 case MONO_TYPE_R4:
358 cinfo->ret.storage = ArgInFReg;
359 cinfo->ret.reg = hppa_fr4;
360 break;
361 case MONO_TYPE_R8:
362 cinfo->ret.storage = ArgInDReg;
363 cinfo->ret.reg = hppa_fr4;
364 break;
365 case MONO_TYPE_GENERICINST:
366 type = &type->data.generic_class->container_class->byval_arg;
367 goto handle_enum;
369 case MONO_TYPE_VALUETYPE:
370 if (type->data.klass->enumtype) {
371 type = mono_class_enum_basetype (type->data.klass);
372 goto handle_enum;
374 /* Fall through */
375 case MONO_TYPE_TYPEDBYREF:
376 cinfo->struct_return = 1;
377 /* cinfo->ret.storage tells us how the ABI expects
378 * the parameter to be returned
380 if (size <= 4) {
381 cinfo->ret.storage = ArgInIReg;
382 cinfo->ret.reg = hppa_r28;
383 } else if (size <= 8) {
384 cinfo->ret.storage = ArgInIRegPair;
385 cinfo->ret.reg = hppa_r28;
386 } else {
387 cinfo->ret.storage = ArgOnStack;
388 cinfo->ret.reg = hppa_sp;
391 /* We always allocate stack space for this because the
392 * arch-indep code expects us to
394 cinfo->stack_usage += size;
395 cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, align);
396 cinfo->ret.offset = -cinfo->stack_usage;
397 break;
399 default:
400 g_error ("Can't handle as return value 0x%x", sig->ret->type);
406 * get_call_info:
408 * Obtain information about a call according to the calling convention.
410 static CallInfo*
411 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
413 guint32 i;
414 int n = sig->hasthis + sig->param_count;
415 CallInfo *cinfo;
416 MonoType *type;
417 MonoType ptrtype;
418 int dummy;
420 ptrtype.type = MONO_TYPE_PTR;
422 DEBUG_FUNC_ENTER();
423 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
425 /* The area below ARGS_OFFSET is the linkage area... */
426 cinfo->stack_usage = ARGS_OFFSET - 4;
427 /* -4, because the first argument will allocate the area it needs */
429 /* this */
430 if (sig->hasthis) {
431 add_parameter (cinfo, cinfo->args + 0, &ptrtype);
432 DEBUG (printf ("param <this>: assigned to reg %s offset %d\n", mono_arch_regname (cinfo->args[0].reg), cinfo->args[0].offset));
435 /* TODO: What to do with varargs? */
437 for (i = 0; i < sig->param_count; ++i) {
438 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
439 if (sig->params [i]->byref)
440 type = &ptrtype;
441 else
442 type = mono_type_get_underlying_type (sig->params [i]);
443 add_parameter (cinfo, ainfo, type);
445 DEBUG (printf ("param %d: type %d size %d assigned to reg %s offset %d\n", i, type->type, mono_type_size (type, &dummy), mono_arch_regname (ainfo->reg), ainfo->offset));
448 analyze_return (cinfo, sig);
450 DEBUG_FUNC_EXIT();
451 return cinfo;
454 GList *
455 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
457 GList *vars = NULL;
458 int i;
460 DEBUG_FUNC_ENTER();
461 for (i = 0; i < cfg->num_varinfo; i++) {
462 MonoInst *ins = cfg->varinfo [i];
463 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
465 /* unused vars */
466 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
467 continue;
469 if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
470 (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
471 continue;
473 if (mono_is_regsize_var (ins->inst_vtype)) {
474 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
475 g_assert (i == vmv->idx);
476 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
479 DEBUG_FUNC_EXIT();
481 return vars;
484 GList *
485 mono_arch_get_global_int_regs (MonoCompile *cfg)
487 GList *regs = NULL;
488 int i;
490 /* r3 is sometimes used as our frame pointer, so don't allocate it
491 * r19 is the GOT pointer, don't allocate it either
494 DEBUG_FUNC_ENTER();
495 for (i = 4; i <= 18; i++)
496 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
497 DEBUG_FUNC_EXIT();
499 return regs;
503 * mono_arch_regalloc_cost:
505 * Return the cost, in number of memory references, of the action of
506 * allocating the variable VMV into a register during global register
507 * allocation.
509 guint32
510 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
512 /* FIXME */
513 return 0;
517 * Set var information according to the calling convention.
518 * The locals var stuff should most likely be split in another method.
520 * updates m->stack_offset based on the amount of stack space needed for
521 * local vars
523 void
524 mono_arch_allocate_vars (MonoCompile *m)
526 MonoMethodSignature *sig;
527 MonoMethodHeader *header;
528 MonoInst *inst;
529 int i, offset, size, align, curinst;
530 guint32 stack_ptr;
531 guint rettype;
532 CallInfo *cinfo;
534 DEBUG_FUNC_ENTER();
535 m->flags |= MONO_CFG_HAS_SPILLUP;
537 header = m->header;
539 sig = mono_method_signature (m->method);
540 DEBUG (printf ("Allocating locals - incoming params:\n"));
541 cinfo = get_call_info (sig, FALSE);
544 * We use the ABI calling conventions for managed code as well.
546 if (m->flags & MONO_CFG_HAS_ALLOCA) {
547 stack_ptr = hppa_r4;
548 m->used_int_regs |= 1 << hppa_r4;
549 } else {
550 stack_ptr = hppa_sp;
553 /* Before this function is called, we would have looked at all
554 * calls from this method and figured out how much space is needed
555 * for the param area.
557 * Locals are allocated backwards, right before the param area
559 /* TODO: in some cases we don't need the frame pointer... */
560 m->frame_reg = hppa_r3;
561 offset = m->param_area;
563 /* Return values can be passed back in one of four ways:
564 * r28 is used for data <= 4 bytes (32-bit ABI)
565 * r28/r29 are used for data >4 && <= 8 bytes
566 * fr4 is used for floating point data
567 * data larger than 8 bytes is returned on the stack pointed to
568 * by r28
570 * This code needs to be in sync with how CEE_RET is handled
571 * in mono_method_to_ir (). In some cases when we return small
572 * structs, the ABI specifies that they should be returned in
573 * registers, but the code in mono_method_to_ir () always emits
574 * a memcpy for valuetype returns, so we need to make sure we
575 * allocate space on the stack for this copy.
577 if (cinfo->struct_return) {
578 /* this is used to stash the incoming r28 pointer */
579 offset += sizeof (gpointer);
580 m->ret->opcode = OP_REGOFFSET;
581 m->ret->inst_basereg = stack_ptr;
582 m->ret->inst_offset = -offset;
583 } else if (sig->ret->type != MONO_TYPE_VOID) {
584 m->ret->opcode = OP_REGVAR;
585 m->ret->inst_c0 = cinfo->ret.reg;
588 curinst = m->locals_start;
589 for (i = curinst; i < m->num_varinfo; ++i) {
590 inst = m->varinfo [i];
592 if (inst->opcode == OP_REGVAR) {
593 DEBUG (printf ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg)));
594 continue;
597 if (inst->flags & MONO_INST_IS_DEAD)
598 continue;
600 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
601 * pinvoke wrappers when they call functions returning structure */
602 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
603 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
604 else
605 size = mini_type_stack_size (cfg->generic_sharing_context, inst->inst_vtype, &align);
608 * This is needed since structures containing doubles must be doubleword
609 * aligned.
610 * FIXME: Do this only if needed.
612 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
613 align = 8;
616 * variables are accessed as negative offsets from hppa_sp
618 inst->opcode = OP_REGOFFSET;
619 inst->inst_basereg = stack_ptr;
620 offset += size;
621 offset = ALIGN_TO (offset, align);
622 inst->inst_offset = -offset;
624 DEBUG (printf ("allocating local %d (size = %d) to [%s - %d]\n", i, size, mono_arch_regname (inst->inst_basereg), -inst->inst_offset));
627 if (sig->call_convention == MONO_CALL_VARARG) {
628 /* TODO */
631 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
632 ArgInfo *ainfo = &cinfo->args [i];
633 inst = m->args [i];
634 if (inst->opcode != OP_REGVAR) {
635 switch (ainfo->storage) {
636 case ArgInIReg:
637 case ArgInIRegPair:
638 case ArgInFReg:
639 case ArgInDReg:
640 /* Currently mono requests all incoming registers
641 * be assigned to a stack location :-(
643 #if 0
644 if (!(inst->flags & (MONO_INST_VOLATILE | MONO_INST_INDIRECT))) {
645 inst->opcode = OP_REGVAR;
646 inst->dreg = ainfo->reg;
647 DEBUG (printf ("param %d in register %s\n", i, mono_arch_regname (inst->dreg)));
648 break;
650 #endif
651 /* fallthrough */
652 case ArgOnStack:
653 inst->opcode = OP_REGOFFSET;
654 inst->inst_basereg = hppa_r3;
655 inst->inst_offset = ainfo->offset;
656 DEBUG (printf ("param %d stored on stack [%s - %d]\n", i, mono_arch_regname (hppa_r3), -inst->inst_offset));
657 break;
662 m->stack_offset = offset; /* Includes cfg->param_area */
664 g_free (cinfo);
665 DEBUG_FUNC_EXIT();
669 * take the arguments and generate the arch-specific
670 * instructions to properly call the function in call.
671 * This includes pushing, moving arguments to the right register
672 * etc.
674 * sets call->stack_usage and cfg->param_area
676 MonoCallInst*
677 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual)
679 MonoInst *arg, *in;
680 MonoMethodSignature *sig;
681 int i, n;
682 CallInfo *cinfo;
683 ArgInfo *ainfo;
685 DEBUG_FUNC_ENTER();
686 DEBUG (printf ("is_virtual = %d\n", is_virtual));
688 sig = call->signature;
689 n = sig->param_count + sig->hasthis;
691 DEBUG (printf ("Calling method with %d parameters\n", n));
693 cinfo = get_call_info (sig, sig->pinvoke);
695 // DEBUG
696 g_assert (sig->call_convention != MONO_CALL_VARARG);
698 for (i = 0; i < n; ++i) {
699 ainfo = &cinfo->args [i];
701 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
702 /* TODO */
705 if (is_virtual && i == 0) {
706 /* the argument will be attached to the call instruction */
707 in = call->args [i];
708 call->used_iregs |= 1 << ainfo->reg;
709 } else {
710 MONO_INST_NEW (cfg, arg, OP_OUTARG);
711 in = call->args [i];
712 arg->cil_code = in->cil_code;
713 arg->inst_left = in;
714 arg->inst_call = call;
715 arg->type = in->type;
717 /* prepend, we'll need to reverse them later */
718 arg->next = call->out_args;
719 call->out_args = arg;
721 switch (ainfo->storage) {
722 case ArgInIReg:
723 case ArgInIRegPair: {
724 MonoHPPAArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoHPPAArgInfo));
725 ai->reg = ainfo->reg;
726 ai->size = ainfo->size;
727 ai->offset = ainfo->offset;
728 ai->pass_in_reg = 1;
729 arg->backend.data = ai;
731 call->used_iregs |= 1 << ainfo->reg;
732 if (ainfo->storage == ArgInIRegPair)
733 call->used_iregs |= 1 << (ainfo->reg + 1);
734 if (ainfo->type == MONO_TYPE_VALUETYPE)
735 arg->opcode = OP_OUTARG_VT;
736 break;
738 case ArgOnStack: {
739 MonoHPPAArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoHPPAArgInfo));
740 ai->reg = hppa_sp;
741 ai->size = ainfo->size;
742 ai->offset = ainfo->offset;
743 ai->pass_in_reg = 0;
744 arg->backend.data = ai;
745 if (ainfo->type == MONO_TYPE_VALUETYPE)
746 arg->opcode = OP_OUTARG_VT;
747 else
748 arg->opcode = OP_OUTARG_MEMBASE;
749 call->used_iregs |= 1 << ainfo->reg;
750 break;
752 case ArgInFReg:
753 arg->backend.reg3 = ainfo->reg;
754 arg->opcode = OP_OUTARG_R4;
755 call->used_fregs |= 1 << ainfo->reg;
756 break;
757 case ArgInDReg:
758 arg->backend.reg3 = ainfo->reg;
759 arg->opcode = OP_OUTARG_R8;
760 call->used_fregs |= 1 << ainfo->reg;
761 break;
762 default:
763 NOT_IMPLEMENTED;
769 * Reverse the call->out_args list.
772 MonoInst *prev = NULL, *list = call->out_args, *next;
773 while (list) {
774 next = list->next;
775 list->next = prev;
776 prev = list;
777 list = next;
779 call->out_args = prev;
781 call->stack_usage = cinfo->stack_usage;
782 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
783 cfg->param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
785 cfg->flags |= MONO_CFG_HAS_CALLS;
787 g_free (cinfo);
789 DEBUG_FUNC_EXIT();
790 return call;
793 void
794 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
798 void
799 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
801 DEBUG_FUNC_ENTER();
802 DEBUG_FUNC_EXIT();
805 static void
806 insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
808 if (ins == NULL) {
809 ins = bb->code;
810 bb->code = to_insert;
811 to_insert->next = ins;
812 } else {
813 to_insert->next = ins->next;
814 ins->next = to_insert;
818 #define NEW_INS(cfg,dest,op) do { \
819 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
820 (dest)->opcode = (op); \
821 insert_after_ins (bb, last_ins, (dest)); \
822 } while (0)
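/*
 * Usage sketch (added for illustration): NEW_INS relies on the `bb` and
 * `last_ins` variables that are in scope at every call site in
 * mono_arch_lowering_pass () below, so the new instruction ends up right
 * after the previously lowered instruction (i.e. just before `ins`).
 * Materialising an out-of-range immediate looks like the OP_ADD_IMM case
 * that follows:
 *
 *	MonoInst *temp;
 *	NEW_INS (cfg, temp, OP_ICONST);
 *	temp->inst_c0 = ins->inst_imm;
 *	temp->dreg = mono_alloc_ireg (cfg);
 *	ins->sreg2 = temp->dreg;
 *	ins->opcode = map_to_reg_reg_op (ins->opcode);
 */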
824 static int
825 map_to_reg_reg_op (int op)
827 switch (op) {
828 case OP_ADD_IMM:
829 return CEE_ADD;
830 case OP_SUB_IMM:
831 return CEE_SUB;
832 case OP_AND_IMM:
833 return CEE_AND;
834 case OP_COMPARE_IMM:
835 return OP_COMPARE;
836 case OP_ADDCC_IMM:
837 return OP_ADDCC;
838 case OP_ADC_IMM:
839 return OP_ADC;
840 case OP_SUBCC_IMM:
841 return OP_SUBCC;
842 case OP_SBB_IMM:
843 return OP_SBB;
844 case OP_OR_IMM:
845 return CEE_OR;
846 case OP_XOR_IMM:
847 return CEE_XOR;
848 case OP_MUL_IMM:
849 return CEE_MUL;
850 case OP_LOAD_MEMBASE:
851 return OP_LOAD_MEMINDEX;
852 case OP_LOADI4_MEMBASE:
853 return OP_LOADI4_MEMINDEX;
854 case OP_LOADU4_MEMBASE:
855 return OP_LOADU4_MEMINDEX;
856 case OP_LOADU1_MEMBASE:
857 return OP_LOADU1_MEMINDEX;
858 case OP_LOADI2_MEMBASE:
859 return OP_LOADI2_MEMINDEX;
860 case OP_LOADU2_MEMBASE:
861 return OP_LOADU2_MEMINDEX;
862 case OP_LOADI1_MEMBASE:
863 return OP_LOADI1_MEMINDEX;
864 case OP_LOADR4_MEMBASE:
865 return OP_LOADR4_MEMINDEX;
866 case OP_LOADR8_MEMBASE:
867 return OP_LOADR8_MEMINDEX;
868 case OP_STOREI1_MEMBASE_REG:
869 return OP_STOREI1_MEMINDEX;
870 case OP_STOREI2_MEMBASE_REG:
871 return OP_STOREI2_MEMINDEX;
872 case OP_STOREI4_MEMBASE_REG:
873 return OP_STOREI4_MEMINDEX;
874 case OP_STORE_MEMBASE_REG:
875 return OP_STORE_MEMINDEX;
876 case OP_STORER4_MEMBASE_REG:
877 return OP_STORER4_MEMINDEX;
878 case OP_STORER8_MEMBASE_REG:
879 return OP_STORER8_MEMINDEX;
880 case OP_STORE_MEMBASE_IMM:
881 return OP_STORE_MEMBASE_REG;
882 case OP_STOREI1_MEMBASE_IMM:
883 return OP_STOREI1_MEMBASE_REG;
884 case OP_STOREI2_MEMBASE_IMM:
885 return OP_STOREI2_MEMBASE_REG;
886 case OP_STOREI4_MEMBASE_IMM:
887 return OP_STOREI4_MEMBASE_REG;
889 g_assert_not_reached ();
893 * Remove from the instruction list the instructions that can't be
894 * represented with very simple instructions with no register
895 * requirements.
897 void
898 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
900 MonoInst *ins, *next, *temp, *last_ins = NULL;
901 int imm;
903 MONO_BB_FOR_EACH_INS (bb, ins) {
904 loop_start:
905 switch (ins->opcode) {
906 case OP_ADD_IMM:
907 case OP_ADDCC_IMM:
908 if (!hppa_check_bits (ins->inst_imm, 11)) {
909 NEW_INS (cfg, temp, OP_ICONST);
910 temp->inst_c0 = ins->inst_imm;
911 temp->dreg = mono_alloc_ireg (cfg);
912 ins->sreg2 = temp->dreg;
913 ins->opcode = map_to_reg_reg_op (ins->opcode);
915 break;
916 case OP_SUB_IMM:
917 case OP_SUBCC_IMM:
918 if (!hppa_check_bits (ins->inst_imm, 11)) {
919 NEW_INS (cfg, temp, OP_ICONST);
920 temp->inst_c0 = ins->inst_imm;
921 temp->dreg = mono_alloc_ireg (cfg);
922 ins->sreg2 = temp->dreg;
923 ins->opcode = map_to_reg_reg_op (ins->opcode);
925 break;
927 case OP_MUL_IMM:
928 if (ins->inst_imm == 1) {
929 ins->opcode = OP_MOVE;
930 break;
932 if (ins->inst_imm == 0) {
933 ins->opcode = OP_ICONST;
934 ins->inst_c0 = 0;
935 break;
937 imm = mono_is_power_of_two (ins->inst_imm);
938 if (imm > 0) {
939 ins->opcode = OP_SHL_IMM;
940 ins->inst_imm = imm;
941 break;
943 else {
944 int tmp = mono_alloc_ireg (cfg);
945 NEW_INS (cfg, temp, OP_ICONST);
946 temp->inst_c0 = ins->inst_c0;
947 temp->dreg = tmp;
949 ins->opcode = CEE_MUL;
950 ins->sreg2 = tmp;
951 /* Need to rewrite the CEE_MUL too... */
952 goto loop_start;
954 break;
956 case CEE_MUL: {
957 int freg1 = mono_alloc_freg (cfg);
958 int freg2 = mono_alloc_freg (cfg);
960 NEW_INS(cfg, temp, OP_STORE_MEMBASE_REG);
961 temp->sreg1 = ins->sreg1;
962 temp->inst_destbasereg = hppa_sp;
963 temp->inst_offset = -16;
965 NEW_INS(cfg, temp, OP_LOADR4_MEMBASE);
966 temp->dreg = freg1;
967 temp->inst_basereg = hppa_sp;
968 temp->inst_offset = -16;
970 NEW_INS(cfg, temp, OP_STORE_MEMBASE_REG);
971 temp->sreg1 = ins->sreg2;
972 temp->inst_destbasereg = hppa_sp;
973 temp->inst_offset = -16;
975 NEW_INS(cfg, temp, OP_LOADR4_MEMBASE);
976 temp->dreg = freg2;
977 temp->inst_basereg = hppa_sp;
978 temp->inst_offset = -16;
980 NEW_INS (cfg, temp, OP_HPPA_XMPYU);
981 temp->dreg = freg2;
982 temp->sreg1 = freg1;
983 temp->sreg2 = freg2;
985 NEW_INS(cfg, temp, OP_HPPA_STORER4_RIGHT);
986 temp->sreg1 = freg2;
987 temp->inst_destbasereg = hppa_sp;
988 temp->inst_offset = -16;
990 ins->opcode = OP_LOAD_MEMBASE;
991 ins->inst_basereg = hppa_sp;
992 ins->inst_offset = -16;
994 break;
996 default:
997 break;
999 last_ins = ins;
1001 bb->last_ins = last_ins;
1002 bb->max_vreg = cfg->next_vreg;
1006 void
1007 hppa_patch (guint32 *code, const gpointer target)
1009 guint32 ins = *code;
1010 gint32 val = (gint32)target;
1011 gint32 disp = (val - (gint32)code - 8) >> 2;
1012 int reg1, reg2;
1014 DEBUG (printf ("patching 0x%08x (0x%08x) to point to 0x%08x (disp = %d)\n", code, ins, val, disp));
1016 switch (*code >> 26) {
1017 case 0x08: /* ldil, next insn can be a ldo, ldw, or ble */
1018 *code = *code & ~0x1fffff;
1019 *code = *code | hppa_op_imm21 (hppa_lsel (val));
1020 code++;
1022 if ((*code >> 26) == 0x0D) { /* ldo */
1023 *code = *code & ~0x3fff;
1024 *code = *code | hppa_op_imm14 (hppa_rsel (val));
1025 } else if ((*code >> 26) == 0x12) { /* ldw */
1026 *code = *code & ~0x3fff;
1027 *code = *code | hppa_op_imm14 (hppa_rsel (val));
1028 } else if ((*code >> 26) == 0x39) { /* ble */
1029 *code = *code & ~0x1f1ffd;
1030 *code = *code | hppa_op_imm17 (hppa_rsel (val));
1033 break;
1035 case 0x3A: /* bl */
1036 if (disp == 0) {
1037 hppa_nop (code);
1038 break;
1040 if (!hppa_check_bits (disp, 17))
1041 goto jump_overflow;
1042 reg1 = (*code >> 21) & 0x1f;
1043 *code = (*code & ~0x1f1ffd) | hppa_op_imm17(disp);
1044 break;
1046 case 0x20: /* combt */
1047 case 0x22: /* combf */
1048 if (!hppa_check_bits (disp >> 2, 12))
1049 goto jump_overflow;
1050 *code = (*code & ~0x1ffd) | hppa_op_imm12(disp);
1051 break;
1053 default:
1054 g_warning ("Unpatched opcode %x\n", *code >> 26);
1057 return;
1059 jump_overflow:
1060 g_warning ("cannot branch to target, insn is %08x, displacement is %d\n", (int)*code, (int)disp);
1061 g_assert_not_reached ();
1062 }
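/*
 * Illustration (added, not in the original): the ldil/ldo pairs that
 * hppa_patch () rewrites are emitted with a zero immediate plus a patch
 * record, e.g. in the OP_CALL case of mono_arch_output_basic_block ():
 *
 *	mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
 *	hppa_ldil (code, 0, hppa_r1);
 *	hppa_ldo (code, 0, hppa_r1, hppa_r1);
 *
 * hppa_patch () later drops hppa_lsel (target) into the ldil immediate and
 * hppa_rsel (target) into the following ldo/ldw/ble, so the pair ends up
 * loading the full 32-bit address of the target into r1.
 */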
1064 static guint32 *
1065 emit_float_to_int (MonoCompile *cfg, guint32 *code, int dreg, int sreg, int size, gboolean is_signed)
1067 /* sreg is a float, dreg is an integer reg. */
1068 hppa_fcnvfxt (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, sreg, sreg);
1069 hppa_fstws (code, sreg, 0, -16, hppa_sp);
1070 hppa_ldw (code, -16, hppa_sp, dreg);
1071 if (!is_signed) {
1072 if (size == 1)
1073 hppa_extru (code, dreg, 31, 8, dreg);
1074 else if (size == 2)
1075 hppa_extru (code, dreg, 31, 16, dreg);
1076 } else {
1077 if (size == 1)
1078 hppa_extrs (code, dreg, 31, 8, dreg);
1079 else if (size == 2)
1080 hppa_extrs (code, dreg, 31, 16, dreg);
1082 return code;
1085 /* Clobbers r1, r20, r21 */
1086 static guint32 *
1087 emit_memcpy (guint32 *code, int doff, int dreg, int soff, int sreg, int size)
1089 /* r20 is the destination */
1090 hppa_set (code, doff, hppa_r20);
1091 hppa_add (code, hppa_r20, dreg, hppa_r20);
1093 /* r21 is the source */
1094 hppa_set (code, soff, hppa_r21);
1095 hppa_add (code, hppa_r21, sreg, hppa_r21);
1097 while (size >= 4) {
1098 hppa_ldw (code, 0, hppa_r21, hppa_r1);
1099 hppa_stw (code, hppa_r1, 0, hppa_r20);
1100 hppa_ldo (code, 4, hppa_r21, hppa_r21);
1101 hppa_ldo (code, 4, hppa_r20, hppa_r20);
1102 size -= 4;
1104 while (size >= 2) {
1105 hppa_ldh (code, 0, hppa_r21, hppa_r1);
1106 hppa_sth (code, hppa_r1, 0, hppa_r20);
1107 hppa_ldo (code, 2, hppa_r21, hppa_r21);
1108 hppa_ldo (code, 2, hppa_r20, hppa_r20);
1109 size -= 2;
1111 while (size > 0) {
1112 hppa_ldb (code, 0, hppa_r21, hppa_r1);
1113 hppa_stb (code, hppa_r1, 0, hppa_r20);
1114 hppa_ldo (code, 1, hppa_r21, hppa_r21);
1115 hppa_ldo (code, 1, hppa_r20, hppa_r20);
1116 size -= 1;
1119 return code;
1123 * mono_arch_get_vcall_slot_addr:
1125 * Determine the vtable slot used by a virtual call.
1127 gpointer*
1128 mono_arch_get_vcall_slot_addr (guint8 *code8, mgreg_t *regs)
1130 guint32 *code = (guint32*)((unsigned long)code8 & ~3);
1132 DEBUG_FUNC_ENTER();
1134 code -= 2;
1135 /* This is the special virtual call token */
1136 if (code [-1] != 0x34000eee) /* ldo 0x777(r0),r0 */
1137 return NULL;
1139 if ((code [0] >> 26) == 0x39 && /* ble */
1140 (code [-2] >> 26) == 0x12) { /* ldw */
1141 guint32 ldw = code [-2];
1142 guint32 reg = (ldw >> 21) & 0x1f;
1143 gint32 disp = ((ldw & 1) ? (-1 << 13) : 0) | ((ldw & 0x3fff) >> 1);
1144 /* FIXME: we are not guaranteed that reg is saved in the LMF.
1145 * In fact, it probably isn't, since it is allocated as a
1146 * callee register. Right now just return an address; this
1147 * is sufficient for non-AOT operation
1149 // return (gpointer)((guint8*)regs [reg] + disp);
1150 return code;
1151 }
1152 else
1153 g_assert_not_reached ();
1155 DEBUG_FUNC_EXIT();
1156 }
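/*
 * For reference (added): the call-site pattern matched above is the one
 * emitted by the OP_CALL_MEMBASE case for virtual calls, where disp and
 * vtable_reg stand for ins->inst_offset and ins->sreg1:
 *
 *	hppa_ldw (code, disp, vtable_reg, hppa_r1);	// load vtable slot
 *	hppa_ldo (code, 0x777, hppa_r0, hppa_r0);	// marker: ldo 0x777(r0),r0
 *	hppa_ble (code, 0, hppa_r1);			// call through r1
 *	hppa_copy (code, hppa_r31, hppa_r2);		// set up return address
 *
 * so, stepping back from the return address, code[-2] is the ldw, code[-1]
 * is the 0x34000eee marker and code[0] is the ble.
 */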
1158 /* ins->dreg = *(ins->inst_basereg + ins->inst_offset) */
1159 #define EMIT_LOAD_MEMBASE(ins, op) do { \
1160 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1161 hppa_set (code, ins->inst_offset, hppa_r1); \
1162 hppa_ ## op ## x (code, hppa_r1, ins->inst_basereg, ins->dreg); \
1164 else { \
1165 hppa_ ## op (code, ins->inst_offset, ins->inst_basereg, ins->dreg); \
1167 } while (0)
1169 #define EMIT_COND_BRANCH_FLAGS(ins,r1,r2,b0,b1) do {\
1170 if (b0) \
1171 hppa_combf (code, r1, r2, b1, 2); \
1172 else \
1173 hppa_combt (code, r1, r2, b1, 2); \
1174 hppa_nop (code); \
1175 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1176 hppa_bl (code, 0, hppa_r0); \
1177 hppa_nop (code); \
1178 } while (0)
1180 #define EMIT_COND_BRANCH(ins,r1,r2,cond) EMIT_COND_BRANCH_FLAGS(ins, r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)])
1182 #define EMIT_FLOAT_COND_BRANCH_FLAGS(ins,r1,r2,b0) do {\
1183 hppa_fcmp (code, HPPA_FP_FMT_DBL, b0, r1, r2); \
1184 hppa_ftest (code, 0); \
1185 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1186 hppa_bl (code, 8, hppa_r0); \
1187 hppa_nop (code); \
1188 } while (0)
1190 #define EMIT_FLOAT_COND_BRANCH(ins,r1,r2,cond) EMIT_FLOAT_COND_BRANCH_FLAGS(ins, r1, r2, float_branch_table [cond])
1192 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1,r2,b0,b1,exc_name) \
1193 do { \
1194 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1195 ovfj->data.exception = (exc_name); \
1196 ovfj->ip_offset = (guint8*)code - cfg->native_code; \
1197 hppa_bl (code, 8, hppa_r2); \
1198 hppa_depi (code, 0, 31, 2, hppa_r2); \
1199 hppa_ldo (code, 8, hppa_r2, hppa_r2); \
1200 if (b0) \
1201 hppa_combf (code, r1, r2, b1, 2); \
1202 else \
1203 hppa_combt (code, r1, r2, b1, 2); \
1204 hppa_nop (code); \
1205 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
1206 hppa_bl (code, 0, hppa_r0); \
1207 hppa_nop (code); \
1208 } while (0)
1210 #define EMIT_COND_SYSTEM_EXCEPTION(r1,r2,cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
1212 /* TODO: MEM_INDEX_REG - cannot be r1 */
1213 #define MEM_INDEX_REG hppa_r31
1214 /* *(ins->inst_destbasereg + ins->inst_offset) = ins->inst_imm */
1215 #define EMIT_STORE_MEMBASE_IMM(ins, op) do { \
1216 guint32 sreg; \
1217 if (ins->inst_imm == 0) \
1218 sreg = hppa_r0; \
1219 else { \
1220 hppa_set (code, ins->inst_imm, hppa_r1); \
1221 sreg = hppa_r1; \
1223 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1224 hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
1225 hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
1226 hppa_ ## op (code, sreg, 0, MEM_INDEX_REG); \
1228 else { \
1229 hppa_ ## op (code, sreg, ins->inst_offset, ins->inst_destbasereg); \
1231 } while (0)
1233 /* *(ins->inst_destbasereg + ins->inst_offset) = ins->sreg1 */
1234 #define EMIT_STORE_MEMBASE_REG(ins, op) do { \
1235 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1236 hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
1237 hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
1238 hppa_ ## op (code, ins->sreg1, 0, MEM_INDEX_REG); \
1240 else { \
1241 hppa_ ## op (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); \
1243 } while (0)
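/*
 * Expansion sketch (added for illustration): for a store whose offset does
 * not fit in the 14-bit displacement, EMIT_STORE_MEMBASE_REG (ins, stw)
 * effectively emits
 *
 *	hppa_set (code, ins->inst_offset, MEM_INDEX_REG);
 *	hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG);
 *	hppa_stw (code, ins->sreg1, 0, MEM_INDEX_REG);
 *
 * while a small offset collapses to a single
 *
 *	hppa_stw (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
 */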
1245 void
1246 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
1248 MonoInst *ins;
1249 MonoCallInst *call;
1250 guint offset;
1251 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
1252 MonoInst *last_ins = NULL;
1253 int max_len, cpos;
1254 const char *spec;
1256 DEBUG_FUNC_ENTER();
1258 if (cfg->verbose_level > 2)
1259 g_print ("[%s::%s] Basic block %d starting at offset 0x%x\n", cfg->method->klass->name, cfg->method->name, bb->block_num, bb->native_offset);
1261 cpos = bb->max_offset;
1263 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
1264 NOT_IMPLEMENTED;
1267 MONO_BB_FOR_EACH_INS (bb, ins) {
1268 guint8* code_start;
1270 offset = (guint8*)code - cfg->native_code;
1272 spec = ins_get_spec (ins->opcode);
1274 max_len = ((guint8 *)spec) [MONO_INST_LEN];
1276 if (offset > (cfg->code_size - max_len - 16)) {
1277 cfg->code_size *= 2;
1278 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1279 code = (guint32*)(cfg->native_code + offset);
1280 cfg->stat_code_reallocs++;
1282 code_start = (guint8*)code;
1283 // if (ins->cil_code)
1284 // g_print ("cil code\n");
1285 mono_debug_record_line_number (cfg, ins, offset);
1287 switch (ins->opcode) {
1288 case OP_RELAXED_NOP:
1289 break;
1290 case OP_STOREI1_MEMBASE_IMM:
1291 EMIT_STORE_MEMBASE_IMM (ins, stb);
1292 break;
1293 case OP_STOREI2_MEMBASE_IMM:
1294 EMIT_STORE_MEMBASE_IMM (ins, sth);
1295 break;
1296 case OP_STORE_MEMBASE_IMM:
1297 case OP_STOREI4_MEMBASE_IMM:
1298 EMIT_STORE_MEMBASE_IMM (ins, stw);
1299 break;
1300 case OP_STOREI1_MEMBASE_REG:
1301 EMIT_STORE_MEMBASE_REG (ins, stb);
1302 break;
1303 case OP_STOREI2_MEMBASE_REG:
1304 EMIT_STORE_MEMBASE_REG (ins, sth);
1305 break;
1306 case OP_STORE_MEMBASE_REG:
1307 case OP_STOREI4_MEMBASE_REG:
1308 EMIT_STORE_MEMBASE_REG (ins, stw);
1309 break;
1310 case OP_LOADU1_MEMBASE:
1311 EMIT_LOAD_MEMBASE (ins, ldb);
1312 break;
1313 case OP_LOADI1_MEMBASE:
1314 EMIT_LOAD_MEMBASE (ins, ldb);
1315 hppa_extrs (code, ins->dreg, 31, 8, ins->dreg);
1316 break;
1317 case OP_LOADU2_MEMBASE:
1318 EMIT_LOAD_MEMBASE (ins, ldh);
1319 break;
1320 case OP_LOADI2_MEMBASE:
1321 EMIT_LOAD_MEMBASE (ins, ldh);
1322 hppa_extrs (code, ins->dreg, 31, 16, ins->dreg);
1323 break;
1324 case OP_LOAD_MEMBASE:
1325 case OP_LOADI4_MEMBASE:
1326 case OP_LOADU4_MEMBASE:
1327 EMIT_LOAD_MEMBASE (ins, ldw);
1328 break;
1329 case CEE_CONV_I1:
1330 hppa_extrs (code, ins->sreg1, 31, 8, ins->dreg);
1331 break;
1332 case CEE_CONV_I2:
1333 hppa_extrs (code, ins->sreg1, 31, 16, ins->dreg);
1334 break;
1335 case CEE_CONV_U1:
1336 hppa_extru (code, ins->sreg1, 31, 8, ins->dreg);
1337 break;
1338 case CEE_CONV_U2:
1339 hppa_extru (code, ins->sreg1, 31, 16, ins->dreg);
1340 break;
1341 case CEE_CONV_U:
1342 case CEE_CONV_I4:
1343 case CEE_CONV_U4:
1344 case OP_MOVE:
1345 if (ins->sreg1 != ins->dreg)
1346 hppa_copy (code, ins->sreg1, ins->dreg);
1347 break;
1348 case OP_SETLRET:
1349 hppa_copy (code, ins->sreg1 + 1, ins->dreg);
1350 hppa_copy (code, ins->sreg1, ins->dreg + 1);
1351 break;
1353 case OP_BREAK:
1354 /* break 4,8 - this is what gdb normally uses... */
1355 *code++ = 0x00010004;
1356 break;
1357 case OP_ADDCC:
1358 case CEE_ADD:
1359 hppa_add (code, ins->sreg1, ins->sreg2, ins->dreg);
1360 break;
1361 case OP_ADC:
1362 hppa_addc (code, ins->sreg1, ins->sreg2, ins->dreg);
1363 break;
1364 case OP_ADDCC_IMM:
1365 case OP_ADD_IMM:
1366 hppa_addi (code, ins->inst_imm, ins->sreg1, ins->dreg);
1367 break;
1368 case OP_ADC_IMM:
1369 hppa_set (code, ins->inst_imm, hppa_r1);
1370 hppa_addc (code, ins->sreg1, hppa_r1, ins->dreg);
1371 break;
1372 case OP_HPPA_ADD_OVF: {
1373 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1374 hppa_bl (code, 8, hppa_r2);
1375 hppa_depi (code, 0, 31, 2, hppa_r2);
1376 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1378 if (ins->backend.reg3 == CEE_ADD_OVF)
1379 hppa_add_cond (code, HPPA_ADD_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1380 else
1381 hppa_add_cond (code, HPPA_ADD_COND_NUV, ins->sreg1, ins->sreg2, ins->dreg);
1383 ovfj->data.exception = "OverflowException";
1384 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1385 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1386 hppa_bl_n (code, 8, hppa_r0);
1387 break;
1389 case OP_HPPA_ADDC_OVF: {
1390 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1391 hppa_bl (code, 8, hppa_r2);
1392 hppa_depi (code, 0, 31, 2, hppa_r2);
1393 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1395 if (ins->backend.reg3 == OP_LADD_OVF)
1396 hppa_addc_cond (code, HPPA_ADD_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1397 else
1398 hppa_addc_cond (code, HPPA_ADD_COND_NUV, ins->sreg1, ins->sreg2, ins->dreg);
1400 ovfj->data.exception = "OverflowException";
1401 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1402 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1403 hppa_bl_n (code, 8, hppa_r0);
1404 break;
1406 case OP_SUBCC:
1407 case CEE_SUB:
1408 hppa_sub (code, ins->sreg1, ins->sreg2, ins->dreg);
1409 break;
1410 case OP_SUBCC_IMM:
1411 case OP_SUB_IMM:
1412 hppa_addi (code, -ins->inst_imm, ins->sreg1, ins->dreg);
1413 break;
1414 case OP_SBB:
1415 hppa_subb (code, ins->sreg1, ins->sreg2, ins->dreg);
1416 break;
1417 case OP_SBB_IMM:
1418 hppa_set (code, ins->inst_imm, hppa_r1);
1419 hppa_subb (code, ins->sreg1, hppa_r1, ins->dreg);
1420 break;
1421 case OP_HPPA_SUB_OVF: {
1422 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1423 hppa_bl (code, 8, hppa_r2);
1424 hppa_depi (code, 0, 31, 2, hppa_r2);
1425 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1426 hppa_sub_cond (code, HPPA_SUB_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1427 ovfj->data.exception = "OverflowException";
1428 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1429 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1430 hppa_bl_n (code, 8, hppa_r0);
1431 break;
1433 case OP_HPPA_SUBB_OVF: {
1434 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump));
1435 hppa_bl (code, 8, hppa_r2);
1436 hppa_depi (code, 0, 31, 2, hppa_r2);
1437 hppa_ldo (code, 12, hppa_r2, hppa_r2);
1439 hppa_subb_cond (code, HPPA_SUB_COND_NSV, ins->sreg1, ins->sreg2, ins->dreg);
1440 ovfj->data.exception = "OverflowException";
1441 ovfj->ip_offset = (guint8*)code - cfg->native_code;
1442 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj);
1443 hppa_bl_n (code, 8, hppa_r0);
1444 break;
1447 case CEE_AND:
1448 hppa_and (code, ins->sreg1, ins->sreg2, ins->dreg);
1449 break;
1450 case OP_AND_IMM:
1451 hppa_set (code, ins->inst_imm, hppa_r1);
1452 hppa_and (code, ins->sreg1, hppa_r1, ins->dreg);
1453 break;
1455 case CEE_OR:
1456 hppa_or (code, ins->sreg1, ins->sreg2, ins->dreg);
1457 break;
1459 case OP_OR_IMM:
1460 hppa_set (code, ins->inst_imm, hppa_r1);
1461 hppa_or (code, ins->sreg1, hppa_r1, ins->dreg);
1462 break;
1464 case CEE_XOR:
1465 hppa_xor (code, ins->sreg1, ins->sreg2, ins->dreg);
1466 break;
1467 case OP_XOR_IMM:
1468 hppa_set (code, ins->inst_imm, hppa_r1);
1469 hppa_xor (code, ins->sreg1, hppa_r1, ins->dreg);
1470 break;
1471 case CEE_SHL:
1472 if (ins->sreg1 != ins->dreg) {
1473 hppa_shl (code, ins->sreg1, ins->sreg2, ins->dreg);
1475 else {
1476 hppa_copy (code, ins->sreg1, hppa_r1);
1477 hppa_shl (code, hppa_r1, ins->sreg2, ins->dreg);
1479 break;
1480 case OP_SHL_IMM:
1481 case OP_ISHL_IMM:
1482 g_assert (ins->inst_imm < 32);
1483 if (ins->sreg1 != ins->dreg) {
1484 hppa_zdep (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1486 else {
1487 hppa_copy (code, ins->sreg1, hppa_r1);
1488 hppa_zdep (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1490 break;
1491 case CEE_SHR:
1492 if (ins->sreg1 != ins->dreg) {
1493 hppa_shr (code, ins->sreg1, ins->sreg2, ins->dreg);
1495 else {
1496 hppa_copy (code, ins->sreg1, hppa_r1);
1497 hppa_shr (code, hppa_r1, ins->sreg2, ins->dreg);
1499 break;
1500 case OP_SHR_IMM:
1501 g_assert (ins->inst_imm < 32);
1502 if (ins->sreg1 != ins->dreg) {
1503 hppa_extrs (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1505 else {
1506 hppa_copy (code, ins->sreg1, hppa_r1);
1507 hppa_extrs (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1509 break;
1510 case OP_SHR_UN_IMM:
1511 g_assert (ins->inst_imm < 32);
1512 if (ins->sreg1 != ins->dreg) {
1513 hppa_extru (code, ins->sreg1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1515 else {
1516 hppa_copy (code, ins->sreg1, hppa_r1);
1517 hppa_extru (code, hppa_r1, 31-ins->inst_imm, 32-ins->inst_imm, ins->dreg);
1519 break;
1520 case CEE_SHR_UN:
1521 if (ins->sreg1 != ins->dreg) {
1522 hppa_lshr (code, ins->sreg1, ins->sreg2, ins->dreg);
1524 else {
1525 hppa_copy (code, ins->sreg1, hppa_r1);
1526 hppa_lshr (code, hppa_r1, ins->sreg2, ins->dreg);
1528 break;
1529 case CEE_NOT:
1530 hppa_not (code, ins->sreg1, ins->dreg);
1531 break;
1532 case CEE_NEG:
1533 hppa_subi (code, 0, ins->sreg1, ins->dreg);
1534 break;
1536 case CEE_MUL:
1537 case OP_MUL_IMM:
1538 /* Should have been rewritten using xmpyu */
1539 g_assert_not_reached ();
1541 case OP_ICONST:
1542 if ((ins->inst_c0 > 0 && ins->inst_c0 >= (1 << 13)) ||
1543 (ins->inst_c0 < 0 && ins->inst_c0 < -(1 << 13))) {
1544 hppa_ldil (code, hppa_lsel (ins->inst_c0), ins->dreg);
1545 hppa_ldo (code, hppa_rsel (ins->inst_c0), ins->dreg, ins->dreg);
1546 } else {
1547 hppa_ldo (code, ins->inst_c0, hppa_r0, ins->dreg);
1549 break;
1550 case OP_AOTCONST:
1551 g_assert_not_reached ();
1553 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
1554 hppa_set_template (code, ins->dreg);
1556 g_warning ("unimplemented opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
1557 NOT_IMPLEMENTED;
1558 break;
1559 case OP_FMOVE:
1560 if (ins->sreg1 != ins->dreg)
1561 hppa_fcpy (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->dreg);
1562 break;
1564 case OP_HPPA_OUTARG_R4CONST:
1565 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1566 hppa_fldwx (code, hppa_r0, hppa_r1, ins->dreg, 0);
1567 break;
1569 case OP_HPPA_OUTARG_REGOFFSET:
1570 hppa_ldo (code, ins->inst_offset, ins->inst_basereg, ins->dreg);
1571 break;
1573 case OP_JMP:
1575 * Keep in sync with mono_arch_emit_epilog
1577 g_assert (!cfg->method->save_lmf);
1578 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
1579 hppa_bl (code, 8, hppa_r0);
1580 break;
1581 case OP_CHECK_THIS:
1582 /* ensure ins->sreg1 is not NULL */
1583 hppa_ldw (code, 0, ins->sreg1, hppa_r1);
1584 break;
1585 case OP_ARGLIST:
1586 break;
1587 case OP_FCALL:
1588 case OP_LCALL:
1589 case OP_VCALL:
1590 case OP_VOIDCALL:
1591 case OP_CALL:
1592 call = (MonoCallInst*)ins;
1593 if (ins->flags & MONO_INST_HAS_METHOD)
1594 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
1595 else
1596 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
1597 hppa_ldil (code, 0, hppa_r1);
1598 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1600 * We may have loaded an actual function address, or
1601 * it might be a plabel. Check to see if the plabel
1602 * bit is set, and load the actual fptr from it if
1603 * needed
1605 hppa_bb_n (code, HPPA_BIT_COND_MSB_CLR, hppa_r1, 30, 2);
1606 hppa_depi (code, 0, 31, 2, hppa_r1);
1607 hppa_ldw (code, 4, hppa_r1, hppa_r19);
1608 hppa_ldw (code, 0, hppa_r1, hppa_r1);
1609 hppa_ble (code, 0, hppa_r1);
1610 hppa_copy (code, hppa_r31, hppa_r2);
1611 if (call->signature->ret->type == MONO_TYPE_R4)
1612 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr4, hppa_fr4);
1613 break;
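/*
 * Note (added for illustration): the call sequence above assumes the 32-bit
 * PA function descriptor ("plabel") convention, where a plabel is a pointer
 * with bit 30 set that refers to a two-word descriptor:
 *
 *	word 0: actual entry point of the callee
 *	word 1: linkage table value to load into r19
 *
 * hppa_bb_n branches past the dereference when that bit is clear (a plain
 * code address); otherwise the low bits are cleared with depi and the two
 * words are loaded before branching through r1.
 */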
1614 case OP_FCALL_REG:
1615 case OP_LCALL_REG:
1616 case OP_VCALL_REG:
1617 case OP_VOIDCALL_REG:
1618 case OP_CALL_REG:
1619 call = (MonoCallInst*)ins;
1620 g_assert (!call->virtual);
1621 hppa_copy (code, ins->sreg1, hppa_r1);
1622 hppa_bb_n (code, HPPA_BIT_COND_MSB_CLR, hppa_r1, 30, 2);
1623 hppa_depi (code, 0, 31, 2, hppa_r1);
1624 hppa_ldw (code, 4, hppa_r1, hppa_r19);
1625 hppa_ldw (code, 0, hppa_r1, hppa_r1);
1626 hppa_ble (code, 0, hppa_r1);
1627 hppa_copy (code, hppa_r31, hppa_r2);
1628 if (call->signature->ret->type == MONO_TYPE_R4)
1629 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr4, hppa_fr4);
1630 break;
1631 case OP_FCALL_MEMBASE:
1632 case OP_LCALL_MEMBASE:
1633 case OP_VCALL_MEMBASE:
1634 case OP_VOIDCALL_MEMBASE:
1635 case OP_CALL_MEMBASE:
1636 call = (MonoCallInst*)ins;
1637 /* jump to ins->inst_sreg1 + ins->inst_offset */
1638 hppa_ldw (code, ins->inst_offset, ins->sreg1, hppa_r1);
1640 /* For virtual calls, emit a special token that can
1641 * be used by get_vcall_slot_addr
1643 if (call->virtual)
1644 hppa_ldo (code, 0x777, hppa_r0, hppa_r0);
1645 hppa_ble (code, 0, hppa_r1);
1646 hppa_copy (code, hppa_r31, hppa_r2);
1647 break;
1648 case OP_LOCALLOC: {
1649 guint32 size_reg;
1651 /* Keep alignment */
1652 hppa_ldo (code, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->sreg1, ins->dreg);
1653 hppa_depi (code, 0, 31, 6, ins->dreg);
1654 hppa_copy (code, hppa_sp, hppa_r1);
1655 hppa_addl (code, ins->dreg, hppa_sp, hppa_sp);
1656 hppa_copy (code, hppa_r1, ins->dreg);
1658 if (ins->flags & MONO_INST_INIT) {
1659 hppa_stw (code, hppa_r0, 0, hppa_r1);
1660 hppa_combt (code, hppa_r1, hppa_sp, HPPA_CMP_COND_ULT, -3);
1661 hppa_ldo (code, 4, hppa_r1, hppa_r1);
1663 break;
1666 case OP_THROW:
1667 hppa_copy (code, ins->sreg1, hppa_r26);
1668 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
1669 (gpointer)"mono_arch_throw_exception");
1670 hppa_ldil (code, 0, hppa_r1);
1671 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1672 hppa_ble (code, 0, hppa_r1);
1673 hppa_copy (code, hppa_r31, hppa_r2);
1674 /* should never return */
1675 *code++ = 0xffeeddcc;
1676 break;
1677 case OP_RETHROW:
1678 hppa_copy (code, ins->sreg1, hppa_r26);
1679 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
1680 (gpointer)"mono_arch_rethrow_exception");
1681 hppa_ldil (code, 0, hppa_r1);
1682 hppa_ldo (code, 0, hppa_r1, hppa_r1);
1683 hppa_ble (code, 0, hppa_r1);
1684 hppa_copy (code, hppa_r31, hppa_r2);
1685 /* should never return */
1686 *code++ = 0xffeeddcc;
1687 break;
1688 case OP_START_HANDLER:
1689 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1690 hppa_stw (code, hppa_r2, ins->inst_left->inst_offset, ins->inst_left->inst_basereg);
1691 else {
1692 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1693 hppa_addl (code, ins->inst_left->inst_basereg, hppa_r1, hppa_r1);
1694 hppa_stw (code, hppa_r2, 0, hppa_r1);
1696 break;
1697 case OP_ENDFILTER:
1698 if (ins->sreg1 != hppa_r26)
1699 hppa_copy (code, ins->sreg1, hppa_r26);
1700 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1701 hppa_ldw (code, ins->inst_left->inst_offset, ins->inst_left->inst_basereg, hppa_r2);
1702 else {
1703 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1704 hppa_ldwx (code, hppa_r1, ins->inst_left->inst_basereg, hppa_r2);
1706 hppa_bv (code, hppa_r0, hppa_r2);
1707 hppa_nop (code);
1708 break;
1709 case OP_ENDFINALLY:
1710 if (hppa_check_bits (ins->inst_left->inst_offset, 14))
1711 hppa_ldw (code, ins->inst_left->inst_offset, ins->inst_left->inst_basereg, hppa_r1);
1712 else {
1713 hppa_set (code, ins->inst_left->inst_offset, hppa_r1);
1714 hppa_ldwx (code, hppa_r1, ins->inst_left->inst_basereg, hppa_r1);
1716 hppa_bv (code, hppa_r0, hppa_r1);
1717 hppa_nop (code);
1718 break;
1719 case OP_CALL_HANDLER:
1720 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
1721 hppa_bl (code, 0, hppa_r2);
1722 hppa_nop (code);
1723 break;
1724 case OP_LABEL:
1725 ins->inst_c0 = (guint8*)code - cfg->native_code;
1726 break;
1727 case OP_BR: {
1728 guint32 target;
1729 DEBUG (printf ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins));
1730 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
1731 hppa_bl (code, 8, hppa_r0);
1732 /* TODO: if the branch is too long, we may need to
1733 * use a long-branch sequence:
1734 * hppa_ldil (code, 0, hppa_r1);
1735 * hppa_ldo (code, 0, hppa_r1, hppa_r1);
1736 * hppa_bv (code, hppa_r0, hppa_r1);
1738 hppa_nop (code);
1739 break;
1741 case OP_BR_REG:
1742 hppa_bv (code, hppa_r0, ins->sreg1);
1743 hppa_nop(code);
1744 break;
1746 case OP_SWITCH: {
1747 int i;
1749 max_len += 8 * GPOINTER_TO_INT (ins->klass);
1750 if (offset > (cfg->code_size - max_len - 16)) {
1751 cfg->code_size += max_len;
1752 cfg->code_size *= 2;
1753 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1754 code = cfg->native_code + offset;
1755 code_start = (guint8*)code;
1757 hppa_blr (code, ins->sreg1, hppa_r0);
1758 hppa_nop (code);
1759 for (i = 0; i < GPOINTER_TO_INT (ins->klass); ++i) {
1760 *code++ = 0xdeadbeef;
1761 *code++ = 0xdeadbeef;
1763 break;
1766 /* comclr is cool :-) */
1767 case OP_HPPA_CEQ:
1768 hppa_comclr_cond (code, HPPA_SUB_COND_NE, ins->sreg1, ins->sreg2, ins->dreg);
1769 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1770 break;
1772 case OP_HPPA_CLT:
1773 hppa_comclr_cond (code, HPPA_SUB_COND_SGE, ins->sreg1, ins->sreg2, ins->dreg);
1774 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1775 break;
1777 case OP_HPPA_CLT_UN:
1778 hppa_comclr_cond (code, HPPA_SUB_COND_UGE, ins->sreg1, ins->sreg2, ins->dreg);
1779 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1780 break;
1782 case OP_HPPA_CGT:
1783 hppa_comclr_cond (code, HPPA_SUB_COND_SLE, ins->sreg1, ins->sreg2, ins->dreg);
1784 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1785 break;
1787 case OP_HPPA_CGT_UN:
1788 hppa_comclr_cond (code, HPPA_SUB_COND_ULE, ins->sreg1, ins->sreg2, ins->dreg);
1789 hppa_ldo (code, 1, hppa_r0, ins->dreg);
1790 break;
1792 case OP_CEQ:
1793 case OP_CLT:
1794 case OP_CLT_UN:
1795 case OP_CGT:
1796 case OP_CGT_UN:
1797 case OP_COND_EXC_EQ:
1798 case OP_COND_EXC_NE_UN:
1799 case OP_COND_EXC_LT:
1800 case OP_COND_EXC_LT_UN:
1801 case OP_COND_EXC_GT:
1802 case OP_COND_EXC_GT_UN:
1803 case OP_COND_EXC_GE:
1804 case OP_COND_EXC_GE_UN:
1805 case OP_COND_EXC_LE:
1806 case OP_COND_EXC_LE_UN:
1807 case OP_COND_EXC_OV:
1808 case OP_COND_EXC_NO:
1809 case OP_COND_EXC_C:
1810 case OP_COND_EXC_NC:
1811 case OP_COND_EXC_IOV:
1812 case OP_COND_EXC_IC:
1813 case CEE_BEQ:
1814 case CEE_BNE_UN:
1815 case CEE_BLT:
1816 case CEE_BLT_UN:
1817 case CEE_BGT:
1818 case CEE_BGT_UN:
1819 case CEE_BGE:
1820 case CEE_BGE_UN:
1821 case CEE_BLE:
1822 case CEE_BLE_UN:
1823 case OP_COMPARE:
1824 case OP_LCOMPARE:
1825 case OP_ICOMPARE:
1826 case OP_COMPARE_IMM:
1827 case OP_ICOMPARE_IMM:
1828 g_warning ("got opcode %s in %s(), should be reduced\n", mono_inst_name (ins->opcode), __FUNCTION__);
1829 g_assert_not_reached ();
1830 break;
1832 case OP_HPPA_BEQ:
1833 case OP_HPPA_BNE:
1834 case OP_HPPA_BLT:
1835 case OP_HPPA_BLT_UN:
1836 case OP_HPPA_BGT:
1837 case OP_HPPA_BGT_UN:
1838 case OP_HPPA_BGE:
1839 case OP_HPPA_BGE_UN:
1840 case OP_HPPA_BLE:
1841 case OP_HPPA_BLE_UN:
1842 EMIT_COND_BRANCH (ins, ins->sreg1, ins->sreg2, ins->opcode - OP_HPPA_BEQ);
1843 break;
1845 case OP_HPPA_COND_EXC_EQ:
1846 case OP_HPPA_COND_EXC_GE:
1847 case OP_HPPA_COND_EXC_GT:
1848 case OP_HPPA_COND_EXC_LE:
1849 case OP_HPPA_COND_EXC_LT:
1850 case OP_HPPA_COND_EXC_NE_UN:
1851 case OP_HPPA_COND_EXC_GE_UN:
1852 case OP_HPPA_COND_EXC_GT_UN:
1853 case OP_HPPA_COND_EXC_LE_UN:
1854 case OP_HPPA_COND_EXC_LT_UN:
1855 EMIT_COND_SYSTEM_EXCEPTION (ins->sreg1, ins->sreg2, ins->opcode - OP_HPPA_COND_EXC_EQ, ins->inst_p1);
1856 break;
1858 case OP_HPPA_COND_EXC_OV:
1859 case OP_HPPA_COND_EXC_NO:
1860 case OP_HPPA_COND_EXC_C:
1861 case OP_HPPA_COND_EXC_NC:
1862 NOT_IMPLEMENTED;
1864 /* floating point opcodes */
1865 case OP_R8CONST:
1866 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1867 hppa_flddx (code, hppa_r0, hppa_r1, ins->dreg);
1868 break;
1869 case OP_R4CONST:
1870 hppa_set (code, (unsigned int)ins->inst_p0, hppa_r1);
1871 hppa_fldwx (code, hppa_r0, hppa_r1, hppa_fr31, 0);
1872 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1873 break;
1874 case OP_STORER8_MEMBASE_REG:
1875 hppa_set (code, ins->inst_offset, hppa_r1);
1876 hppa_fstdx (code, ins->sreg1, hppa_r1, ins->inst_destbasereg);
1877 break;
1878 case OP_LOADR8_MEMBASE:
1879 hppa_set (code, ins->inst_offset, hppa_r1);
1880 hppa_flddx (code, hppa_r1, ins->inst_basereg, ins->dreg);
1881 break;
1882 case OP_STORER4_MEMBASE_REG:
1883 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, hppa_fr31);
1884 if (hppa_check_bits (ins->inst_offset, 5)) {
1885 hppa_fstws (code, hppa_fr31, 0, ins->inst_offset, ins->inst_destbasereg);
1886 } else {
1887 hppa_set (code, ins->inst_offset, hppa_r1);
1888 hppa_fstwx (code, hppa_fr31, 0, hppa_r1, ins->inst_destbasereg);
1890 break;
1891 case OP_HPPA_STORER4_LEFT:
1892 case OP_HPPA_STORER4_RIGHT:
1893 if (hppa_check_bits (ins->inst_offset, 5)) {
1894 hppa_fstws (code, ins->sreg1, (ins->opcode == OP_HPPA_STORER4_RIGHT), ins->inst_offset, ins->inst_destbasereg);
1895 } else {
1896 hppa_set (code, ins->inst_offset, hppa_r1);
1897 hppa_fstwx (code, ins->sreg1, (ins->opcode == OP_HPPA_STORER4_RIGHT), hppa_r1, ins->inst_destbasereg);
1899 break;
1900 case OP_LOADR4_MEMBASE:
1901 if (hppa_check_bits (ins->inst_offset, 5)) {
1902 hppa_fldws (code, ins->inst_offset, ins->inst_basereg, hppa_fr31, 0);
1903 } else {
1904 hppa_set (code, ins->inst_offset, hppa_r1);
1905 hppa_fldwx (code, hppa_r1, ins->inst_basereg, hppa_fr31, 0);
1907 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1908 break;
1909 case OP_HPPA_LOADR4_LEFT:
1910 case OP_HPPA_LOADR4_RIGHT:
1911 if (hppa_check_bits (ins->inst_offset, 5)) {
1912 hppa_fldws (code, ins->inst_offset, ins->inst_basereg, ins->dreg, (ins->opcode == OP_HPPA_LOADR4_RIGHT));
1913 } else {
1914 hppa_set (code, ins->inst_offset, hppa_r1);
1915 hppa_fldwx (code, hppa_r1, ins->inst_basereg, ins->dreg, (ins->opcode == OP_HPPA_LOADR4_RIGHT));
1917 break;
1919 case CEE_CONV_R4:
1920 hppa_stw (code, ins->sreg1, -16, hppa_sp);
1921 hppa_fldws (code, -16, hppa_sp, hppa_fr31, 0);
1922 hppa_fcnvxf (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_SGL, hppa_fr31, ins->dreg);
1923 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, ins->dreg, ins->dreg);
1924 break;
1926 case OP_FCONV_TO_R4:
1927 /* reduce precision */
1928 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, ins->dreg);
1929 hppa_fcnvff (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, ins->dreg, ins->dreg);
1930 break;
1932 case OP_HPPA_SETF4REG:
1933 hppa_fcnvff (code, HPPA_FP_FMT_DBL, HPPA_FP_FMT_SGL, ins->sreg1, ins->dreg);
1934 break;
1935 case CEE_CONV_R8:
1936 hppa_stw (code, ins->sreg1, -16, hppa_sp);
1937 hppa_fldws (code, -16, hppa_sp, hppa_fr31, 0);
1938 hppa_fcnvxf (code, HPPA_FP_FMT_SGL, HPPA_FP_FMT_DBL, hppa_fr31, ins->dreg);
1939 break;
1941 case OP_FCONV_TO_I1:
1942 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
1943 break;
1944 case OP_FCONV_TO_U1:
1945 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
1946 break;
1947 case OP_FCONV_TO_I2:
1948 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
1949 break;
1950 case OP_FCONV_TO_U2:
1951 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
1952 break;
1953 case OP_FCONV_TO_I4:
1954 case OP_FCONV_TO_I:
1955 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
1956 break;
1957 case OP_FCONV_TO_U4:
1958 case OP_FCONV_TO_U:
1959 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
1960 break;
1962 case OP_FCONV_TO_I8:
1963 case OP_FCONV_TO_U8:
1964 g_assert_not_reached ();
1965 /* Implemented as helper calls */
1966 break;
1967 case OP_LCONV_TO_R_UN:
1968 g_assert_not_reached ();
1969 /* Implemented as helper calls */
1970 break;
1972 case OP_LCONV_TO_OVF_I:
1973 NOT_IMPLEMENTED;
1974 break;
1976 case OP_FADD:
1977 hppa_fadd (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
1978 break;
1979 case OP_FSUB:
1980 hppa_fsub (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
1981 break;
1982 case OP_FMUL:
1983 hppa_fmul (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
1984 break;
1985 case OP_FDIV:
1986 hppa_fdiv (code, HPPA_FP_FMT_DBL, ins->sreg1, ins->sreg2, ins->dreg);
1987 break;
1988 case OP_FREM:
1989 NOT_IMPLEMENTED;
1990 break;
1992 case OP_FCOMPARE:
1993 g_assert_not_reached();
1994 break;
1996 case OP_FCEQ:
1997 case OP_FCLT:
1998 case OP_FCLT_UN:
1999 case OP_FCGT:
2000 case OP_FCGT_UN:
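/* Materialize the FP compare as 0/1 in dreg: fcmp records the condition taken
 * from float_ceq_table, and the ftest/branch pair ensures that exactly one of
 * the two ldo instructions below ends up writing dreg. */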
2001 hppa_fcmp (code, HPPA_FP_FMT_DBL, float_ceq_table [ins->opcode - OP_FCEQ], ins->sreg1, ins->sreg2);
2002 hppa_ftest (code, 0);
2003 hppa_bl (code, 12, hppa_r0);
2004 hppa_ldo (code, 1, hppa_r0, ins->dreg);
2005 hppa_ldo (code, 0, hppa_r0, ins->dreg);
2006 break;
2008 case OP_FBEQ:
2009 case OP_FBLT:
2010 case OP_FBGT:
2011 case OP_FBGE:
2012 case OP_FBLE:
2013 case OP_FBNE_UN:
2014 case OP_FBLT_UN:
2015 case OP_FBGT_UN:
2016 case OP_FBGE_UN:
2017 case OP_FBLE_UN:
2018 EMIT_FLOAT_COND_BRANCH (ins, ins->sreg1, ins->sreg2, ins->opcode - OP_FBEQ);
2019 break;
2021 case OP_CKFINITE:
2022 case OP_MEMORY_BARRIER:
2023 break;
2025 case OP_HPPA_XMPYU:
2026 hppa_xmpyu (code, ins->sreg1, ins->sreg2, ins->dreg);
2027 break;
2029 default:
2030 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
2031 g_assert_not_reached ();
2032 }
2034 if ((((guint8*)code) - code_start) > max_len) {
2035 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
2036 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
2037 g_assert_not_reached ();
2038 }
2040 cpos += max_len;
2042 last_ins = ins;
2043 }
2045 cfg->code_len = (guint8*)code - cfg->native_code;
2046 DEBUG_FUNC_EXIT();
2047 }
2049 void
2050 mono_arch_register_lowlevel_calls (void)
2051 {
2052 }
2054 void
2055 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
2056 {
2057 MonoJumpInfo *patch_info;
2059 DEBUG_FUNC_ENTER();
2060 /* FIXME: Move part of this to arch independent code */
2061 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
2062 unsigned char *ip = patch_info->ip.i + code;
2063 gpointer target;
2065 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
2066 DEBUG (printf ("patch_info->type = %d, target = %p\n", patch_info->type, target));
2068 switch (patch_info->type) {
2069 case MONO_PATCH_INFO_NONE:
2070 case MONO_PATCH_INFO_BB_OVF:
2071 case MONO_PATCH_INFO_EXC_OVF:
2072 continue;
2074 case MONO_PATCH_INFO_IP:
2075 hppa_patch ((guint32 *)ip, ip);
2076 continue;
2078 case MONO_PATCH_INFO_CLASS_INIT: {
2079 break;
2080 }
2081 case MONO_PATCH_INFO_METHOD_JUMP: {
2082 break;
2083 }
2084 case MONO_PATCH_INFO_SWITCH: {
2085 int i;
2086 gpointer *table = (gpointer *)target;
2087 ip += 8;
2088 for (i = 0; i < patch_info->data.table->table_size; i++) {
2089 DEBUG (printf ("Patching switch table, table[%d] = %p\n", i, table[i]));
2090 hppa_ldil (ip, hppa_lsel (table [i]), hppa_r1);
2091 hppa_be_n (ip, hppa_rsel (table [i]), hppa_r1);
2092 }
2093 continue;
2094 }
2095 default:
2096 break;
2097 }
2098 hppa_patch ((guint32 *)ip, target);
2099 }
2101 DEBUG_FUNC_EXIT();
2102 }
2104 void*
2105 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2106 {
2107 guint32 *code = (guint32*)p;
2109 DEBUG_FUNC_ENTER();
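/* Emit a call to the tracing function 'func': r26/r25 carry its arguments,
 * the address is treated as a descriptor (the low bits are cleared with depi
 * and the entry point loaded with ldw) and the indirect ble leaves the return
 * address in r31, which is copied into r2. */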
2111 hppa_set (code, cfg->method, hppa_r26);
2112 hppa_copy (code, hppa_r0, hppa_r25); /* NULL sp for now */
2113 hppa_set (code, func, hppa_r1);
2114 hppa_depi (code, 0, 31, 2, hppa_r1);
2115 hppa_ldw (code, 0, hppa_r1, hppa_r1);
2116 hppa_ble (code, 0, hppa_r1);
2117 hppa_copy (code, hppa_r31, hppa_r2);
2119 DEBUG_FUNC_EXIT();
2120 return code;
2121 }
2123 enum {
2124 SAVE_NONE,
2125 SAVE_STRUCT,
2126 SAVE_ONE,
2127 SAVE_TWO,
2128 SAVE_FP
2129 };
2131 void*
2132 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2133 {
2134 guint32 *code = (guint32*)p;
2135 DEBUG_FUNC_ENTER();
2136 #if 0
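/* The block below is disabled: it is the SPARC version of this function,
 * apparently kept only as a template for a future HPPA implementation. */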
2137 int save_mode = SAVE_NONE;
2138 MonoMethod *method = cfg->method;
2140 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
2141 case MONO_TYPE_VOID:
2142 /* special case string .ctor icall */
2143 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2144 save_mode = SAVE_ONE;
2145 else
2146 save_mode = SAVE_NONE;
2147 break;
2148 case MONO_TYPE_I8:
2149 case MONO_TYPE_U8:
2150 #ifdef SPARCV9
2151 save_mode = SAVE_ONE;
2152 #else
2153 save_mode = SAVE_TWO;
2154 #endif
2155 break;
2156 case MONO_TYPE_R4:
2157 case MONO_TYPE_R8:
2158 save_mode = SAVE_FP;
2159 break;
2160 case MONO_TYPE_VALUETYPE:
2161 save_mode = SAVE_STRUCT;
2162 break;
2163 default:
2164 save_mode = SAVE_ONE;
2165 break;
2166 }
2168 /* Save the result to the stack and also put it into the output registers */
2170 switch (save_mode) {
2171 case SAVE_TWO:
2172 /* V8 only */
2173 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
2174 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
2175 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2176 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
2177 break;
2178 case SAVE_ONE:
2179 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
2180 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2181 break;
2182 case SAVE_FP:
2183 #ifdef SPARCV9
2184 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
2185 #else
2186 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
2187 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
2188 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
2189 #endif
2190 break;
2191 case SAVE_STRUCT:
2192 #ifdef SPARCV9
2193 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
2194 #else
2195 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
2196 #endif
2197 break;
2198 case SAVE_NONE:
2199 default:
2200 break;
2201 }
2203 sparc_set (code, cfg->method, sparc_o0);
2205 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
2206 EMIT_CALL ();
2208 /* Restore result */
2210 switch (save_mode) {
2211 case SAVE_TWO:
2212 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
2213 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
2214 break;
2215 case SAVE_ONE:
2216 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
2217 break;
2218 case SAVE_FP:
2219 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
2220 break;
2221 case SAVE_NONE:
2222 default:
2223 break;
2224 }
2225 #endif
2226 DEBUG_FUNC_EXIT();
2227 return code;
2228 }
2230 /*
2231 * The HPPA stack frame should look like this:
2233 * ---------------------
2234 * incoming params area
2235 * ---------------------
2236 * linkage area size = ARGS_OFFSET
2237 * --------------------- fp = psp
2238 * HPPA_STACK_LMF_OFFSET
2239 * ---------------------
2240 * MonoLMF structure or saved registers
2241 * -------------------
2242 * locals size = cfg->stack_offset - cfg->param_area
2243 * ---------------------
2244 * params area size = cfg->param_area - ARGS_OFFSET (aligned)
2245 * ---------------------
2246 * callee linkage area size = ARGS_OFFSET
2247 * --------------------- sp
2248 */
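/* For example, when the LMF is saved, the alloc_size computed in
 * mono_arch_emit_prolog below works out to
 *
 *   ALIGN_TO (HPPA_STACK_LMF_OFFSET + sizeof (MonoLMF) + cfg->stack_offset,
 *             MONO_ARCH_FRAME_ALIGNMENT)
 *
 * where cfg->stack_offset covers the locals and the outgoing parameter area. */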
2249 guint8 *
2250 mono_arch_emit_prolog (MonoCompile *cfg)
2251 {
2252 MonoMethod *method = cfg->method;
2253 MonoBasicBlock *bb;
2254 MonoMethodSignature *sig;
2255 MonoInst *inst;
2256 int alloc_size, pos, max_offset, i;
2257 guint8 *code;
2258 CallInfo *cinfo;
2259 int tracing = 0;
2260 int lmf_offset = 0;
2262 DEBUG_FUNC_ENTER();
2263 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
2264 tracing = 1;
2266 sig = mono_method_signature (method);
2267 cfg->code_size = 512 + sig->param_count * 20;
2268 code = cfg->native_code = g_malloc (cfg->code_size);
2270 /* TODO: enable tail call optimization */
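/* Save the return pointer (r2) at sp-20, its conventional slot in the caller's
 * frame marker, before the new frame is allocated. */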
2271 if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
2272 hppa_stw (code, hppa_r2, -20, hppa_sp);
2273 }
2275 /* locals area */
2276 pos = HPPA_STACK_LMF_OFFSET;
2278 /* figure out how much space we need for spilling */
2279 if (!method->save_lmf) {
2280 /* spill callee-save registers */
2281 guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
2282 for (i = 0; i < 32; i++) {
2283 if ((1 << i) & mask)
2284 pos += sizeof (gulong);
2285 }
2286 } else {
2287 lmf_offset = pos;
2288 pos += sizeof (MonoLMF);
2289 }
2291 alloc_size = ALIGN_TO (pos + cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
2292 g_assert ((alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) == 0);
2294 cfg->stack_usage = alloc_size;
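/* Frame allocation (the HPPA stack grows upward): stwm stores the caller's r3
 * (staged in r1) at the old stack pointer and advances sp by alloc_size in a
 * single instruction; frames too large for the 14-bit immediate advance by
 * 8100 first and add the remainder with addil/ldo. */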
2296 if (alloc_size) {
2297 hppa_copy (code, hppa_r3, hppa_r1);
2298 hppa_copy (code, hppa_sp, hppa_r3);
2299 if (hppa_check_bits (alloc_size, 14))
2300 hppa_stwm (code, hppa_r1, alloc_size, hppa_sp);
2301 else {
2302 hppa_stwm (code, hppa_r1, 8100, hppa_sp);
2303 hppa_addil (code, hppa_lsel (alloc_size - 8100), hppa_sp);
2304 hppa_ldo (code, hppa_rsel (alloc_size - 8100), hppa_r1, hppa_sp);
2305 }
2306 }
2308 /* compute max_offset in order to use short forward jumps
2309 * we always do it on hppa because the immediate displacement
2310 * for jumps is small
2311 */
2312 max_offset = 0;
2313 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2314 MonoInst *ins = bb->code;
2315 bb->max_offset = max_offset;
2317 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
2318 max_offset += 6;
2320 MONO_BB_FOR_EACH_INS (bb, ins)
2321 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2322 }
2324 DEBUG (printf ("Incoming arguments: \n"));
2325 cinfo = get_call_info (sig, sig->pinvoke);
2327 /* We do this first so that we don't have to worry about the LMF-
2328 * saving code clobbering r28
2329 */
2330 if (cinfo->struct_return)
2331 hppa_stw (code, hppa_r28, cfg->ret->inst_offset, hppa_sp);
2333 /* Save the LMF or the spilled registers */
2334 pos = HPPA_STACK_LMF_OFFSET;
2335 if (!method->save_lmf) {
2336 /* spill callee-save registers */
2337 guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
2338 for (i = 0; i < 32; i++) {
2339 if ((1 << i) & mask) {
2340 if (i == hppa_r3) {
2341 hppa_ldw (code, 0, hppa_r3, hppa_r1);
2342 hppa_stw (code, hppa_r1, pos, hppa_r3);
2343 } else
2344 hppa_stw (code, i, pos, hppa_r3);
2345 pos += sizeof (gulong);
2346 }
2347 }
2348 } else {
2349 int ofs = lmf_offset + G_STRUCT_OFFSET (MonoLMF, regs);
2350 int reg;
2352 hppa_ldw (code, 0, hppa_r3, hppa_r1);
2353 hppa_stw (code, hppa_r1, ofs, hppa_r3);
2354 ofs += sizeof (gulong);
2355 for (reg = 4; reg < 32; reg++) {
2356 if (HPPA_IS_SAVED_GREG (reg)) {
2357 hppa_stw (code, reg, ofs, hppa_r3);
2358 ofs += sizeof (gulong);
2359 }
2360 }
2361 /* We shouldn't need to save the FP regs.... */
2362 ofs = ALIGN_TO (ofs, sizeof(double));
2363 hppa_set (code, ofs, hppa_r1);
2364 for (reg = 0; reg < 32; reg++) {
2365 if (HPPA_IS_SAVED_FREG (reg)) {
2366 hppa_fstdx (code, reg, hppa_r1, hppa_r3);
2367 hppa_ldo (code, sizeof(double), hppa_r1, hppa_r1);
2368 }
2369 }
2371 /* We also spill the arguments onto the stack, because
2372 * the call to hppa_get_lmf_addr below can clobber them
2374 * This goes in the param area that is always allocated
2375 */
2376 ofs = -36;
2377 for (reg = hppa_r26; reg >= hppa_r23; reg--) {
2378 hppa_stw (code, reg, ofs, hppa_sp);
2379 ofs -= 4;
2380 }
2381 }
2383 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
2384 hppa_copy (code, hppa_r30, hppa_r4);
2386 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
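/* Native-to-managed wrappers attach the current thread to the runtime first.
 * The ldil/ldo pair materializes a placeholder address that the
 * MONO_PATCH_INFO_INTERNAL_METHOD entry later patches to point at
 * mono_jit_thread_attach; the call then goes through the descriptor as usual. */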
2387 hppa_set (code, cfg->domain, hppa_r26);
2388 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
2389 hppa_ldil (code, 0, hppa_r1);
2390 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2391 hppa_depi (code, 0, 31, 2, hppa_r1);
2392 hppa_ldw (code, 0, hppa_r1, hppa_r1);
2393 hppa_ble (code, 0, hppa_r1);
2394 hppa_copy (code, hppa_r31, hppa_r2);
2395 }
2397 if (method->save_lmf) {
2398 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2399 (gpointer)"mono_get_lmf_addr");
2400 hppa_ldil (code, 0, hppa_r1);
2401 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2402 hppa_depi (code, 0, 31, 2, hppa_r1);
2403 hppa_ldw (code, 0, hppa_r1, hppa_r1);
2404 hppa_ble (code, 0, hppa_r1);
2405 hppa_copy (code, hppa_r31, hppa_r2);
2407 /* lmf_offset is the offset of the MonoLMF structure from the previous
2408 * stack pointer. The pointer to the struct is put in hppa_r22 (new_lmf).
2409 * The callee-saved registers are already in the MonoLMF
2410 * structure.
2411 */
2413 /* hppa_r22 = new_lmf (on the stack) */
2414 hppa_ldo (code, lmf_offset, hppa_r3, hppa_r22);
2415 /* new_lmf->lmf_addr = lmf_addr (mono_get_lmf_addr returned it in r28) */
2417 hppa_stw (code, hppa_r28, G_STRUCT_OFFSET(MonoLMF, lmf_addr), hppa_r22);
2418 /* new_lmf->previous_lmf = *lmf_addr */
2419 hppa_ldw (code, 0, hppa_r28, hppa_r1);
2420 hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r22);
2421 /* *(lmf_addr) = r22 */
2422 hppa_stw (code, hppa_r22, 0, hppa_r28);
2423 hppa_set (code, method, hppa_r1);
2424 hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, method), hppa_r22);
2425 hppa_stw (code, hppa_sp, G_STRUCT_OFFSET(MonoLMF, ebp), hppa_r22);
2426 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
2427 hppa_ldil (code, 0, hppa_r1);
2428 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2429 hppa_stw (code, hppa_r1, G_STRUCT_OFFSET(MonoLMF, eip), hppa_r22);
2431 /* Now reload the arguments from the stack */
2432 hppa_ldw (code, -36, hppa_sp, hppa_r26);
2433 hppa_ldw (code, -40, hppa_sp, hppa_r25);
2434 hppa_ldw (code, -44, hppa_sp, hppa_r24);
2435 hppa_ldw (code, -48, hppa_sp, hppa_r23);
2436 }
2438 /* Move the incoming arguments to their home locations (registers or stack slots) */
2439 pos = 0;
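/* Move each incoming argument to its home location: arguments whose home is a
 * register (OP_REGVAR) are copied or loaded into that register, the rest are
 * stored from their incoming registers (or copied from the caller's frame)
 * into their stack slots. */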
2441 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2442 ArgInfo *ainfo = cinfo->args + i;
2443 inst = cfg->args [pos];
2445 if (inst->opcode == OP_REGVAR) {
2446 /* Want the argument in a register */
2447 switch (ainfo->storage) {
2448 case ArgInIReg:
2449 if (ainfo->reg != inst->dreg)
2450 hppa_copy (code, ainfo->reg, inst->dreg);
2451 DEBUG (printf ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg)));
2452 break;
2454 case ArgInIRegPair:
2455 if (ainfo->reg != inst->dreg) {
2456 hppa_copy (code, ainfo->reg, inst->dreg);
2457 hppa_copy (code, ainfo->reg + 1, inst->dreg + 1);
2458 }
2459 DEBUG (printf ("Argument %d assigned to register %s, %s\n", pos, mono_arch_regname (inst->dreg), mono_arch_regname (inst->dreg + 1)));
2460 break;
2462 case ArgInFReg:
2463 if (ainfo->reg != inst->dreg)
2464 hppa_fcpy (code, HPPA_FP_FMT_SGL, ainfo->reg, inst->dreg);
2465 DEBUG (printf ("Argument %d assigned to single register %s\n", pos, mono_arch_fregname (inst->dreg)));
2466 break;
2468 case ArgInDReg:
2469 if (ainfo->reg != inst->dreg)
2470 hppa_fcpy (code, HPPA_FP_FMT_DBL, ainfo->reg, inst->dreg);
2471 DEBUG (printf ("Argument %d assigned to double register %s\n", pos, mono_arch_fregname (inst->dreg)));
2472 break;
2474 case ArgOnStack:
2475 switch (ainfo->size) {
2476 case 1:
2477 hppa_ldb (code, ainfo->offset, hppa_r3, inst->dreg);
2478 break;
2479 case 2:
2480 hppa_ldh (code, ainfo->offset, hppa_r3, inst->dreg);
2481 break;
2482 case 4:
2483 hppa_ldw (code, ainfo->offset, hppa_r3, inst->dreg);
2484 break;
2485 default:
2486 g_assert_not_reached ();
2487 }
2490 DEBUG (printf ("Argument %d loaded from the stack [%s - %d]\n", pos, mono_arch_regname (hppa_r3), -ainfo->offset));
2491 break;
2493 default:
2494 g_assert_not_reached ();
2495 }
2496 }
2497 else {
2498 /* Want the argument on the stack */
2499 switch (ainfo->storage)
2500 {
2501 case ArgInIReg: {
2502 int off, reg;
2503 DEBUG (printf ("Argument %d stored from register %s to stack [%s + %d]\n", pos, mono_arch_regname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
2504 if (hppa_check_bits (inst->inst_offset, 14)) {
2505 off = inst->inst_offset;
2506 reg = inst->inst_basereg;
2507 }
2508 else {
2509 hppa_set (code, inst->inst_offset, hppa_r1);
2510 hppa_add (code, hppa_r1, inst->inst_basereg, hppa_r1);
2511 off = 0;
2512 reg = hppa_r1;
2513 }
2514 switch (ainfo->size)
2515 {
2516 case 1:
2517 hppa_stb (code, ainfo->reg, off, reg);
2518 break;
2519 case 2:
2520 hppa_sth (code, ainfo->reg, off, reg);
2521 break;
2522 case 4:
2523 hppa_stw (code, ainfo->reg, off, reg);
2524 break;
2525 default:
2526 g_assert_not_reached ();
2527 }
2528 break;
2529 }
2530 case ArgInIRegPair:
2531 DEBUG (printf ("Argument %d stored from register (%s,%s) to stack [%s + %d]\n", pos, mono_arch_regname (ainfo->reg), mono_arch_regname (ainfo->reg+1), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
2532 if (hppa_check_bits (inst->inst_offset + 4, 14)) {
2533 hppa_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
2534 hppa_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
2535 }
2536 else {
2537 hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
2538 hppa_stw (code, ainfo->reg, 0, hppa_r1);
2539 hppa_stw (code, ainfo->reg + 1, 4, hppa_r1);
2540 }
2541 break;
2543 case ArgInFReg:
2544 DEBUG (printf ("Argument %d (float) stored from register %s to stack [%s + %d]\n", pos, mono_arch_fregname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
2545 hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
2546 hppa_fstwx (code, ainfo->reg, 0, hppa_r0, hppa_r1);
2547 break;
2549 case ArgInDReg:
2550 DEBUG (printf ("Argument %d (double) stored from register %s to stack [%s + %d]\n", pos, mono_arch_fregname (ainfo->reg), mono_arch_regname (inst->inst_basereg), inst->inst_offset));
2551 hppa_ldo (code, inst->inst_offset, inst->inst_basereg, hppa_r1);
2552 hppa_fstdx (code, ainfo->reg, hppa_r0, hppa_r1);
2553 break;
2555 case ArgOnStack:
2556 DEBUG (printf ("Argument %d copied from [%s - %d] to [%s + %d] (size=%d)\n", pos, mono_arch_regname (hppa_r3), -ainfo->offset, mono_arch_regname (inst->inst_basereg), inst->inst_offset, ainfo->size));
2557 if (inst->inst_offset != ainfo->offset ||
2558 inst->inst_basereg != hppa_r3)
2559 code = emit_memcpy (code, inst->inst_offset, inst->inst_basereg, ainfo->offset, hppa_r3, ainfo->size);
2560 break;
2562 default:
2563 g_assert_not_reached ();
2564 }
2565 }
2567 pos++;
2568 }
2571 if (tracing)
2572 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
2574 if (getenv("HPPA_BREAK")) {
2575 *(guint32*)code = 0x00010004;
2576 code += 4;
2577 }
2579 cfg->code_len = code - cfg->native_code;
2580 g_assert (cfg->code_len < cfg->code_size);
2581 g_free (cinfo);
2583 DEBUG_FUNC_EXIT();
2584 return code;
2585 }
2588 void
2589 mono_arch_emit_epilog (MonoCompile *cfg)
2590 {
2591 MonoMethod *method = cfg->method;
2592 MonoMethodSignature *sig;
2593 guint32 *code;
2594 int max_epilog_size = 16 + 20 * 4;
2595 int pos;
2597 DEBUG_FUNC_ENTER();
2598 sig = mono_method_signature (cfg->method);
2599 if (cfg->method->save_lmf)
2600 max_epilog_size += 128;
2602 if (mono_jit_trace_calls != NULL)
2603 max_epilog_size += 50;
2605 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
2606 max_epilog_size += 50;
2608 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
2609 cfg->code_size *= 2;
2610 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2611 cfg->stat_code_reallocs++;
2612 }
2614 code = (guint32*)(cfg->native_code + cfg->code_len);
2616 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
2617 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
2619 pos = HPPA_STACK_LMF_OFFSET;
2620 if (cfg->method->save_lmf) {
2621 int reg;
2622 hppa_ldo (code, pos, hppa_r3, hppa_r22);
2623 hppa_ldw (code, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r22, hppa_r21);
2624 hppa_ldw (code, G_STRUCT_OFFSET(MonoLMF, lmf_addr), hppa_r22, hppa_r20);
2625 hppa_stw (code, hppa_r21, G_STRUCT_OFFSET(MonoLMF, previous_lmf), hppa_r20);
2627 pos += G_STRUCT_OFFSET(MonoLMF, regs) + sizeof (gulong);
2628 /* We skip the restore of r3 here, it is restored from the
2629 * stack anyway. This makes the code a bit easier.
2630 */
2631 for (reg = 4; reg < 31; reg++) {
2632 if (HPPA_IS_SAVED_GREG (reg)) {
2633 hppa_ldw (code, pos, hppa_r3, reg);
2634 pos += sizeof(gulong);
2635 }
2636 }
2638 pos = ALIGN_TO (pos, sizeof (double));
2639 hppa_set (code, pos, hppa_r1);
2640 for (reg = 0; reg < 31; reg++) {
2641 if (HPPA_IS_SAVED_FREG (reg)) {
2642 hppa_flddx (code, hppa_r1, hppa_r3, reg);
2643 hppa_ldo (code, sizeof (double), hppa_r1, hppa_r1);
2644 pos += sizeof (double);
2645 }
2646 }
2647 } else {
2648 guint32 mask = cfg->used_int_regs & MONO_ARCH_CALLEE_SAVED_REGS;
2649 int i;
2650 for (i = 0; i < 32; i++) {
2651 if (i == hppa_r3)
2652 continue;
2653 if ((1 << i) & mask) {
2654 hppa_ldw (code, pos, hppa_r3, i);
2655 pos += sizeof (gulong);
2656 }
2657 }
2658 }
2660 if (sig->ret->type != MONO_TYPE_VOID &&
2661 mono_type_to_stind (sig->ret) == CEE_STOBJ) {
2662 CallInfo *cinfo = get_call_info (sig, sig->pinvoke);
2664 switch (cinfo->ret.storage) {
2665 case ArgInIReg:
2666 hppa_ldw (code, cfg->ret->inst_offset, hppa_sp, hppa_r28);
2667 hppa_ldw (code, 0, hppa_r28, hppa_r28);
2668 break;
2669 case ArgInIRegPair:
2670 hppa_ldw (code, cfg->ret->inst_offset, hppa_sp, hppa_r28);
2671 hppa_ldw (code, 4, hppa_r28, hppa_r29);
2672 hppa_ldw (code, 0, hppa_r28, hppa_r28);
2673 break;
2674 case ArgOnStack:
2675 /* Nothing to do */
2676 break;
2677 default:
2678 g_assert_not_reached ();
2679 }
2680 g_free (cinfo);
2681 }
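/* Return sequence: reload r2 from -20 off the frame base (r3), move sp back
 * to r3+64, branch through r2, and let the ldwm in the delay slot restore the
 * caller's r3 while popping the remainder of the frame (the HPPA stack grows
 * upward). */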
2683 if (1 || cfg->flags & MONO_CFG_HAS_CALLS)
2684 hppa_ldw (code, -20, hppa_r3, hppa_r2);
2685 hppa_ldo (code, 64, hppa_r3, hppa_sp);
2686 hppa_bv (code, hppa_r0, hppa_r2);
2687 hppa_ldwm (code, -64, hppa_sp, hppa_r3);
2689 cfg->code_len = (guint8*)code - cfg->native_code;
2691 g_assert (cfg->code_len < cfg->code_size);
2692 DEBUG_FUNC_EXIT();
2693 }
2695 /* remove once throw_exception_by_name is eliminated */
2696 static int
2697 exception_id_by_name (const char *name)
2698 {
2699 if (strcmp (name, "IndexOutOfRangeException") == 0)
2700 return MONO_EXC_INDEX_OUT_OF_RANGE;
2701 if (strcmp (name, "OverflowException") == 0)
2702 return MONO_EXC_OVERFLOW;
2703 if (strcmp (name, "ArithmeticException") == 0)
2704 return MONO_EXC_ARITHMETIC;
2705 if (strcmp (name, "DivideByZeroException") == 0)
2706 return MONO_EXC_DIVIDE_BY_ZERO;
2707 if (strcmp (name, "InvalidCastException") == 0)
2708 return MONO_EXC_INVALID_CAST;
2709 if (strcmp (name, "NullReferenceException") == 0)
2710 return MONO_EXC_NULL_REF;
2711 if (strcmp (name, "ArrayTypeMismatchException") == 0)
2712 return MONO_EXC_ARRAY_TYPE_MISMATCH;
2713 g_error ("Unknown intrinsic exception %s\n", name);
2714 return 0;
2715 }
2717 void
2718 mono_arch_emit_exceptions (MonoCompile *cfg)
2719 {
2720 MonoJumpInfo *patch_info;
2721 int i;
2722 guint8 *code;
2723 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
2724 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
2725 int max_epilog_size = 50;
2727 DEBUG_FUNC_ENTER();
2729 /* count the number of exception infos */
2731 /*
2732 * make sure we have enough space for exceptions
2733 */
2734 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2735 switch (patch_info->type) {
2736 case MONO_PATCH_INFO_BB_OVF:
2737 g_assert_not_reached ();
2738 break;
2740 case MONO_PATCH_INFO_EXC_OVF: {
2741 const MonoOvfJump *ovfj = patch_info->data.target;
2742 max_epilog_size += 8;
2743 i = exception_id_by_name (ovfj->data.exception);
2744 if (!exc_throw_found [i]) {
2745 max_epilog_size += 24;
2746 exc_throw_found [i] = TRUE;
2747 }
2748 break;
2749 }
2751 case MONO_PATCH_INFO_EXC:
2752 i = exception_id_by_name (patch_info->data.target);
2753 if (!exc_throw_found [i]) {
2754 max_epilog_size += 24;
2755 exc_throw_found [i] = TRUE;
2756 }
2757 break;
2759 default:
2760 break;
2761 }
2762 }
2764 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
2765 cfg->code_size *= 2;
2766 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2767 cfg->stat_code_reallocs++;
2768 }
2770 code = cfg->native_code + cfg->code_len;
2772 /* add code to raise exceptions */
2773 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2774 switch (patch_info->type) {
2775 case MONO_PATCH_INFO_BB_OVF: {
2776 /* TODO */
2777 break;
2778 }
2779 case MONO_PATCH_INFO_EXC_OVF: {
2780 const MonoOvfJump *ovfj = patch_info->data.target;
2781 MonoJumpInfo *newji;
2782 unsigned char *ip = patch_info->ip.i + cfg->native_code;
2783 unsigned char *stub = code;
2785 /* Patch original call, point it at the stub */
2786 hppa_patch ((guint32 *)ip, code);
2788 /* Write the stub */
2789 /* SUBTLE: this has to be PIC, because the code block
2790 * can be relocated
2791 */
2792 hppa_bl_n (code, 8, hppa_r0);
2793 hppa_nop (code);
2795 /* Add a patch info to patch the stub to point to the exception code */
2796 newji = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
2797 newji->type = MONO_PATCH_INFO_EXC;
2798 newji->ip.i = stub - cfg->native_code;
2799 newji->data.target = ovfj->data.exception;
2800 newji->next = patch_info->next;
2801 patch_info->next = newji;
2802 break;
2803 }
2804 case MONO_PATCH_INFO_EXC: {
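/* The throw sequence for a given exception type is emitted only once; later
 * throw sites for the same type are patched to branch to the sequence already
 * recorded in exc_throw_pos. */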
2805 unsigned char *ip = patch_info->ip.i + cfg->native_code;
2806 i = exception_id_by_name (patch_info->data.target);
2807 if (exc_throw_pos [i]) {
2808 hppa_patch ((guint32 *)ip, exc_throw_pos [i]);
2809 patch_info->type = MONO_PATCH_INFO_NONE;
2810 break;
2811 } else {
2812 exc_throw_pos [i] = code;
2813 }
2814 hppa_patch ((guint32 *)ip, code);
2815 hppa_set (code, patch_info->data.target, hppa_r26);
2816 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
2817 patch_info->data.name = "mono_arch_throw_exception_by_name";
2818 patch_info->ip.i = code - cfg->native_code;
2820 /* Assume the caller has set r2, we can't set it
2821 * here based on ip, because the caller may
2822 * be relocated (also the "ip" may be from an overflow
2823 * stub)
2824 */
2825 hppa_ldil (code, 0, hppa_r1);
2826 hppa_ldo (code, 0, hppa_r1, hppa_r1);
2827 hppa_bv (code, hppa_r0, hppa_r1);
2828 hppa_nop (code);
2829 break;
2830 }
2831 default:
2832 /* do nothing */
2833 break;
2834 }
2835 }
2837 cfg->code_len = code - cfg->native_code;
2839 g_assert (cfg->code_len < cfg->code_size);
2840 DEBUG_FUNC_EXIT();
2841 }
2843 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2845 #error "--with-sigaltstack=yes not supported on hppa"
2847 #endif
2849 void
2850 mono_arch_finish_init (void)
2851 {
2852 }
2854 void
2855 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
2856 {
2857 }
2859 void
2860 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
2861 {
2862 /* add the this argument */
2863 if (this_reg != -1) {
2864 MonoInst *this;
2865 MONO_INST_NEW (cfg, this, OP_MOVE);
2866 this->type = this_type;
2867 this->sreg1 = this_reg;
2868 this->dreg = mono_alloc_ireg (cfg);
2869 mono_bblock_add_inst (cfg->cbb, this);
2870 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, hppa_r26, FALSE);
2871 }
2873 if (vt_reg != -1) {
2874 MonoInst *vtarg;
2875 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2876 vtarg->type = STACK_MP;
2877 vtarg->sreg1 = vt_reg;
2878 vtarg->dreg = mono_alloc_ireg (cfg);
2879 mono_bblock_add_inst (cfg->cbb, vtarg);
2880 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, hppa_r28, FALSE);
2881 }
2882 }
2885 MonoInst*
2886 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
2887 {
2888 MonoInst *ins = NULL;
2889 DEBUG_FUNC_ENTER();
2890 DEBUG_FUNC_EXIT();
2892 return ins;
2893 }
2895 /*
2896 * mono_arch_get_argument_info:
2897 * @csig: a method signature
2898 * @param_count: the number of parameters to consider
2899 * @arg_info: an array to store the result infos
2901 * Gathers information on parameters such as size, alignment and
2902 * padding. arg_info should be large enough to hold param_count + 1 entries.
2904 * Returns the size of the activation frame.
2905 */
2906 int
2907 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
2908 {
2909 int k, align;
2910 CallInfo *cinfo;
2911 ArgInfo *ainfo;
2913 DEBUG_FUNC_ENTER();
2914 cinfo = get_call_info (csig, FALSE);
2916 if (csig->hasthis) {
2917 ainfo = &cinfo->args [0];
2918 arg_info [0].offset = ainfo->offset;
2919 }
2921 for (k = 0; k < param_count; k++) {
2922 ainfo = &cinfo->args [k + csig->hasthis];
2924 arg_info [k + 1].offset = ainfo->offset;
2925 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
2926 }
2928 g_free (cinfo);
2929 DEBUG_FUNC_EXIT();
2930 }
2932 gboolean
2933 mono_arch_print_tree (MonoInst *tree, int arity)
2934 {
2935 return 0;
2936 }
2938 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
2939 {
2940 return NULL;
2941 }
2943 gpointer
2944 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
2945 {
2946 /* FIXME: implement */
2947 g_assert_not_reached ();