2 * mini-hppa.c: HPPA backend for the Mono code generator
4 * Copyright (c) 2007 Randolph Chung
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
33 #include <mono/metadata/appdomain.h>
34 #include <mono/metadata/debug-helpers.h>
35 #include <mono/metadata/tokentype.h>
36 #include <mono/utils/mono-math.h>
38 #include "mini-hppa.h"
/* Round (val) up to the next multiple of (align); align must be a power of two
 * for the mask trick below to be correct. */
42 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Size (64 KiB) of the alternate signal stack. */
43 #define SIGNAL_STACK_SIZE (64 * 1024)
/* Function entry/exit tracing hooks; the printf calls are commented out so
 * these expand to nothing in normal builds — uncomment to trace. */
46 #define DEBUG_FUNC_ENTER() // printf("Entering %s\n", __FUNCTION__)
47 #define DEBUG_FUNC_EXIT() // printf("Exiting %s\n", __FUNCTION__)
50 branch_b0_table
[] = {
51 TRUE
, /* OP_HPPA_BEQ */
52 FALSE
, /* OP_HPPA_BGE */
53 FALSE
, /* OP_HPPA_BGT */
54 TRUE
, /* OP_HPPA_BLE */
55 TRUE
, /* OP_HPPA_BLT */
56 FALSE
, /* OP_HPPA_BNE */
57 FALSE
, /* OP_HPPA_BGE_UN */
58 FALSE
, /* OP_HPPA_BGT_UN */
59 TRUE
, /* OP_HPPA_BLE_UN */
60 TRUE
, /* OP_HPPA_BLT_UN */
64 branch_b1_table
[] = {
65 HPPA_CMP_COND_EQ
, /* OP_HPPA_BEQ */
66 HPPA_CMP_COND_SLT
, /* OP_HPPA_BGE */
67 HPPA_CMP_COND_SLE
, /* OP_HPPA_BGT */
68 HPPA_CMP_COND_SLE
, /* OP_HPPA_BLE */
69 HPPA_CMP_COND_SLT
, /* OP_HPPA_BLT */
70 HPPA_CMP_COND_EQ
, /* OP_HPPA_BNE_UN */
71 HPPA_CMP_COND_ULT
, /* OP_HPPA_BGE_UN */
72 HPPA_CMP_COND_ULE
, /* OP_HPPA_BGT_UN */
73 HPPA_CMP_COND_ULE
, /* OP_HPPA_BLE_UN */
74 HPPA_CMP_COND_ULT
, /* OP_HPPA_BLT_UN */
77 /* Note that these are inverted from the OP_xxx, because we nullify
78 * the branch if the condition is met
81 float_branch_table
[] = {
95 float_ceq_table
[] = {
104 * Branches have short (14 or 17 bit) targets on HPPA. To make longer jumps,
105 * we will need to rely on stubs - basically we create stub structures in
106 * the epilogue that uses a long branch to the destination, and any short
107 * jumps inside a method that cannot reach the destination directly will
108 * branch first to the stub.
110 typedef struct MonoOvfJump
{
113 const char *exception
;
118 /* Create a literal 0.0 double for FNEG */
/* NOTE(review): presumably referenced as the addressable 0.0 operand when the
 * backend emits a float negation — confirm at the FNEG emission site. */
119 double hppa_zero
= 0;
122 mono_arch_regname (int reg
)
124 static const char * rnames
[] = {
125 "hppa_r0", "hppa_r1", "hppa_rp", "hppa_r3", "hppa_r4",
126 "hppa_r5", "hppa_r6", "hppa_r7", "hppa_r8", "hppa_r9",
127 "hppa_r10", "hppa_r11", "hppa_r12", "hppa_r13", "hppa_r14",
128 "hppa_r15", "hppa_r16", "hppa_r17", "hppa_r18", "hppa_r19",
129 "hppa_r20", "hppa_r21", "hppa_r22", "hppa_r23", "hppa_r24",
130 "hppa_r25", "hppa_r26", "hppa_r27", "hppa_r28", "hppa_r29",
131 "hppa_sp", "hppa_r31"
133 if (reg
>= 0 && reg
< MONO_MAX_IREGS
)
139 mono_arch_fregname (int reg
)
141 static const char *rnames
[] = {
142 "hppa_fr0", "hppa_fr1", "hppa_fr2", "hppa_fr3", "hppa_fr4",
143 "hppa_fr5", "hppa_fr6", "hppa_fr7", "hppa_fr8", "hppa_fr9",
144 "hppa_fr10", "hppa_fr11", "hppa_fr12", "hppa_fr13", "hppa_fr14",
145 "hppa_fr15", "hppa_fr16", "hppa_fr17", "hppa_fr18", "hppa_fr19",
146 "hppa_fr20", "hppa_fr21", "hppa_fr22", "hppa_fr23", "hppa_fr24",
147 "hppa_fr25", "hppa_fr26", "hppa_fr27", "hppa_fr28", "hppa_fr29",
148 "hppa_fr30", "hppa_fr31",
151 if (reg
>= 0 && reg
< MONO_MAX_FREGS
)
158 * Initialize the cpu to execute managed code.
161 mono_arch_cpu_init (void)
164 mono_arch_cpu_optimizations(&dummy
);
168 * Initialize architecture specific code.
171 mono_arch_init (void)
176 * Cleanup architecture specific code.
179 mono_arch_cleanup (void)
184 * This function returns the optimizations supported on this cpu.
187 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
195 * This function test for all SIMD functions supported.
197 * Returns a bitmask corresponding to all supported versions.
201 mono_arch_cpu_enumerate_simd_versions (void)
203 /* SIMD is currently unimplemented */
208 mono_arch_flush_icache (guint8
*code
, gint size
)
210 guint8
* p
= (guint8
*)((guint32
)code
& ~(0x3f));
211 guint8
* end
= (guint8
*)((guint32
)code
+ size
);
213 __asm__
__volatile__ ("fdc %%r0(%%sr3, %0)\n"
215 "fic %%r0(%%sr3, %0)\n"
218 p
+= 32; /* can be 64 on pa20 cpus */
223 mono_arch_flush_register_windows (void)
225 /* No register windows on hppa */
254 #define ARGS_OFFSET 36
257 add_parameter (CallInfo
*cinfo
, ArgInfo
*ainfo
, MonoType
*type
)
259 int is_fp
= (type
->type
== MONO_TYPE_R4
|| type
->type
== MONO_TYPE_R8
);
264 ainfo
->size
= mono_type_size (type
, &align
);
265 ainfo
->type
= type
->type
;
267 if (ainfo
->size
<= 4) {
268 cinfo
->stack_usage
+= 4;
269 ainfo
->offset
= cinfo
->stack_usage
- (4 - ainfo
->size
);
271 else if (ainfo
->size
<= 8)
273 cinfo
->stack_usage
+= 8;
274 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, 8);
275 ainfo
->offset
= cinfo
->stack_usage
- (8 - ainfo
->size
);
279 cinfo
->stack_usage
+= ainfo
->size
;
280 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, align
);
281 ainfo
->offset
= cinfo
->stack_usage
;
284 ofs
= (ALIGN_TO (ainfo
->offset
, 4) - ARGS_OFFSET
) / 4;
285 if (ofs
< PARAM_REGS
) {
287 if (ainfo
->size
<= 4)
288 ainfo
->storage
= ArgInIReg
;
290 ainfo
->storage
= ArgInIRegPair
;
291 ainfo
->reg
= hppa_r26
- ofs
;
292 } else if (type
->type
== MONO_TYPE_R4
) {
293 ainfo
->storage
= ArgInFReg
;
294 ainfo
->reg
= hppa_fr4
+ ofs
;
295 } else { /* type->type == MONO_TYPE_R8 */
296 ainfo
->storage
= ArgInDReg
;
297 ainfo
->reg
= hppa_fr4
+ ofs
;
301 /* frame pointer based offset */
302 ainfo
->reg
= hppa_r3
;
303 ainfo
->storage
= ArgOnStack
;
306 /* All offsets are negative relative to the frame pointer */
307 ainfo
->offset
= -ainfo
->offset
;
313 analyze_return (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
320 size
= mono_type_size (type
, &align
);
322 /* ref: mono_type_to_stind */
323 cinfo
->ret
.type
= type
->type
;
325 cinfo
->ret
.storage
= ArgInIReg
;
326 cinfo
->ret
.reg
= hppa_r28
;
329 switch (type
->type
) {
332 case MONO_TYPE_BOOLEAN
:
343 case MONO_TYPE_FNPTR
:
344 case MONO_TYPE_CLASS
:
345 case MONO_TYPE_STRING
:
346 case MONO_TYPE_OBJECT
:
347 case MONO_TYPE_SZARRAY
:
348 case MONO_TYPE_ARRAY
:
349 cinfo
->ret
.storage
= ArgInIReg
;
350 cinfo
->ret
.reg
= hppa_r28
;
354 cinfo
->ret
.storage
= ArgInIRegPair
;
355 cinfo
->ret
.reg
= hppa_r28
;
358 cinfo
->ret
.storage
= ArgInFReg
;
359 cinfo
->ret
.reg
= hppa_fr4
;
362 cinfo
->ret
.storage
= ArgInDReg
;
363 cinfo
->ret
.reg
= hppa_fr4
;
365 case MONO_TYPE_GENERICINST
:
366 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
369 case MONO_TYPE_VALUETYPE
:
370 if (type
->data
.klass
->enumtype
) {
371 type
= mono_class_enum_basetype (type
->data
.klass
);
375 case MONO_TYPE_TYPEDBYREF
:
376 cinfo
->struct_return
= 1;
377 /* cinfo->ret.storage tells us how the ABI expects
378 * the parameter to be returned
381 cinfo
->ret
.storage
= ArgInIReg
;
382 cinfo
->ret
.reg
= hppa_r28
;
383 } else if (size
<= 8) {
384 cinfo
->ret
.storage
= ArgInIRegPair
;
385 cinfo
->ret
.reg
= hppa_r28
;
387 cinfo
->ret
.storage
= ArgOnStack
;
388 cinfo
->ret
.reg
= hppa_sp
;
391 /* We always allocate stack space for this because the
392 * arch-indep code expects us to
394 cinfo
->stack_usage
+= size
;
395 cinfo
->stack_usage
= ALIGN_TO (cinfo
->stack_usage
, align
);
396 cinfo
->ret
.offset
= -cinfo
->stack_usage
;
400 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
408 * Obtain information about a call according to the calling convention.
411 get_call_info (MonoMethodSignature
*sig
, gboolean is_pinvoke
)
414 int n
= sig
->hasthis
+ sig
->param_count
;
420 ptrtype
.type
= MONO_TYPE_PTR
;
423 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
425 /* The area below ARGS_OFFSET is the linkage area... */
426 cinfo
->stack_usage
= ARGS_OFFSET
- 4;
427 /* -4, because the first argument will allocate the area it needs */
431 add_parameter (cinfo
, cinfo
->args
+ 0, &ptrtype
);
432 DEBUG (printf ("param <this>: assigned to reg %s offset %d\n", mono_arch_regname (cinfo
->args
[0].reg
), cinfo
->args
[0].offset
));
435 /* TODO: What to do with varargs? */
437 for (i
= 0; i
< sig
->param_count
; ++i
) {
438 ArgInfo
*ainfo
= &cinfo
->args
[sig
->hasthis
+ i
];
439 if (sig
->params
[i
]->byref
)
442 type
= mono_type_get_underlying_type (sig
->params
[i
]);
443 add_parameter (cinfo
, ainfo
, type
);
445 DEBUG (printf ("param %d: type %d size %d assigned to reg %s offset %d\n", i
, type
->type
, mono_type_size (type
, &dummy
), mono_arch_regname (ainfo
->reg
), ainfo
->offset
));
448 analyze_return (cinfo
, sig
);
455 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
461 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
462 MonoInst
*ins
= cfg
->varinfo
[i
];
463 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
466 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
469 if ((ins
->flags
& (MONO_INST_IS_DEAD
|MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) ||
470 (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
473 if (mono_is_regsize_var (ins
->inst_vtype
)) {
474 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
475 g_assert (i
== vmv
->idx
);
476 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
485 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
490 /* r3 is sometimes used as our frame pointer, so don't allocate it
491 * r19 is the GOT pointer, don't allocate it either
495 for (i
= 4; i
<= 18; i
++)
496 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (i
));
503 * mono_arch_regalloc_cost:
505 * Return the cost, in number of memory references, of the action of
506 * allocating the variable VMV into a register during global register
510 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
517 * Set var information according to the calling convention.
518 * The locals var stuff should most likely be split in another method.
520 * updates m->stack_offset based on the amount of stack space needed for
524 mono_arch_allocate_vars (MonoCompile
*m
)
526 MonoMethodSignature
*sig
;
527 MonoMethodHeader
*header
;
529 int i
, offset
, size
, align
, curinst
;
535 m
->flags
|= MONO_CFG_HAS_SPILLUP
;
539 sig
= mono_method_signature (m
->method
);
540 DEBUG (printf ("Allocating locals - incoming params:\n"));
541 cinfo
= get_call_info (sig
, FALSE
);
544 * We use the ABI calling conventions for managed code as well.
546 if (m
->flags
& MONO_CFG_HAS_ALLOCA
) {
548 m
->used_int_regs
|= 1 << hppa_r4
;
553 /* Before this function is called, we would have looked at all
554 * calls from this method and figured out how much space is needed
555 * for the param area.
557 * Locals are allocated backwards, right before the param area
559 /* TODO: in some cases we don't need the frame pointer... */
560 m
->frame_reg
= hppa_r3
;
561 offset
= m
->param_area
;
563 /* Return values can be passed back either in four ways:
564 * r28 is used for data <= 4 bytes (32-bit ABI)
565 * r28/r29 are used for data >4 && <= 8 bytes
566 * fr4 is used for floating point data
567 * data larger than 8 bytes is returned on the stack pointed to
570 * This code needs to be in sync with how CEE_RET is handled
571 * in mono_method_to_ir (). In some cases when we return small
572 * structs, the ABI specifies that they should be returned in
573 * registers, but the code in mono_method_to_ir () always emits
574 * a memcpy for valuetype returns, so we need to make sure we
575 * allocate space on the stack for this copy.
577 if (cinfo
->struct_return
) {
578 /* this is used to stash the incoming r28 pointer */
579 offset
+= sizeof (gpointer
);
580 m
->ret
->opcode
= OP_REGOFFSET
;
581 m
->ret
->inst_basereg
= stack_ptr
;
582 m
->ret
->inst_offset
= -offset
;
583 } else if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
584 m
->ret
->opcode
= OP_REGVAR
;
585 m
->ret
->inst_c0
= cinfo
->ret
.reg
;
588 curinst
= m
->locals_start
;
589 for (i
= curinst
; i
< m
->num_varinfo
; ++i
) {
590 inst
= m
->varinfo
[i
];
592 if (inst
->opcode
== OP_REGVAR
) {
593 DEBUG (printf ("allocating local %d to %s\n", i
, mono_arch_regname (inst
->dreg
)));
597 if (inst
->flags
& MONO_INST_IS_DEAD
)
600 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
601 * pinvoke wrappers when they call functions returning structure */
602 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (inst
->inst_vtype
) && inst
->inst_vtype
->type
!= MONO_TYPE_TYPEDBYREF
)
603 size
= mono_class_native_size (inst
->inst_vtype
->data
.klass
, &align
);
605 size
= mini_type_stack_size (cfg
->generic_sharing_context
, inst
->inst_vtype
, &align
);
608 * This is needed since structures containing doubles must be doubleword
610 * FIXME: Do this only if needed.
612 if (MONO_TYPE_ISSTRUCT (inst
->inst_vtype
))
616 * variables are accessed as negative offsets from hppa_sp
618 inst
->opcode
= OP_REGOFFSET
;
619 inst
->inst_basereg
= stack_ptr
;
621 offset
= ALIGN_TO (offset
, align
);
622 inst
->inst_offset
= -offset
;
624 DEBUG (printf ("allocating local %d (size = %d) to [%s - %d]\n", i
, size
, mono_arch_regname (inst
->inst_basereg
), -inst
->inst_offset
));
627 if (sig
->call_convention
== MONO_CALL_VARARG
) {
631 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
632 ArgInfo
*ainfo
= &cinfo
->args
[i
];
634 if (inst
->opcode
!= OP_REGVAR
) {
635 switch (ainfo
->storage
) {
640 /* Currently mono requests all incoming registers
641 * be assigned to a stack location :-(
644 if (!(inst
->flags
& (MONO_INST_VOLATILE
| MONO_INST_INDIRECT
))) {
645 inst
->opcode
= OP_REGVAR
;
646 inst
->dreg
= ainfo
->reg
;
647 DEBUG (printf ("param %d in register %s\n", i
, mono_arch_regname (inst
->dreg
)));
653 inst
->opcode
= OP_REGOFFSET
;
654 inst
->inst_basereg
= hppa_r3
;
655 inst
->inst_offset
= ainfo
->offset
;
656 DEBUG (printf ("param %d stored on stack [%s - %d]\n", i
, mono_arch_regname (hppa_r3
), -inst
->inst_offset
));
662 m
->stack_offset
= offset
; /* Includes cfg->param_area */
669 * take the arguments and generate the arch-specific
670 * instructions to properly call the function in call.
671 * This includes pushing, moving arguments to the right register
674 * sets call->stack_usage and cfg->param_area
677 mono_arch_call_opcode (MonoCompile
*cfg
, MonoBasicBlock
* bb
, MonoCallInst
*call
, int is_virtual
)
680 MonoMethodSignature
*sig
;
686 DEBUG (printf ("is_virtual = %d\n", is_virtual
));
688 sig
= call
->signature
;
689 n
= sig
->param_count
+ sig
->hasthis
;
691 DEBUG (printf ("Calling method with %d parameters\n", n
));
693 cinfo
= get_call_info (sig
, sig
->pinvoke
);
696 g_assert (sig
->call_convention
!= MONO_CALL_VARARG
);
698 for (i
= 0; i
< n
; ++i
) {
699 ainfo
= &cinfo
->args
[i
];
701 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
705 if (is_virtual
&& i
== 0) {
706 /* the argument will be attached to the call instruction */
708 call
->used_iregs
|= 1 << ainfo
->reg
;
710 MONO_INST_NEW (cfg
, arg
, OP_OUTARG
);
712 arg
->cil_code
= in
->cil_code
;
714 arg
->inst_call
= call
;
715 arg
->type
= in
->type
;
717 /* prepend, we'll need to reverse them later */
718 arg
->next
= call
->out_args
;
719 call
->out_args
= arg
;
721 switch (ainfo
->storage
) {
723 case ArgInIRegPair
: {
724 MonoHPPAArgInfo
*ai
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoHPPAArgInfo
));
725 ai
->reg
= ainfo
->reg
;
726 ai
->size
= ainfo
->size
;
727 ai
->offset
= ainfo
->offset
;
729 arg
->backend
.data
= ai
;
731 call
->used_iregs
|= 1 << ainfo
->reg
;
732 if (ainfo
->storage
== ArgInIRegPair
)
733 call
->used_iregs
|= 1 << (ainfo
->reg
+ 1);
734 if (ainfo
->type
== MONO_TYPE_VALUETYPE
)
735 arg
->opcode
= OP_OUTARG_VT
;
739 MonoHPPAArgInfo
*ai
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoHPPAArgInfo
));
741 ai
->size
= ainfo
->size
;
742 ai
->offset
= ainfo
->offset
;
744 arg
->backend
.data
= ai
;
745 if (ainfo
->type
== MONO_TYPE_VALUETYPE
)
746 arg
->opcode
= OP_OUTARG_VT
;
748 arg
->opcode
= OP_OUTARG_MEMBASE
;
749 call
->used_iregs
|= 1 << ainfo
->reg
;
753 arg
->backend
.reg3
= ainfo
->reg
;
754 arg
->opcode
= OP_OUTARG_R4
;
755 call
->used_fregs
|= 1 << ainfo
->reg
;
758 arg
->backend
.reg3
= ainfo
->reg
;
759 arg
->opcode
= OP_OUTARG_R8
;
760 call
->used_fregs
|= 1 << ainfo
->reg
;
769 * Reverse the call->out_args list.
772 MonoInst
*prev
= NULL
, *list
= call
->out_args
, *next
;
779 call
->out_args
= prev
;
781 call
->stack_usage
= cinfo
->stack_usage
;
782 cfg
->param_area
= MAX (cfg
->param_area
, call
->stack_usage
);
783 cfg
->param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
785 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
794 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
799 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
806 insert_after_ins (MonoBasicBlock
*bb
, MonoInst
*ins
, MonoInst
*to_insert
)
810 bb
->code
= to_insert
;
811 to_insert
->next
= ins
;
813 to_insert
->next
= ins
->next
;
814 ins
->next
= to_insert
;
818 #define NEW_INS(cfg,dest,op) do { \
819 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
820 (dest)->opcode = (op); \
821 insert_after_ins (bb, last_ins, (dest)); \
825 map_to_reg_reg_op (int op
)
850 case OP_LOAD_MEMBASE
:
851 return OP_LOAD_MEMINDEX
;
852 case OP_LOADI4_MEMBASE
:
853 return OP_LOADI4_MEMINDEX
;
854 case OP_LOADU4_MEMBASE
:
855 return OP_LOADU4_MEMINDEX
;
856 case OP_LOADU1_MEMBASE
:
857 return OP_LOADU1_MEMINDEX
;
858 case OP_LOADI2_MEMBASE
:
859 return OP_LOADI2_MEMINDEX
;
860 case OP_LOADU2_MEMBASE
:
861 return OP_LOADU2_MEMINDEX
;
862 case OP_LOADI1_MEMBASE
:
863 return OP_LOADI1_MEMINDEX
;
864 case OP_LOADR4_MEMBASE
:
865 return OP_LOADR4_MEMINDEX
;
866 case OP_LOADR8_MEMBASE
:
867 return OP_LOADR8_MEMINDEX
;
868 case OP_STOREI1_MEMBASE_REG
:
869 return OP_STOREI1_MEMINDEX
;
870 case OP_STOREI2_MEMBASE_REG
:
871 return OP_STOREI2_MEMINDEX
;
872 case OP_STOREI4_MEMBASE_REG
:
873 return OP_STOREI4_MEMINDEX
;
874 case OP_STORE_MEMBASE_REG
:
875 return OP_STORE_MEMINDEX
;
876 case OP_STORER4_MEMBASE_REG
:
877 return OP_STORER4_MEMINDEX
;
878 case OP_STORER8_MEMBASE_REG
:
879 return OP_STORER8_MEMINDEX
;
880 case OP_STORE_MEMBASE_IMM
:
881 return OP_STORE_MEMBASE_REG
;
882 case OP_STOREI1_MEMBASE_IMM
:
883 return OP_STOREI1_MEMBASE_REG
;
884 case OP_STOREI2_MEMBASE_IMM
:
885 return OP_STOREI2_MEMBASE_REG
;
886 case OP_STOREI4_MEMBASE_IMM
:
887 return OP_STOREI4_MEMBASE_REG
;
889 g_assert_not_reached ();
893 * Remove from the instruction list the instructions that can't be
894 * represented with very simple instructions with no register
898 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
900 MonoInst
*ins
, *next
, *temp
, *last_ins
= NULL
;
903 MONO_BB_FOR_EACH_INS (bb
, ins
) {
905 switch (ins
->opcode
) {
908 if (!hppa_check_bits (ins
->inst_imm
, 11)) {
909 NEW_INS (cfg
, temp
, OP_ICONST
);
910 temp
->inst_c0
= ins
->inst_imm
;
911 temp
->dreg
= mono_alloc_ireg (cfg
);
912 ins
->sreg2
= temp
->dreg
;
913 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
918 if (!hppa_check_bits (ins
->inst_imm
, 11)) {
919 NEW_INS (cfg
, temp
, OP_ICONST
);
920 temp
->inst_c0
= ins
->inst_imm
;
921 temp
->dreg
= mono_alloc_ireg (cfg
);
922 ins
->sreg2
= temp
->dreg
;
923 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
928 if (ins
->inst_imm
== 1) {
929 ins
->opcode
= OP_MOVE
;
932 if (ins
->inst_imm
== 0) {
933 ins
->opcode
= OP_ICONST
;
937 imm
= mono_is_power_of_two (ins
->inst_imm
);
939 ins
->opcode
= OP_SHL_IMM
;
944 int tmp
= mono_alloc_ireg (cfg
);
945 NEW_INS (cfg
, temp
, OP_ICONST
);
946 temp
->inst_c0
= ins
->inst_c0
;
949 ins
->opcode
= CEE_MUL
;
951 /* Need to rewrite the CEE_MUL too... */
957 int freg1
= mono_alloc_freg (cfg
);
958 int freg2
= mono_alloc_freg (cfg
);
960 NEW_INS(cfg
, temp
, OP_STORE_MEMBASE_REG
);
961 temp
->sreg1
= ins
->sreg1
;
962 temp
->inst_destbasereg
= hppa_sp
;
963 temp
->inst_offset
= -16;
965 NEW_INS(cfg
, temp
, OP_LOADR4_MEMBASE
);
967 temp
->inst_basereg
= hppa_sp
;
968 temp
->inst_offset
= -16;
970 NEW_INS(cfg
, temp
, OP_STORE_MEMBASE_REG
);
971 temp
->sreg1
= ins
->sreg2
;
972 temp
->inst_destbasereg
= hppa_sp
;
973 temp
->inst_offset
= -16;
975 NEW_INS(cfg
, temp
, OP_LOADR4_MEMBASE
);
977 temp
->inst_basereg
= hppa_sp
;
978 temp
->inst_offset
= -16;
980 NEW_INS (cfg
, temp
, OP_HPPA_XMPYU
);
985 NEW_INS(cfg
, temp
, OP_HPPA_STORER4_RIGHT
);
987 temp
->inst_destbasereg
= hppa_sp
;
988 temp
->inst_offset
= -16;
990 ins
->opcode
= OP_LOAD_MEMBASE
;
991 ins
->inst_basereg
= hppa_sp
;
992 ins
->inst_offset
= -16;
1001 bb
->last_ins
= last_ins
;
1002 bb
->max_vreg
= cfg
->next_vreg
;
1007 hppa_patch (guint32
*code
, const gpointer target
)
1009 guint32 ins
= *code
;
1010 gint32 val
= (gint32
)target
;
1011 gint32 disp
= (val
- (gint32
)code
- 8) >> 2;
1014 DEBUG (printf ("patching 0x%08x (0x%08x) to point to 0x%08x (disp = %d)\n", code
, ins
, val
, disp
));
1016 switch (*code
>> 26) {
1017 case 0x08: /* ldil, next insn can be a ldo, ldw, or ble */
1018 *code
= *code
& ~0x1fffff;
1019 *code
= *code
| hppa_op_imm21 (hppa_lsel (val
));
1022 if ((*code
>> 26) == 0x0D) { /* ldo */
1023 *code
= *code
& ~0x3fff;
1024 *code
= *code
| hppa_op_imm14 (hppa_rsel (val
));
1025 } else if ((*code
>> 26) == 0x12) { /* ldw */
1026 *code
= *code
& ~0x3fff;
1027 *code
= *code
| hppa_op_imm14 (hppa_rsel (val
));
1028 } else if ((*code
>> 26) == 0x39) { /* ble */
1029 *code
= *code
& ~0x1f1ffd;
1030 *code
= *code
| hppa_op_imm17 (hppa_rsel (val
));
1040 if (!hppa_check_bits (disp
, 17))
1042 reg1
= (*code
>> 21) & 0x1f;
1043 *code
= (*code
& ~0x1f1ffd) | hppa_op_imm17(disp
);
1046 case 0x20: /* combt */
1047 case 0x22: /* combf */
1048 if (!hppa_check_bits (disp
>> 2, 12))
1050 *code
= (*code
& ~0x1ffd) | hppa_op_imm12(disp
);
1054 g_warning ("Unpatched opcode %x\n", *code
>> 26);
1060 g_warning ("cannot branch to target, insn is %08x, displacement is %d\n", (int)*code
, (int)disp
);
1061 g_assert_not_reached ();
1065 emit_float_to_int (MonoCompile
*cfg
, guint32
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
1067 /* sreg is a float, dreg is an integer reg. */
1068 hppa_fcnvfxt (code
, HPPA_FP_FMT_DBL
, HPPA_FP_FMT_SGL
, sreg
, sreg
);
1069 hppa_fstws (code
, sreg
, 0, -16, hppa_sp
);
1070 hppa_ldw (code
, -16, hppa_sp
, dreg
);
1073 hppa_extru (code
, dreg
, 31, 8, dreg
);
1075 hppa_extru (code
, dreg
, 31, 16, dreg
);
1078 hppa_extrs (code
, dreg
, 31, 8, dreg
);
1080 hppa_extrs (code
, dreg
, 31, 16, dreg
);
1085 /* Clobbers r1, r20, r21 */
1087 emit_memcpy (guint32
*code
, int doff
, int dreg
, int soff
, int sreg
, int size
)
1089 /* r20 is the destination */
1090 hppa_set (code
, doff
, hppa_r20
);
1091 hppa_add (code
, hppa_r20
, dreg
, hppa_r20
);
1093 /* r21 is the source */
1094 hppa_set (code
, soff
, hppa_r21
);
1095 hppa_add (code
, hppa_r21
, sreg
, hppa_r21
);
1098 hppa_ldw (code
, 0, hppa_r21
, hppa_r1
);
1099 hppa_stw (code
, hppa_r1
, 0, hppa_r20
);
1100 hppa_ldo (code
, 4, hppa_r21
, hppa_r21
);
1101 hppa_ldo (code
, 4, hppa_r20
, hppa_r20
);
1105 hppa_ldh (code
, 0, hppa_r21
, hppa_r1
);
1106 hppa_sth (code
, hppa_r1
, 0, hppa_r20
);
1107 hppa_ldo (code
, 2, hppa_r21
, hppa_r21
);
1108 hppa_ldo (code
, 2, hppa_r20
, hppa_r20
);
1112 hppa_ldb (code
, 0, hppa_r21
, hppa_r1
);
1113 hppa_stb (code
, hppa_r1
, 0, hppa_r20
);
1114 hppa_ldo (code
, 1, hppa_r21
, hppa_r21
);
1115 hppa_ldo (code
, 1, hppa_r20
, hppa_r20
);
1123 * mono_arch_get_vcall_slot_addr:
1125 * Determine the vtable slot used by a virtual call.
1128 mono_arch_get_vcall_slot_addr (guint8
*code8
, mgreg_t
*regs
)
1130 guint32
*code
= (guint32
*)((unsigned long)code8
& ~3);
1135 /* This is the special virtual call token */
1136 if (code
[-1] != 0x34000eee) /* ldo 0x777(r0),r0 */
1139 if ((code
[0] >> 26) == 0x39 && /* ble */
1140 (code
[-2] >> 26) == 0x12) { /* ldw */
1141 guint32 ldw
= code
[-2];
1142 guint32 reg
= (ldw
>> 21) & 0x1f;
1143 gint32 disp
= ((ldw
& 1) ? (-1 << 13) : 0) | ((ldw
& 0x3fff) >> 1);
1144 /* FIXME: we are not guaranteed that reg is saved in the LMF.
1145 * In fact, it probably isn't, since it is allocated as a
1146 * callee register. Right now just return an address; this
1147 * is sufficient for non-AOT operation
1149 // return (gpointer)((guint8*)regs [reg] + disp);
1153 g_assert_not_reached ();
1158 /* ins->dreg = *(ins->inst_desgbasereg + ins->inst_offset) */
1159 #define EMIT_LOAD_MEMBASE(ins, op) do { \
1160 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1161 hppa_set (code, ins->inst_offset, hppa_r1); \
1162 hppa_ ## op ## x (code, hppa_r1, ins->inst_basereg, ins->dreg); \
1165 hppa_ ## op (code, ins->inst_offset, ins->inst_basereg, ins->dreg); \
1169 #define EMIT_COND_BRANCH_FLAGS(ins,r1,r2,b0,b1) do {\
1171 hppa_combf (code, r1, r2, b1, 2); \
1173 hppa_combt (code, r1, r2, b1, 2); \
1175 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1176 hppa_bl (code, 0, hppa_r0); \
1180 #define EMIT_COND_BRANCH(ins,r1,r2,cond) EMIT_COND_BRANCH_FLAGS(ins, r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)])
1182 #define EMIT_FLOAT_COND_BRANCH_FLAGS(ins,r1,r2,b0) do {\
1183 hppa_fcmp (code, HPPA_FP_FMT_DBL, b0, r1, r2); \
1184 hppa_ftest (code, 0); \
1185 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1186 hppa_bl (code, 8, hppa_r0); \
1190 #define EMIT_FLOAT_COND_BRANCH(ins,r1,r2,cond) EMIT_FLOAT_COND_BRANCH_FLAGS(ins, r1, r2, float_branch_table [cond])
1192 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1,r2,b0,b1,exc_name) \
1194 MonoOvfJump *ovfj = mono_mempool_alloc (cfg->mempool, sizeof (MonoOvfJump)); \
1195 ovfj->data.exception = (exc_name); \
1196 ovfj->ip_offset = (guint8*)code - cfg->native_code; \
1197 hppa_bl (code, 8, hppa_r2); \
1198 hppa_depi (code, 0, 31, 2, hppa_r2); \
1199 hppa_ldo (code, 8, hppa_r2, hppa_r2); \
1201 hppa_combf (code, r1, r2, b1, 2); \
1203 hppa_combt (code, r1, r2, b1, 2); \
1205 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_OVF, ovfj); \
1206 hppa_bl (code, 0, hppa_r0); \
1210 #define EMIT_COND_SYSTEM_EXCEPTION(r1,r2,cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(r1, r2, branch_b0_table [(cond)], branch_b1_table [(cond)], (exc_name))
1212 /* TODO: MEM_INDEX_REG - cannot be r1 */
1213 #define MEM_INDEX_REG hppa_r31
1214 /* *(ins->inst_destbasereg + ins->inst_offset) = ins->inst_imm */
1215 #define EMIT_STORE_MEMBASE_IMM(ins, op) do { \
1217 if (ins->inst_imm == 0) \
1220 hppa_set (code, ins->inst_imm, hppa_r1); \
1223 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1224 hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
1225 hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
1226 hppa_ ## op (code, sreg, 0, MEM_INDEX_REG); \
1229 hppa_ ## op (code, sreg, ins->inst_offset, ins->inst_destbasereg); \
1233 /* *(ins->inst_destbasereg + ins->inst_offset) = ins->sreg1 */
1234 #define EMIT_STORE_MEMBASE_REG(ins, op) do { \
1235 if (!hppa_check_bits (ins->inst_offset, 14)) { \
1236 hppa_set (code, ins->inst_offset, MEM_INDEX_REG); \
1237 hppa_addl (code, ins->inst_destbasereg, MEM_INDEX_REG, MEM_INDEX_REG); \
1238 hppa_ ## op (code, ins->sreg1, 0, MEM_INDEX_REG); \
1241 hppa_ ## op (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg); \
1246 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1251 guint32
*code
= (guint32
*)(cfg
->native_code
+ cfg
->code_len
);
1252 MonoInst
*last_ins
= NULL
;
1258 if (cfg
->verbose_level
> 2)
1259 g_print ("[%s::%s] Basic block %d starting at offset 0x%x\n", cfg
->method
->klass
->name
, cfg
->method
->name
, bb
->block_num
, bb
->native_offset
);
1261 cpos
= bb
->max_offset
;
1263 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
1267 MONO_BB_FOR_EACH_INS (bb
, ins
) {
1270 offset
= (guint8
*)code
- cfg
->native_code
;
1272 spec
= ins_get_spec (ins
->opcode
);
1274 max_len
= ((guint8
*)spec
) [MONO_INST_LEN
];
1276 if (offset
> (cfg
->code_size
- max_len
- 16)) {
1277 cfg
->code_size
*= 2;
1278 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
1279 code
= (guint32
*)(cfg
->native_code
+ offset
);
1280 cfg
->stat_code_reallocs
++;
1282 code_start
= (guint8
*)code
;
1283 // if (ins->cil_code)
1284 // g_print ("cil code\n");
1285 mono_debug_record_line_number (cfg
, ins
, offset
);
1287 switch (ins
->opcode
) {
1288 case OP_RELAXED_NOP
:
1290 case OP_STOREI1_MEMBASE_IMM
:
1291 EMIT_STORE_MEMBASE_IMM (ins
, stb
);
1293 case OP_STOREI2_MEMBASE_IMM
:
1294 EMIT_STORE_MEMBASE_IMM (ins
, sth
);
1296 case OP_STORE_MEMBASE_IMM
:
1297 case OP_STOREI4_MEMBASE_IMM
:
1298 EMIT_STORE_MEMBASE_IMM (ins
, stw
);
1300 case OP_STOREI1_MEMBASE_REG
:
1301 EMIT_STORE_MEMBASE_REG (ins
, stb
);
1303 case OP_STOREI2_MEMBASE_REG
:
1304 EMIT_STORE_MEMBASE_REG (ins
, sth
);
1306 case OP_STORE_MEMBASE_REG
:
1307 case OP_STOREI4_MEMBASE_REG
:
1308 EMIT_STORE_MEMBASE_REG (ins
, stw
);
1310 case OP_LOADU1_MEMBASE
:
1311 EMIT_LOAD_MEMBASE (ins
, ldb
);
1313 case OP_LOADI1_MEMBASE
:
1314 EMIT_LOAD_MEMBASE (ins
, ldb
);
1315 hppa_extrs (code
, ins
->dreg
, 31, 8, ins
->dreg
);
1317 case OP_LOADU2_MEMBASE
:
1318 EMIT_LOAD_MEMBASE (ins
, ldh
);
1320 case OP_LOADI2_MEMBASE
:
1321 EMIT_LOAD_MEMBASE (ins
, ldh
);
1322 hppa_extrs (code
, ins
->dreg
, 31, 16, ins
->dreg
);
1324 case OP_LOAD_MEMBASE
:
1325 case OP_LOADI4_MEMBASE
:
1326 case OP_LOADU4_MEMBASE
:
1327 EMIT_LOAD_MEMBASE (ins
, ldw
);
1330 hppa_extrs (code
, ins
->sreg1
, 31, 8, ins
->dreg
);
1333 hppa_extrs (code
, ins
->sreg1
, 31, 16, ins
->dreg
);
1336 hppa_extru (code
, ins
->sreg1
, 31, 8, ins
->dreg
);
1339 hppa_extru (code
, ins
->sreg1
, 31, 16, ins
->dreg
);
1345 if (ins
->sreg1
!= ins
->dreg
)
1346 hppa_copy (code
, ins
->sreg1
, ins
->dreg
);
1349 hppa_copy (code
, ins
->sreg1
+ 1, ins
->dreg
);
1350 hppa_copy (code
, ins
->sreg1
, ins
->dreg
+ 1);
1354 /* break 4,8 - this is what gdb normally uses... */
1355 *code
++ = 0x00010004;
1359 hppa_add (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1362 hppa_addc (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1366 hppa_addi (code
, ins
->inst_imm
, ins
->sreg1
, ins
->dreg
);
1369 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1370 hppa_addc (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1372 case OP_HPPA_ADD_OVF
: {
1373 MonoOvfJump
*ovfj
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoOvfJump
));
1374 hppa_bl (code
, 8, hppa_r2
);
1375 hppa_depi (code
, 0, 31, 2, hppa_r2
);
1376 hppa_ldo (code
, 12, hppa_r2
, hppa_r2
);
1378 if (ins
->backend
.reg3
== CEE_ADD_OVF
)
1379 hppa_add_cond (code
, HPPA_ADD_COND_NSV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1381 hppa_add_cond (code
, HPPA_ADD_COND_NUV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1383 ovfj
->data
.exception
= "OverflowException";
1384 ovfj
->ip_offset
= (guint8
*)code
- cfg
->native_code
;
1385 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_EXC_OVF
, ovfj
);
1386 hppa_bl_n (code
, 8, hppa_r0
);
1389 case OP_HPPA_ADDC_OVF
: {
1390 MonoOvfJump
*ovfj
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoOvfJump
));
1391 hppa_bl (code
, 8, hppa_r2
);
1392 hppa_depi (code
, 0, 31, 2, hppa_r2
);
1393 hppa_ldo (code
, 12, hppa_r2
, hppa_r2
);
1395 if (ins
->backend
.reg3
== OP_LADD_OVF
)
1396 hppa_addc_cond (code
, HPPA_ADD_COND_NSV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1398 hppa_addc_cond (code
, HPPA_ADD_COND_NUV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1400 ovfj
->data
.exception
= "OverflowException";
1401 ovfj
->ip_offset
= (guint8
*)code
- cfg
->native_code
;
1402 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_EXC_OVF
, ovfj
);
1403 hppa_bl_n (code
, 8, hppa_r0
);
1408 hppa_sub (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1412 hppa_addi (code
, -ins
->inst_imm
, ins
->sreg1
, ins
->dreg
);
1415 hppa_subb (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1418 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1419 hppa_subb (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1421 case OP_HPPA_SUB_OVF
: {
1422 MonoOvfJump
*ovfj
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoOvfJump
));
1423 hppa_bl (code
, 8, hppa_r2
);
1424 hppa_depi (code
, 0, 31, 2, hppa_r2
);
1425 hppa_ldo (code
, 12, hppa_r2
, hppa_r2
);
1426 hppa_sub_cond (code
, HPPA_SUB_COND_NSV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1427 ovfj
->data
.exception
= "OverflowException";
1428 ovfj
->ip_offset
= (guint8
*)code
- cfg
->native_code
;
1429 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_EXC_OVF
, ovfj
);
1430 hppa_bl_n (code
, 8, hppa_r0
);
1433 case OP_HPPA_SUBB_OVF
: {
1434 MonoOvfJump
*ovfj
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoOvfJump
));
1435 hppa_bl (code
, 8, hppa_r2
);
1436 hppa_depi (code
, 0, 31, 2, hppa_r2
);
1437 hppa_ldo (code
, 12, hppa_r2
, hppa_r2
);
1439 hppa_subb_cond (code
, HPPA_SUB_COND_NSV
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1440 ovfj
->data
.exception
= "OverflowException";
1441 ovfj
->ip_offset
= (guint8
*)code
- cfg
->native_code
;
1442 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_EXC_OVF
, ovfj
);
1443 hppa_bl_n (code
, 8, hppa_r0
);
1448 hppa_and (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1451 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1452 hppa_and (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1456 hppa_or (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1460 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1461 hppa_or (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1465 hppa_xor (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1468 hppa_set (code
, ins
->inst_imm
, hppa_r1
);
1469 hppa_xor (code
, ins
->sreg1
, hppa_r1
, ins
->dreg
);
1472 if (ins
->sreg1
!= ins
->dreg
) {
1473 hppa_shl (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1476 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1477 hppa_shl (code
, hppa_r1
, ins
->sreg2
, ins
->dreg
);
1482 g_assert (ins
->inst_imm
< 32);
1483 if (ins
->sreg1
!= ins
->dreg
) {
1484 hppa_zdep (code
, ins
->sreg1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1487 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1488 hppa_zdep (code
, hppa_r1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1492 if (ins
->sreg1
!= ins
->dreg
) {
1493 hppa_shr (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1496 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1497 hppa_shr (code
, hppa_r1
, ins
->sreg2
, ins
->dreg
);
1501 g_assert (ins
->inst_imm
< 32);
1502 if (ins
->sreg1
!= ins
->dreg
) {
1503 hppa_extrs (code
, ins
->sreg1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1506 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1507 hppa_extrs (code
, hppa_r1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1511 g_assert (ins
->inst_imm
< 32);
1512 if (ins
->sreg1
!= ins
->dreg
) {
1513 hppa_extru (code
, ins
->sreg1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1516 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1517 hppa_extru (code
, hppa_r1
, 31-ins
->inst_imm
, 32-ins
->inst_imm
, ins
->dreg
);
1521 if (ins
->sreg1
!= ins
->dreg
) {
1522 hppa_lshr (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1525 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1526 hppa_lshr (code
, hppa_r1
, ins
->sreg2
, ins
->dreg
);
1530 hppa_not (code
, ins
->sreg1
, ins
->dreg
);
1533 hppa_subi (code
, 0, ins
->sreg1
, ins
->dreg
);
1538 /* Should have been rewritten using xmpyu */
1539 g_assert_not_reached ();
1542 if ((ins
->inst_c0
> 0 && ins
->inst_c0
>= (1 << 13)) ||
1543 (ins
->inst_c0
< 0 && ins
->inst_c0
< -(1 << 13))) {
1544 hppa_ldil (code
, hppa_lsel (ins
->inst_c0
), ins
->dreg
);
1545 hppa_ldo (code
, hppa_rsel (ins
->inst_c0
), ins
->dreg
, ins
->dreg
);
1547 hppa_ldo (code
, ins
->inst_c0
, hppa_r0
, ins
->dreg
);
1551 g_assert_not_reached ();
1553 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
1554 hppa_set_template (code, ins->dreg);
1556 g_warning ("unimplemented opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
1560 if (ins
->sreg1
!= ins
->dreg
)
1561 hppa_fcpy (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->dreg
);
1564 case OP_HPPA_OUTARG_R4CONST
:
1565 hppa_set (code
, (unsigned int)ins
->inst_p0
, hppa_r1
);
1566 hppa_fldwx (code
, hppa_r0
, hppa_r1
, ins
->dreg
, 0);
1569 case OP_HPPA_OUTARG_REGOFFSET
:
1570 hppa_ldo (code
, ins
->inst_offset
, ins
->inst_basereg
, ins
->dreg
);
1575 * Keep in sync with mono_arch_emit_epilog
1577 g_assert (!cfg
->method
->save_lmf
);
1578 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
1579 hppa_bl (code
, 8, hppa_r0
);
1582 /* ensure ins->sreg1 is not NULL */
1583 hppa_ldw (code
, 0, ins
->sreg1
, hppa_r1
);
1592 call
= (MonoCallInst
*)ins
;
1593 if (ins
->flags
& MONO_INST_HAS_METHOD
)
1594 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD
, call
->method
);
1596 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, call
->fptr
);
1597 hppa_ldil (code
, 0, hppa_r1
);
1598 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
1600 * We may have loaded an actual function address, or
1601 * it might be a plabel. Check to see if the plabel
1602 * bit is set, and load the actual fptr from it if
1605 hppa_bb_n (code
, HPPA_BIT_COND_MSB_CLR
, hppa_r1
, 30, 2);
1606 hppa_depi (code
, 0, 31, 2, hppa_r1
);
1607 hppa_ldw (code
, 4, hppa_r1
, hppa_r19
);
1608 hppa_ldw (code
, 0, hppa_r1
, hppa_r1
);
1609 hppa_ble (code
, 0, hppa_r1
);
1610 hppa_copy (code
, hppa_r31
, hppa_r2
);
1611 if (call
->signature
->ret
->type
== MONO_TYPE_R4
)
1612 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr4
, hppa_fr4
);
1617 case OP_VOIDCALL_REG
:
1619 call
= (MonoCallInst
*)ins
;
1620 g_assert (!call
->virtual);
1621 hppa_copy (code
, ins
->sreg1
, hppa_r1
);
1622 hppa_bb_n (code
, HPPA_BIT_COND_MSB_CLR
, hppa_r1
, 30, 2);
1623 hppa_depi (code
, 0, 31, 2, hppa_r1
);
1624 hppa_ldw (code
, 4, hppa_r1
, hppa_r19
);
1625 hppa_ldw (code
, 0, hppa_r1
, hppa_r1
);
1626 hppa_ble (code
, 0, hppa_r1
);
1627 hppa_copy (code
, hppa_r31
, hppa_r2
);
1628 if (call
->signature
->ret
->type
== MONO_TYPE_R4
)
1629 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr4
, hppa_fr4
);
1631 case OP_FCALL_MEMBASE
:
1632 case OP_LCALL_MEMBASE
:
1633 case OP_VCALL_MEMBASE
:
1634 case OP_VOIDCALL_MEMBASE
:
1635 case OP_CALL_MEMBASE
:
1636 call
= (MonoCallInst
*)ins
;
1637 /* jump to ins->inst_sreg1 + ins->inst_offset */
1638 hppa_ldw (code
, ins
->inst_offset
, ins
->sreg1
, hppa_r1
);
1640 /* For virtual calls, emit a special token that can
1641 * be used by get_vcall_slot_addr
1644 hppa_ldo (code
, 0x777, hppa_r0
, hppa_r0
);
1645 hppa_ble (code
, 0, hppa_r1
);
1646 hppa_copy (code
, hppa_r31
, hppa_r2
);
1651 /* Keep alignment */
1652 hppa_ldo (code
, MONO_ARCH_LOCALLOC_ALIGNMENT
- 1, ins
->sreg1
, ins
->dreg
);
1653 hppa_depi (code
, 0, 31, 6, ins
->dreg
);
1654 hppa_copy (code
, hppa_sp
, hppa_r1
);
1655 hppa_addl (code
, ins
->dreg
, hppa_sp
, hppa_sp
);
1656 hppa_copy (code
, hppa_r1
, ins
->dreg
);
1658 if (ins
->flags
& MONO_INST_INIT
) {
1659 hppa_stw (code
, hppa_r0
, 0, hppa_r1
);
1660 hppa_combt (code
, hppa_r1
, hppa_sp
, HPPA_CMP_COND_ULT
, -3);
1661 hppa_ldo (code
, 4, hppa_r1
, hppa_r1
);
1667 hppa_copy (code
, ins
->sreg1
, hppa_r26
);
1668 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
1669 (gpointer
)"mono_arch_throw_exception");
1670 hppa_ldil (code
, 0, hppa_r1
);
1671 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
1672 hppa_ble (code
, 0, hppa_r1
);
1673 hppa_copy (code
, hppa_r31
, hppa_r2
);
1674 /* should never return */
1675 *code
++ = 0xffeeddcc;
1678 hppa_copy (code
, ins
->sreg1
, hppa_r26
);
1679 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
1680 (gpointer
)"mono_arch_rethrow_exception");
1681 hppa_ldil (code
, 0, hppa_r1
);
1682 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
1683 hppa_ble (code
, 0, hppa_r1
);
1684 hppa_copy (code
, hppa_r31
, hppa_r2
);
1685 /* should never return */
1686 *code
++ = 0xffeeddcc;
1688 case OP_START_HANDLER
:
1689 if (hppa_check_bits (ins
->inst_left
->inst_offset
, 14))
1690 hppa_stw (code
, hppa_r2
, ins
->inst_left
->inst_offset
, ins
->inst_left
->inst_basereg
);
1692 hppa_set (code
, ins
->inst_left
->inst_offset
, hppa_r1
);
1693 hppa_addl (code
, ins
->inst_left
->inst_basereg
, hppa_r1
, hppa_r1
);
1694 hppa_stw (code
, hppa_r2
, 0, hppa_r1
);
1698 if (ins
->sreg1
!= hppa_r26
)
1699 hppa_copy (code
, ins
->sreg1
, hppa_r26
);
1700 if (hppa_check_bits (ins
->inst_left
->inst_offset
, 14))
1701 hppa_ldw (code
, ins
->inst_left
->inst_offset
, ins
->inst_left
->inst_basereg
, hppa_r2
);
1703 hppa_set (code
, ins
->inst_left
->inst_offset
, hppa_r1
);
1704 hppa_ldwx (code
, hppa_r1
, ins
->inst_left
->inst_basereg
, hppa_r2
);
1706 hppa_bv (code
, hppa_r0
, hppa_r2
);
1710 if (hppa_check_bits (ins
->inst_left
->inst_offset
, 14))
1711 hppa_ldw (code
, ins
->inst_left
->inst_offset
, ins
->inst_left
->inst_basereg
, hppa_r1
);
1713 hppa_set (code
, ins
->inst_left
->inst_offset
, hppa_r1
);
1714 hppa_ldwx (code
, hppa_r1
, ins
->inst_left
->inst_basereg
, hppa_r1
);
1716 hppa_bv (code
, hppa_r0
, hppa_r1
);
1719 case OP_CALL_HANDLER
:
1720 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
1721 hppa_bl (code
, 0, hppa_r2
);
1725 ins
->inst_c0
= (guint8
*)code
- cfg
->native_code
;
1729 DEBUG (printf ("target: %p, next: %p, curr: %p, last: %p\n", ins
->inst_target_bb
, bb
->next_bb
, ins
, bb
->last_ins
));
1730 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
1731 hppa_bl (code
, 8, hppa_r0
);
1732 /* TODO: if the branch is too long, we may need to
1733 * use a long-branch sequence:
1734 * hppa_ldil (code, 0, hppa_r1);
1735 * hppa_ldo (code, 0, hppa_r1, hppa_r1);
1736 * hppa_bv (code, hppa_r0, hppa_r1);
1742 hppa_bv (code
, hppa_r0
, ins
->sreg1
);
1749 max_len
+= 8 * GPOINTER_TO_INT (ins
->klass
);
1750 if (offset
> (cfg
->code_size
- max_len
- 16)) {
1751 cfg
->code_size
+= max_len
;
1752 cfg
->code_size
*= 2;
1753 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
1754 code
= cfg
->native_code
+ offset
;
1755 code_start
= (guint8
*)code
;
1757 hppa_blr (code
, ins
->sreg1
, hppa_r0
);
1759 for (i
= 0; i
< GPOINTER_TO_INT (ins
->klass
); ++i
) {
1760 *code
++ = 0xdeadbeef;
1761 *code
++ = 0xdeadbeef;
1766 /* comclr is cool :-) */
1768 hppa_comclr_cond (code
, HPPA_SUB_COND_NE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1769 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1773 hppa_comclr_cond (code
, HPPA_SUB_COND_SGE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1774 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1777 case OP_HPPA_CLT_UN
:
1778 hppa_comclr_cond (code
, HPPA_SUB_COND_UGE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1779 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1783 hppa_comclr_cond (code
, HPPA_SUB_COND_SLE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1784 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1787 case OP_HPPA_CGT_UN
:
1788 hppa_comclr_cond (code
, HPPA_SUB_COND_ULE
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1789 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
1797 case OP_COND_EXC_EQ
:
1798 case OP_COND_EXC_NE_UN
:
1799 case OP_COND_EXC_LT
:
1800 case OP_COND_EXC_LT_UN
:
1801 case OP_COND_EXC_GT
:
1802 case OP_COND_EXC_GT_UN
:
1803 case OP_COND_EXC_GE
:
1804 case OP_COND_EXC_GE_UN
:
1805 case OP_COND_EXC_LE
:
1806 case OP_COND_EXC_LE_UN
:
1807 case OP_COND_EXC_OV
:
1808 case OP_COND_EXC_NO
:
1810 case OP_COND_EXC_NC
:
1811 case OP_COND_EXC_IOV
:
1812 case OP_COND_EXC_IC
:
1826 case OP_COMPARE_IMM
:
1827 case OP_ICOMPARE_IMM
:
1828 g_warning ("got opcode %s in %s(), should be reduced\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
1829 g_assert_not_reached ();
1835 case OP_HPPA_BLT_UN
:
1837 case OP_HPPA_BGT_UN
:
1839 case OP_HPPA_BGE_UN
:
1841 case OP_HPPA_BLE_UN
:
1842 EMIT_COND_BRANCH (ins
, ins
->sreg1
, ins
->sreg2
, ins
->opcode
- OP_HPPA_BEQ
);
1845 case OP_HPPA_COND_EXC_EQ
:
1846 case OP_HPPA_COND_EXC_GE
:
1847 case OP_HPPA_COND_EXC_GT
:
1848 case OP_HPPA_COND_EXC_LE
:
1849 case OP_HPPA_COND_EXC_LT
:
1850 case OP_HPPA_COND_EXC_NE_UN
:
1851 case OP_HPPA_COND_EXC_GE_UN
:
1852 case OP_HPPA_COND_EXC_GT_UN
:
1853 case OP_HPPA_COND_EXC_LE_UN
:
1854 case OP_HPPA_COND_EXC_LT_UN
:
1855 EMIT_COND_SYSTEM_EXCEPTION (ins
->sreg1
, ins
->sreg2
, ins
->opcode
- OP_HPPA_COND_EXC_EQ
, ins
->inst_p1
);
1858 case OP_HPPA_COND_EXC_OV
:
1859 case OP_HPPA_COND_EXC_NO
:
1860 case OP_HPPA_COND_EXC_C
:
1861 case OP_HPPA_COND_EXC_NC
:
1864 /* floating point opcodes */
1866 hppa_set (code
, (unsigned int)ins
->inst_p0
, hppa_r1
);
1867 hppa_flddx (code
, hppa_r0
, hppa_r1
, ins
->dreg
);
1870 hppa_set (code
, (unsigned int)ins
->inst_p0
, hppa_r1
);
1871 hppa_fldwx (code
, hppa_r0
, hppa_r1
, hppa_fr31
, 0);
1872 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr31
, ins
->dreg
);
1874 case OP_STORER8_MEMBASE_REG
:
1875 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1876 hppa_fstdx (code
, ins
->sreg1
, hppa_r1
, ins
->inst_destbasereg
);
1878 case OP_LOADR8_MEMBASE
:
1879 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1880 hppa_flddx (code
, hppa_r1
, ins
->inst_basereg
, ins
->dreg
);
1882 case OP_STORER4_MEMBASE_REG
:
1883 hppa_fcnvff (code
, HPPA_FP_FMT_DBL
, HPPA_FP_FMT_SGL
, ins
->sreg1
, hppa_fr31
);
1884 if (hppa_check_bits (ins
->inst_offset
, 5)) {
1885 hppa_fstws (code
, hppa_fr31
, 0, ins
->inst_offset
, ins
->inst_destbasereg
);
1887 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1888 hppa_fstwx (code
, hppa_fr31
, 0, hppa_r1
, ins
->inst_destbasereg
);
1891 case OP_HPPA_STORER4_LEFT
:
1892 case OP_HPPA_STORER4_RIGHT
:
1893 if (hppa_check_bits (ins
->inst_offset
, 5)) {
1894 hppa_fstws (code
, ins
->sreg1
, (ins
->opcode
== OP_HPPA_STORER4_RIGHT
), ins
->inst_offset
, ins
->inst_destbasereg
);
1896 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1897 hppa_fstwx (code
, ins
->sreg1
, (ins
->opcode
== OP_HPPA_STORER4_RIGHT
), hppa_r1
, ins
->inst_destbasereg
);
1900 case OP_LOADR4_MEMBASE
:
1901 if (hppa_check_bits (ins
->inst_offset
, 5)) {
1902 hppa_fldws (code
, ins
->inst_offset
, ins
->inst_basereg
, hppa_fr31
, 0);
1904 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1905 hppa_fldwx (code
, hppa_r1
, ins
->inst_basereg
, hppa_fr31
, 0);
1907 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr31
, ins
->dreg
);
1909 case OP_HPPA_LOADR4_LEFT
:
1910 case OP_HPPA_LOADR4_RIGHT
:
1911 if (hppa_check_bits (ins
->inst_offset
, 5)) {
1912 hppa_fldws (code
, ins
->inst_offset
, ins
->inst_basereg
, ins
->dreg
, (ins
->opcode
== OP_HPPA_LOADR4_RIGHT
));
1914 hppa_set (code
, ins
->inst_offset
, hppa_r1
);
1915 hppa_fldwx (code
, hppa_r1
, ins
->inst_basereg
, ins
->dreg
, (ins
->opcode
== OP_HPPA_LOADR4_RIGHT
));
1920 hppa_stw (code
, ins
->sreg1
, -16, hppa_sp
);
1921 hppa_fldws (code
, -16, hppa_sp
, hppa_fr31
, 0);
1922 hppa_fcnvxf (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_SGL
, hppa_fr31
, ins
->dreg
);
1923 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, ins
->dreg
, ins
->dreg
);
1926 case OP_FCONV_TO_R4
:
1927 /* reduce precision */
1928 hppa_fcnvff (code
, HPPA_FP_FMT_DBL
, HPPA_FP_FMT_SGL
, ins
->sreg1
, ins
->dreg
);
1929 hppa_fcnvff (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, ins
->dreg
, ins
->dreg
);
1932 case OP_HPPA_SETF4REG
:
1933 hppa_fcnvff (code
, HPPA_FP_FMT_DBL
, HPPA_FP_FMT_SGL
, ins
->sreg1
, ins
->dreg
);
1936 hppa_stw (code
, ins
->sreg1
, -16, hppa_sp
);
1937 hppa_fldws (code
, -16, hppa_sp
, hppa_fr31
, 0);
1938 hppa_fcnvxf (code
, HPPA_FP_FMT_SGL
, HPPA_FP_FMT_DBL
, hppa_fr31
, ins
->dreg
);
1941 case OP_FCONV_TO_I1
:
1942 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
1944 case OP_FCONV_TO_U1
:
1945 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
1947 case OP_FCONV_TO_I2
:
1948 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
1950 case OP_FCONV_TO_U2
:
1951 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
1953 case OP_FCONV_TO_I4
:
1955 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
1957 case OP_FCONV_TO_U4
:
1959 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
1962 case OP_FCONV_TO_I8
:
1963 case OP_FCONV_TO_U8
:
1964 g_assert_not_reached ();
1965 /* Implemented as helper calls */
1967 case OP_LCONV_TO_R_UN
:
1968 g_assert_not_reached ();
1969 /* Implemented as helper calls */
1972 case OP_LCONV_TO_OVF_I
:
1977 hppa_fadd (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1980 hppa_fsub (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1983 hppa_fmul (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1986 hppa_fdiv (code
, HPPA_FP_FMT_DBL
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
1993 g_assert_not_reached();
2001 hppa_fcmp (code
, HPPA_FP_FMT_DBL
, float_ceq_table
[ins
->opcode
- OP_FCEQ
], ins
->sreg1
, ins
->sreg2
);
2002 hppa_ftest (code
, 0);
2003 hppa_bl (code
, 12, hppa_r0
);
2004 hppa_ldo (code
, 1, hppa_r0
, ins
->dreg
);
2005 hppa_ldo (code
, 0, hppa_r0
, ins
->dreg
);
2018 EMIT_FLOAT_COND_BRANCH (ins
, ins
->sreg1
, ins
->sreg2
, ins
->opcode
- OP_FBEQ
);
2022 case OP_MEMORY_BARRIER
:
2026 hppa_xmpyu (code
, ins
->sreg1
, ins
->sreg2
, ins
->dreg
);
2030 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
2031 g_assert_not_reached ();
2034 if ((((guint8
*)code
) - code_start
) > max_len
) {
2035 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
2036 mono_inst_name (ins
->opcode
), max_len
, ((guint8
*)code
) - code_start
);
2037 g_assert_not_reached ();
2045 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
2050 mono_arch_register_lowlevel_calls (void)
2055 mono_arch_patch_code (MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, MonoCodeManager
*dyn_code_mp
, gboolean run_cctors
)
2057 MonoJumpInfo
*patch_info
;
2060 /* FIXME: Move part of this to arch independent code */
2061 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
2062 unsigned char *ip
= patch_info
->ip
.i
+ code
;
2065 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
);
2066 DEBUG (printf ("patch_info->type = %d, target = %p\n", patch_info
->type
, target
));
2068 switch (patch_info
->type
) {
2069 case MONO_PATCH_INFO_NONE
:
2070 case MONO_PATCH_INFO_BB_OVF
:
2071 case MONO_PATCH_INFO_EXC_OVF
:
2074 case MONO_PATCH_INFO_IP
:
2075 hppa_patch ((guint32
*)ip
, ip
);
2078 case MONO_PATCH_INFO_CLASS_INIT
: {
2081 case MONO_PATCH_INFO_METHOD_JUMP
: {
2084 case MONO_PATCH_INFO_SWITCH
: {
2086 gpointer
*table
= (gpointer
*)target
;
2088 for (i
= 0; i
< patch_info
->data
.table
->table_size
; i
++) {
2089 DEBUG (printf ("Patching switch table, table[%d] = %p\n", i
, table
[i
]));
2090 hppa_ldil (ip
, hppa_lsel (table
[i
]), hppa_r1
);
2091 hppa_be_n (ip
, hppa_rsel (table
[i
]), hppa_r1
);
2098 hppa_patch ((guint32
*)ip
, target
);
2105 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
2107 guint32
*code
= (guint32
*)p
;
2111 hppa_set (code
, cfg
->method
, hppa_r26
);
2112 hppa_copy (code
, hppa_r0
, hppa_r25
); /* NULL sp for now */
2113 hppa_set (code
, func
, hppa_r1
);
2114 hppa_depi (code
, 0, 31, 2, hppa_r1
);
2115 hppa_ldw (code
, 0, hppa_r1
, hppa_r1
);
2116 hppa_ble (code
, 0, hppa_r1
);
2117 hppa_copy (code
, hppa_r31
, hppa_r2
);
2132 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
2134 guint32
*code
= (guint32
*)p
;
2137 int save_mode
= SAVE_NONE
;
2138 MonoMethod
*method
= cfg
->method
;
2140 switch (mono_type_get_underlying_type (mono_method_signature (method
)->ret
)->type
) {
2141 case MONO_TYPE_VOID
:
2142 /* special case string .ctor icall */
2143 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
2144 save_mode
= SAVE_ONE
;
2146 save_mode
= SAVE_NONE
;
2151 save_mode
= SAVE_ONE
;
2153 save_mode
= SAVE_TWO
;
2158 save_mode
= SAVE_FP
;
2160 case MONO_TYPE_VALUETYPE
:
2161 save_mode
= SAVE_STRUCT
;
2164 save_mode
= SAVE_ONE
;
2168 /* Save the result to the stack and also put it into the output registers */
2170 switch (save_mode
) {
2173 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 68);
2174 sparc_st_imm (code
, sparc_i0
, sparc_fp
, 72);
2175 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
2176 sparc_mov_reg_reg (code
, sparc_i1
, sparc_o2
);
2179 sparc_sti_imm (code
, sparc_i0
, sparc_fp
, ARGS_OFFSET
);
2180 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
2184 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, ARGS_OFFSET
);
2186 sparc_stdf_imm (code
, sparc_f0
, sparc_fp
, 72);
2187 sparc_ld_imm (code
, sparc_fp
, 72, sparc_o1
);
2188 sparc_ld_imm (code
, sparc_fp
, 72 + 4, sparc_o2
);
2193 sparc_mov_reg_reg (code
, sparc_i0
, sparc_o1
);
2195 sparc_ld_imm (code
, sparc_fp
, 64, sparc_o1
);
2203 sparc_set (code
, cfg
->method
, sparc_o0
);
2205 mono_add_patch_info (cfg
, (guint8
*)code
- cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
2208 /* Restore result */
2210 switch (save_mode
) {
2212 sparc_ld_imm (code
, sparc_fp
, 68, sparc_i0
);
2213 sparc_ld_imm (code
, sparc_fp
, 72, sparc_i0
);
2216 sparc_ldi_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_i0
);
2219 sparc_lddf_imm (code
, sparc_fp
, ARGS_OFFSET
, sparc_f0
);
2231 * The HPPA stack frame should look like this:
2233 * ---------------------
2234 * incoming params area
2235 * ---------------------
2236 * linkage area size = ARGS_OFFSET
2237 * --------------------- fp = psp
2238 * HPPA_STACK_LMF_OFFSET
2239 * ---------------------
2240 * MonoLMF structure or saved registers
2241 * -------------------
2242 * locals size = cfg->stack_offset - cfg->param_area
2243 * ---------------------
2244 * params area size = cfg->param_area - ARGS_OFFSET (aligned)
2245 * ---------------------
2246 * callee linkage area size = ARGS_OFFSET
2247 * --------------------- sp
2250 mono_arch_emit_prolog (MonoCompile
*cfg
)
2252 MonoMethod
*method
= cfg
->method
;
2254 MonoMethodSignature
*sig
;
2256 int alloc_size
, pos
, max_offset
, i
;
2263 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
2266 sig
= mono_method_signature (method
);
2267 cfg
->code_size
= 512 + sig
->param_count
* 20;
2268 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
2270 /* TODO: enable tail call optimization */
2271 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
) {
2272 hppa_stw (code
, hppa_r2
, -20, hppa_sp
);
2276 pos
= HPPA_STACK_LMF_OFFSET
;
2278 /* figure out how much space we need for spilling */
2279 if (!method
->save_lmf
) {
2280 /* spill callee-save registers */
2281 guint32 mask
= cfg
->used_int_regs
& MONO_ARCH_CALLEE_SAVED_REGS
;
2282 for (i
= 0; i
< 32; i
++) {
2283 if ((1 << i
) & mask
)
2284 pos
+= sizeof (gulong
);
2288 pos
+= sizeof (MonoLMF
);
2291 alloc_size
= ALIGN_TO (pos
+ cfg
->stack_offset
, MONO_ARCH_FRAME_ALIGNMENT
);
2292 g_assert ((alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) == 0);
2294 cfg
->stack_usage
= alloc_size
;
2297 hppa_copy (code
, hppa_r3
, hppa_r1
);
2298 hppa_copy (code
, hppa_sp
, hppa_r3
);
2299 if (hppa_check_bits (alloc_size
, 14))
2300 hppa_stwm (code
, hppa_r1
, alloc_size
, hppa_sp
);
2302 hppa_stwm (code
, hppa_r1
, 8100, hppa_sp
);
2303 hppa_addil (code
, hppa_lsel (alloc_size
- 8100), hppa_sp
);
2304 hppa_ldo (code
, hppa_rsel (alloc_size
- 8100), hppa_r1
, hppa_sp
);
2308 /* compute max_offset in order to use short forward jumps
2309 * we always do it on hppa because the immediate displacement
2310 * for jumps is small
2313 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2314 MonoInst
*ins
= bb
->code
;
2315 bb
->max_offset
= max_offset
;
2317 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
2320 MONO_BB_FOR_EACH_INS (bb
, ins
)
2321 max_offset
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
2324 DEBUG (printf ("Incoming arguments: \n"));
2325 cinfo
= get_call_info (sig
, sig
->pinvoke
);
2327 /* We do this first so that we don't have to worry about the LMF-
2328 * saving code clobbering r28
2330 if (cinfo
->struct_return
)
2331 hppa_stw (code
, hppa_r28
, cfg
->ret
->inst_offset
, hppa_sp
);
2333 /* Save the LMF or the spilled registers */
2334 pos
= HPPA_STACK_LMF_OFFSET
;
2335 if (!method
->save_lmf
) {
2336 /* spill callee-save registers */
2337 guint32 mask
= cfg
->used_int_regs
& MONO_ARCH_CALLEE_SAVED_REGS
;
2338 for (i
= 0; i
< 32; i
++) {
2339 if ((1 << i
) & mask
) {
2341 hppa_ldw (code
, 0, hppa_r3
, hppa_r1
);
2342 hppa_stw (code
, hppa_r1
, pos
, hppa_r3
);
2344 hppa_stw (code
, i
, pos
, hppa_r3
);
2345 pos
+= sizeof (gulong
);
2349 int ofs
= lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, regs
);
2352 hppa_ldw (code
, 0, hppa_r3
, hppa_r1
);
2353 hppa_stw (code
, hppa_r1
, ofs
, hppa_r3
);
2354 ofs
+= sizeof (gulong
);
2355 for (reg
= 4; reg
< 32; reg
++) {
2356 if (HPPA_IS_SAVED_GREG (reg
)) {
2357 hppa_stw (code
, reg
, ofs
, hppa_r3
);
2358 ofs
+= sizeof (gulong
);
2361 /* We shouldn't need to save the FP regs.... */
2362 ofs
= ALIGN_TO (ofs
, sizeof(double));
2363 hppa_set (code
, ofs
, hppa_r1
);
2364 for (reg
= 0; reg
< 32; reg
++) {
2365 if (HPPA_IS_SAVED_FREG (reg
)) {
2366 hppa_fstdx (code
, reg
, hppa_r1
, hppa_r3
);
2367 hppa_ldo (code
, sizeof(double), hppa_r1
, hppa_r1
);
2371 /* We also spill the arguments onto the stack, because
2372 * the call to hppa_get_lmf_addr below can clobber them
2374 * This goes in the param area that is always allocated
2377 for (reg
= hppa_r26
; reg
>= hppa_r23
; reg
--) {
2378 hppa_stw (code
, reg
, ofs
, hppa_sp
);
2383 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
2384 hppa_copy (code
, hppa_r30
, hppa_r4
);
2386 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
2387 hppa_set (code
, cfg
->domain
, hppa_r26
);
2388 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
, (gpointer
)"mono_jit_thread_attach");
2389 hppa_ldil (code
, 0, hppa_r1
);
2390 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
2391 hppa_depi (code
, 0, 31, 2, hppa_r1
);
2392 hppa_ldw (code
, 0, hppa_r1
, hppa_r1
);
2393 hppa_ble (code
, 0, hppa_r1
);
2394 hppa_copy (code
, hppa_r31
, hppa_r2
);
2397 if (method
->save_lmf
) {
2398 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
2399 (gpointer
)"mono_get_lmf_addr");
2400 hppa_ldil (code
, 0, hppa_r1
);
2401 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
2402 hppa_depi (code
, 0, 31, 2, hppa_r1
);
2403 hppa_ldw (code
, 0, hppa_r1
, hppa_r1
);
2404 hppa_ble (code
, 0, hppa_r1
);
2405 hppa_copy (code
, hppa_r31
, hppa_r2
);
2407 /* lmf_offset is the offset from the previous stack pointer,
2408 * The pointer to the struct is put in hppa_r22 (new_lmf).
2409 * The callee-saved registers are already in the MonoLMF
2413 /* hppa_r22 = new_lmf (on the stack) */
2414 hppa_ldo (code
, lmf_offset
, hppa_r3
, hppa_r22
);
2415 /* lmf_offset is the offset from the previous stack pointer,
2417 hppa_stw (code
, hppa_r28
, G_STRUCT_OFFSET(MonoLMF
, lmf_addr
), hppa_r22
);
2418 /* new_lmf->previous_lmf = *lmf_addr */
2419 hppa_ldw (code
, 0, hppa_r28
, hppa_r1
);
2420 hppa_stw (code
, hppa_r1
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), hppa_r22
);
2421 /* *(lmf_addr) = r22 */
2422 hppa_stw (code
, hppa_r22
, 0, hppa_r28
);
2423 hppa_set (code
, method
, hppa_r1
);
2424 hppa_stw (code
, hppa_r1
, G_STRUCT_OFFSET(MonoLMF
, method
), hppa_r22
);
2425 hppa_stw (code
, hppa_sp
, G_STRUCT_OFFSET(MonoLMF
, ebp
), hppa_r22
);
2426 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_IP
, NULL
);
2427 hppa_ldil (code
, 0, hppa_r1
);
2428 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
2429 hppa_stw (code
, hppa_r1
, G_STRUCT_OFFSET(MonoLMF
, eip
), hppa_r22
);
2431 /* Now reload the arguments from the stack */
2432 hppa_ldw (code
, -36, hppa_sp
, hppa_r26
);
2433 hppa_ldw (code
, -40, hppa_sp
, hppa_r25
);
2434 hppa_ldw (code
, -44, hppa_sp
, hppa_r24
);
2435 hppa_ldw (code
, -48, hppa_sp
, hppa_r23
);
2438 /* load arguments allocated to register from the stack */
2441 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2442 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2443 inst
= cfg
->args
[pos
];
2445 if (inst
->opcode
== OP_REGVAR
) {
2446 /* Want the argument in a register */
2447 switch (ainfo
->storage
) {
2449 if (ainfo
->reg
!= inst
->dreg
)
2450 hppa_copy (code
, ainfo
->reg
, inst
->dreg
);
2451 DEBUG (printf ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
)));
2455 if (ainfo
->reg
!= inst
->dreg
) {
2456 hppa_copy (code
, ainfo
->reg
, inst
->dreg
);
2457 hppa_copy (code
, ainfo
->reg
+ 1, inst
->dreg
+ 1);
2459 DEBUG (printf ("Argument %d assigned to register %s, %s\n", pos
, mono_arch_regname (inst
->dreg
), mono_arch_regname (inst
->dreg
+ 1)));
2463 if (ainfo
->reg
!= inst
->dreg
)
2464 hppa_fcpy (code
, HPPA_FP_FMT_SGL
, ainfo
->reg
, inst
->dreg
);
2465 DEBUG (printf ("Argument %d assigned to single register %s\n", pos
, mono_arch_fregname (inst
->dreg
)));
2469 if (ainfo
->reg
!= inst
->dreg
)
2470 hppa_fcpy (code
, HPPA_FP_FMT_DBL
, ainfo
->reg
, inst
->dreg
);
2471 DEBUG (printf ("Argument %d assigned to double register %s\n", pos
, mono_arch_fregname (inst
->dreg
)));
2475 switch (ainfo
->size
) {
2477 hppa_ldb (code
, ainfo
->offset
, hppa_r3
, inst
->dreg
);
2480 hppa_ldh (code
, ainfo
->offset
, hppa_r3
, inst
->dreg
);
2483 hppa_ldw (code
, ainfo
->offset
, hppa_r3
, inst
->dreg
);
2486 g_assert_not_reached ();
2490 DEBUG (printf ("Argument %d loaded from the stack [%s - %d]\n", pos
, mono_arch_regname (hppa_r3
), -ainfo
->offset
));
2494 g_assert_not_reached ();
2498 /* Want the argument on the stack */
2499 switch (ainfo
->storage
)
2503 DEBUG (printf ("Argument %d stored from register %s to stack [%s + %d]\n", pos
, mono_arch_regname (ainfo
->reg
), mono_arch_regname (inst
->inst_basereg
), inst
->inst_offset
));
2504 if (hppa_check_bits (inst
->inst_offset
, 14)) {
2505 off
= inst
->inst_offset
;
2506 reg
= inst
->inst_basereg
;
2509 hppa_set (code
, inst
->inst_offset
, hppa_r1
);
2510 hppa_add (code
, hppa_r1
, inst
->inst_basereg
, hppa_r1
);
2514 switch (ainfo
->size
)
2517 hppa_stb (code
, ainfo
->reg
, off
, reg
);
2520 hppa_sth (code
, ainfo
->reg
, off
, reg
);
2523 hppa_stw (code
, ainfo
->reg
, off
, reg
);
2526 g_assert_not_reached ();
2531 DEBUG (printf ("Argument %d stored from register (%s,%s) to stack [%s + %d]\n", pos
, mono_arch_regname (ainfo
->reg
), mono_arch_regname (ainfo
->reg
+1), mono_arch_regname (inst
->inst_basereg
), inst
->inst_offset
));
2532 if (hppa_check_bits (inst
->inst_offset
+ 4, 14)) {
2533 hppa_stw (code
, ainfo
->reg
, inst
->inst_offset
, inst
->inst_basereg
);
2534 hppa_stw (code
, ainfo
->reg
+ 1, inst
->inst_offset
+ 4, inst
->inst_basereg
);
2537 hppa_ldo (code
, inst
->inst_offset
, inst
->inst_basereg
, hppa_r1
);
2538 hppa_stw (code
, ainfo
->reg
, 0, hppa_r1
);
2539 hppa_stw (code
, ainfo
->reg
+ 1, 4, hppa_r1
);
2544 DEBUG (printf ("Argument %d (float) stored from register %s to stack [%s + %d]\n", pos
, mono_arch_fregname (ainfo
->reg
), mono_arch_regname (inst
->inst_basereg
), inst
->inst_offset
));
2545 hppa_ldo (code
, inst
->inst_offset
, inst
->inst_basereg
, hppa_r1
);
2546 hppa_fstwx (code
, ainfo
->reg
, 0, hppa_r0
, hppa_r1
);
2550 DEBUG (printf ("Argument %d (double) stored from register %s to stack [%s + %d]\n", pos
, mono_arch_fregname (ainfo
->reg
), mono_arch_regname (inst
->inst_basereg
), inst
->inst_offset
));
2551 hppa_ldo (code
, inst
->inst_offset
, inst
->inst_basereg
, hppa_r1
);
2552 hppa_fstdx (code
, ainfo
->reg
, hppa_r0
, hppa_r1
);
2556 DEBUG (printf ("Argument %d copied from [%s - %d] to [%s + %d] (size=%d)\n", pos
, mono_arch_regname (hppa_r3
), -ainfo
->offset
, mono_arch_regname (inst
->inst_basereg
), inst
->inst_offset
, ainfo
->size
));
2557 if (inst
->inst_offset
!= ainfo
->offset
||
2558 inst
->inst_basereg
!= hppa_r3
)
2559 code
= emit_memcpy (code
, inst
->inst_offset
, inst
->inst_basereg
, ainfo
->offset
, hppa_r3
, ainfo
->size
);
2563 g_assert_not_reached ();
2572 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
2574 if (getenv("HPPA_BREAK")) {
2575 *(guint32
*)code
= 0x00010004;
2579 cfg
->code_len
= code
- cfg
->native_code
;
2580 g_assert (cfg
->code_len
< cfg
->code_size
);
2589 mono_arch_emit_epilog (MonoCompile
*cfg
)
2591 MonoMethod
*method
= cfg
->method
;
2592 MonoMethodSignature
*sig
;
2594 int max_epilog_size
= 16 + 20 * 4;
2598 sig
= mono_method_signature (cfg
->method
);
2599 if (cfg
->method
->save_lmf
)
2600 max_epilog_size
+= 128;
2602 if (mono_jit_trace_calls
!= NULL
)
2603 max_epilog_size
+= 50;
2605 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
2606 max_epilog_size
+= 50;
2608 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
2609 cfg
->code_size
*= 2;
2610 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
2611 cfg
->stat_code_reallocs
++;
2614 code
= (guint32
*)(cfg
->native_code
+ cfg
->code_len
);
2616 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
2617 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
2619 pos
= HPPA_STACK_LMF_OFFSET
;
2620 if (cfg
->method
->save_lmf
) {
2622 hppa_ldo (code
, pos
, hppa_r3
, hppa_r22
);
2623 hppa_ldw (code
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), hppa_r22
, hppa_r21
);
2624 hppa_ldw (code
, G_STRUCT_OFFSET(MonoLMF
, lmf_addr
), hppa_r22
, hppa_r20
);
2625 hppa_stw (code
, hppa_r21
, G_STRUCT_OFFSET(MonoLMF
, previous_lmf
), hppa_r20
);
2627 pos
+= G_STRUCT_OFFSET(MonoLMF
, regs
) + sizeof (gulong
);
2628 /* We skip the restore of r3 here, it is restored from the
2629 * stack anyway. This makes the code a bit easier.
2631 for (reg
= 4; reg
< 31; reg
++) {
2632 if (HPPA_IS_SAVED_GREG (reg
)) {
2633 hppa_ldw (code
, pos
, hppa_r3
, reg
);
2634 pos
+= sizeof(gulong
);
2638 pos
= ALIGN_TO (pos
, sizeof (double));
2639 hppa_set (code
, pos
, hppa_r1
);
2640 for (reg
= 0; reg
< 31; reg
++) {
2641 if (HPPA_IS_SAVED_FREG (reg
)) {
2642 hppa_flddx (code
, hppa_r1
, hppa_r3
, reg
);
2643 hppa_ldo (code
, sizeof (double), hppa_r1
, hppa_r1
);
2644 pos
+= sizeof (double);
2648 guint32 mask
= cfg
->used_int_regs
& MONO_ARCH_CALLEE_SAVED_REGS
;
2650 for (i
= 0; i
< 32; i
++) {
2653 if ((1 << i
) & mask
) {
2654 hppa_ldw (code
, pos
, hppa_r3
, i
);
2655 pos
+= sizeof (gulong
);
2660 if (sig
->ret
->type
!= MONO_TYPE_VOID
&&
2661 mono_type_to_stind (sig
->ret
) == CEE_STOBJ
) {
2662 CallInfo
*cinfo
= get_call_info (sig
, sig
->pinvoke
);
2664 switch (cinfo
->ret
.storage
) {
2666 hppa_ldw (code
, cfg
->ret
->inst_offset
, hppa_sp
, hppa_r28
);
2667 hppa_ldw (code
, 0, hppa_r28
, hppa_r28
);
2670 hppa_ldw (code
, cfg
->ret
->inst_offset
, hppa_sp
, hppa_r28
);
2671 hppa_ldw (code
, 4, hppa_r28
, hppa_r29
);
2672 hppa_ldw (code
, 0, hppa_r28
, hppa_r28
);
2678 g_assert_not_reached ();
2683 if (1 || cfg
->flags
& MONO_CFG_HAS_CALLS
)
2684 hppa_ldw (code
, -20, hppa_r3
, hppa_r2
);
2685 hppa_ldo (code
, 64, hppa_r3
, hppa_sp
);
2686 hppa_bv (code
, hppa_r0
, hppa_r2
);
2687 hppa_ldwm (code
, -64, hppa_sp
, hppa_r3
);
2689 cfg
->code_len
= (guint8
*)code
- cfg
->native_code
;
2691 g_assert (cfg
->code_len
< cfg
->code_size
);
2695 /* remove once throw_exception_by_name is eliminated */
2697 exception_id_by_name (const char *name
)
2699 if (strcmp (name
, "IndexOutOfRangeException") == 0)
2700 return MONO_EXC_INDEX_OUT_OF_RANGE
;
2701 if (strcmp (name
, "OverflowException") == 0)
2702 return MONO_EXC_OVERFLOW
;
2703 if (strcmp (name
, "ArithmeticException") == 0)
2704 return MONO_EXC_ARITHMETIC
;
2705 if (strcmp (name
, "DivideByZeroException") == 0)
2706 return MONO_EXC_DIVIDE_BY_ZERO
;
2707 if (strcmp (name
, "InvalidCastException") == 0)
2708 return MONO_EXC_INVALID_CAST
;
2709 if (strcmp (name
, "NullReferenceException") == 0)
2710 return MONO_EXC_NULL_REF
;
2711 if (strcmp (name
, "ArrayTypeMismatchException") == 0)
2712 return MONO_EXC_ARRAY_TYPE_MISMATCH
;
2713 g_error ("Unknown intrinsic exception %s\n", name
);
2718 mono_arch_emit_exceptions (MonoCompile
*cfg
)
2720 MonoJumpInfo
*patch_info
;
2723 const guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
] = {NULL
};
2724 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
] = {0};
2725 int max_epilog_size
= 50;
2729 /* count the number of exception infos */
2732 * make sure we have enough space for exceptions
2734 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
2735 switch (patch_info
->type
) {
2736 case MONO_PATCH_INFO_BB_OVF
:
2737 g_assert_not_reached ();
2740 case MONO_PATCH_INFO_EXC_OVF
: {
2741 const MonoOvfJump
*ovfj
= patch_info
->data
.target
;
2742 max_epilog_size
+= 8;
2743 i
= exception_id_by_name (ovfj
->data
.exception
);
2744 if (!exc_throw_found
[i
]) {
2745 max_epilog_size
+= 24;
2746 exc_throw_found
[i
] = TRUE
;
2751 case MONO_PATCH_INFO_EXC
:
2752 i
= exception_id_by_name (patch_info
->data
.target
);
2753 if (!exc_throw_found
[i
]) {
2754 max_epilog_size
+= 24;
2755 exc_throw_found
[i
] = TRUE
;
2764 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
2765 cfg
->code_size
*= 2;
2766 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
2767 cfg
->stat_code_reallocs
++;
2770 code
= cfg
->native_code
+ cfg
->code_len
;
2772 /* add code to raise exceptions */
2773 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
2774 switch (patch_info
->type
) {
2775 case MONO_PATCH_INFO_BB_OVF
: {
2779 case MONO_PATCH_INFO_EXC_OVF
: {
2780 const MonoOvfJump
*ovfj
= patch_info
->data
.target
;
2781 MonoJumpInfo
*newji
;
2782 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
2783 unsigned char *stub
= code
;
2785 /* Patch original call, point it at the stub */
2786 hppa_patch ((guint32
*)ip
, code
);
2788 /* Write the stub */
2789 /* SUBTLE: this has to be PIC, because the code block
2792 hppa_bl_n (code
, 8, hppa_r0
);
2795 /* Add a patch info to patch the stub to point to the exception code */
2796 newji
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfo
));
2797 newji
->type
= MONO_PATCH_INFO_EXC
;
2798 newji
->ip
.i
= stub
- cfg
->native_code
;
2799 newji
->data
.target
= ovfj
->data
.exception
;
2800 newji
->next
= patch_info
->next
;
2801 patch_info
->next
= newji
;
2804 case MONO_PATCH_INFO_EXC
: {
2805 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
2806 i
= exception_id_by_name (patch_info
->data
.target
);
2807 if (exc_throw_pos
[i
]) {
2808 hppa_patch ((guint32
*)ip
, exc_throw_pos
[i
]);
2809 patch_info
->type
= MONO_PATCH_INFO_NONE
;
2812 exc_throw_pos
[i
] = code
;
2814 hppa_patch ((guint32
*)ip
, code
);
2815 hppa_set (code
, patch_info
->data
.target
, hppa_r26
);
2816 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
2817 patch_info
->data
.name
= "mono_arch_throw_exception_by_name";
2818 patch_info
->ip
.i
= code
- cfg
->native_code
;
2820 /* Assume the caller has set r2, we can't set it
2821 * here based on ip, because the caller may
2822 * be relocated (also the "ip" may be from an overflow
2825 hppa_ldil (code
, 0, hppa_r1
);
2826 hppa_ldo (code
, 0, hppa_r1
, hppa_r1
);
2827 hppa_bv (code
, hppa_r0
, hppa_r1
);
2837 cfg
->code_len
= code
- cfg
->native_code
;
2839 g_assert (cfg
->code_len
< cfg
->code_size
);
2843 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2845 #error "--with-sigaltstack=yes not supported on hppa"
/*
 * mono_arch_finish_init:
 * Arch-specific late initialization hook; nothing to do on hppa.
 * NOTE(review): empty body reconstructed — the extraction dropped the braces.
 */
void
mono_arch_finish_init (void)
{
}
2855 mono_arch_free_jit_tls_data (MonoJitTlsData
*tls
)
2860 mono_arch_emit_this_vret_args (MonoCompile
*cfg
, MonoCallInst
*inst
, int this_reg
, int this_type
, int vt_reg
)
2862 /* add the this argument */
2863 if (this_reg
!= -1) {
2865 MONO_INST_NEW (cfg
, this, OP_MOVE
);
2866 this->type
= this_type
;
2867 this->sreg1
= this_reg
;
2868 this->dreg
= mono_alloc_ireg (cfg
);
2869 mono_bblock_add_inst (cfg
->cbb
, this);
2870 mono_call_inst_add_outarg_reg (cfg
, inst
, this->dreg
, hppa_r26
, FALSE
);
2875 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2876 vtarg
->type
= STACK_MP
;
2877 vtarg
->sreg1
= vt_reg
;
2878 vtarg
->dreg
= mono_alloc_ireg (cfg
);
2879 mono_bblock_add_inst (cfg
->cbb
, vtarg
);
2880 mono_call_inst_add_outarg_reg (cfg
, inst
, vtarg
->dreg
, hppa_r28
, FALSE
);
2886 mono_arch_get_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
2888 MonoInst
*ins
= NULL
;
2896 * mono_arch_get_argument_info:
2897 * @csig: a method signature
2898 * @param_count: the number of parameters to consider
2899 * @arg_info: an array to store the result infos
2901 * Gathers information on parameters such as size, alignment and
2902 * padding. arg_info should be large enough to hold param_count + 1 entries.
2904 * Returns the size of the activation frame.
2907 mono_arch_get_argument_info (MonoGenericSharingContext
*gsctx
, MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
2914 cinfo
= get_call_info (csig
, FALSE
);
2916 if (csig
->hasthis
) {
2917 ainfo
= &cinfo
->args
[0];
2918 arg_info
[0].offset
= ainfo
->offset
;
2921 for (k
= 0; k
< param_count
; k
++) {
2922 ainfo
= &cinfo
->args
[k
+ csig
->hasthis
];
2924 arg_info
[k
+ 1].offset
= ainfo
->offset
;
2925 arg_info
[k
+ 1].size
= mono_type_size (csig
->params
[k
], &align
);
2933 mono_arch_print_tree (MonoInst
*tree
, int arity
)
2938 MonoInst
* mono_arch_get_domain_intrinsic (MonoCompile
* cfg
)
2944 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
2946 /* FIXME: implement */
2947 g_assert_not_reached ();