/*
 * mini-amd64.c: AMD64 backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2003 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
#include "mini.h"
#include <string.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internal.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-tls.h>

#include "trace.h"
#include "ir-emit.h"
#include "mini-amd64.h"
#include "cpu-amd64.h"
#include "debugger-agent.h"
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
static gint appdomain_tls_offset = -1;

#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
#else
#define optimize_for_xen 0
#endif
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_IMM32(val) ((((guint64)val) >> 32) == 0)

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
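/*
 * Worked example (illustrative only): ALIGN_TO rounds val up to the next
 * multiple of align (align must be a power of two), and IS_IMM32 tests
 * whether a 64 bit value has its upper 32 bits clear:
 *
 *   ALIGN_TO (13, 8)          == 16
 *   ALIGN_TO (16, 8)          == 16
 *   IS_IMM32 (0x7fffffff)     is true
 *   IS_IMM32 (0x100000000ULL) is false
 *   IS_REX (0x48)             is true (0x48 is a REX.W prefix byte)
 */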
#ifdef HOST_WIN32
/* Under windows, the calling convention is never stdcall */
#define CALLCONV_IS_STDCALL(call_conv) (FALSE)
#else
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
static CRITICAL_SECTION mini_arch_mutex;
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
/* Structure used by the sequence points in AOTed code */
typedef struct {
	gpointer ss_trigger_page;
	gpointer bp_trigger_page;
	gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
} SeqPointInfo;
/*
 * The code generated for sequence points reads from this location, which is
 * made read-only when single stepping is enabled.
 */
static gpointer ss_trigger_page;

/* Enabled breakpoints read from this trigger page */
static gpointer bp_trigger_page;

/* The size of the breakpoint sequence */
static int breakpoint_size;

/* The size of the breakpoint instruction causing the actual fault */
static int breakpoint_fault_size;

/* The size of the single step instruction causing the actual fault */
static int single_step_fault_size;
#ifdef HOST_WIN32
/* On Win64 always reserve first 32 bytes for first four arguments */
#define ARGS_OFFSET 48
#else
#define ARGS_OFFSET 16
#endif
#define GP_SCRATCH_REG AMD64_R11

/*
 * AMD64 register usage:
 * - callee saved registers are used for global register allocation
 * - %r11 is used for materializing 64 bit constants in opcodes
 * - the rest is used for local allocation
 */

/*
 * Floating point comparison results:
 */
const char*
mono_arch_regname (int reg)
{
	switch (reg) {
	case AMD64_RAX: return "%rax";
	case AMD64_RBX: return "%rbx";
	case AMD64_RCX: return "%rcx";
	case AMD64_RDX: return "%rdx";
	case AMD64_RSP: return "%rsp";
	case AMD64_RBP: return "%rbp";
	case AMD64_RDI: return "%rdi";
	case AMD64_RSI: return "%rsi";
	case AMD64_R8: return "%r8";
	case AMD64_R9: return "%r9";
	case AMD64_R10: return "%r10";
	case AMD64_R11: return "%r11";
	case AMD64_R12: return "%r12";
	case AMD64_R13: return "%r13";
	case AMD64_R14: return "%r14";
	case AMD64_R15: return "%r15";
	}
	return "unknown";
}
static const char * packed_xmmregs [] = {
	"p:xmm0", "p:xmm1", "p:xmm2", "p:xmm3", "p:xmm4", "p:xmm5", "p:xmm6", "p:xmm7", "p:xmm8",
	"p:xmm9", "p:xmm10", "p:xmm11", "p:xmm12", "p:xmm13", "p:xmm14", "p:xmm15"
};

static const char * single_xmmregs [] = {
	"s:xmm0", "s:xmm1", "s:xmm2", "s:xmm3", "s:xmm4", "s:xmm5", "s:xmm6", "s:xmm7", "s:xmm8",
	"s:xmm9", "s:xmm10", "s:xmm11", "s:xmm12", "s:xmm13", "s:xmm14", "s:xmm15"
};
const char*
mono_arch_fregname (int reg)
{
	if (reg < AMD64_XMM_NREG)
		return single_xmmregs [reg];
	else
		return "unknown";
}

const char *
mono_arch_xregname (int reg)
{
	if (reg < AMD64_XMM_NREG)
		return packed_xmmregs [reg];
	else
		return "unknown";
}
static gboolean
debug_omit_fp (void)
{
	return mono_debug_count ();
}
static inline gboolean
amd64_is_near_call (guint8 *code)
{
	/* Skip REX */
	if ((code [0] >= 0x40) && (code [0] <= 0x4f))
		code += 1;

	return code [0] == 0xe8;
}
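/*
 * Byte-level example (illustrative only): a near call is the 0xe8 opcode
 * followed by a 32 bit displacement, possibly preceded by a REX prefix
 * (0x40-0x4f), which amd64_is_near_call () skips before testing the opcode:
 *
 *   e8 12 34 56 78        call rel32              -> near call
 *   48 e8 12 34 56 78     rex.w call rel32        -> near call (REX skipped)
 *   ff 15 12 34 56 78     call *0x78563412(%rip)  -> not a near call
 */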
#ifdef __native_client_codegen__

/* Keep track of instruction "depth", that is, the level of sub-instruction */
/* for any given instruction. For instance, amd64_call_reg resolves to */
/* amd64_call_reg_internal, which uses amd64_alu_* macros, etc. */
/* We only want to force bundle alignment for the top level instruction, */
/* so NaCl pseudo-instructions can be implemented with sub instructions. */
static MonoNativeTlsKey nacl_instruction_depth;

static MonoNativeTlsKey nacl_rex_tag;
static MonoNativeTlsKey nacl_legacy_prefix_tag;
void
amd64_nacl_clear_legacy_prefix_tag ()
{
	mono_native_tls_set_value (nacl_legacy_prefix_tag, NULL);
}

void
amd64_nacl_tag_legacy_prefix (guint8* code)
{
	if (mono_native_tls_get_value (nacl_legacy_prefix_tag) == NULL)
		mono_native_tls_set_value (nacl_legacy_prefix_tag, code);
}

void
amd64_nacl_tag_rex (guint8* code)
{
	mono_native_tls_set_value (nacl_rex_tag, code);
}

guint8*
amd64_nacl_get_legacy_prefix_tag ()
{
	return (guint8 *)mono_native_tls_get_value (nacl_legacy_prefix_tag);
}

guint8*
amd64_nacl_get_rex_tag ()
{
	return (guint8 *)mono_native_tls_get_value (nacl_rex_tag);
}
/* Increment the instruction "depth" described above */
void
amd64_nacl_instruction_pre ()
{
	intptr_t depth = (intptr_t) mono_native_tls_get_value (nacl_instruction_depth);
	depth++;
	mono_native_tls_set_value (nacl_instruction_depth, (gpointer)depth);
}
/* amd64_nacl_instruction_post: Decrement instruction "depth", force bundle */
/* alignment if depth == 0 (top level instruction)                          */
/* IN: start, end    pointers to instruction beginning and end              */
/* OUT: start, end   pointers to beginning and end after possible alignment */
/* GLOBALS: nacl_instruction_depth     defined above                        */
void
amd64_nacl_instruction_post (guint8 **start, guint8 **end)
{
	intptr_t depth = (intptr_t) mono_native_tls_get_value (nacl_instruction_depth);
	depth--;
	mono_native_tls_set_value (nacl_instruction_depth, (void*)depth);

	g_assert ( depth >= 0 );
	if (depth == 0) {
		uintptr_t space_in_block;
		uintptr_t instlen;
		guint8 *prefix = amd64_nacl_get_legacy_prefix_tag ();
		/* if legacy prefix is present, and if it was emitted before */
		/* the start of the instruction sequence, adjust the start   */
		if (prefix != NULL && prefix < *start) {
			g_assert (*start - prefix <= 3);/* only 3 are allowed */
			*start = prefix;
		}
		space_in_block = kNaClAlignment - ((uintptr_t)(*start) & kNaClAlignmentMask);
		instlen = (uintptr_t)(*end - *start);
		/* Only check for instructions which are less than        */
		/* kNaClAlignment. The only instructions that should ever */
		/* be that long are call sequences, which are already     */
		/* padded out to align the return to the next bundle.     */
		if (instlen > space_in_block && instlen < kNaClAlignment) {
			const size_t MAX_NACL_INST_LENGTH = kNaClAlignment;
			guint8 copy_of_instruction [MAX_NACL_INST_LENGTH];
			const size_t length = (size_t)((*end)-(*start));
			g_assert (length < MAX_NACL_INST_LENGTH);

			memcpy (copy_of_instruction, *start, length);
			*start = mono_arch_nacl_pad (*start, space_in_block);
			memcpy (*start, copy_of_instruction, length);
			*end = *start + length;
		}
	}
	amd64_nacl_clear_legacy_prefix_tag ();
	amd64_nacl_tag_rex (NULL);
}
/* amd64_nacl_membase_handler: ensure all access to memory of the form     */
/*  OFFSET(%rXX) is sandboxed.  For allowable base registers %rip, %rbp,   */
/*  %rsp, and %r15, emit the membase as usual.  For all other registers,   */
/*  make sure the upper 32-bits are cleared, and use that register in the  */
/*  index field of a new address of this form: OFFSET(%r15,%eXX,1)         */
/* IN:      code                                                           */
/*             pointer to current instruction stream (in the               */
/*             middle of an instruction, after opcode is emitted)          */
/*          basereg/offset/dreg                                            */
/*             operands of normal membase address                          */
/* OUT:     code                                                           */
/*             pointer to the end of the membase/memindex emit             */
/* GLOBALS: nacl_rex_tag                                                   */
/*             position in instruction stream that rex prefix was emitted  */
/*          nacl_legacy_prefix_tag                                         */
/*             (possibly NULL) position in instruction of legacy x86 prefix*/
void
amd64_nacl_membase_handler (guint8 **code, gint8 basereg, gint32 offset, gint8 dreg)
{
	gint8 true_basereg = basereg;

	/* Cache these values, they might change  */
	/* as new instructions are emitted below. */
	guint8 *rex_tag = amd64_nacl_get_rex_tag ();
	guint8 *legacy_prefix_tag = amd64_nacl_get_legacy_prefix_tag ();

	/* 'basereg' is given masked to 0x7 at this point, so check */
	/* the rex prefix to see if this is an extended register.   */
	if ((rex_tag != NULL) && IS_REX (*rex_tag) && (*rex_tag & AMD64_REX_B)) {
		true_basereg |= 0x8;
	}

#define X86_LEA_OPCODE (0x8D)

	if (!amd64_is_valid_nacl_base (true_basereg) && (*(*code - 1) != X86_LEA_OPCODE)) {
		guint8 *old_instruction_start;

		/* This will hold the 'mov %eXX, %eXX' that clears the upper */
		/* 32-bits of the old base register (new index register)     */
		guint8 buf [32];
		guint8 *buf_ptr = buf;
		int insert_len;

		g_assert (rex_tag != NULL);

		if (IS_REX (*rex_tag)) {
			/* The old rex.B should be the new rex.X */
			if (*rex_tag & AMD64_REX_B) {
				*rex_tag |= AMD64_REX_X;
			}
			/* Since our new base is %r15 set rex.B */
			*rex_tag |= AMD64_REX_B;
		} else {
			/* Shift the instruction by one byte  */
			/* so we can insert a rex prefix      */
			memmove (rex_tag + 1, rex_tag, (size_t)(*code - rex_tag));
			*code += 1;
			/* New rex prefix only needs rex.B for %r15 base */
			*rex_tag = AMD64_REX (AMD64_REX_B);
		}

		if (legacy_prefix_tag) {
			old_instruction_start = legacy_prefix_tag;
		} else {
			old_instruction_start = rex_tag;
		}

		/* Clears the upper 32-bits of the previous base register */
		amd64_mov_reg_reg_size (buf_ptr, true_basereg, true_basereg, 4);
		insert_len = buf_ptr - buf;

		/* Move the old instruction forward to make */
		/* room for 'mov' stored in 'buf_ptr'       */
		memmove (old_instruction_start + insert_len, old_instruction_start, (size_t)(*code - old_instruction_start));
		*code += insert_len;
		memcpy (old_instruction_start, buf, insert_len);

		/* Sandboxed replacement for the normal membase_emit */
		x86_memindex_emit (*code, dreg, AMD64_R15, offset, basereg, 0);
	} else {
		/* Normal default behavior, emit membase memory location */
		x86_membase_emit_body (*code, dreg, basereg, offset);
	}
}
static inline unsigned char*
amd64_skip_nops (unsigned char* code)
{
	guint8 in_nop;
	do {
		in_nop = 0;
		if (code [0] == 0x90) {
			in_nop = 1;
			code += 1;
		}
		if (code [0] == 0x66 && code [1] == 0x90) {
			in_nop = 1;
			code += 2;
		}
		if (code [0] == 0x0f && code [1] == 0x1f
		 && code [2] == 0x00) {
			in_nop = 1;
			code += 3;
		}
		if (code [0] == 0x0f && code [1] == 0x1f
		 && code [2] == 0x40 && code [3] == 0x00) {
			in_nop = 1;
			code += 4;
		}
		if (code [0] == 0x0f && code [1] == 0x1f
		 && code [2] == 0x44 && code [3] == 0x00
		 && code [4] == 0x00) {
			in_nop = 1;
			code += 5;
		}
		if (code [0] == 0x66 && code [1] == 0x0f
		 && code [2] == 0x1f && code [3] == 0x44
		 && code [4] == 0x00 && code [5] == 0x00) {
			in_nop = 1;
			code += 6;
		}
		if (code [0] == 0x0f && code [1] == 0x1f
		 && code [2] == 0x80 && code [3] == 0x00
		 && code [4] == 0x00 && code [5] == 0x00
		 && code [6] == 0x00) {
			in_nop = 1;
			code += 7;
		}
		if (code [0] == 0x0f && code [1] == 0x1f
		 && code [2] == 0x84 && code [3] == 0x00
		 && code [4] == 0x00 && code [5] == 0x00
		 && code [6] == 0x00 && code [7] == 0x00) {
			in_nop = 1;
			code += 8;
		}
	} while (in_nop);
	return code;
}

guint8*
mono_arch_nacl_skip_nops (guint8* code)
{
	return amd64_skip_nops (code);
}

#endif /*__native_client_codegen__*/
static void
amd64_patch (unsigned char* code, gpointer target)
{
	guint8 rex = 0;

#ifdef __native_client_codegen__
	code = amd64_skip_nops (code);
#endif
#if defined(__native_client_codegen__) && defined(__native_client__)
	if (nacl_is_code_address (code)) {
		/* For tail calls, code is patched after being installed */
		/* but not through the normal "patch callsite" method.   */
		unsigned char buf [kNaClAlignment];
		unsigned char *aligned_code = (unsigned char *)((uintptr_t)code & ~kNaClAlignmentMask);
		int ret;
		memcpy (buf, aligned_code, kNaClAlignment);
		/* Patch a temp buffer of bundle size,  */
		/* then install to actual location.     */
		amd64_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), target);
		ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
		g_assert (ret == 0);
		return;
	}
	target = nacl_modify_patch_target (target);
#endif

	/* Skip REX */
	if ((code [0] >= 0x40) && (code [0] <= 0x4f)) {
		rex = code [0];
		code += 1;
	}

	if ((code [0] & 0xf8) == 0xb8) {
		/* amd64_set_reg_template */
		*(guint64*)(code + 1) = (guint64)target;
	}
	else if ((code [0] == 0x8b) && rex && x86_modrm_mod (code [1]) == 0 && x86_modrm_rm (code [1]) == 5) {
		/* mov 0(%rip), %dreg */
		*(guint32*)(code + 2) = (guint32)(guint64)target - 7;
	}
	else if ((code [0] == 0xff) && (code [1] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		*(guint32*)(code + 2) = ((guint32)(guint64)target) - 7;
	}
	else if (code [0] == 0xe8) {
		/* call <DISP> */
		gint64 disp = (guint8*)target - (guint8*)code;
		g_assert (amd64_is_imm32 (disp));
		x86_patch (code, (unsigned char*)target);
	}
	else
		x86_patch (code, (unsigned char*)target);
}
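/*
 * Worked example (illustrative only): patching a "mov $imm64, %r11" template.
 * The first byte 0x49 is a REX prefix and is skipped, 0xbb is 0xb8 + reg, so
 * amd64_patch () stores the new 64 bit target right after the opcode:
 *
 *   before:  49 bb 00 00 00 00 00 00 00 00    mov $0x0, %r11
 *   after:   49 bb <8 byte little-endian address of 'target'>
 */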
void
mono_amd64_patch (unsigned char* code, gpointer target)
{
	amd64_patch (code, target);
}
typedef enum {
	ArgInIReg,
	ArgInFloatSSEReg,
	ArgInDoubleSSEReg,
	ArgOnStack,
	ArgValuetypeInReg,
	ArgValuetypeAddrInIReg,
	ArgNone /* only in pair_storage */
} ArgStorage;

typedef struct {
	gint16 offset;
	gint8  reg;
	ArgStorage storage;

	/* Only if storage == ArgValuetypeInReg */
	ArgStorage pair_storage [2];
	gint8 pair_regs [2];
	int nregs;
} ArgInfo;

typedef struct {
	int nargs;
	guint32 stack_usage;
	guint32 reg_usage;
	guint32 freg_usage;
	gboolean need_stack_align;
	gboolean vtype_retaddr;
	/* The index of the vret arg in the argument list */
	int vret_arg_index;
	ArgInfo ret;
	ArgInfo sig_cookie;
	ArgInfo args [1];
} CallInfo;
#define DEBUG(a) if (cfg->verbose_level > 1) a
#ifdef HOST_WIN32
#define PARAM_REGS 4

static AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
#else
#define PARAM_REGS 6

static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
#endif
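/*
 * Resulting integer argument order (illustrative only):
 *
 *   System V AMD64:  arg0 -> %rdi, arg1 -> %rsi, arg2 -> %rdx,
 *                    arg3 -> %rcx, arg4 -> %r8,  arg5 -> %r9,
 *                    further integer args go on the stack.
 *   Win64:           arg0 -> %rcx, arg1 -> %rdx, arg2 -> %r8, arg3 -> %r9,
 *                    further args go on the stack above the 32 byte home area.
 */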
static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
	ainfo->offset = *stack_size;

	if (*gr >= PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		/* Since the same stack slot size is used for all arg */
		/*  types, it needs to be big enough to hold them all */
		(*stack_size) += sizeof(mgreg_t);
	}
	else {
		ainfo->storage = ArgInIReg;
		ainfo->reg = param_regs [*gr];
		(*gr) ++;
	}
}
#ifdef HOST_WIN32
#define FLOAT_PARAM_REGS 4
#else
#define FLOAT_PARAM_REGS 8
#endif

static void inline
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
	ainfo->offset = *stack_size;

	if (*gr >= FLOAT_PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		/* Since the same stack slot size is used for both float */
		/*  types, it needs to be big enough to hold them both   */
		(*stack_size) += sizeof(mgreg_t);
	}
	else {
		/* A double register */
		if (is_double)
			ainfo->storage = ArgInDoubleSSEReg;
		else
			ainfo->storage = ArgInFloatSSEReg;
		ainfo->reg = *gr;
		(*gr) += 1;
	}
}
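/*
 * Minimal sketch (illustrative only, hypothetical locals): how add_general ()
 * and add_float () lay out the signature "void m (long, double, long)" on
 * System V AMD64:
 *
 *   guint32 gr = 0, fr = 0, stack_size = 0;
 *   ArgInfo a0, a1, a2;
 *
 *   add_general (&gr, &stack_size, &a0);        a0.storage == ArgInIReg,         a0.reg == AMD64_RDI
 *   add_float   (&fr, &stack_size, &a1, TRUE);  a1.storage == ArgInDoubleSSEReg, a1.reg == 0 (%xmm0)
 *   add_general (&gr, &stack_size, &a2);        a2.storage == ArgInIReg,         a2.reg == AMD64_RSI
 */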
typedef enum ArgumentClass {
	ARG_CLASS_NO_CLASS,
	ARG_CLASS_MEMORY,
	ARG_CLASS_INTEGER,
	ARG_CLASS_SSE
} ArgumentClass;

static ArgumentClass
merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
	ArgumentClass class2 = ARG_CLASS_NO_CLASS;
	MonoType *ptype;

	ptype = mini_type_get_underlying_type (NULL, type);
	switch (ptype->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
		class2 = ARG_CLASS_INTEGER;
		break;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
#ifdef HOST_WIN32
		class2 = ARG_CLASS_INTEGER;
#else
		class2 = ARG_CLASS_SSE;
#endif
		break;

	case MONO_TYPE_TYPEDBYREF:
		g_assert_not_reached ();

	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (ptype)) {
			class2 = ARG_CLASS_INTEGER;
			break;
		}
		/* fall through */
	case MONO_TYPE_VALUETYPE: {
		MonoMarshalType *info = mono_marshal_load_type_info (ptype->data.klass);
		int i;

		for (i = 0; i < info->num_fields; ++i) {
			class2 = class1;
			class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
		}
		break;
	}
	default:
		g_assert_not_reached ();
	}

	/* Merge */
	if (class1 == class2)
		;
	else if (class1 == ARG_CLASS_NO_CLASS)
		class1 = class2;
	else if ((class1 == ARG_CLASS_MEMORY) || (class2 == ARG_CLASS_MEMORY))
		class1 = ARG_CLASS_MEMORY;
	else if ((class1 == ARG_CLASS_INTEGER) || (class2 == ARG_CLASS_INTEGER))
		class1 = ARG_CLASS_INTEGER;
	else
		class1 = ARG_CLASS_SSE;

	return class1;
}
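/*
 * Worked example (illustrative only) of the classification merge for the
 * struct { double d; gint64 i; } passed by value in a pinvoke signature:
 *
 *   quad 0 (bytes 0-7):  holds 'd'  -> ARG_CLASS_SSE
 *   quad 1 (bytes 8-15): holds 'i'  -> ARG_CLASS_INTEGER
 *
 * so the struct travels in one SSE register and one integer register.  A quad
 * mixing float and integer fields merges to ARG_CLASS_INTEGER, and any quad
 * classified ARG_CLASS_MEMORY forces the whole argument onto the stack.
 */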
#ifdef __native_client_codegen__

const guint kNaClAlignment = kNaClAlignmentAMD64;
const guint kNaClAlignmentMask = kNaClAlignmentMaskAMD64;

/* Default alignment for Native Client is 32-byte. */
gint8 nacl_align_byte = -32; /* signed version of 0xe0 */

/* mono_arch_nacl_pad: Add pad bytes of alignment instructions at code, */
/* Check that alignment doesn't cross an alignment boundary.            */
guint8*
mono_arch_nacl_pad (guint8 *code, int pad)
{
	const int kMaxPadding = 8; /* see amd64-codegen.h:amd64_padding_size() */

	if (pad == 0) return code;
	/* assertion: alignment cannot cross a block boundary */
	g_assert (((uintptr_t)code & (~kNaClAlignmentMask)) ==
			 (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
	while (pad >= kMaxPadding) {
		amd64_padding (code, kMaxPadding);
		pad -= kMaxPadding;
	}
	if (pad != 0) amd64_padding (code, pad);
	return code;
}
#endif
static void
add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
			   gboolean is_return,
			   guint32 *gr, guint32 *fr, guint32 *stack_size)
{
	guint32 size, quad, nquads, i;
	/* Keep track of the size used in each quad so we can */
	/* use the right size when copying args/return vars.  */
	guint32 quadsize [2] = {8, 8};
	ArgumentClass args [2];
	MonoMarshalType *info = NULL;
	MonoClass *klass;
	MonoGenericSharingContext tmp_gsctx;
	gboolean pass_on_stack = FALSE;

	/*
	 * The gsctx currently contains no data, it is only used for checking whenever
	 * open types are allowed, some callers like mono_arch_get_argument_info ()
	 * don't pass it to us, so work around that.
	 */
	if (!gsctx)
		gsctx = &tmp_gsctx;

	klass = mono_class_from_mono_type (type);
	size = mini_type_stack_size_full (gsctx, &klass->byval_arg, NULL, sig->pinvoke);
#ifndef HOST_WIN32
	if (!sig->pinvoke && !disable_vtypes_in_regs && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) {
		/* We pass and return vtypes of size 8 in a register */
	} else if (!sig->pinvoke || (size == 0) || (size > 16)) {
		pass_on_stack = TRUE;
	}
#else
	if (!sig->pinvoke) {
		pass_on_stack = TRUE;
	}
#endif

	/* If this struct can't be split up naturally into 8-byte */
	/* chunks (registers), pass it on the stack.               */
	if (sig->pinvoke && !pass_on_stack) {
		guint32 align;
		guint32 field_size;

		info = mono_marshal_load_type_info (klass);
		g_assert (info);
		for (i = 0; i < info->num_fields; ++i) {
			field_size = mono_marshal_type_size (info->fields [i].field->type,
							   info->fields [i].mspec,
							   &align, TRUE, klass->unicode);
			if ((info->fields [i].offset < 8) && (info->fields [i].offset + field_size) > 8) {
				pass_on_stack = TRUE;
				break;
			}
		}
	}

	if (pass_on_stack) {
		/* Always pass in memory */
		ainfo->offset = *stack_size;
		*stack_size += ALIGN_TO (size, 8);
		ainfo->storage = ArgOnStack;

		return;
	}

	/* FIXME: Handle structs smaller than 8 bytes */
	//if ((size % 8) != 0)
	//	NOT_IMPLEMENTED;

	if (size > 8)
		nquads = 2;
	else
		nquads = 1;

	if (!sig->pinvoke) {
		/* Always pass in 1 or 2 integer registers */
		args [0] = ARG_CLASS_INTEGER;
		args [1] = ARG_CLASS_INTEGER;
		/* Only the simplest cases are supported */
		if (is_return && nquads != 1) {
			args [0] = ARG_CLASS_MEMORY;
			args [1] = ARG_CLASS_MEMORY;
		}
	} else {
		/*
		 * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
		 * The X87 and SSEUP stuff is left out since there are no such types in
		 * the CLR.
		 */
		info = mono_marshal_load_type_info (klass);
		g_assert (info);

#ifndef HOST_WIN32
		if (info->native_size > 16) {
			ainfo->offset = *stack_size;
			*stack_size += ALIGN_TO (info->native_size, 8);
			ainfo->storage = ArgOnStack;

			return;
		}
#else
		switch (info->native_size) {
		case 1: case 2: case 4: case 8:
			break;
		default:
			if (is_return) {
				ainfo->storage = ArgOnStack;
				ainfo->offset = *stack_size;
				*stack_size += ALIGN_TO (info->native_size, 8);
			}
			else {
				ainfo->storage = ArgValuetypeAddrInIReg;

				if (*gr < PARAM_REGS) {
					ainfo->pair_storage [0] = ArgInIReg;
					ainfo->pair_regs [0] = param_regs [*gr];
					(*gr) ++;
				}
				else {
					ainfo->pair_storage [0] = ArgOnStack;
					ainfo->offset = *stack_size;
					*stack_size += 8;
				}
			}

			return;
		}
#endif

		args [0] = ARG_CLASS_NO_CLASS;
		args [1] = ARG_CLASS_NO_CLASS;
		for (quad = 0; quad < nquads; ++quad) {
			int size;
			guint32 align;
			ArgumentClass class1;

			if (info->num_fields == 0)
				class1 = ARG_CLASS_MEMORY;
			else
				class1 = ARG_CLASS_NO_CLASS;
			for (i = 0; i < info->num_fields; ++i) {
				size = mono_marshal_type_size (info->fields [i].field->type,
							   info->fields [i].mspec,
							   &align, TRUE, klass->unicode);
				if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
					/* Unaligned field */
					NOT_IMPLEMENTED;
				}

				/* Skip fields in other quad */
				if ((quad == 0) && (info->fields [i].offset >= 8))
					continue;
				if ((quad == 1) && (info->fields [i].offset < 8))
					continue;

				/* How far into this quad this data extends.*/
				/* (8 is size of quad) */
				quadsize [quad] = info->fields [i].offset + size - (quad * 8);

				class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
			}
			g_assert (class1 != ARG_CLASS_NO_CLASS);
			args [quad] = class1;
		}
	}

	/* Post merger cleanup */
	if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY))
		args [0] = args [1] = ARG_CLASS_MEMORY;

	/* Allocate registers */
	{
		int orig_gr = *gr;
		int orig_fr = *fr;

		ainfo->storage = ArgValuetypeInReg;
		ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
		ainfo->nregs = nquads;
		for (quad = 0; quad < nquads; ++quad) {
			switch (args [quad]) {
			case ARG_CLASS_INTEGER:
				if (*gr >= PARAM_REGS)
					args [quad] = ARG_CLASS_MEMORY;
				else {
					ainfo->pair_storage [quad] = ArgInIReg;
					if (is_return)
						ainfo->pair_regs [quad] = return_regs [*gr];
					else
						ainfo->pair_regs [quad] = param_regs [*gr];
					(*gr) ++;
				}
				break;
			case ARG_CLASS_SSE:
				if (*fr >= FLOAT_PARAM_REGS)
					args [quad] = ARG_CLASS_MEMORY;
				else {
					if (quadsize [quad] <= 4)
						ainfo->pair_storage [quad] = ArgInFloatSSEReg;
					else ainfo->pair_storage [quad] = ArgInDoubleSSEReg;
					ainfo->pair_regs [quad] = *fr;
					(*fr) ++;
				}
				break;
			case ARG_CLASS_MEMORY:
				break;
			default:
				g_assert_not_reached ();
			}
		}

		if ((args [0] == ARG_CLASS_MEMORY) || (args [1] == ARG_CLASS_MEMORY)) {
			/* Revert possible register assignments */
			*gr = orig_gr;
			*fr = orig_fr;

			ainfo->offset = *stack_size;
			if (sig->pinvoke)
				*stack_size += ALIGN_TO (info->native_size, 8);
			else
				*stack_size += nquads * sizeof(mgreg_t);
			ainfo->storage = ArgOnStack;
		}
	}
}
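/*
 * Worked examples (illustrative only) of how add_valuetype () classifies a
 * few value types for a pinvoke signature on System V AMD64:
 *
 *   struct { double x; double y; }           -> both quads ARG_CLASS_SSE:
 *                                                ArgValuetypeInReg, two SSE registers
 *   struct { gint64 a; gint64 b; }           -> both quads ARG_CLASS_INTEGER:
 *                                                ArgValuetypeInReg, two integer registers
 *   struct { gint64 a; gint64 b; gint64 c; } -> 24 bytes > 16: ArgOnStack
 *
 * For managed (non-pinvoke) calls the quads are always classified
 * ARG_CLASS_INTEGER, so small vtypes travel in integer registers only.
 */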
/*
 * Obtain information about a call according to the calling convention.
 * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement
 * Draft Version 0.23" document for more information.
 */
static CallInfo*
get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
{
	guint32 i, gr, fr, pstart;
	MonoType *ret_type;
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	CallInfo *cinfo;
	gboolean is_pinvoke = sig->pinvoke;

	if (mp)
		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	else
		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	cinfo->nargs = n;

	gr = 0;
	fr = 0;

	/* return value */
	ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
	switch (ret_type->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_STRING:
		cinfo->ret.storage = ArgInIReg;
		cinfo->ret.reg = AMD64_RAX;
		break;
	case MONO_TYPE_U8:
	case MONO_TYPE_I8:
		cinfo->ret.storage = ArgInIReg;
		cinfo->ret.reg = AMD64_RAX;
		break;
	case MONO_TYPE_R4:
		cinfo->ret.storage = ArgInFloatSSEReg;
		cinfo->ret.reg = AMD64_XMM0;
		break;
	case MONO_TYPE_R8:
		cinfo->ret.storage = ArgInDoubleSSEReg;
		cinfo->ret.reg = AMD64_XMM0;
		break;
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (ret_type)) {
			cinfo->ret.storage = ArgInIReg;
			cinfo->ret.reg = AMD64_RAX;
			break;
		}
		/* fall through */
	case MONO_TYPE_VALUETYPE: {
		guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;

		add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
		if (cinfo->ret.storage == ArgOnStack) {
			cinfo->vtype_retaddr = TRUE;
			/* The caller passes the address where the value is stored */
		}
		break;
	}
	case MONO_TYPE_TYPEDBYREF:
		/* Same as a valuetype with size 24 */
		cinfo->vtype_retaddr = TRUE;
		break;
	case MONO_TYPE_VOID:
		break;
	default:
		g_error ("Can't handle as return value 0x%x", sig->ret->type);
	}

	pstart = 0;
	/*
	 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
	 * the first argument, allowing 'this' to be always passed in the first arg reg.
	 * Also do this if the first argument is a reference type, since virtual calls
	 * are sometimes made using calli without sig->hasthis set, like in the delegate
	 * invoke wrappers.
	 */
	if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
		if (sig->hasthis) {
			add_general (&gr, &stack_size, cinfo->args + 0);
		} else {
			add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
			pstart = 1;
		}
		add_general (&gr, &stack_size, &cinfo->ret);
		cinfo->vret_arg_index = 1;
	} else {
		/* this */
		if (sig->hasthis)
			add_general (&gr, &stack_size, cinfo->args + 0);

		if (cinfo->vtype_retaddr)
			add_general (&gr, &stack_size, &cinfo->ret);
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

	for (i = pstart; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
		MonoType *ptype;

#ifdef HOST_WIN32
		/* The float param registers and other param registers must be the same index on Windows x64.*/
		if (gr > fr)
			fr = gr;
		else if (fr > gr)
			gr = fr;
#endif

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* We always pass the sig cookie on the stack for simplicity */
			/*
			 * Prevent implicit arguments + the sig cookie from being passed
			 * in registers.
			 */
			gr = PARAM_REGS;
			fr = FLOAT_PARAM_REGS;

			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie);
		}

		ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
		switch (ptype->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (ptype)) {
				add_general (&gr, &stack_size, ainfo);
				break;
			}
			/* fall through */
		case MONO_TYPE_VALUETYPE:
			add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
			break;
		case MONO_TYPE_TYPEDBYREF:
#ifdef HOST_WIN32
			add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
#else
			stack_size += sizeof (MonoTypedRef);
			ainfo->storage = ArgOnStack;
#endif
			break;
		case MONO_TYPE_U8:
		case MONO_TYPE_I8:
			add_general (&gr, &stack_size, ainfo);
			break;
		case MONO_TYPE_R4:
			add_float (&fr, &stack_size, ainfo, FALSE);
			break;
		case MONO_TYPE_R8:
			add_float (&fr, &stack_size, ainfo, TRUE);
			break;
		default:
			g_assert_not_reached ();
		}
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
		gr = PARAM_REGS;
		fr = FLOAT_PARAM_REGS;

		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie);
	}

#ifdef HOST_WIN32
	// There always is 32 bytes reserved on the stack when calling on Winx64
	stack_size += 0x20;
#endif

#ifndef MONO_AMD64_NO_PUSHES
	if (stack_size & 0x8) {
		/* The AMD64 ABI requires each stack frame to be 16 byte aligned */
		cinfo->need_stack_align = TRUE;
		stack_size += 8;
	}
#endif

	cinfo->stack_usage = stack_size;
	cinfo->reg_usage = gr;
	cinfo->freg_usage = fr;
	return cinfo;
}
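/*
 * Minimal usage sketch (illustrative only; 'sig' is assumed to be a valid
 * MonoMethodSignature obtained elsewhere): callers without a mempool pass
 * NULL and must free the result themselves.
 *
 *   CallInfo *cinfo = get_call_info (NULL, NULL, sig);
 *   int i;
 *
 *   for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
 *       ArgInfo *ainfo = &cinfo->args [i];
 *
 *       if (ainfo->storage == ArgInIReg)
 *           printf ("arg %d in %s\n", i, mono_arch_regname (ainfo->reg));
 *       else if (ainfo->storage == ArgOnStack)
 *           printf ("arg %d at [sp + %d]\n", i, ainfo->offset);
 *   }
 *   g_free (cinfo);
 */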
/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 */
int
mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
	int k;
	CallInfo *cinfo = get_call_info (NULL, NULL, csig);
	guint32 args_size = cinfo->stack_usage;

	/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
	if (csig->hasthis) {
		arg_info [0].offset = 0;
	}

	for (k = 0; k < param_count; k++) {
		arg_info [k + 1].offset = ((k + csig->hasthis) * 8);
		/* FIXME: */
		arg_info [k + 1].size = 0;
	}

	g_free (cinfo);

	return args_size;
}
gboolean
mono_amd64_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
{
	CallInfo *c1, *c2;
	gboolean res;

	c1 = get_call_info (NULL, NULL, caller_sig);
	c2 = get_call_info (NULL, NULL, callee_sig);
	res = c1->stack_usage >= c2->stack_usage;
	if (callee_sig->ret && MONO_TYPE_ISSTRUCT (callee_sig->ret) && c2->ret.storage != ArgValuetypeInReg)
		/* An address on the callee's stack is passed as the first argument */
		res = FALSE;

	g_free (c1);
	g_free (c2);

	return res;
}
static gboolean
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
#if defined(MONO_CROSS_COMPILE)
	return FALSE;
#else
	__asm__ __volatile__ ("cpuid"
		: "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
		: "a" (id));
	return TRUE;
#endif
}
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
#ifndef _MSC_VER
	guint16 fpcw;

	/* spec compliance requires running with double precision */
	__asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
	fpcw &= ~X86_FPCW_PRECC_MASK;
	fpcw |= X86_FPCW_PREC_DOUBLE;
	__asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
	__asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
#else
	/* TODO: This is crashing on Win64 right now.
	 * _control87 (_PC_53, MCW_PC);
	 */
#endif
}
/*
 * Initialize architecture specific code.
 */
void
mono_arch_init (void)
{
	int flags;

	InitializeCriticalSection (&mini_arch_mutex);
#if defined(__native_client_codegen__)
	mono_native_tls_alloc (&nacl_instruction_depth, NULL);
	mono_native_tls_set_value (nacl_instruction_depth, (gpointer)0);
	mono_native_tls_alloc (&nacl_rex_tag, NULL);
	mono_native_tls_alloc (&nacl_legacy_prefix_tag, NULL);
#endif

#ifdef MONO_ARCH_NOMAP32BIT
	flags = MONO_MMAP_READ;
	/* amd64_mov_reg_imm () + amd64_mov_reg_membase () */
	breakpoint_size = 13;
	breakpoint_fault_size = 3;
#else
	flags = MONO_MMAP_READ|MONO_MMAP_32BIT;
	/* amd64_mov_reg_mem () */
	breakpoint_size = 8;
	breakpoint_fault_size = 8;
#endif

	/* amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); */
	single_step_fault_size = 4;

	ss_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
	bp_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
	mono_mprotect (bp_trigger_page, mono_pagesize (), 0);

	mono_aot_register_jit_icall ("mono_amd64_throw_exception", mono_amd64_throw_exception);
	mono_aot_register_jit_icall ("mono_amd64_throw_corlib_exception", mono_amd64_throw_corlib_exception);
	mono_aot_register_jit_icall ("mono_amd64_get_original_ip", mono_amd64_get_original_ip);
}
/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
	DeleteCriticalSection (&mini_arch_mutex);
#if defined(__native_client_codegen__)
	mono_native_tls_free (nacl_instruction_depth);
	mono_native_tls_free (nacl_rex_tag);
	mono_native_tls_free (nacl_legacy_prefix_tag);
#endif
}
/*
 * This function returns the optimizations supported on this cpu.
 */
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
	int eax, ebx, ecx, edx;
	guint32 opts = 0;

	*exclude_mask = 0;

	/* Feature Flags function, flags returned in EDX. */
	if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
		if (edx & (1 << 15)) {
			opts |= MONO_OPT_CMOV;
			if (edx & 1)
				opts |= MONO_OPT_FCMOV;
			else
				*exclude_mask |= MONO_OPT_FCMOV;
		} else
			*exclude_mask |= MONO_OPT_CMOV;
	}

	return opts;
}
/*
 * This function tests for all SSE functions supported.
 *
 * Returns a bitmask corresponding to all supported versions.
 */
guint32
mono_arch_cpu_enumerate_simd_versions (void)
{
	int eax, ebx, ecx, edx;
	guint32 sse_opts = 0;

	if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
		if (edx & (1 << 25))
			sse_opts |= SIMD_VERSION_SSE1;
		if (edx & (1 << 26))
			sse_opts |= SIMD_VERSION_SSE2;
		if (ecx & (1 << 0))
			sse_opts |= SIMD_VERSION_SSE3;
		if (ecx & (1 << 9))
			sse_opts |= SIMD_VERSION_SSSE3;
		if (ecx & (1 << 19))
			sse_opts |= SIMD_VERSION_SSE41;
		if (ecx & (1 << 20))
			sse_opts |= SIMD_VERSION_SSE42;
	}

	/* Yes, all this needs to be done to check for sse4a.
	   See: "Amd: CPUID Specification"
	 */
	if (cpuid (0x80000000, &eax, &ebx, &ecx, &edx)) {
		/* eax greater or equal than 0x80000001, ebx = 'htuA', ecx = 'DMAc', edx = 'itne' */
		if ((((unsigned int) eax) >= 0x80000001) && (ebx == 0x68747541) && (ecx == 0x444D4163) && (edx == 0x69746E65)) {
			cpuid (0x80000001, &eax, &ebx, &ecx, &edx);
			if (ecx & (1 << 6))
				sse_opts |= SIMD_VERSION_SSE4a;
		}
	}

	return sse_opts;
}
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
	GList *vars = NULL;
	int i;

	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		/* unused vars */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
			continue;

		if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
		    (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
			continue;

		if (mono_is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = g_list_prepend (vars, vmv);
		}
	}

	vars = mono_varlist_sort (cfg, vars, 0);

	return vars;
}
/*
 * mono_arch_compute_omit_fp:
 *
 *   Determine whenever the frame pointer can be eliminated.
 */
static void
mono_arch_compute_omit_fp (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i, locals_size;
	CallInfo *cinfo;

	if (cfg->arch.omit_fp_computed)
		return;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	/*
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;
	cfg->arch.omit_fp_computed = TRUE;

#ifdef __native_client_codegen__
	/* NaCl modules may not change the value of RBP, so it cannot be */
	/* used as a normal register, but it can be used as a frame pointer*/
	cfg->disable_omit_fp = TRUE;
	cfg->arch.omit_fp = FALSE;
#endif

	if (cfg->disable_omit_fp)
		cfg->arch.omit_fp = FALSE;

	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;
	/*
	if (cfg->method->save_lmf)
		cfg->arch.omit_fp = FALSE;
	*/
	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
		(cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == ArgOnStack) {
			/*
			 * The stack offset can only be determined when the frame
			 * size is known.
			 */
			cfg->arch.omit_fp = FALSE;
		}
	}

	locals_size = 0;
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		int ialign;

		locals_size += mono_type_size (ins->inst_vtype, &ialign);
	}
}
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
	GList *regs = NULL;

	mono_arch_compute_omit_fp (cfg);

	if (cfg->globalra) {
		if (cfg->arch.omit_fp)
			regs = g_list_prepend (regs, (gpointer)AMD64_RBP);

		regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
		regs = g_list_prepend (regs, (gpointer)AMD64_R12);
		regs = g_list_prepend (regs, (gpointer)AMD64_R13);
		regs = g_list_prepend (regs, (gpointer)AMD64_R14);
#ifndef __native_client_codegen__
		regs = g_list_prepend (regs, (gpointer)AMD64_R15);
#endif

		regs = g_list_prepend (regs, (gpointer)AMD64_R10);
		regs = g_list_prepend (regs, (gpointer)AMD64_R9);
		regs = g_list_prepend (regs, (gpointer)AMD64_R8);
		regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
		regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
		regs = g_list_prepend (regs, (gpointer)AMD64_RDX);
		regs = g_list_prepend (regs, (gpointer)AMD64_RCX);
		regs = g_list_prepend (regs, (gpointer)AMD64_RAX);
	} else {
		if (cfg->arch.omit_fp)
			regs = g_list_prepend (regs, (gpointer)AMD64_RBP);

		/* We use the callee saved registers for global allocation */
		regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
		regs = g_list_prepend (regs, (gpointer)AMD64_R12);
		regs = g_list_prepend (regs, (gpointer)AMD64_R13);
		regs = g_list_prepend (regs, (gpointer)AMD64_R14);
#ifndef __native_client_codegen__
		regs = g_list_prepend (regs, (gpointer)AMD64_R15);
#endif
#ifdef HOST_WIN32
		regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
		regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
#endif
	}

	return regs;
}
*cfg
)
1550 /* All XMM registers */
1551 for (i
= 0; i
< 16; ++i
)
1552 regs
= g_list_prepend (regs
, GINT_TO_POINTER (i
));
1558 mono_arch_get_iregs_clobbered_by_call (MonoCallInst
*call
)
1560 static GList
*r
= NULL
;
1565 regs
= g_list_prepend (regs
, (gpointer
)AMD64_RBP
);
1566 regs
= g_list_prepend (regs
, (gpointer
)AMD64_RBX
);
1567 regs
= g_list_prepend (regs
, (gpointer
)AMD64_R12
);
1568 regs
= g_list_prepend (regs
, (gpointer
)AMD64_R13
);
1569 regs
= g_list_prepend (regs
, (gpointer
)AMD64_R14
);
1570 #ifndef __native_client_codegen__
1571 regs
= g_list_prepend (regs
, (gpointer
)AMD64_R15
);
1574 regs
= g_list_prepend (regs
, (gpointer
)AMD64_R10
);
1575 regs
= g_list_prepend (regs
, (gpointer
)AMD64_R9
);
1576 regs
= g_list_prepend (regs
, (gpointer
)AMD64_R8
);
1577 regs
= g_list_prepend (regs
, (gpointer
)AMD64_RDI
);
1578 regs
= g_list_prepend (regs
, (gpointer
)AMD64_RSI
);
1579 regs
= g_list_prepend (regs
, (gpointer
)AMD64_RDX
);
1580 regs
= g_list_prepend (regs
, (gpointer
)AMD64_RCX
);
1581 regs
= g_list_prepend (regs
, (gpointer
)AMD64_RAX
);
1583 InterlockedCompareExchangePointer ((gpointer
*)&r
, regs
, NULL
);
GList*
mono_arch_get_fregs_clobbered_by_call (MonoCallInst *call)
{
	GList *regs = NULL;
	int i;
	static GList *r = NULL;

	if (r == NULL) {
		for (i = 0; i < AMD64_XMM_NREG; ++i)
			regs = g_list_prepend (regs, GINT_TO_POINTER (MONO_MAX_IREGS + i));

		InterlockedCompareExchangePointer ((gpointer*)&r, regs, NULL);
	}

	return r;
}
/*
 * mono_arch_regalloc_cost:
 *
 *  Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
guint32
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
{
	MonoInst *ins = cfg->varinfo [vmv->idx];

	if (cfg->method->save_lmf)
		/* The register is already saved */
		/* subtract 1 for the invisible store in the prolog */
		return (ins->opcode == OP_ARG) ? 0 : 1;
	else
		/* push+pop */
		return (ins->opcode == OP_ARG) ? 1 : 2;
}
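/*
 * Cost arithmetic example (illustrative only): for an OP_LOCAL variable in a
 * method that saves the LMF the callee saved register is already stored by
 * the prolog, so the cost is 1 memory reference; without save_lmf it is 2
 * (the push/pop pair).  For an incoming argument (OP_ARG) the corresponding
 * costs are 0 and 1, since the argument load would happen anyway.
 */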
/*
 * mono_arch_fill_argument_info:
 *
 *   Populate cfg->args, cfg->ret and cfg->vret_addr with information about the arguments
 * of the method.
 */
void
mono_arch_fill_argument_info (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *ins;
	int i;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = cfg->arch.cinfo;

	/*
	 * Contrary to mono_arch_allocate_vars (), the information should describe
	 * where the arguments are at the beginning of the method, not where they can be
	 * accessed during the execution of the method. The latter makes no sense for the
	 * global register allocator, since a variable can be in more than one location.
	 */
	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
		case ArgInFloatSSEReg:
		case ArgInDoubleSSEReg:
			if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
				cfg->vret_addr->opcode = OP_REGVAR;
				cfg->vret_addr->inst_c0 = cinfo->ret.reg;
			}
			else {
				cfg->ret->opcode = OP_REGVAR;
				cfg->ret->inst_c0 = cinfo->ret.reg;
			}
			break;
		case ArgValuetypeInReg:
			cfg->ret->opcode = OP_REGOFFSET;
			cfg->ret->inst_basereg = -1;
			cfg->ret->inst_offset = -1;
			break;
		default:
			g_assert_not_reached ();
		}
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];
		MonoType *arg_type;

		ins = cfg->args [i];

		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		switch (ainfo->storage) {
		case ArgInIReg:
		case ArgInFloatSSEReg:
		case ArgInDoubleSSEReg:
			ins->opcode = OP_REGVAR;
			ins->inst_c0 = ainfo->reg;
			break;
		case ArgOnStack:
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = -1;
			ins->inst_offset = -1;
			break;
		case ArgValuetypeInReg:
			/* Dummy */
			ins->opcode = OP_NOP;
			break;
		default:
			g_assert_not_reached ();
		}
	}
}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	MonoInst *ins;
	int i, offset;
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;
	CallInfo *cinfo;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	cinfo = cfg->arch.cinfo;

	mono_arch_compute_omit_fp (cfg);

	/*
	 * We use the ABI calling conventions for managed code as well.
	 * Exception: valuetypes are only sometimes passed or returned in registers.
	 */

	/*
	 * The stack looks like this:
	 * <incoming arguments passed on the stack>
	 * <return value>
	 * <lmf/caller saved registers>
	 * <locals>
	 * <spill area>
	 * <localloc area>  -> grows dynamically
	 * <params area>
	 */
	if (cfg->arch.omit_fp) {
		cfg->flags |= MONO_CFG_HAS_SPILLUP;
		cfg->frame_reg = AMD64_RSP;
		offset = 0;
	} else {
		/* Locals are allocated backwards from %fp */
		cfg->frame_reg = AMD64_RBP;
		offset = 0;
	}

	if (cfg->method->save_lmf) {
		/* The LMF var is allocated normally */
	} else {
		if (cfg->arch.omit_fp)
			cfg->arch.reg_save_area_offset = offset;
		/* Reserve space for callee saved registers */
		for (i = 0; i < AMD64_NREG; ++i)
			if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
				offset += sizeof(mgreg_t);
			}
	}

	if (sig->ret->type != MONO_TYPE_VOID) {
		switch (cinfo->ret.storage) {
		case ArgInIReg:
		case ArgInFloatSSEReg:
		case ArgInDoubleSSEReg:
			if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
				if (cfg->globalra) {
					cfg->vret_addr->opcode = OP_REGVAR;
					cfg->vret_addr->inst_c0 = cinfo->ret.reg;
				} else {
					/* The register is volatile */
					cfg->vret_addr->opcode = OP_REGOFFSET;
					cfg->vret_addr->inst_basereg = cfg->frame_reg;
					if (cfg->arch.omit_fp) {
						cfg->vret_addr->inst_offset = offset;
						offset += 8;
					} else {
						offset += 8;
						cfg->vret_addr->inst_offset = -offset;
					}
					if (G_UNLIKELY (cfg->verbose_level > 1)) {
						printf ("vret_addr =");
						mono_print_ins (cfg->vret_addr);
					}
				}
			}
			else {
				cfg->ret->opcode = OP_REGVAR;
				cfg->ret->inst_c0 = cinfo->ret.reg;
			}
			break;
		case ArgValuetypeInReg:
			/* Allocate a local to hold the result, the epilog will copy it to the correct place */
			cfg->ret->opcode = OP_REGOFFSET;
			cfg->ret->inst_basereg = cfg->frame_reg;
			if (cfg->arch.omit_fp) {
				cfg->ret->inst_offset = offset;
				offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16;
			} else {
				offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16;
				cfg->ret->inst_offset = - offset;
			}
			break;
		default:
			g_assert_not_reached ();
		}
		cfg->ret->dreg = cfg->ret->inst_c0;
	}

	/* Allocate locals */
	if (!cfg->globalra) {
		offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
		if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
			char *mname = mono_method_full_name (cfg->method, TRUE);
			cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
			cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
			g_free (mname);
			return;
		}

		if (locals_stack_align) {
			offset += (locals_stack_align - 1);
			offset &= ~(locals_stack_align - 1);
		}
		if (cfg->arch.omit_fp) {
			cfg->locals_min_stack_offset = offset;
			cfg->locals_max_stack_offset = offset + locals_stack_size;
		} else {
			cfg->locals_min_stack_offset = - (offset + locals_stack_size);
			cfg->locals_max_stack_offset = - offset;
		}

		for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
			if (offsets [i] != -1) {
				MonoInst *ins = cfg->varinfo [i];
				ins->opcode = OP_REGOFFSET;
				ins->inst_basereg = cfg->frame_reg;
				if (cfg->arch.omit_fp)
					ins->inst_offset = (offset + offsets [i]);
				else
					ins->inst_offset = - (offset + offsets [i]);
				//printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
			}
		}
		offset += locals_stack_size;
	}

	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
		g_assert (!cfg->arch.omit_fp);
		g_assert (cinfo->sig_cookie.storage == ArgOnStack);
		cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
	}

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ins = cfg->args [i];
		if (ins->opcode != OP_REGVAR) {
			ArgInfo *ainfo = &cinfo->args [i];
			gboolean inreg = TRUE;
			MonoType *arg_type;

			if (sig->hasthis && (i == 0))
				arg_type = &mono_defaults.object_class->byval_arg;
			else
				arg_type = sig->params [i - sig->hasthis];

			if (cfg->globalra) {
				/* The new allocator needs info about the original locations of the arguments */
				switch (ainfo->storage) {
				case ArgInIReg:
				case ArgInFloatSSEReg:
				case ArgInDoubleSSEReg:
					ins->opcode = OP_REGVAR;
					ins->inst_c0 = ainfo->reg;
					break;
				case ArgOnStack:
					g_assert (!cfg->arch.omit_fp);
					ins->opcode = OP_REGOFFSET;
					ins->inst_basereg = cfg->frame_reg;
					ins->inst_offset = ainfo->offset + ARGS_OFFSET;
					break;
				case ArgValuetypeInReg:
					ins->opcode = OP_REGOFFSET;
					ins->inst_basereg = cfg->frame_reg;
					/* These arguments are saved to the stack in the prolog */
					offset = ALIGN_TO (offset, sizeof(mgreg_t));
					if (cfg->arch.omit_fp) {
						ins->inst_offset = offset;
						offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
					} else {
						offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
						ins->inst_offset = - offset;
					}
					break;
				default:
					g_assert_not_reached ();
				}

				continue;
			}

			/* FIXME: Allocate volatile arguments to registers */
			if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
				inreg = FALSE;

			/*
			 * Under AMD64, all registers used to pass arguments to functions
			 * are volatile across calls.
			 * FIXME: Optimize this.
			 */
			if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg))
				inreg = FALSE;

			ins->opcode = OP_REGOFFSET;

			switch (ainfo->storage) {
			case ArgInIReg:
			case ArgInFloatSSEReg:
			case ArgInDoubleSSEReg:
				if (inreg) {
					ins->opcode = OP_REGVAR;
					ins->dreg = ainfo->reg;
				}
				break;
			case ArgOnStack:
				g_assert (!cfg->arch.omit_fp);
				ins->opcode = OP_REGOFFSET;
				ins->inst_basereg = cfg->frame_reg;
				ins->inst_offset = ainfo->offset + ARGS_OFFSET;
				break;
			case ArgValuetypeInReg:
				break;
			case ArgValuetypeAddrInIReg: {
				MonoInst *indir;
				g_assert (!cfg->arch.omit_fp);

				MONO_INST_NEW (cfg, indir, 0);
				indir->opcode = OP_REGOFFSET;
				if (ainfo->pair_storage [0] == ArgInIReg) {
					indir->inst_basereg = cfg->frame_reg;
					offset = ALIGN_TO (offset, sizeof (gpointer));
					offset += (sizeof (gpointer));
					indir->inst_offset = - offset;
				}
				else {
					indir->inst_basereg = cfg->frame_reg;
					indir->inst_offset = ainfo->offset + ARGS_OFFSET;
				}

				ins->opcode = OP_VTARG_ADDR;
				ins->inst_left = indir;

				break;
			}
			default:
				NOT_IMPLEMENTED;
			}

			if (!inreg && (ainfo->storage != ArgOnStack) && (ainfo->storage != ArgValuetypeAddrInIReg)) {
				ins->opcode = OP_REGOFFSET;
				ins->inst_basereg = cfg->frame_reg;
				/* These arguments are saved to the stack in the prolog */
				offset = ALIGN_TO (offset, sizeof(mgreg_t));
				if (cfg->arch.omit_fp) {
					ins->inst_offset = offset;
					offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
					// Arguments are yet supported by the stack map creation code
					//cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset);
				} else {
					offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
					ins->inst_offset = - offset;
					//cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset);
				}
			}
		}
	}

	cfg->stack_offset = offset;
}
void
mono_arch_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	CallInfo *cinfo;

	sig = mono_method_signature (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	if (cinfo->ret.storage == ArgValuetypeInReg)
		cfg->ret_var_is_local = TRUE;

	if ((cinfo->ret.storage != ArgValuetypeInReg) && MONO_TYPE_ISSTRUCT (sig->ret)) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);
		}
	}

	if (cfg->gen_seq_points) {
		MonoInst *ins;

		if (cfg->compile_aot) {
			MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_info_var = ins;
		}

		ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.ss_trigger_page_var = ins;
	}

#ifdef MONO_AMD64_NO_PUSHES
	/*
	 * When this is set, we pass arguments on the stack by moves, and by allocating
	 * a bigger stack frame, instead of pushes.
	 * Pushes complicate exception handling because the arguments on the stack have
	 * to be popped each time a frame is unwound. They also make fp elimination
	 * impossible.
	 * FIXME: This doesn't work inside filter/finally clauses, since those execute
	 * on a new frame which doesn't include a param area.
	 */
	cfg->arch.no_pushes = TRUE;
#endif

	if (cfg->method->save_lmf) {
		MonoInst *lmf_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		lmf_var->flags |= MONO_INST_VOLATILE;
		lmf_var->flags |= MONO_INST_LMF;
		cfg->arch.lmf_var = lmf_var;
	}

#ifndef MONO_AMD64_NO_PUSHES
	cfg->arch_eh_jit_info = 1;
#endif
}
static void
add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
{
	MonoInst *ins;

	switch (storage) {
	case ArgInIReg:
		MONO_INST_NEW (cfg, ins, OP_MOVE);
		ins->dreg = mono_alloc_ireg_copy (cfg, tree->dreg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
		break;
	case ArgInFloatSSEReg:
		MONO_INST_NEW (cfg, ins, OP_AMD64_SET_XMMREG_R4);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);

		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
		break;
	case ArgInDoubleSSEReg:
		MONO_INST_NEW (cfg, ins, OP_FMOVE);
		ins->dreg = mono_alloc_freg (cfg);
		ins->sreg1 = tree->dreg;
		MONO_ADD_INS (cfg->cbb, ins);

		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);

		break;
	default:
		g_assert_not_reached ();
	}
}
static int
arg_storage_to_load_membase (ArgStorage storage)
{
	switch (storage) {
	case ArgInIReg:
#if defined(__mono_ilp32__)
		return OP_LOADI8_MEMBASE;
#else
		return OP_LOAD_MEMBASE;
#endif
	case ArgInDoubleSSEReg:
		return OP_LOADR8_MEMBASE;
	case ArgInFloatSSEReg:
		return OP_LOADR4_MEMBASE;
	default:
		g_assert_not_reached ();
	}

	return -1;
}
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoInst *arg;
	MonoMethodSignature *tmp_sig;
	int sig_reg;

	if (call->tail_call)
		NOT_IMPLEMENTED;

	g_assert (cinfo->sig_cookie.storage == ArgOnStack);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup_full (cfg->method->klass->image, call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	sig_reg = mono_alloc_ireg (cfg);
	MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);

	if (cfg->arch.no_pushes) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg);
	} else {
		MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
		arg->sreg1 = sig_reg;
		MONO_ADD_INS (cfg->cbb, arg);
	}
}
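/*
 * Example (illustrative only): for a vararg method "void m (int, __arglist)"
 * called as m (1, __arglist (2.0, "x")), the duplicated signature used as the
 * cookie drops the fixed parameters before the sentinel, so tmp_sig describes
 * only the two variable arguments and its sentinelpos becomes 0; the cookie
 * itself is stored to the stack slot recorded in cinfo->sig_cookie.
 */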
#ifdef ENABLE_LLVM
static inline LLVMArgStorage
arg_storage_to_llvm_arg_storage (MonoCompile *cfg, ArgStorage storage)
{
	switch (storage) {
	case ArgInIReg:
		return LLVMArgInIReg;
	case ArgNone:
		return LLVMArgNone;
	default:
		g_assert_not_reached ();
		return LLVMArgNone;
	}
}
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
	int i, n, j;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	LLVMCallInfo *linfo;
	MonoType *t;

	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);

	linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));

	/*
	 * LLVM always uses the native ABI while we use our own ABI, the
	 * only difference is the handling of vtypes:
	 * - we only pass/receive them in registers in some cases, and only
	 *   in 1 or 2 integer registers.
	 */
	if (cinfo->ret.storage == ArgValuetypeInReg) {
		if (sig->pinvoke) {
			cfg->exception_message = g_strdup ("pinvoke + vtypes");
			cfg->disable_llvm = TRUE;
			return linfo;
		}

		linfo->ret.storage = LLVMArgVtypeInReg;
		for (j = 0; j < 2; ++j)
			linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, cinfo->ret.pair_storage [j]);
	}

	if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage == ArgInIReg) {
		/* Vtype returned using a hidden argument */
		linfo->ret.storage = LLVMArgVtypeRetAddr;
		linfo->vret_arg_index = cinfo->vret_arg_index;
	}

	for (i = 0; i < n; ++i) {
		ainfo = cinfo->args + i;

		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
		else
			t = &mono_defaults.int_class->byval_arg;

		linfo->args [i].storage = LLVMArgNone;

		switch (ainfo->storage) {
		case ArgInIReg:
			linfo->args [i].storage = LLVMArgInIReg;
			break;
		case ArgInDoubleSSEReg:
		case ArgInFloatSSEReg:
			linfo->args [i].storage = LLVMArgInFPReg;
			break;
		case ArgOnStack:
			if (MONO_TYPE_ISSTRUCT (t)) {
				linfo->args [i].storage = LLVMArgVtypeByVal;
			} else {
				linfo->args [i].storage = LLVMArgInIReg;
				if (!t->byref) {
					if (t->type == MONO_TYPE_R4)
						linfo->args [i].storage = LLVMArgInFPReg;
					else if (t->type == MONO_TYPE_R8)
						linfo->args [i].storage = LLVMArgInFPReg;
				}
			}
			break;
		case ArgValuetypeInReg:
			if (sig->pinvoke) {
				cfg->exception_message = g_strdup ("pinvoke + vtypes");
				cfg->disable_llvm = TRUE;
				return linfo;
			}

			linfo->args [i].storage = LLVMArgVtypeInReg;
			for (j = 0; j < 2; ++j)
				linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
			break;
		default:
			cfg->exception_message = g_strdup ("ainfo->storage");
			cfg->disable_llvm = TRUE;
			break;
		}
	}

	return linfo;
}
#endif
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *arg, *in;
	MonoMethodSignature *sig;
	MonoType *t;
	int i, n, stack_size;
	CallInfo *cinfo;
	ArgInfo *ainfo;

	stack_size = 0;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);

	if (COMPILE_LLVM (cfg)) {
		/* We shouldn't be called in the llvm case */
		cfg->disable_llvm = TRUE;
		return;
	}

	if (cinfo->need_stack_align) {
		if (!cfg->arch.no_pushes)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
	}

	/*
	 * Emit all arguments which are passed on the stack to prevent register
	 * allocation problems.
	 */
	if (cfg->arch.no_pushes) {
		for (i = 0; i < n; ++i) {
			ainfo = cinfo->args + i;

			in = call->args [i];

			if (sig->hasthis && i == 0)
				t = &mono_defaults.object_class->byval_arg;
			else
				t = sig->params [i - sig->hasthis];

			if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t) && !call->tail_call) {
				if (!t->byref) {
					if (t->type == MONO_TYPE_R4)
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
					else if (t->type == MONO_TYPE_R8)
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
					else
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
				} else {
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
				}
				if (cfg->compute_gc_maps) {
					MonoInst *def;

					EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t);
				}
			}
		}
	}

	/*
	 * Emit all parameters passed in registers in non-reverse order for better readability
	 * and to help the optimization in emit_prolog ().
	 */
	for (i = 0; i < n; ++i) {
		ainfo = cinfo->args + i;

		in = call->args [i];

		if (ainfo->storage == ArgInIReg)
			add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
	}

	for (i = n - 1; i >= 0; --i) {
		ainfo = cinfo->args + i;

		in = call->args [i];

		switch (ainfo->storage) {
		case ArgInIReg:
			/* Already done */
			break;
		case ArgInFloatSSEReg:
		case ArgInDoubleSSEReg:
			add_outarg_reg (cfg, call, ainfo->storage, ainfo->reg, in);
			break;
		case ArgOnStack:
		case ArgValuetypeInReg:
		case ArgValuetypeAddrInIReg:
			if (ainfo->storage == ArgOnStack && call->tail_call) {
				MonoInst *call_inst = (MonoInst*)call;
				cfg->args [i]->flags |= MONO_INST_VOLATILE;
				EMIT_NEW_ARGSTORE (cfg, call_inst, i, in);
			} else if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
				guint32 align;
				guint32 size;

				if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
					size = sizeof (MonoTypedRef);
					align = sizeof (gpointer);
				}
				else {
					if (sig->pinvoke)
						size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
					else {
						/*
						 * Other backends use mono_type_stack_size (), but that
						 * aligns the size to 8, which is larger than the size of
						 * the source, leading to reads of invalid memory if the
						 * source is at the end of address space.
						 */
						size = mono_class_value_size (in->klass, &align);
					}
				}
				g_assert (in->klass);

				if (ainfo->storage == ArgOnStack && size >= 10000) {
					/* Avoid asserts in emit_memcpy () */
					cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
					cfg->exception_message = g_strdup_printf ("Passing an argument of size '%d'.", size);
					/* Continue normally */
				}

				if (size > 0) {
					MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
					arg->sreg1 = in->dreg;
					arg->klass = in->klass;
					arg->backend.size = size;
					arg->inst_p0 = call;
					arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
					memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));

					MONO_ADD_INS (cfg->cbb, arg);
				}
			} else {
				if (cfg->arch.no_pushes) {
					/* Already done */
				} else {
					MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
					arg->sreg1 = in->dreg;
					if (!sig->params [i - sig->hasthis]->byref) {
						if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
							MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
							arg->opcode = OP_STORER4_MEMBASE_REG;
							arg->inst_destbasereg = X86_ESP;
							arg->inst_offset = 0;
						} else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
							MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
							arg->opcode = OP_STORER8_MEMBASE_REG;
							arg->inst_destbasereg = X86_ESP;
							arg->inst_offset = 0;
						}
					}
					MONO_ADD_INS (cfg->cbb, arg);
				}
			}
			break;
		default:
			g_assert_not_reached ();
		}

		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos))
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
		emit_sig_cookie (cfg, call, cinfo);

	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *vtarg;

		if (cinfo->ret.storage == ArgValuetypeInReg) {
			if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
				/*
				 * Tell the JIT to use a more efficient calling convention: call using
				 * OP_CALL, compute the result location after the call, and save the
				 * result there.
				 */
				call->vret_in_reg = TRUE;
				/*
				 * Nullify the instruction computing the vret addr to enable
				 * future optimizations.
				 */
				if (call->vret_var)
					NULLIFY_INS (call->vret_var);
			} else {
				if (call->tail_call)
					NOT_IMPLEMENTED;
				/*
				 * The valuetype is in RAX:RDX after the call, need to be copied to
				 * the stack. Push the address here, so the call instruction can
				 * access it.
				 */
				if (!cfg->arch.vret_addr_loc) {
					cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
					/* Prevent it from being register allocated or optimized away */
					((MonoInst*)cfg->arch.vret_addr_loc)->flags |= MONO_INST_VOLATILE;
				}

				MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ((MonoInst*)cfg->arch.vret_addr_loc)->dreg, call->vret_var->dreg);
			}
		} else {
			MONO_INST_NEW (cfg, vtarg, OP_MOVE);
			vtarg->sreg1 = call->vret_var->dreg;
			vtarg->dreg = mono_alloc_preg (cfg);
			MONO_ADD_INS (cfg->cbb, vtarg);

			mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
		}
	}

#ifdef HOST_WIN32
	if (call->inst.opcode != OP_JMP && OP_TAILCALL != call->inst.opcode) {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 0x20);
	}
#endif

	if (cfg->method->save_lmf) {
		MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF);
		MONO_ADD_INS (cfg->cbb, arg);
	}

	call->stack_usage = cinfo->stack_usage;
}
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	MonoInst *arg;
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int size = ins->backend.size;

	if (ainfo->storage == ArgValuetypeInReg) {
		MonoInst *load;
		int part;

		for (part = 0; part < 2; ++part) {
			if (ainfo->pair_storage [part] == ArgNone)
				continue;

			MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part]));
			load->inst_basereg = src->dreg;
			load->inst_offset = part * sizeof(mgreg_t);

			switch (ainfo->pair_storage [part]) {
			case ArgInIReg:
				load->dreg = mono_alloc_ireg (cfg);
				break;
			case ArgInDoubleSSEReg:
			case ArgInFloatSSEReg:
				load->dreg = mono_alloc_freg (cfg);
				break;
			default:
				g_assert_not_reached ();
			}
			MONO_ADD_INS (cfg->cbb, load);

			add_outarg_reg (cfg, call, ainfo->pair_storage [part], ainfo->pair_regs [part], load);
		}
	} else if (ainfo->storage == ArgValuetypeAddrInIReg) {
		MonoInst *vtaddr, *load;
		vtaddr = mono_compile_create_var (cfg, &ins->klass->byval_arg, OP_LOCAL);

		g_assert (!cfg->arch.no_pushes);

		MONO_INST_NEW (cfg, load, OP_LDADDR);
		load->inst_p0 = vtaddr;
		vtaddr->flags |= MONO_INST_INDIRECT;
		load->type = STACK_MP;
		load->klass = vtaddr->klass;
		load->dreg = mono_alloc_ireg (cfg);
		MONO_ADD_INS (cfg->cbb, load);
		mini_emit_memcpy (cfg, load->dreg, 0, src->dreg, 0, size, 4);

		if (ainfo->pair_storage [0] == ArgInIReg) {
			MONO_INST_NEW (cfg, arg, OP_X86_LEA_MEMBASE);
			arg->dreg = mono_alloc_ireg (cfg);
			arg->sreg1 = load->dreg;
			arg->inst_imm = 0;
			MONO_ADD_INS (cfg->cbb, arg);
			mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, ainfo->pair_regs [0], FALSE);
		} else {
			MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
			arg->sreg1 = load->dreg;
			MONO_ADD_INS (cfg->cbb, arg);
		}
	} else {
		if (size == 8) {
			if (cfg->arch.no_pushes) {
				int dreg = mono_alloc_ireg (cfg);

				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, dreg);
			} else {
				/* Can't use this for < 8 since it does an 8 byte memory load */
				MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
				arg->inst_basereg = src->dreg;
				arg->inst_offset = 0;
				MONO_ADD_INS (cfg->cbb, arg);
			}
		} else if (size <= 40) {
			if (cfg->arch.no_pushes) {
				mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, 4);
			} else {
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 8));
				mini_emit_memcpy (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
			}
		} else {
			if (cfg->arch.no_pushes) {
				// FIXME: Code growth
				mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, 4);
			} else {
				MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
				arg->inst_basereg = src->dreg;
				arg->inst_offset = 0;
				arg->inst_imm = size;
				MONO_ADD_INS (cfg->cbb, arg);
			}
		}

		if (cfg->compute_gc_maps) {
			MonoInst *def;

			EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, &ins->klass->byval_arg);
		}
	}
}
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	MonoType *ret = mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret);

	if (ret->type == MONO_TYPE_R4) {
		if (COMPILE_LLVM (cfg))
			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		else
			MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
		return;
	} else if (ret->type == MONO_TYPE_R8) {
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
		return;
	}

	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}

#endif /* DISABLE_JIT */
#define EMIT_COND_BRANCH(ins,cond,sign) \
        if (ins->inst_true_bb->native_offset) { \
	        x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
        } else { \
	        mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	        if ((cfg->opt & MONO_OPT_BRANCH) && \
	            x86_is_imm8 (ins->inst_true_bb->max_offset - offset)) \
		        x86_branch8 (code, cond, 0, sign); \
	        else \
		        x86_branch32 (code, cond, 0, sign); \
}
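/*
 * A minimal usage sketch for the macro above (the condition code and the
 * signedness flag are illustrative): when the target block has already been
 * emitted the branch is encoded directly, otherwise a MONO_PATCH_INFO_BB
 * patch is recorded and a rel8/rel32 placeholder is left behind.
 */
#if 0
	EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);	/* signed "branch if equal" */
#endif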
/* Structures used by the dyn call code below */
typedef struct {
	MonoMethodSignature *sig;
	CallInfo *cinfo;
} ArchDynCallInfo;

typedef struct {
	mgreg_t regs [PARAM_REGS];
	mgreg_t res;
	guint8 *ret;
} DynCallArgs;

static gboolean
dyn_call_supported (MonoMethodSignature *sig, CallInfo *cinfo)
{
	int i;

	switch (cinfo->ret.storage) {
	case ArgNone:
	case ArgInIReg:
		break;
	case ArgValuetypeInReg: {
		ArgInfo *ainfo = &cinfo->ret;

		if (ainfo->pair_storage [0] != ArgNone && ainfo->pair_storage [0] != ArgInIReg)
			return FALSE;
		if (ainfo->pair_storage [1] != ArgNone && ainfo->pair_storage [1] != ArgInIReg)
			return FALSE;
		break;
	}
	default:
		return FALSE;
	}

	for (i = 0; i < cinfo->nargs; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];
		switch (ainfo->storage) {
		case ArgInIReg:
			break;
		case ArgValuetypeInReg:
			if (ainfo->pair_storage [0] != ArgNone && ainfo->pair_storage [0] != ArgInIReg)
				return FALSE;
			if (ainfo->pair_storage [1] != ArgNone && ainfo->pair_storage [1] != ArgInIReg)
				return FALSE;
			break;
		default:
			return FALSE;
		}
	}

	return TRUE;
}
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Return a pointer to an arch-specific structure which contains information
 * needed by mono_arch_get_dyn_call_args (). Return NULL if OP_DYN_CALL is not
 * supported for SIG.
 * This function is equivalent to ffi_prep_cif in libffi.
 */
MonoDynCallInfo*
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
{
	ArchDynCallInfo *info;
	CallInfo *cinfo;

	cinfo = get_call_info (NULL, NULL, sig);

	if (!dyn_call_supported (sig, cinfo)) {
		g_free (cinfo);
		return NULL;
	}

	info = g_new0 (ArchDynCallInfo, 1);
	// FIXME: Preprocess the info to speed up get_dyn_call_args ().
	info->sig = sig;
	info->cinfo = cinfo;

	return (MonoDynCallInfo*)info;
}

/*
 * mono_arch_dyn_call_free:
 *
 *   Free a MonoDynCallInfo structure.
 */
void
mono_arch_dyn_call_free (MonoDynCallInfo *info)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;

	g_free (ainfo->cinfo);
	g_free (ainfo);
}
#if !defined(__native_client__)
#define PTR_TO_GREG(ptr) (mgreg_t)(ptr)
#define GREG_TO_PTR(greg) (gpointer)(greg)
#else
/* Correctly handle casts to/from 32-bit pointers without compiler warnings */
#define PTR_TO_GREG(ptr) (mgreg_t)(uintptr_t)(ptr)
#define GREG_TO_PTR(greg) (gpointer)(guint32)(greg)
#endif
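/*
 * A minimal sketch of the round trip these macros are meant to guarantee,
 * assuming an LP64 target where mgreg_t is 64 bits wide (under NaCl the extra
 * casts keep the value inside the low 32 bits instead):
 */
#if 0
static void
greg_roundtrip_sketch (gpointer p)
{
	mgreg_t g = PTR_TO_GREG (p);	/* pointer -> register-sized integer */
	gpointer q = GREG_TO_PTR (g);	/* ...and back without truncation */

	g_assert (p == q);
}
#endif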
/*
 * mono_arch_get_start_dyn_call:
 *
 *   Convert the arguments ARGS to a format which can be passed to OP_DYN_CALL, and
 * store the result into BUF.
 * ARGS should be an array of pointers pointing to the arguments.
 * RET should point to a memory buffer large enough to hold the result of the
 * call.
 * This function should be as fast as possible, any work which does not depend
 * on the actual values of the arguments should be done in
 * mono_arch_dyn_call_prepare ().
 * start_dyn_call + OP_DYN_CALL + finish_dyn_call is equivalent to ffi_call in
 * libffi.
 */
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
{
	ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
	DynCallArgs *p = (DynCallArgs*)buf;
	int arg_index, greg, i, pindex;
	MonoMethodSignature *sig = dinfo->sig;

	g_assert (buf_len >= sizeof (DynCallArgs));

	p->res = 0;
	p->ret = ret;

	arg_index = 0;
	greg = 0;
	pindex = 0;

	if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
		p->regs [greg ++] = PTR_TO_GREG(*(args [arg_index ++]));
		if (!sig->hasthis)
			pindex = 1;
	}

	if (dinfo->cinfo->vtype_retaddr)
		p->regs [greg ++] = PTR_TO_GREG(ret);

	for (i = pindex; i < sig->param_count; i++) {
		MonoType *t = mono_type_get_underlying_type (sig->params [i]);
		gpointer *arg = args [arg_index ++];

		if (t->byref) {
			p->regs [greg ++] = PTR_TO_GREG(*(arg));
			continue;
		}

		switch (t->type) {
		case MONO_TYPE_STRING:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_OBJECT:
#if !defined(__mono_ilp32__)
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
#endif
			g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
			p->regs [greg ++] = PTR_TO_GREG(*(arg));
			break;
#if defined(__mono_ilp32__)
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
			p->regs [greg ++] = *(guint64*)(arg);
			break;
#endif
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_U1:
			p->regs [greg ++] = *(guint8*)(arg);
			break;
		case MONO_TYPE_I1:
			p->regs [greg ++] = *(gint8*)(arg);
			break;
		case MONO_TYPE_I2:
			p->regs [greg ++] = *(gint16*)(arg);
			break;
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			p->regs [greg ++] = *(guint16*)(arg);
			break;
		case MONO_TYPE_I4:
			p->regs [greg ++] = *(gint32*)(arg);
			break;
		case MONO_TYPE_U4:
			p->regs [greg ++] = *(guint32*)(arg);
			break;
		case MONO_TYPE_GENERICINST:
			if (MONO_TYPE_IS_REFERENCE (t)) {
				p->regs [greg ++] = PTR_TO_GREG(*(arg));
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE: {
			ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];

			g_assert (ainfo->storage == ArgValuetypeInReg);
			if (ainfo->pair_storage [0] != ArgNone) {
				g_assert (ainfo->pair_storage [0] == ArgInIReg);
				p->regs [greg ++] = ((mgreg_t*)(arg))[0];
			}
			if (ainfo->pair_storage [1] != ArgNone) {
				g_assert (ainfo->pair_storage [1] == ArgInIReg);
				p->regs [greg ++] = ((mgreg_t*)(arg))[1];
			}
			break;
		}
		default:
			g_assert_not_reached ();
		}
	}

	g_assert (greg <= PARAM_REGS);
}
/*
 * mono_arch_finish_dyn_call:
 *
 *   Store the result of a dyn call into the return value buffer passed to
 * start_dyn_call ().
 * This function should be as fast as possible, any work which does not depend
 * on the actual values of the arguments should be done in
 * mono_arch_dyn_call_prepare ().
 */
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
	ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
	MonoMethodSignature *sig = dinfo->sig;
	guint8 *ret = ((DynCallArgs*)buf)->ret;
	mgreg_t res = ((DynCallArgs*)buf)->res;

	switch (mono_type_get_underlying_type (sig->ret)->type) {
	case MONO_TYPE_VOID:
		*(gpointer*)ret = NULL;
		break;
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_OBJECT:
		*(gpointer*)ret = GREG_TO_PTR(res);
		break;
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
		*(guint8*)ret = res;
		break;
	case MONO_TYPE_I2:
		*(gint16*)ret = res;
		break;
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
		*(guint16*)ret = res;
		break;
	case MONO_TYPE_I4:
		*(gint32*)ret = res;
		break;
	case MONO_TYPE_U4:
		*(guint32*)ret = res;
		break;
	case MONO_TYPE_I8:
		*(gint64*)ret = res;
		break;
	case MONO_TYPE_U8:
		*(guint64*)ret = res;
		break;
	case MONO_TYPE_GENERICINST:
		if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
			*(gpointer*)ret = GREG_TO_PTR(res);
			break;
		}
		/* Fall through */
	case MONO_TYPE_VALUETYPE:
		if (dinfo->cinfo->vtype_retaddr) {
			/* Nothing to do */
		} else {
			ArgInfo *ainfo = &dinfo->cinfo->ret;

			g_assert (ainfo->storage == ArgValuetypeInReg);

			if (ainfo->pair_storage [0] != ArgNone) {
				g_assert (ainfo->pair_storage [0] == ArgInIReg);
				((mgreg_t*)ret)[0] = res;
			}

			g_assert (ainfo->pair_storage [1] == ArgNone);
		}
		break;
	default:
		g_assert_not_reached ();
	}
}
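/*
 * A minimal sketch of the expected calling sequence, mirroring
 * ffi_prep_cif + ffi_call in libffi. The dyn_call_invoke () step is a
 * hypothetical stand-in for the JITted OP_DYN_CALL wrapper; everything else
 * uses the entry points defined above.
 */
#if 0
static void
dyn_call_sketch (MonoMethodSignature *sig, gpointer **args, guint8 *ret_buf)
{
	MonoDynCallInfo *info = mono_arch_dyn_call_prepare (sig);
	guint8 buf [sizeof (DynCallArgs)];

	if (!info)
		return;	/* OP_DYN_CALL is not supported for this signature */

	mono_arch_start_dyn_call (info, args, ret_buf, buf, sizeof (buf));
	dyn_call_invoke (buf);	/* hypothetical: runs the OP_DYN_CALL code */
	mono_arch_finish_dyn_call (info, buf);
	mono_arch_dyn_call_free (info);
}
#endif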
/* emit an exception if condition is fail */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
	do { \
		MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
		if (tins == NULL) { \
			mono_add_patch_info (cfg, code - cfg->native_code, \
					MONO_PATCH_INFO_EXC, exc_name); \
			x86_branch32 (code, cond, 0, signed); \
		} else { \
			EMIT_COND_BRANCH (tins, cond, signed); \
		} \
	} while (0);

#define EMIT_FPCOMPARE(code) do { \
	amd64_fcompp (code); \
	amd64_fnstsw (code); \
} while (0);

#define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \
	amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \
	amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \
	amd64_ ##op (code); \
	amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \
	amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \
} while (0);
static guint8*
emit_call_body (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
{
	gboolean no_patch = FALSE;

	/*
	 * FIXME: Add support for thunks
	 */
	{
		gboolean near_call = FALSE;

		/*
		 * Indirect calls are expensive so try to make a near call if possible.
		 * The caller memory is allocated by the code manager so it is
		 * guaranteed to be at a 32 bit offset.
		 */
		if (patch_type != MONO_PATCH_INFO_ABS) {
			/* The target is in memory allocated using the code manager */
			near_call = TRUE;

			if ((patch_type == MONO_PATCH_INFO_METHOD) || (patch_type == MONO_PATCH_INFO_METHOD_JUMP)) {
				if (((MonoMethod*)data)->klass->image->aot_module)
					/* The callee might be an AOT method */
					near_call = FALSE;
				if (((MonoMethod*)data)->dynamic)
					/* The target is in malloc-ed memory */
					near_call = FALSE;
			}

			if (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD) {
				/*
				 * The call might go directly to a native function without
				 * the wrapper.
				 */
				MonoJitICallInfo *mi = mono_find_jit_icall_by_name (data);
				if (mi) {
					gconstpointer target = mono_icall_get_wrapper (mi);
					if ((((guint64)target) >> 32) != 0)
						near_call = FALSE;
				}
			}
		}
		else {
			if (cfg->abs_patches && g_hash_table_lookup (cfg->abs_patches, data)) {
				/*
				 * This is not really an optimization, but required because the
				 * generic class init trampolines use R11 to pass the vtable.
				 */
				near_call = TRUE;
			} else {
				MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
				if (info) {
					if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
						strstr (cfg->method->name, info->name)) {
						/* A call to the wrapped function */
						if ((((guint64)data) >> 32) == 0)
							near_call = TRUE;
						no_patch = TRUE;
					}
					else if (info->func == info->wrapper) {
						/* No wrapper */
						if ((((guint64)info->func) >> 32) == 0)
							near_call = TRUE;
					}
					else {
						/* See the comment in mono_codegen () */
						if ((info->name [0] != 'v') || (strstr (info->name, "ves_array_new_va_") == NULL && strstr (info->name, "ves_array_element_address_") == NULL))
							near_call = TRUE;
					}
				}
				else if ((((guint64)data) >> 32) == 0) {
					near_call = TRUE;
					no_patch = TRUE;
				}
			}
		}

		if (cfg->method->dynamic)
			/* These methods are allocated using malloc */
			near_call = FALSE;

#ifdef MONO_ARCH_NOMAP32BIT
		near_call = FALSE;
#endif

		/* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */
		if (optimize_for_xen)
			near_call = FALSE;

		if (cfg->compile_aot) {
			near_call = TRUE;
			no_patch = TRUE;
		}

		if (near_call) {
			/*
			 * Align the call displacement to an address divisible by 4 so it does
			 * not span cache lines. This is required for code patching to work on SMP
			 * systems.
			 */
			if (!no_patch && ((guint32)(code + 1 - cfg->native_code) % 4) != 0) {
				guint32 pad_size = 4 - ((guint32)(code + 1 - cfg->native_code) % 4);
				amd64_padding (code, pad_size);
			}
			mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
			amd64_call_code (code, 0);
		}
		else {
			mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
			amd64_set_reg_template (code, GP_SCRATCH_REG);
			amd64_call_reg (code, GP_SCRATCH_REG);
		}
	}

	return code;
}
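/*
 * The two strategies above correspond to these encodings (R11 is
 * GP_SCRATCH_REG):
 *
 *   near call:  e8 xx xx xx xx             call rel32   (patchable, +/-2GB reach)
 *   far call:   49 bb <imm64>; 41 ff d3    mov $target, %r11; call *%r11
 */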
static inline guint8*
emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data, gboolean win64_adjust_stack)
{
#ifdef HOST_WIN32
	if (win64_adjust_stack)
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
#endif
	code = emit_call_body (cfg, code, patch_type, data);
#ifdef HOST_WIN32
	if (win64_adjust_stack)
		amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
#endif

	return code;
}
static int
store_membase_imm_to_store_membase_reg (int opcode)
{
	switch (opcode) {
	case OP_STORE_MEMBASE_IMM:
		return OP_STORE_MEMBASE_REG;
	case OP_STOREI4_MEMBASE_IMM:
		return OP_STOREI4_MEMBASE_REG;
	case OP_STOREI8_MEMBASE_IMM:
		return OP_STOREI8_MEMBASE_REG;
	}

	return -1;
}
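/*
 * A minimal sketch of how the helper above is used by the peephole passes:
 * once a register is known to hold 0, "store $0, off(base)" can be rewritten
 * into the shorter "store %reg, off(base)" by swapping the opcode and wiring
 * sreg1 to that register.
 */
#if 0
static void
rewrite_zero_store_sketch (MonoInst *store, int zero_reg)
{
	store->opcode = store_membase_imm_to_store_membase_reg (store->opcode);
	store->sreg1 = zero_reg;
}
#endif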
#define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB) || ((opcode) == OP_ISBB_IMM)))
/*
 * mono_arch_peephole_pass_1:
 *
 *   Perform peephole opts which should/can be performed before local regalloc
 */
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		MonoInst *last_ins = ins->prev;

		switch (ins->opcode) {
		case OP_ADD_IMM:
		case OP_IADD_IMM:
			if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS) && (ins->inst_imm > 0)) {
				/*
				 * X86_LEA is like ADD, but doesn't have the
				 * sreg1==dreg restriction. inst_imm > 0 is needed since LEA sign-extends
				 * its operand to 64 bit.
				 */
				ins->opcode = OP_X86_LEA_MEMBASE;
				ins->inst_basereg = ins->sreg1;
			}
			break;
		case OP_LXOR:
		case OP_IXOR:
			if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
				MonoInst *ins2;

				/*
				 * Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since
				 * the latter has length 2-3 instead of 6 (reverse constant
				 * propagation). These instruction sequences are very common
				 * in the initlocals bblock.
				 */
				for (ins2 = ins->next; ins2; ins2 = ins2->next) {
					if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
						ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
						ins2->sreg1 = ins->dreg;
					} else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG)) {
						/* Continue */
					} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
						/* Continue */
					} else {
						break;
					}
				}
			}
			break;
		case OP_COMPARE_IMM:
		case OP_LCOMPARE_IMM:
			/* OP_COMPARE_IMM (reg, 0)
			 * -->
			 * OP_AMD64_TEST_NULL (reg)
			 */
			if (!ins->inst_imm)
				ins->opcode = OP_AMD64_TEST_NULL;
			break;
		case OP_ICOMPARE_IMM:
			if (!ins->inst_imm)
				ins->opcode = OP_X86_TEST_NULL;
			break;
		case OP_AMD64_ICOMPARE_MEMBASE_IMM:
			/*
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
			 * -->
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_COMPARE_IMM reg, imm
			 *
			 * Note: if imm = 0 then OP_COMPARE_IMM replaced with OP_X86_TEST_NULL
			 */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = OP_ICOMPARE_IMM;
				ins->sreg1 = last_ins->sreg1;

				/* check if we can remove cmp reg,0 with test null */
				if (!ins->inst_imm)
					ins->opcode = OP_X86_TEST_NULL;
			}
			break;
		}

		mono_peephole_ins (bb, ins);
	}
}
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_ICONST:
		case OP_I8CONST:
			/* reg = 0 -> XOR (reg, reg) */
			/* XOR sets cflags on x86, so we can't always do it */
			if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
				ins->opcode = OP_LXOR;
				ins->sreg1 = ins->dreg;
				ins->sreg2 = ins->dreg;
				/* Fall through */
			} else {
				break;
			}
		case OP_LXOR:
			/*
			 * Use IXOR to avoid a rex prefix if possible. The cpu will sign extend the
			 * 0 result into 64 bits.
			 */
			if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
				ins->opcode = OP_IXOR;
			}
			/* Fall through */
		case OP_IXOR:
			if ((ins->sreg1 == ins->sreg2) && (ins->sreg1 == ins->dreg)) {
				MonoInst *ins2;

				/*
				 * Replace STORE_MEMBASE_IMM 0 with STORE_MEMBASE_REG since
				 * the latter has length 2-3 instead of 6 (reverse constant
				 * propagation). These instruction sequences are very common
				 * in the initlocals bblock.
				 */
				for (ins2 = ins->next; ins2; ins2 = ins2->next) {
					if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
						ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
						ins2->sreg1 = ins->dreg;
					} else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START) || (ins2->opcode == OP_GC_LIVENESS_DEF) || (ins2->opcode == OP_GC_LIVENESS_USE)) {
						/* Continue */
					} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
						/* Continue */
					} else {
						break;
					}
				}
			}
			break;
		case OP_IADD_IMM:
			if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
				ins->opcode = OP_X86_INC_REG;
			break;
		case OP_ISUB_IMM:
			if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
				ins->opcode = OP_X86_DEC_REG;
			break;
		}

		mono_peephole_ins (bb, ins);
	}
}
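/*
 * Example of the rewrite performed above (IR before/after), assuming the
 * following instruction does not read the flags that XOR clobbers:
 *
 *   i8const R10 <- 0                       long_xor R10 <- R10 R10
 *   storei8_membase_imm [R11+8] <- 0  ==>  storei8_membase_reg [R11+8] <- R10
 */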
#define NEW_INS(cfg,ins,dest,op) do {	\
		MONO_INST_NEW ((cfg), (dest), (op)); \
		(dest)->cil_code = (ins)->cil_code; \
		mono_bblock_insert_before_ins (bb, ins, (dest)); \
	} while (0)

/*
 * mono_arch_lowering_pass:
 *
 *  Converts complex opcodes into simpler ones so that each IR instruction
 * corresponds to one machine instruction.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *temp;

	/*
	 * FIXME: Need to add more instructions, but the current machine
	 * description can't model some parts of the composite instructions like
	 * cdq.
	 */
	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_IDIV_IMM:
		case OP_IREM_IMM:
		case OP_IDIV_UN_IMM:
		case OP_IREM_UN_IMM:
			mono_decompose_op_imm (cfg, bb, ins);
			break;
		case OP_IMUL_IMM:
			/* Keep the opcode if we can implement it efficiently */
			if (!((ins->inst_imm > 0) && (mono_is_power_of_two (ins->inst_imm) != -1)))
				mono_decompose_op_imm (cfg, bb, ins);
			break;
		case OP_COMPARE_IMM:
		case OP_LCOMPARE_IMM:
			if (!amd64_is_imm32 (ins->inst_imm)) {
				NEW_INS (cfg, ins, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->opcode = OP_COMPARE;
				ins->sreg2 = temp->dreg;
			}
			break;
#ifndef __mono_ilp32__
		case OP_LOAD_MEMBASE:
#endif
		case OP_LOADI8_MEMBASE:
#ifndef __native_client_codegen__
		/*  Don't generate memindex opcodes (to simplify */
		/*  read sandboxing) */
			if (!amd64_is_imm32 (ins->inst_offset)) {
				NEW_INS (cfg, ins, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_offset;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
				ins->inst_indexreg = temp->dreg;
			}
#endif
			break;
#ifndef __mono_ilp32__
		case OP_STORE_MEMBASE_IMM:
#endif
		case OP_STOREI8_MEMBASE_IMM:
			if (!amd64_is_imm32 (ins->inst_imm)) {
				NEW_INS (cfg, ins, temp, OP_I8CONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->opcode = OP_STOREI8_MEMBASE_REG;
				ins->sreg1 = temp->dreg;
			}
			break;
#ifdef MONO_ARCH_SIMD_INTRINSICS
		case OP_EXPAND_I1: {
			int temp_reg1 = mono_alloc_ireg (cfg);
			int temp_reg2 = mono_alloc_ireg (cfg);
			int original_reg = ins->sreg1;

			NEW_INS (cfg, ins, temp, OP_ICONV_TO_U1);
			temp->sreg1 = original_reg;
			temp->dreg = temp_reg1;

			NEW_INS (cfg, ins, temp, OP_SHL_IMM);
			temp->sreg1 = temp_reg1;
			temp->inst_imm = 8;
			temp->dreg = temp_reg2;

			NEW_INS (cfg, ins, temp, OP_LOR);
			temp->sreg1 = temp->dreg = temp_reg2;
			temp->sreg2 = temp_reg1;

			ins->opcode = OP_EXPAND_I2;
			ins->sreg1 = temp_reg2;
			break;
		}
#endif
		default:
			break;
		}
	}

	bb->max_vreg = cfg->next_vreg;
}
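/*
 * Example of the OP_COMPARE_IMM lowering above when the immediate does not
 * fit in 32 bits: the constant is first materialized into a scratch vreg and
 * the compare becomes a register-register one.
 *
 *   compare_imm R10, 0x123456789   ==>   i8const R42 <- 0x123456789
 *                                         compare R10, R42
 */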
static const int
branch_cc_table [] = {
	X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
	X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
	X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
};

/* Maps CMP_... constants to X86_CC_... constants */
static const int
cc_table [] = {
	X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
	X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
};

static const int
cc_signed_table [] = {
	TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
	FALSE, FALSE, FALSE, FALSE
};

/*#include "cprop.c"*/
static unsigned char*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg);

	if (size == 1)
		amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
	else if (size == 2)
		amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
	return code;
}
static unsigned char*
mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree)
{
	int sreg = tree->sreg1;
	int need_touch = FALSE;

#if defined(HOST_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
	if (!tree->flags & MONO_INST_INIT)
		need_touch = TRUE;
#endif

	if (need_touch) {
		guint8* br[5];

		/*
		 * If requested stack size is larger than one page,
		 * perform stack-touch operation
		 */
		/*
		 * Generate stack probe code.
		 * Under Windows, it is necessary to allocate one page at a time,
		 * "touching" stack after each successful sub-allocation. This is
		 * because of the way stack growth is implemented - there is a
		 * guard page before the lowest stack page that is currently committed.
		 * Stack normally grows sequentially so OS traps access to the
		 * guard page and commits more pages when needed.
		 */
		amd64_test_reg_imm (code, sreg, ~0xFFF);
		br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);

		br[2] = code; /* loop */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
		amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
		amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
		amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
		br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
		amd64_patch (br[3], br[2]);
		amd64_test_reg_reg (code, sreg, sreg);
		br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
		amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);

		br[1] = code; x86_jump8 (code, 0);

		amd64_patch (br[0], code);
		amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
		amd64_patch (br[1], code);
		amd64_patch (br[4], code);
	}
	else
		amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);

	if (tree->flags & MONO_INST_INIT) {
		int offset = 0;
		if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
			amd64_push_reg (code, AMD64_RAX);
			offset += 8;
		}
		if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
			amd64_push_reg (code, AMD64_RCX);
			offset += 8;
		}
		if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
			amd64_push_reg (code, AMD64_RDI);
			offset += 8;
		}

		amd64_shift_reg_imm (code, X86_SHR, sreg, 3);
		if (sreg != AMD64_RCX)
			amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);

		amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
		if (cfg->param_area && cfg->arch.no_pushes)
			amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
		amd64_cld (code);
#if defined(__default_codegen__)
		amd64_prefix (code, X86_REP_PREFIX);
		amd64_stosl (code);
#elif defined(__native_client_codegen__)
		/* NaCl stos pseudo-instruction */
		amd64_codegen_pre(code);
		/* First, clear the upper 32 bits of RDI (mov %edi, %edi)  */
		amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 4);
		/* Add %r15 to %rdi using lea, condition flags unaffected. */
		amd64_lea_memindex_size (code, AMD64_RDI, AMD64_R15, 0, AMD64_RDI, 0, 8);
		amd64_prefix (code, X86_REP_PREFIX);
		amd64_stosl (code);
		amd64_codegen_post(code);
#endif /* __native_client_codegen__ */

		if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
			amd64_pop_reg (code, AMD64_RDI);
		if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
			amd64_pop_reg (code, AMD64_RCX);
		if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
			amd64_pop_reg (code, AMD64_RAX);
	}
	return code;
}
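/*
 * The probe loop emitted above corresponds to the following sequence, with
 * sreg holding the requested allocation size:
 *
 *         test  $~0xfff, %sreg        ; sizes below one page skip the loop
 *         jz    .Lsmall
 *   .Lloop:
 *         sub   $0x1000, %rsp
 *         test  %rsp, (%rsp)          ; touch the freshly exposed page
 *         sub   $0x1000, %sreg
 *         cmp   $0x1000, %sreg
 *         jae   .Lloop
 *         test  %sreg, %sreg
 *         jz    .Ldone
 *         sub   %sreg, %rsp
 *         jmp   .Ldone
 *   .Lsmall:
 *         sub   %sreg, %rsp
 *   .Ldone:
 */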
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
	CallInfo *cinfo;

	/* Move return value to the target register */
	/* FIXME: do this in the local reg allocator */
	switch (ins->opcode) {
	case OP_CALL:
	case OP_CALL_REG:
	case OP_CALL_MEMBASE:
	case OP_LCALL:
	case OP_LCALL_REG:
	case OP_LCALL_MEMBASE:
		g_assert (ins->dreg == AMD64_RAX);
		break;
	case OP_FCALL:
	case OP_FCALL_REG:
	case OP_FCALL_MEMBASE:
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
			amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
		}
		else {
			if (ins->dreg != AMD64_XMM0)
				amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
		}
		break;
	case OP_VCALL:
	case OP_VCALL_REG:
	case OP_VCALL_MEMBASE:
	case OP_VCALL2:
	case OP_VCALL2_REG:
	case OP_VCALL2_MEMBASE:
		cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature);
		if (cinfo->ret.storage == ArgValuetypeInReg) {
			MonoInst *loc = cfg->arch.vret_addr_loc;
			int quad;

			/* Load the destination address */
			g_assert (loc->opcode == OP_REGOFFSET);
			amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer));

			for (quad = 0; quad < 2; quad ++) {
				switch (cinfo->ret.pair_storage [quad]) {
				case ArgInIReg:
					amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof(mgreg_t)), cinfo->ret.pair_regs [quad], sizeof(mgreg_t));
					break;
				case ArgInFloatSSEReg:
					amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
					break;
				case ArgInDoubleSSEReg:
					amd64_movsd_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
					break;
				default:
					break;
				}
			}
		}
		break;
	}

	return code;
}

#endif /* DISABLE_JIT */
#ifdef __APPLE__
static int tls_gs_offset;
#endif

gboolean
mono_amd64_have_tls_get (void)
{
#ifdef __APPLE__
	static gboolean have_tls_get = FALSE;
	static gboolean inited = FALSE;
	guint8 *ins;

	if (inited)
		return have_tls_get;

	ins = (guint8*)pthread_getspecific;

	/*
	 * We're looking for these two instructions:
	 *
	 * mov    %gs:[offset](,%rdi,8),%rax
	 * retq
	 */
	have_tls_get = ins [0] == 0x65 &&
		       ins [1] == 0x48 &&
		       ins [2] == 0x8b &&
		       ins [3] == 0x04 &&
		       ins [4] == 0xfd &&
		       ins [6] == 0x00 &&
		       ins [7] == 0x00 &&
		       ins [8] == 0x00 &&
		       ins [9] == 0xc3;

	tls_gs_offset = ins [5];

	inited = TRUE;

	return have_tls_get;
#else
	return TRUE;
#endif
}
/*
 * mono_amd64_emit_tls_get:
 * @code: buffer to store code to
 * @dreg: hard register where to place the result
 * @tls_offset: offset info
 *
 * mono_amd64_emit_tls_get emits in @code the native code that puts in
 * the dreg register the item in the thread local storage identified
 * by tls_offset.
 *
 * Returns: a pointer to the end of the stored code
 */
guint8*
mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
#ifdef HOST_WIN32
	g_assert (tls_offset < 64);
	x86_prefix (code, X86_GS_PREFIX);
	amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8);
#elif defined(__APPLE__)
	x86_prefix (code, X86_GS_PREFIX);
	amd64_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 8), 8);
#else
	if (optimize_for_xen) {
		x86_prefix (code, X86_FS_PREFIX);
		amd64_mov_reg_mem (code, dreg, 0, 8);
		amd64_mov_reg_membase (code, dreg, dreg, tls_offset, 8);
	} else {
		x86_prefix (code, X86_FS_PREFIX);
		amd64_mov_reg_mem (code, dreg, tls_offset, 8);
	}
#endif
	return code;
}
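/*
 * On Linux the fast path above assembles to a single
 * "mov %fs:tls_offset, %dreg"; the Xen variant loads the TLS block address
 * from %fs:0 first and then dereferences it, because direct %fs-relative
 * accesses are slow under the 64-bit Xen paravirtualized kernel. On Darwin
 * the same load goes through %gs at tls_gs_offset + 8 * tls_offset, and on
 * Windows through the TEB TLS slots at %gs:0x1480.
 */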
/*
 * emit_setup_lmf:
 *
 *   Emit code to initialize an LMF structure at LMF_OFFSET.
 */
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
	int i;

	/*
	 * The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field.
	 */
	/*
	 * sp is saved right before calls but we need to save it here too so
	 * async stack walks would work.
	 */
	amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
	/* Skip method (only needed for trampoline LMF frames) */
	/* Save callee saved regs */
	for (i = 0; i < MONO_MAX_IREGS; ++i) {
		int offset;

		switch (i) {
		case AMD64_RBX: offset = G_STRUCT_OFFSET (MonoLMF, rbx); break;
		case AMD64_RBP: offset = G_STRUCT_OFFSET (MonoLMF, rbp); break;
		case AMD64_R12: offset = G_STRUCT_OFFSET (MonoLMF, r12); break;
		case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
		case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
#ifndef __native_client_codegen__
		case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
#endif
#ifdef HOST_WIN32
		case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
		case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
#endif
		default:
			offset = -1;
			break;
		}

		if (offset != -1) {
			amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + offset, i, 8);
			if ((cfg->arch.omit_fp || (i != AMD64_RBP)) && cfa_offset != -1)
				mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - (lmf_offset + offset)));
		}
	}

	/* These can't contain refs */
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);

	/* These are handled automatically by the stack marking code */
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), SLOT_NOREF);
#ifdef HOST_WIN32
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), SLOT_NOREF);
	mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), SLOT_NOREF);
#endif

	return code;
}
/*
 * emit_save_lmf:
 *
 *   Emit code to push an LMF structure on the LMF stack.
 */
static guint8*
emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, gboolean *args_clobbered)
{
	if ((lmf_tls_offset != -1) && !optimize_for_xen) {
		/*
		 * Optimized version which uses the mono_lmf TLS variable instead of
		 * indirection through the mono_lmf_addr TLS variable.
		 */
		/* %rax = previous_lmf */
		x86_prefix (code, X86_FS_PREFIX);
		amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);

		/* Save previous_lmf */
		amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_RAX, 8);
		/* Set new lmf */
		if (lmf_offset == 0) {
			x86_prefix (code, X86_FS_PREFIX);
			amd64_mov_mem_reg (code, lmf_tls_offset, cfg->frame_reg, 8);
		} else {
			amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
			x86_prefix (code, X86_FS_PREFIX);
			amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
		}
	} else {
		if (lmf_addr_tls_offset != -1) {
			/* Load lmf quickly using the FS register */
			code = mono_amd64_emit_tls_get (code, AMD64_RAX, lmf_addr_tls_offset);
#ifdef HOST_WIN32
			/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
			/* FIXME: Add a separate key for LMF to avoid this */
			amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
#endif
		}
		else {
			/*
			 * The call might clobber argument registers, but they are already
			 * saved to the stack/global regs.
			 */
			if (args_clobbered)
				*args_clobbered = TRUE;
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
					  (gpointer)"mono_get_lmf_addr", TRUE);
		}

		/* Save lmf_addr */
		amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
		/* Save previous_lmf */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
		amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
		/* Set new lmf */
		amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
		amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
	}

	return code;
}
/*
 * emit_restore_lmf:
 *
 *   Emit code to pop an LMF structure from the LMF stack.
 */
static guint8*
emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
	if ((lmf_tls_offset != -1) && !optimize_for_xen) {
		/*
		 * Optimized version which uses the mono_lmf TLS variable instead of indirection
		 * through the mono_lmf_addr TLS variable.
		 */
		/* reg = previous_lmf */
		amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
		x86_prefix (code, X86_FS_PREFIX);
		amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
	} else {
		/* Restore previous lmf */
		amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
		amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
		amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
	}

	return code;
}
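/*
 * A minimal C model of what emit_save_lmf ()/emit_restore_lmf () implement,
 * treating the TLS fast path and the mono_get_lmf_addr () fallback the same
 * way: a singly linked LMF list with the newest frame at the head. The
 * SketchLMF struct below is a simplified stand-in for MonoLMF.
 */
#if 0
typedef struct SketchLMF {
	struct SketchLMF *previous_lmf;		/* models MonoLMF.previous_lmf */
	struct SketchLMF **lmf_addr;		/* models MonoLMF.lmf_addr */
} SketchLMF;

static void
lmf_push_sketch (SketchLMF **lmf_addr, SketchLMF *lmf)
{
	lmf->lmf_addr = lmf_addr;		/* remembered so the epilogue can pop */
	lmf->previous_lmf = *lmf_addr;		/* link to the previous frame */
	*lmf_addr = lmf;			/* new list head */
}

static void
lmf_pop_sketch (SketchLMF *lmf)
{
	*lmf->lmf_addr = lmf->previous_lmf;	/* unlink */
}
#endif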
#define REAL_PRINT_REG(text,reg) \
mono_assert (reg >= 0); \
amd64_push_reg (code, AMD64_RAX); \
amd64_push_reg (code, AMD64_RDX); \
amd64_push_reg (code, AMD64_RCX); \
amd64_push_reg (code, reg); \
amd64_push_imm (code, reg); \
amd64_push_imm (code, text " %d %p\n"); \
amd64_mov_reg_imm (code, AMD64_RAX, printf); \
amd64_call_reg (code, AMD64_RAX); \
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*4); \
amd64_pop_reg (code, AMD64_RCX); \
amd64_pop_reg (code, AMD64_RDX); \
amd64_pop_reg (code, AMD64_RAX);

/* benchmark and set based on cpu */
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)

#if defined(__native_client__) || defined(__native_client_codegen__)
void
mono_nacl_gc (void)
{
#ifdef __native_client_gc__
	__nacl_suspend_thread_if_needed ();
#endif
}
#endif
3849 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3854 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
3855 MonoInst
*last_ins
= NULL
;
3856 guint last_offset
= 0;
3859 /* Fix max_offset estimate for each successor bb */
3860 if (cfg
->opt
& MONO_OPT_BRANCH
) {
3861 int current_offset
= cfg
->code_len
;
3862 MonoBasicBlock
*current_bb
;
3863 for (current_bb
= bb
; current_bb
!= NULL
; current_bb
= current_bb
->next_bb
) {
3864 current_bb
->max_offset
= current_offset
;
3865 current_offset
+= current_bb
->max_length
;
3869 if (cfg
->opt
& MONO_OPT_LOOP
) {
3870 int pad
, align
= LOOP_ALIGNMENT
;
3871 /* set alignment depending on cpu */
3872 if (bb_is_loop_start (bb
) && (pad
= (cfg
->code_len
& (align
- 1)))) {
3874 /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
3875 amd64_padding (code
, pad
);
3876 cfg
->code_len
+= pad
;
3877 bb
->native_offset
= cfg
->code_len
;
3881 #if defined(__native_client_codegen__)
3882 /* For Native Client, all indirect call/jump targets must be */
3883 /* 32-byte aligned. Exception handler blocks are jumped to */
3884 /* indirectly as well. */
3885 gboolean bb_needs_alignment
= (bb
->flags
& BB_INDIRECT_JUMP_TARGET
) ||
3886 (bb
->flags
& BB_EXCEPTION_HANDLER
);
3888 if ( bb_needs_alignment
&& ((cfg
->code_len
& kNaClAlignmentMask
) != 0)) {
3889 int pad
= kNaClAlignment
- (cfg
->code_len
& kNaClAlignmentMask
);
3890 if (pad
!= kNaClAlignment
) code
= mono_arch_nacl_pad(code
, pad
);
3891 cfg
->code_len
+= pad
;
3892 bb
->native_offset
= cfg
->code_len
;
3894 #endif /*__native_client_codegen__*/
3896 if (cfg
->verbose_level
> 2)
3897 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
3899 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
3900 MonoProfileCoverageInfo
*cov
= cfg
->coverage_info
;
3901 g_assert (!cfg
->compile_aot
);
3903 cov
->data
[bb
->dfn
].cil_code
= bb
->cil_code
;
3904 amd64_mov_reg_imm (code
, AMD64_R11
, (guint64
)&cov
->data
[bb
->dfn
].count
);
3905 /* this is not thread save, but good enough */
3906 amd64_inc_membase (code
, AMD64_R11
, 0);
3909 offset
= code
- cfg
->native_code
;
3911 mono_debug_open_block (cfg
, bb
, offset
);
3913 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
)
3914 x86_breakpoint (code
);
3916 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3917 offset
= code
- cfg
->native_code
;
3919 max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
3921 #define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
3923 if (G_UNLIKELY (offset
> (cfg
->code_size
- max_len
- EXTRA_CODE_SPACE
))) {
3924 cfg
->code_size
*= 2;
3925 cfg
->native_code
= mono_realloc_native_code(cfg
);
3926 code
= cfg
->native_code
+ offset
;
3927 cfg
->stat_code_reallocs
++;
3930 if (cfg
->debug_info
)
3931 mono_debug_record_line_number (cfg
, ins
, offset
);
3933 switch (ins
->opcode
) {
3935 amd64_mul_reg (code
, ins
->sreg2
, TRUE
);
3938 amd64_mul_reg (code
, ins
->sreg2
, FALSE
);
3940 case OP_X86_SETEQ_MEMBASE
:
3941 amd64_set_membase (code
, X86_CC_EQ
, ins
->inst_basereg
, ins
->inst_offset
, TRUE
);
3943 case OP_STOREI1_MEMBASE_IMM
:
3944 amd64_mov_membase_imm (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->inst_imm
, 1);
3946 case OP_STOREI2_MEMBASE_IMM
:
3947 amd64_mov_membase_imm (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->inst_imm
, 2);
3949 case OP_STOREI4_MEMBASE_IMM
:
3950 amd64_mov_membase_imm (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
3952 case OP_STOREI1_MEMBASE_REG
:
3953 amd64_mov_membase_reg (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->sreg1
, 1);
3955 case OP_STOREI2_MEMBASE_REG
:
3956 amd64_mov_membase_reg (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->sreg1
, 2);
3958 /* In AMD64 NaCl, pointers are 4 bytes, */
3959 /* so STORE_* != STOREI8_*. Likewise below. */
3960 case OP_STORE_MEMBASE_REG
:
3961 amd64_mov_membase_reg (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->sreg1
, sizeof(gpointer
));
3963 case OP_STOREI8_MEMBASE_REG
:
3964 amd64_mov_membase_reg (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->sreg1
, 8);
3966 case OP_STOREI4_MEMBASE_REG
:
3967 amd64_mov_membase_reg (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->sreg1
, 4);
3969 case OP_STORE_MEMBASE_IMM
:
3970 #ifndef __native_client_codegen__
3971 /* In NaCl, this could be a PCONST type, which could */
3972 /* mean a pointer type was copied directly into the */
3973 /* lower 32-bits of inst_imm, so for InvalidPtr==-1 */
3974 /* the value would be 0x00000000FFFFFFFF which is */
3975 /* not proper for an imm32 unless you cast it. */
3976 g_assert (amd64_is_imm32 (ins
->inst_imm
));
3978 amd64_mov_membase_imm (code
, ins
->inst_destbasereg
, ins
->inst_offset
, (gint32
)ins
->inst_imm
, sizeof(gpointer
));
3980 case OP_STOREI8_MEMBASE_IMM
:
3981 g_assert (amd64_is_imm32 (ins
->inst_imm
));
3982 amd64_mov_membase_imm (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->inst_imm
, 8);
3985 #ifdef __mono_ilp32__
3986 /* In ILP32, pointers are 4 bytes, so separate these */
3987 /* cases, use literal 8 below where we really want 8 */
3988 amd64_mov_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
3989 amd64_mov_reg_membase (code
, ins
->dreg
, ins
->dreg
, 0, sizeof(gpointer
));
3993 // FIXME: Decompose this earlier
3994 if (amd64_is_imm32 (ins
->inst_imm
))
3995 amd64_mov_reg_mem (code
, ins
->dreg
, ins
->inst_imm
, 8);
3997 amd64_mov_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
3998 amd64_mov_reg_membase (code
, ins
->dreg
, ins
->dreg
, 0, 8);
4002 amd64_mov_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
4003 amd64_movsxd_reg_membase (code
, ins
->dreg
, ins
->dreg
, 0);
4006 // FIXME: Decompose this earlier
4007 if (amd64_is_imm32 (ins
->inst_imm
))
4008 amd64_mov_reg_mem (code
, ins
->dreg
, ins
->inst_imm
, 4);
4010 amd64_mov_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
4011 amd64_mov_reg_membase (code
, ins
->dreg
, ins
->dreg
, 0, 4);
4015 amd64_mov_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
4016 amd64_widen_membase (code
, ins
->dreg
, ins
->dreg
, 0, FALSE
, FALSE
);
4019 /* For NaCl, pointers are 4 bytes, so separate these */
4020 /* cases, use literal 8 below where we really want 8 */
4021 amd64_mov_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
4022 amd64_widen_membase (code
, ins
->dreg
, ins
->dreg
, 0, FALSE
, TRUE
);
4024 case OP_LOAD_MEMBASE
:
4025 g_assert (amd64_is_imm32 (ins
->inst_offset
));
4026 amd64_mov_reg_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, sizeof(gpointer
));
4028 case OP_LOADI8_MEMBASE
:
4029 /* Use literal 8 instead of sizeof pointer or */
4030 /* register, we really want 8 for this opcode */
4031 g_assert (amd64_is_imm32 (ins
->inst_offset
));
4032 amd64_mov_reg_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, 8);
4034 case OP_LOADI4_MEMBASE
:
4035 amd64_movsxd_reg_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4037 case OP_LOADU4_MEMBASE
:
4038 amd64_mov_reg_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, 4);
4040 case OP_LOADU1_MEMBASE
:
4041 /* The cpu zero extends the result into 64 bits */
4042 amd64_widen_membase_size (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, FALSE
, FALSE
, 4);
4044 case OP_LOADI1_MEMBASE
:
4045 amd64_widen_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, TRUE
, FALSE
);
4047 case OP_LOADU2_MEMBASE
:
4048 /* The cpu zero extends the result into 64 bits */
4049 amd64_widen_membase_size (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, FALSE
, TRUE
, 4);
4051 case OP_LOADI2_MEMBASE
:
4052 amd64_widen_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, TRUE
, TRUE
);
4054 case OP_AMD64_LOADI8_MEMINDEX
:
4055 amd64_mov_reg_memindex_size (code
, ins
->dreg
, ins
->inst_basereg
, 0, ins
->inst_indexreg
, 0, 8);
4057 case OP_LCONV_TO_I1
:
4058 case OP_ICONV_TO_I1
:
4060 amd64_widen_reg (code
, ins
->dreg
, ins
->sreg1
, TRUE
, FALSE
);
4062 case OP_LCONV_TO_I2
:
4063 case OP_ICONV_TO_I2
:
4065 amd64_widen_reg (code
, ins
->dreg
, ins
->sreg1
, TRUE
, TRUE
);
4067 case OP_LCONV_TO_U1
:
4068 case OP_ICONV_TO_U1
:
4069 amd64_widen_reg (code
, ins
->dreg
, ins
->sreg1
, FALSE
, FALSE
);
4071 case OP_LCONV_TO_U2
:
4072 case OP_ICONV_TO_U2
:
4073 amd64_widen_reg (code
, ins
->dreg
, ins
->sreg1
, FALSE
, TRUE
);
4076 /* Clean out the upper word */
4077 amd64_mov_reg_reg_size (code
, ins
->dreg
, ins
->sreg1
, 4);
4080 amd64_movsxd_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
4084 amd64_alu_reg_reg (code
, X86_CMP
, ins
->sreg1
, ins
->sreg2
);
4086 case OP_COMPARE_IMM
:
4087 case OP_LCOMPARE_IMM
:
4088 g_assert (amd64_is_imm32 (ins
->inst_imm
));
4089 amd64_alu_reg_imm (code
, X86_CMP
, ins
->sreg1
, ins
->inst_imm
);
4091 case OP_X86_COMPARE_REG_MEMBASE
:
4092 amd64_alu_reg_membase (code
, X86_CMP
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
);
4094 case OP_X86_TEST_NULL
:
4095 amd64_test_reg_reg_size (code
, ins
->sreg1
, ins
->sreg1
, 4);
4097 case OP_AMD64_TEST_NULL
:
4098 amd64_test_reg_reg (code
, ins
->sreg1
, ins
->sreg1
);
4101 case OP_X86_ADD_REG_MEMBASE
:
4102 amd64_alu_reg_membase_size (code
, X86_ADD
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
, 4);
4104 case OP_X86_SUB_REG_MEMBASE
:
4105 amd64_alu_reg_membase_size (code
, X86_SUB
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
, 4);
4107 case OP_X86_AND_REG_MEMBASE
:
4108 amd64_alu_reg_membase_size (code
, X86_AND
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
, 4);
4110 case OP_X86_OR_REG_MEMBASE
:
4111 amd64_alu_reg_membase_size (code
, X86_OR
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
, 4);
4113 case OP_X86_XOR_REG_MEMBASE
:
4114 amd64_alu_reg_membase_size (code
, X86_XOR
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
, 4);
4117 case OP_X86_ADD_MEMBASE_IMM
:
4118 /* FIXME: Make a 64 version too */
4119 amd64_alu_membase_imm_size (code
, X86_ADD
, ins
->inst_basereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
4121 case OP_X86_SUB_MEMBASE_IMM
:
4122 g_assert (amd64_is_imm32 (ins
->inst_imm
));
4123 amd64_alu_membase_imm_size (code
, X86_SUB
, ins
->inst_basereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
4125 case OP_X86_AND_MEMBASE_IMM
:
4126 g_assert (amd64_is_imm32 (ins
->inst_imm
));
4127 amd64_alu_membase_imm_size (code
, X86_AND
, ins
->inst_basereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
4129 case OP_X86_OR_MEMBASE_IMM
:
4130 g_assert (amd64_is_imm32 (ins
->inst_imm
));
4131 amd64_alu_membase_imm_size (code
, X86_OR
, ins
->inst_basereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
4133 case OP_X86_XOR_MEMBASE_IMM
:
4134 g_assert (amd64_is_imm32 (ins
->inst_imm
));
4135 amd64_alu_membase_imm_size (code
, X86_XOR
, ins
->inst_basereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
4137 case OP_X86_ADD_MEMBASE_REG
:
4138 amd64_alu_membase_reg_size (code
, X86_ADD
, ins
->inst_basereg
, ins
->inst_offset
, ins
->sreg2
, 4);
4140 case OP_X86_SUB_MEMBASE_REG
:
4141 amd64_alu_membase_reg_size (code
, X86_SUB
, ins
->inst_basereg
, ins
->inst_offset
, ins
->sreg2
, 4);
4143 case OP_X86_AND_MEMBASE_REG
:
4144 amd64_alu_membase_reg_size (code
, X86_AND
, ins
->inst_basereg
, ins
->inst_offset
, ins
->sreg2
, 4);
4146 case OP_X86_OR_MEMBASE_REG
:
4147 amd64_alu_membase_reg_size (code
, X86_OR
, ins
->inst_basereg
, ins
->inst_offset
, ins
->sreg2
, 4);
4149 case OP_X86_XOR_MEMBASE_REG
:
4150 amd64_alu_membase_reg_size (code
, X86_XOR
, ins
->inst_basereg
, ins
->inst_offset
, ins
->sreg2
, 4);
4152 case OP_X86_INC_MEMBASE
:
4153 amd64_inc_membase_size (code
, ins
->inst_basereg
, ins
->inst_offset
, 4);
4155 case OP_X86_INC_REG
:
4156 amd64_inc_reg_size (code
, ins
->dreg
, 4);
4158 case OP_X86_DEC_MEMBASE
:
4159 amd64_dec_membase_size (code
, ins
->inst_basereg
, ins
->inst_offset
, 4);
4161 case OP_X86_DEC_REG
:
4162 amd64_dec_reg_size (code
, ins
->dreg
, 4);
4164 case OP_X86_MUL_REG_MEMBASE
:
4165 case OP_X86_MUL_MEMBASE_REG
:
4166 amd64_imul_reg_membase_size (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
, 4);
4168 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
4169 amd64_alu_membase_reg_size (code
, X86_CMP
, ins
->inst_basereg
, ins
->inst_offset
, ins
->sreg2
, 4);
4171 case OP_AMD64_ICOMPARE_MEMBASE_IMM
:
4172 amd64_alu_membase_imm_size (code
, X86_CMP
, ins
->inst_basereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
4174 case OP_AMD64_COMPARE_MEMBASE_REG
:
4175 amd64_alu_membase_reg_size (code
, X86_CMP
, ins
->inst_basereg
, ins
->inst_offset
, ins
->sreg2
, 8);
4177 case OP_AMD64_COMPARE_MEMBASE_IMM
:
4178 g_assert (amd64_is_imm32 (ins
->inst_imm
));
4179 amd64_alu_membase_imm_size (code
, X86_CMP
, ins
->inst_basereg
, ins
->inst_offset
, ins
->inst_imm
, 8);
4181 case OP_X86_COMPARE_MEMBASE8_IMM
:
4182 amd64_alu_membase8_imm_size (code
, X86_CMP
, ins
->inst_basereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
4184 case OP_AMD64_ICOMPARE_REG_MEMBASE
:
4185 amd64_alu_reg_membase_size (code
, X86_CMP
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
, 4);
4187 case OP_AMD64_COMPARE_REG_MEMBASE
:
4188 amd64_alu_reg_membase_size (code
, X86_CMP
, ins
->sreg1
, ins
->sreg2
, ins
->inst_offset
, 8);
		case OP_AMD64_ADD_REG_MEMBASE:
			amd64_alu_reg_membase_size (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
			break;
		case OP_AMD64_SUB_REG_MEMBASE:
			amd64_alu_reg_membase_size (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
			break;
		case OP_AMD64_AND_REG_MEMBASE:
			amd64_alu_reg_membase_size (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
			break;
		case OP_AMD64_OR_REG_MEMBASE:
			amd64_alu_reg_membase_size (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
			break;
		case OP_AMD64_XOR_REG_MEMBASE:
			amd64_alu_reg_membase_size (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset, 8);
			break;
		case OP_AMD64_ADD_MEMBASE_REG:
			amd64_alu_membase_reg_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
			break;
		case OP_AMD64_SUB_MEMBASE_REG:
			amd64_alu_membase_reg_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
			break;
		case OP_AMD64_AND_MEMBASE_REG:
			amd64_alu_membase_reg_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
			break;
		case OP_AMD64_OR_MEMBASE_REG:
			amd64_alu_membase_reg_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
			break;
		case OP_AMD64_XOR_MEMBASE_REG:
			amd64_alu_membase_reg_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2, 8);
			break;
		case OP_AMD64_ADD_MEMBASE_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_membase_imm_size (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
			break;
		case OP_AMD64_SUB_MEMBASE_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_membase_imm_size (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
			break;
		case OP_AMD64_AND_MEMBASE_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_membase_imm_size (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
			break;
		case OP_AMD64_OR_MEMBASE_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_membase_imm_size (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
			break;
		case OP_AMD64_XOR_MEMBASE_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_membase_imm_size (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm, 8);
			break;
		case OP_BREAK:
			amd64_breakpoint (code);
			break;
		case OP_RELAXED_NOP:
			x86_prefix (code, X86_REP_PREFIX);
			x86_nop (code);
			break;
		case OP_DUMMY_STORE:
		case OP_NOT_REACHED:
			break;
		case OP_SEQ_POINT: {
			int i;

			/*
			 * Read from the single stepping trigger page. This will cause a
			 * SIGSEGV when single stepping is enabled.
			 * We do this _before_ the breakpoint, so single stepping after
			 * a breakpoint is hit will step to the next IL offset.
			 */
			if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
				MonoInst *var = cfg->arch.ss_trigger_page_var;

				amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
				amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
			}

			/*
			 * This is the address which is saved in seq points.
			 */
			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);

			if (cfg->compile_aot) {
				guint32 offset = code - cfg->native_code;
				guint32 val;
				MonoInst *info_var = cfg->arch.seq_point_info_var;

				amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
				val = ((offset) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
				/* Load the info->bp_addrs [offset], which is either a valid address or the address of a trigger page */
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8);
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
			} else {
				/*
				 * A placeholder for a possible breakpoint inserted by
				 * mono_arch_set_breakpoint ().
				 */
				for (i = 0; i < breakpoint_size; ++i)
					x86_nop (code);
			}
			/*
			 * Add an additional nop so skipping the bp doesn't cause the ip to point
			 * to another IL offset.
			 */
			x86_nop (code);
			break;
		}
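		/*
		 * Rough sketch of what the sequence point above expands to for a
		 * JIT-compiled (non-AOT) method with single stepping requested.
		 * Register names and offsets are illustrative only, not the exact
		 * encoding produced by the emitter macros:
		 *
		 *     mov  r11, [frame + ss_trigger_page_var.offset]
		 *     cmp  dword [r11], 0        ; faults while the trigger page is protected
		 *     nop  * breakpoint_size     ; patchable placeholder for mono_arch_set_breakpoint ()
		 *     nop                        ; keeps a skipped breakpoint inside this IL offset
		 */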
		case OP_LADD:
			amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
			break;
		case OP_ADC:
			amd64_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
			break;
		case OP_ADD_IMM:
		case OP_LADD_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
			break;
		case OP_ADC_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
			break;
		case OP_LSUB:
			amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
			break;
		case OP_SBB:
			amd64_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
			break;
		case OP_SUB_IMM:
		case OP_LSUB_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
			break;
		case OP_SBB_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
			break;
		case OP_LAND:
			amd64_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
			break;
		case OP_AND_IMM:
		case OP_LAND_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
			break;
		case OP_LMUL:
			amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
			break;
		case OP_IMUL_IMM:
		case OP_MUL_IMM:
		case OP_LMUL_IMM: {
			guint32 size = (ins->opcode == OP_IMUL_IMM) ? 4 : 8;

			switch (ins->inst_imm) {
			case 2:
				/* MOV r1, r2 */
				/* ADD r1, r1 */
				if (ins->dreg != ins->sreg1)
					amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, size);
				amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
				break;
			case 3:
				/* LEA r1, [r2 + r2*2] */
				amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
				break;
			case 5:
				/* LEA r1, [r2 + r2*4] */
				amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
				break;
			case 6:
				/* LEA r1, [r2 + r2*2] */
				/* ADD r1, r1          */
				amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
				amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
				break;
			case 9:
				/* LEA r1, [r2 + r2*8] */
				amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
				break;
			case 10:
				/* LEA r1, [r2 + r2*4] */
				/* ADD r1, r1          */
				amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
				amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
				break;
			case 12:
				/* LEA r1, [r2 + r2*2] */
				/* SHL r1, 2           */
				amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
				amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
				break;
			case 25:
				/* LEA r1, [r2 + r2*4] */
				/* LEA r1, [r1 + r1*4] */
				amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
				amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
				break;
			case 100:
				/* LEA r1, [r2 + r2*4] */
				/* SHL r1, 2           */
				/* LEA r1, [r1 + r1*4] */
				amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
				amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
				amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
				break;
			default:
				amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, size);
				break;
			}
			break;
		}
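		/*
		 * Sketch of the strength reduction above, writing dreg as r1 and
		 * sreg1 as r2 (illustrative only):
		 *
		 *   imm ==  10:  lea r1, [r2 + r2*4]   ; r1 = 5*r2
		 *                add r1, r1            ; r1 = 10*r2
		 *   imm == 100:  lea r1, [r2 + r2*4]   ; 5*r2
		 *                shl r1, 2             ; 20*r2
		 *                lea r1, [r1 + r1*4]   ; 100*r2
		 *
		 * Any other immediate falls back to a single imul with the constant.
		 */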
		case OP_LDIV:
		case OP_LREM:
			/* Regalloc magic makes the div/rem cases the same */
			if (ins->sreg2 == AMD64_RDX) {
				amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
				amd64_cdq (code);
				amd64_div_membase (code, AMD64_RSP, -8, TRUE);
			} else {
				amd64_cdq (code);
				amd64_div_reg (code, ins->sreg2, TRUE);
			}
			break;
		case OP_LDIV_UN:
		case OP_LREM_UN:
			if (ins->sreg2 == AMD64_RDX) {
				amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
				amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
				amd64_div_membase (code, AMD64_RSP, -8, FALSE);
			} else {
				amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
				amd64_div_reg (code, ins->sreg2, FALSE);
			}
			break;
		case OP_IDIV:
		case OP_IREM:
			if (ins->sreg2 == AMD64_RDX) {
				amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
				amd64_cdq_size (code, 4);
				amd64_div_membase_size (code, AMD64_RSP, -8, TRUE, 4);
			} else {
				amd64_cdq_size (code, 4);
				amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
			}
			break;
		case OP_IDIV_UN:
		case OP_IREM_UN:
			if (ins->sreg2 == AMD64_RDX) {
				amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
				amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
				amd64_div_membase_size (code, AMD64_RSP, -8, FALSE, 4);
			} else {
				amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
				amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
			}
			break;
		case OP_IREM_IMM: {
			int power = mono_is_power_of_two (ins->inst_imm);

			g_assert (ins->sreg1 == X86_EAX);
			g_assert (ins->dreg == X86_EAX);
			g_assert (power >= 0);

			if (power == 0) {
				amd64_mov_reg_imm (code, ins->dreg, 0);
				break;
			}

			/* Based on gcc code */

			/* Add compensation for negative dividents */
			amd64_mov_reg_reg_size (code, AMD64_RDX, AMD64_RAX, 4);
			amd64_shift_reg_imm_size (code, X86_SAR, AMD64_RDX, 31, 4);
			amd64_shift_reg_imm_size (code, X86_SHR, AMD64_RDX, 32 - power, 4);
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_RAX, AMD64_RDX, 4);
			/* Compute remainder */
			amd64_alu_reg_imm_size (code, X86_AND, AMD64_RAX, (1 << power) - 1, 4);
			/* Remove compensation */
			amd64_alu_reg_reg_size (code, X86_SUB, AMD64_RAX, AMD64_RDX, 4);
			break;
		}
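		/*
		 * The sequence above computes x % (1 << power) for signed x without a
		 * division, using the usual compensation identity (sketch):
		 *
		 *   t = (x < 0) ? ((1 << power) - 1) : 0;    // from the sar/shr pair
		 *   r = ((x + t) & ((1 << power) - 1)) - t;
		 *
		 * e.g. x = -7, power = 2: t = 3, (-7 + 3) & 3 = 0, r = -3, which matches
		 * the C / CIL rem result of -7 % 4.
		 */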
		case OP_LMUL_OVF:
			amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
			break;
		case OP_LOR:
			amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
			break;
		case OP_OR_IMM:
		case OP_LOR_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
			break;
		case OP_LXOR:
			amd64_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
			break;
		case OP_XOR_IMM:
		case OP_LXOR_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
			break;
		case OP_LSHL:
			g_assert (ins->sreg2 == AMD64_RCX);
			amd64_shift_reg (code, X86_SHL, ins->dreg);
			break;
		case OP_LSHR:
			g_assert (ins->sreg2 == AMD64_RCX);
			amd64_shift_reg (code, X86_SAR, ins->dreg);
			break;
		case OP_SHR_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_LSHR_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
			break;
		case OP_SHR_UN_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_LSHR_UN_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
			break;
		case OP_LSHR_UN:
			g_assert (ins->sreg2 == AMD64_RCX);
			amd64_shift_reg (code, X86_SHR, ins->dreg);
			break;
		case OP_SHL_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_LSHL_IMM:
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
			break;
		case OP_IADDCC:
		case OP_IADD:
			amd64_alu_reg_reg_size (code, X86_ADD, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_IADC:
			amd64_alu_reg_reg_size (code, X86_ADC, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_IADD_IMM:
			amd64_alu_reg_imm_size (code, X86_ADD, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_IADC_IMM:
			amd64_alu_reg_imm_size (code, X86_ADC, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_ISUBCC:
		case OP_ISUB:
			amd64_alu_reg_reg_size (code, X86_SUB, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_ISBB:
			amd64_alu_reg_reg_size (code, X86_SBB, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_ISUB_IMM:
			amd64_alu_reg_imm_size (code, X86_SUB, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_ISBB_IMM:
			amd64_alu_reg_imm_size (code, X86_SBB, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_IAND:
			amd64_alu_reg_reg_size (code, X86_AND, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_IAND_IMM:
			amd64_alu_reg_imm_size (code, X86_AND, ins->sreg1, ins->inst_imm, 4);
			break;
		case OP_IOR:
			amd64_alu_reg_reg_size (code, X86_OR, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_IOR_IMM:
			amd64_alu_reg_imm_size (code, X86_OR, ins->sreg1, ins->inst_imm, 4);
			break;
		case OP_IXOR:
			amd64_alu_reg_reg_size (code, X86_XOR, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_IXOR_IMM:
			amd64_alu_reg_imm_size (code, X86_XOR, ins->sreg1, ins->inst_imm, 4);
			break;
		case OP_INEG:
			amd64_neg_reg_size (code, ins->sreg1, 4);
			break;
		case OP_INOT:
			amd64_not_reg_size (code, ins->sreg1, 4);
			break;
		case OP_ISHL:
			g_assert (ins->sreg2 == AMD64_RCX);
			amd64_shift_reg_size (code, X86_SHL, ins->dreg, 4);
			break;
		case OP_ISHR:
			g_assert (ins->sreg2 == AMD64_RCX);
			amd64_shift_reg_size (code, X86_SAR, ins->dreg, 4);
			break;
		case OP_ISHR_IMM:
			amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_ISHR_UN_IMM:
			amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_ISHR_UN:
			g_assert (ins->sreg2 == AMD64_RCX);
			amd64_shift_reg_size (code, X86_SHR, ins->dreg, 4);
			break;
		case OP_ISHL_IMM:
			amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_IMUL:
			amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_IMUL_OVF:
			amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
			break;
		case OP_IMUL_OVF_UN:
		case OP_LMUL_OVF_UN: {
			/* the mul operation and the exception check should most likely be split */
			int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
			int size = (ins->opcode == OP_IMUL_OVF_UN) ? 4 : 8;
			/*g_assert (ins->sreg2 == X86_EAX);
			g_assert (ins->dreg == X86_EAX);*/
			if (ins->sreg2 == X86_EAX) {
				non_eax_reg = ins->sreg1;
			} else if (ins->sreg1 == X86_EAX) {
				non_eax_reg = ins->sreg2;
			} else {
				/* no need to save since we're going to store to it anyway */
				if (ins->dreg != X86_EAX) {
					saved_eax = TRUE;
					amd64_push_reg (code, X86_EAX);
				}
				amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size);
				non_eax_reg = ins->sreg2;
			}
			if (ins->dreg == X86_EDX) {
				if (!saved_eax) {
					saved_eax = TRUE;
					amd64_push_reg (code, X86_EAX);
				}
			} else {
				saved_edx = TRUE;
				amd64_push_reg (code, X86_EDX);
			}
			amd64_mul_reg_size (code, non_eax_reg, FALSE, size);
			/* save before the check since pop and mov don't change the flags */
			if (ins->dreg != X86_EAX)
				amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size);
			if (saved_edx)
				amd64_pop_reg (code, X86_EDX);
			if (saved_eax)
				amd64_pop_reg (code, X86_EAX);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
			break;
		}
		case OP_ICOMPARE:
			amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_ICOMPARE_IMM:
			amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
			break;
		case OP_IBEQ:
		case OP_IBLT:
		case OP_IBGT:
		case OP_IBGE:
		case OP_IBLE:
		case OP_LBEQ:
		case OP_LBLT:
		case OP_LBGT:
		case OP_LBGE:
		case OP_LBLE:
		case OP_IBNE_UN:
		case OP_IBLT_UN:
		case OP_IBGT_UN:
		case OP_IBGE_UN:
		case OP_IBLE_UN:
		case OP_LBNE_UN:
		case OP_LBLT_UN:
		case OP_LBGT_UN:
		case OP_LBGE_UN:
		case OP_LBLE_UN:
			EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
			break;
		case OP_CMOV_INE_UN:
		case OP_CMOV_IGE_UN:
		case OP_CMOV_IGT_UN:
		case OP_CMOV_ILE_UN:
		case OP_CMOV_ILT_UN:
		case OP_CMOV_LNE_UN:
		case OP_CMOV_LGE_UN:
		case OP_CMOV_LGT_UN:
		case OP_CMOV_LLE_UN:
		case OP_CMOV_LLT_UN:
			g_assert (ins->dreg == ins->sreg1);
			/* This needs to operate on 64 bit values */
			amd64_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
			break;
		case OP_LNOT:
			amd64_not_reg (code, ins->sreg1);
			break;
		case OP_LNEG:
			amd64_neg_reg (code, ins->sreg1);
			break;
		case OP_ICONST:
		case OP_I8CONST:
			if ((((guint64)ins->inst_c0) >> 32) == 0)
				amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
			else
				amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
			break;
		case OP_AOTCONST:
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
			amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, sizeof(gpointer));
			break;
		case OP_JUMP_TABLE:
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
			amd64_mov_reg_imm_size (code, ins->dreg, 0, 8);
			break;
		case OP_MOVE:
			amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof(mgreg_t));
			break;
		case OP_AMD64_SET_XMMREG_R4: {
			amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
			break;
		}
		case OP_AMD64_SET_XMMREG_R8: {
			if (ins->dreg != ins->sreg1)
				amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
			break;
		}
		case OP_TAILCALL: {
			MonoCallInst *call = (MonoCallInst*)ins;
			int pos = 0, i;

			/* FIXME: no tracing support... */
			if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
				code = mono_arch_instrument_epilog_full (cfg, mono_profiler_method_leave, code, FALSE, TRUE);

			g_assert (!cfg->method->save_lmf);

			if (cfg->arch.omit_fp) {
				guint32 save_offset = 0;
				/* Pop callee-saved registers */
				for (i = 0; i < AMD64_NREG; ++i)
					if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
						amd64_mov_reg_membase (code, i, AMD64_RSP, save_offset, 8);
						save_offset += 8;
					}
				amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);

				/* FIXME: */
				if (call->stack_usage)
					NOT_IMPLEMENTED;
			} else {
				for (i = 0; i < AMD64_NREG; ++i)
					if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
						pos -= sizeof(mgreg_t);

				/* Restore callee-saved registers */
				for (i = AMD64_NREG - 1; i > 0; --i) {
					if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
						amd64_mov_reg_membase (code, i, AMD64_RBP, pos, sizeof(mgreg_t));
						pos += sizeof(mgreg_t);
					}
				}

				/* Copy arguments on the stack to our argument area */
				for (i = 0; i < call->stack_usage; i += sizeof(mgreg_t)) {
					amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, sizeof(mgreg_t));
					amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, sizeof(mgreg_t));
				}

				amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
			}

			offset = code - cfg->native_code;
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
			if (cfg->compile_aot)
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
			else
				amd64_set_reg_template (code, AMD64_R11);
			amd64_jump_reg (code, AMD64_R11);
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			break;
		}
		case OP_CHECK_THIS:
			/* ensure ins->sreg1 is not NULL */
			amd64_alu_membase_imm_size (code, X86_CMP, ins->sreg1, 0, 0, 4);
			break;
		case OP_ARGLIST:
			amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie);
			amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, sizeof(gpointer));
			break;
		case OP_FCALL:
		case OP_LCALL:
		case OP_VCALL:
		case OP_VCALL2:
		case OP_VOIDCALL:
		case OP_CALL:
			call = (MonoCallInst*)ins;
			/*
			 * The AMD64 ABI forces callers to know about varargs.
			 */
			if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke))
				amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
			else if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (cfg->method->klass->image != mono_defaults.corlib)) {
				/*
				 * Since the unmanaged calling convention doesn't contain a
				 * 'vararg' entry, we have to treat every pinvoke call as a
				 * potential vararg call.
				 */
				guint32 nregs, i;
				nregs = 0;
				for (i = 0; i < AMD64_XMM_NREG; ++i)
					if (call->used_fregs & (1 << i))
						nregs ++;
				if (!nregs)
					amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
				else
					amd64_mov_reg_imm (code, AMD64_RAX, nregs);
			}

			if (ins->flags & MONO_INST_HAS_METHOD)
				code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method, FALSE);
			else
				code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr, FALSE);
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
				amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
			code = emit_move_return_value (cfg, ins, code);
			break;
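		/*
		 * Note on the %rax setup in the call cases above: in the System V AMD64
		 * ABI, a caller of a varargs function passes in %al an upper bound on
		 * the number of vector registers used for arguments. Zeroing %rax (or
		 * loading the counted nregs) therefore keeps the callee's va_arg
		 * handling of SSE registers correct; e.g. a p/invoke of printf ("%f", d)
		 * needs %al >= 1. (Explanatory sketch only; the ABI rule is the source.)
		 */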
		case OP_FCALL_REG:
		case OP_LCALL_REG:
		case OP_VCALL_REG:
		case OP_VCALL2_REG:
		case OP_VOIDCALL_REG:
		case OP_CALL_REG:
			call = (MonoCallInst*)ins;

			if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
				amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
				ins->sreg1 = AMD64_R11;
			}

			/*
			 * The AMD64 ABI forces callers to know about varargs.
			 */
			if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke)) {
				if (ins->sreg1 == AMD64_RAX) {
					amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
					ins->sreg1 = AMD64_R11;
				}
				amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
			} else if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (cfg->method->klass->image != mono_defaults.corlib)) {
				/*
				 * Since the unmanaged calling convention doesn't contain a
				 * 'vararg' entry, we have to treat every pinvoke call as a
				 * potential vararg call.
				 */
				guint32 nregs, i;
				nregs = 0;
				for (i = 0; i < AMD64_XMM_NREG; ++i)
					if (call->used_fregs & (1 << i))
						nregs ++;
				if (ins->sreg1 == AMD64_RAX) {
					amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
					ins->sreg1 = AMD64_R11;
				}
				if (!nregs)
					amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
				else
					amd64_mov_reg_imm (code, AMD64_RAX, nregs);
			}

			amd64_call_reg (code, ins->sreg1);
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
				amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
			code = emit_move_return_value (cfg, ins, code);
			break;
		case OP_FCALL_MEMBASE:
		case OP_LCALL_MEMBASE:
		case OP_VCALL_MEMBASE:
		case OP_VCALL2_MEMBASE:
		case OP_VOIDCALL_MEMBASE:
		case OP_CALL_MEMBASE:
			call = (MonoCallInst*)ins;

			amd64_call_membase (code, ins->sreg1, ins->inst_offset);
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
				amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
			code = emit_move_return_value (cfg, ins, code);
			break;
		case OP_DYN_CALL: {
			int i;
			MonoInst *var = cfg->dyn_call_var;

			g_assert (var->opcode == OP_REGOFFSET);

			/* r11 = args buffer filled by mono_arch_get_dyn_call_args () */
			amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
			/* r10 = ftn */
			amd64_mov_reg_reg (code, AMD64_R10, ins->sreg2, 8);

			/* Save args buffer */
			amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8);

			/* Set argument registers */
			for (i = 0; i < PARAM_REGS; ++i)
				amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof(mgreg_t), sizeof(mgreg_t));

			/* Make the call */
			amd64_call_reg (code, AMD64_R10);

			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;

			/* Save result */
			amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
			amd64_mov_membase_reg (code, AMD64_R11, G_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
			break;
		}
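		/*
		 * Sketch of the OP_DYN_CALL contract implied above (the exact DynCallArgs
		 * layout lives elsewhere in the backend): r11 points to a buffer whose
		 * first PARAM_REGS slots of mgreg_t are copied into the integer argument
		 * registers, the target held in r10 is then called, and the integer
		 * return value is written back into the buffer's `res` field for the
		 * dynamic-call machinery to read.
		 */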
		case OP_AMD64_SAVE_SP_TO_LMF: {
			MonoInst *lmf_var = cfg->arch.lmf_var;
			amd64_mov_membase_reg (code, cfg->frame_reg, lmf_var->inst_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
			break;
		}
		case OP_X86_PUSH:
			g_assert (!cfg->arch.no_pushes);
			amd64_push_reg (code, ins->sreg1);
			break;
		case OP_X86_PUSH_IMM:
			g_assert (!cfg->arch.no_pushes);
			g_assert (amd64_is_imm32 (ins->inst_imm));
			amd64_push_imm (code, ins->inst_imm);
			break;
		case OP_X86_PUSH_MEMBASE:
			g_assert (!cfg->arch.no_pushes);
			amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_X86_PUSH_OBJ: {
			int size = ALIGN_TO (ins->inst_imm, 8);

			g_assert (!cfg->arch.no_pushes);

			amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
			amd64_push_reg (code, AMD64_RDI);
			amd64_push_reg (code, AMD64_RSI);
			amd64_push_reg (code, AMD64_RCX);
			if (ins->inst_offset)
				amd64_lea_membase (code, AMD64_RSI, ins->inst_basereg, ins->inst_offset);
			else
				amd64_mov_reg_reg (code, AMD64_RSI, ins->inst_basereg, 8);
			amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, (3 * 8));
			amd64_mov_reg_imm (code, AMD64_RCX, (size >> 3));
			amd64_cld (code);
			amd64_prefix (code, X86_REP_PREFIX);
			amd64_movsd (code);
			amd64_pop_reg (code, AMD64_RCX);
			amd64_pop_reg (code, AMD64_RSI);
			amd64_pop_reg (code, AMD64_RDI);
			break;
		}
		case OP_X86_LEA:
			amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
			break;
		case OP_X86_LEA_MEMBASE:
			amd64_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
			break;
		case OP_X86_XCHG:
			amd64_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_LOCALLOC:
			/* keep alignment */
			amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
			amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
			code = mono_emit_stack_alloc (cfg, code, ins);
			amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
			if (cfg->param_area && cfg->arch.no_pushes)
				amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
			break;
		case OP_LOCALLOC_IMM: {
			guint32 size = ins->inst_imm;
			size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

			if (ins->flags & MONO_INST_INIT) {
				if (size < 64) {
					int i;

					amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
					amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);

					for (i = 0; i < size; i += 8)
						amd64_mov_membase_reg (code, AMD64_RSP, i, ins->dreg, 8);
					amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
				} else {
					amd64_mov_reg_imm (code, ins->dreg, size);
					ins->sreg1 = ins->dreg;

					code = mono_emit_stack_alloc (cfg, code, ins);
					amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
				}
			} else {
				amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
				amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
			}
			if (cfg->param_area && cfg->arch.no_pushes)
				amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
			break;
		}
		case OP_THROW: {
			amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
					  (gpointer)"mono_arch_throw_exception", FALSE);
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			break;
		}
		case OP_RETHROW: {
			amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
					  (gpointer)"mono_arch_rethrow_exception", FALSE);
			ins->flags |= MONO_INST_GC_CALLSITE;
			ins->backend.pc_offset = code - cfg->native_code;
			break;
		}
		case OP_CALL_HANDLER:
			/* Align stack */
			amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
			amd64_call_imm (code, 0);
			mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
			/* Restore stack alignment */
			amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
			break;
		case OP_START_HANDLER: {
			/* Even though we're saving RSP, use sizeof */
			/* gpointer because spvar is of type IntPtr */
			/* see: mono_create_spvar_for_region */
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, sizeof(gpointer));

			if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) ||
			     MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY)) &&
			    cfg->param_area && cfg->arch.no_pushes) {
				amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
			}
			break;
		}
		case OP_ENDFINALLY: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
			amd64_ret (code);
			break;
		}
		case OP_ENDFILTER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			amd64_mov_reg_membase (code, AMD64_RSP, spvar->inst_basereg, spvar->inst_offset, sizeof(gpointer));
			/* The local allocator will put the result into RAX */
			amd64_ret (code);
			break;
		}
		case OP_LABEL:
			ins->inst_c0 = code - cfg->native_code;
			break;
		case OP_BR:
			//g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
			//if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
			//break;
			if (ins->inst_target_bb->native_offset) {
				amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
			} else {
				mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
				if ((cfg->opt & MONO_OPT_BRANCH) &&
				    x86_is_imm8 (ins->inst_target_bb->max_offset - offset))
					x86_jump8 (code, 0);
				else
					x86_jump32 (code, 0);
			}
			break;
		case OP_BR_REG:
			amd64_jump_reg (code, ins->sreg1);
			break;
		case OP_CEQ:
		case OP_LCEQ:
		case OP_ICEQ:
		case OP_CLT:
		case OP_LCLT:
		case OP_ICLT:
		case OP_CGT:
		case OP_LCGT:
		case OP_ICGT:
		case OP_CLT_UN:
		case OP_LCLT_UN:
		case OP_ICLT_UN:
		case OP_CGT_UN:
		case OP_LCGT_UN:
		case OP_ICGT_UN:
			amd64_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
			amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
			break;
		case OP_COND_EXC_EQ:
		case OP_COND_EXC_NE_UN:
		case OP_COND_EXC_LT:
		case OP_COND_EXC_LT_UN:
		case OP_COND_EXC_GT:
		case OP_COND_EXC_GT_UN:
		case OP_COND_EXC_GE:
		case OP_COND_EXC_GE_UN:
		case OP_COND_EXC_LE:
		case OP_COND_EXC_LE_UN:
		case OP_COND_EXC_IEQ:
		case OP_COND_EXC_INE_UN:
		case OP_COND_EXC_ILT:
		case OP_COND_EXC_ILT_UN:
		case OP_COND_EXC_IGT:
		case OP_COND_EXC_IGT_UN:
		case OP_COND_EXC_IGE:
		case OP_COND_EXC_IGE_UN:
		case OP_COND_EXC_ILE:
		case OP_COND_EXC_ILE_UN:
			EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->inst_p1);
			break;
		case OP_COND_EXC_OV:
		case OP_COND_EXC_NO:
		case OP_COND_EXC_C:
		case OP_COND_EXC_NC:
			EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ],
						    (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
			break;
		case OP_COND_EXC_IOV:
		case OP_COND_EXC_INO:
		case OP_COND_EXC_IC:
		case OP_COND_EXC_INC:
			EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ],
						    (ins->opcode < OP_COND_EXC_INE_UN), ins->inst_p1);
			break;
		/* floating point opcodes */
		case OP_R8CONST: {
			double d = *(double *)ins->inst_p0;

			if ((d == 0.0) && (mono_signbit (d) == 0)) {
				amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
			} else {
				mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
				amd64_sse_movsd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
			}
			break;
		}
		case OP_R4CONST: {
			float f = *(float *)ins->inst_p0;

			if ((f == 0.0) && (mono_signbit (f) == 0)) {
				amd64_sse_xorpd_reg_reg (code, ins->dreg, ins->dreg);
			} else {
				mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
				amd64_sse_movss_reg_membase (code, ins->dreg, AMD64_RIP, 0);
				amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
			}
			break;
		}
		case OP_STORER8_MEMBASE_REG:
			amd64_sse_movsd_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1);
			break;
		case OP_LOADR8_MEMBASE:
			amd64_sse_movsd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_STORER4_MEMBASE_REG:
			/* This requires a double->single conversion */
			amd64_sse_cvtsd2ss_reg_reg (code, AMD64_XMM15, ins->sreg1);
			amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, AMD64_XMM15);
			break;
		case OP_LOADR4_MEMBASE:
			amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
			amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
			break;
		case OP_ICONV_TO_R4: /* FIXME: change precision */
		case OP_ICONV_TO_R8:
			amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
			break;
		case OP_LCONV_TO_R4: /* FIXME: change precision */
		case OP_LCONV_TO_R8:
			amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
			break;
		case OP_FCONV_TO_R4:
			/* FIXME: nothing to do ?? */
			break;
		case OP_FCONV_TO_I1:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
			break;
		case OP_FCONV_TO_U1:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
			break;
		case OP_FCONV_TO_I2:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
			break;
		case OP_FCONV_TO_U2:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
			break;
		case OP_FCONV_TO_U4:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
			break;
		case OP_FCONV_TO_I4:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
			break;
		case OP_FCONV_TO_I8:
			code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 8, TRUE);
			break;
		case OP_LCONV_TO_R_UN: {
			guint8 *br [2];

			/* Based on gcc code */
			amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
			br [0] = code; x86_branch8 (code, X86_CC_S, 0, TRUE);

			/* Positive case */
			amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
			br [1] = code; x86_jump8 (code, 0);
			amd64_patch (br [0], code);

			/* Negative case */
			/* Save to the red zone */
			amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8);
			amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8);
			amd64_mov_reg_reg (code, AMD64_RCX, ins->sreg1, 8);
			amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
			amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, 1);
			amd64_shift_reg_imm (code, X86_SHR, AMD64_RAX, 1);
			amd64_alu_reg_imm (code, X86_OR, AMD64_RAX, AMD64_RCX);
			amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, AMD64_RAX);
			amd64_sse_addsd_reg_reg (code, ins->dreg, ins->dreg);
			/* Restore */
			amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, -8, 8);
			amd64_patch (br [1], code);
			break;
		}
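		/*
		 * Why the halving in the negative case above works (sketch): cvtsi2sd
		 * only converts signed values, so a guint64 with the top bit set is
		 * rewritten as 2 * ((x >> 1) | (x & 1)). The shifted half fits in a
		 * signed range, addsd doubles it back, and OR-ing the discarded low bit
		 * into the shifted value keeps the final rounding correct (a
		 * sticky-bit / round-to-odd style trick).
		 */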
		case OP_LCONV_TO_OVF_U4:
			amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
			amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
			break;
		case OP_LCONV_TO_OVF_I4_UN:
			amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0x7fffffff);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_GT, FALSE, "OverflowException");
			amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 8);
			break;
		case OP_FMOVE:
			if (ins->dreg != ins->sreg1)
				amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
			break;
		case OP_FADD:
			amd64_sse_addsd_reg_reg (code, ins->dreg, ins->sreg2);
			break;
		case OP_FSUB:
			amd64_sse_subsd_reg_reg (code, ins->dreg, ins->sreg2);
			break;
		case OP_FMUL:
			amd64_sse_mulsd_reg_reg (code, ins->dreg, ins->sreg2);
			break;
		case OP_FDIV:
			amd64_sse_divsd_reg_reg (code, ins->dreg, ins->sreg2);
			break;
		case OP_FNEG: {
			static double r8_0 = -0.0;

			g_assert (ins->sreg1 == ins->dreg);

			mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &r8_0);
			amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
			break;
		}
		case OP_SIN:
			EMIT_SSE2_FPFUNC (code, fsin, ins->dreg, ins->sreg1);
			break;
		case OP_COS:
			EMIT_SSE2_FPFUNC (code, fcos, ins->dreg, ins->sreg1);
			break;
		case OP_ABS: {
			static guint64 d = 0x7fffffffffffffffUL;

			g_assert (ins->sreg1 == ins->dreg);

			mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, &d);
			amd64_sse_andpd_reg_membase (code, ins->dreg, AMD64_RIP, 0);
			break;
		}
		case OP_SQRT:
			EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1);
			break;
		case OP_IMIN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
			amd64_cmov_reg_size (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2, 4);
			break;
		case OP_IMIN_UN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
			amd64_cmov_reg_size (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2, 4);
			break;
		case OP_IMAX:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
			amd64_cmov_reg_size (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2, 4);
			break;
		case OP_IMAX_UN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
			amd64_cmov_reg_size (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2, 4);
			break;
		case OP_LMIN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			amd64_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2);
			break;
		case OP_LMIN_UN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			amd64_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2);
			break;
		case OP_LMAX:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			amd64_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2);
			break;
		case OP_LMAX_UN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			amd64_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2);
			break;
		case OP_FCOMPARE:
			/*
			 * The two arguments are swapped because the fbranch instructions
			 * depend on this for the non-sse case to work.
			 */
			amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
			break;
		case OP_FCEQ: {
			/* zeroing the register at the start results in
			 * shorter and faster code (we can also remove the widening op)
			 */
			guchar *unordered_check;
			amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
			amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
			unordered_check = code;
			x86_branch8 (code, X86_CC_P, 0, FALSE);
			amd64_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
			amd64_patch (unordered_check, code);
			break;
		}
		case OP_FCLT:
		case OP_FCLT_UN:
			/* zeroing the register at the start results in
			 * shorter and faster code (we can also remove the widening op)
			 */
			amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
			amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
			if (ins->opcode == OP_FCLT_UN) {
				guchar *unordered_check = code;
				guchar *jump_to_end;
				x86_branch8 (code, X86_CC_P, 0, FALSE);
				amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
				jump_to_end = code;
				x86_jump8 (code, 0);
				amd64_patch (unordered_check, code);
				amd64_inc_reg (code, ins->dreg);
				amd64_patch (jump_to_end, code);
			} else {
				amd64_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
			}
			break;
		case OP_FCGT:
		case OP_FCGT_UN: {
			/* zeroing the register at the start results in
			 * shorter and faster code (we can also remove the widening op)
			 */
			guchar *unordered_check;
			amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
			amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
			if (ins->opcode == OP_FCGT) {
				unordered_check = code;
				x86_branch8 (code, X86_CC_P, 0, FALSE);
				amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
				amd64_patch (unordered_check, code);
			} else {
				amd64_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
			}
			break;
		}
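		/*
		 * A note on the parity checks in the FCEQ/FCLT/FCGT cases above (sketch):
		 * comisd raises PF when either operand is NaN, i.e. the compare is
		 * unordered. The ordered variants branch over the setcc, so NaN yields 0,
		 * while the _UN variants jump to an inc of dreg and so yield 1, which
		 * matches CIL clt.un / cgt.un semantics.
		 */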
		case OP_FCLT_MEMBASE:
		case OP_FCGT_MEMBASE:
		case OP_FCLT_UN_MEMBASE:
		case OP_FCGT_UN_MEMBASE:
		case OP_FCEQ_MEMBASE: {
			guchar *unordered_check, *jump_to_end;
			int x86_cond;

			amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
			amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);

			switch (ins->opcode) {
			case OP_FCEQ_MEMBASE:
				x86_cond = X86_CC_EQ;
				break;
			case OP_FCLT_MEMBASE:
			case OP_FCLT_UN_MEMBASE:
				x86_cond = X86_CC_LT;
				break;
			case OP_FCGT_MEMBASE:
			case OP_FCGT_UN_MEMBASE:
				x86_cond = X86_CC_GT;
				break;
			default:
				g_assert_not_reached ();
			}

			unordered_check = code;
			x86_branch8 (code, X86_CC_P, 0, FALSE);
			amd64_set_reg (code, x86_cond, ins->dreg, FALSE);

			switch (ins->opcode) {
			case OP_FCEQ_MEMBASE:
			case OP_FCLT_MEMBASE:
			case OP_FCGT_MEMBASE:
				amd64_patch (unordered_check, code);
				break;
			case OP_FCLT_UN_MEMBASE:
			case OP_FCGT_UN_MEMBASE:
				jump_to_end = code;
				x86_jump8 (code, 0);
				amd64_patch (unordered_check, code);
				amd64_inc_reg (code, ins->dreg);
				amd64_patch (jump_to_end, code);
				break;
			}
			break;
		}
		case OP_FBEQ: {
			guchar *jump = code;
			x86_branch8 (code, X86_CC_P, 0, TRUE);
			EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
			amd64_patch (jump, code);
			break;
		}
		case OP_FBNE_UN:
			/* Branch if C013 != 100 */
			/* branch if !ZF or (PF|CF) */
			EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
			EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
			EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
			break;
		case OP_FBLT:
			EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
			break;
		case OP_FBLT_UN:
			EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
			EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
			break;
		case OP_FBGT:
		case OP_FBGT_UN:
			if (ins->opcode == OP_FBGT) {
				guchar *br1;

				/* skip branch if C1=1 */
				br1 = code;
				x86_branch8 (code, X86_CC_P, 0, FALSE);
				/* branch if (C0 | C3) = 1 */
				EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
				amd64_patch (br1, code);
				break;
			} else {
				EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
			}
			break;
		case OP_FBGE: {
			/* Branch if C013 == 100 or 001 */
			guchar *br1;

			/* skip branch if C1=1 */
			br1 = code;
			x86_branch8 (code, X86_CC_P, 0, FALSE);
			/* branch if (C0 | C3) = 1 */
			EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
			amd64_patch (br1, code);
			break;
		}
		case OP_FBGE_UN:
			/* Branch if C013 == 000 */
			EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
			break;
		case OP_FBLE: {
			/* Branch if C013=000 or 100 */
			guchar *br1;

			/* skip branch if C1=1 */
			br1 = code;
			x86_branch8 (code, X86_CC_P, 0, FALSE);
			/* branch if C0=0 */
			EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
			amd64_patch (br1, code);
			break;
		}
		case OP_FBLE_UN:
			/* Branch if C013 != 001 */
			EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
			EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
			break;
		case OP_CKFINITE:
			/* Transfer value to the fp stack */
			amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
			amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
			amd64_fld_membase (code, AMD64_RSP, 0, TRUE);

			amd64_push_reg (code, AMD64_RAX);
			amd64_fxam (code);
			amd64_fnstsw (code);
			amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0x4100);
			amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
			amd64_pop_reg (code, AMD64_RAX);
			amd64_fstp (code, 0);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
			amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
			break;
		case OP_TLS_GET: {
			code = mono_amd64_emit_tls_get (code, ins->dreg, ins->inst_offset);
			break;
		}
		case OP_MEMORY_BARRIER: {
			switch (ins->backend.memory_barrier_kind) {
			case StoreLoadBarrier:
			case FullBarrier:
				/* http://blogs.sun.com/dave/resource/NHM-Pipeline-Blog-V2.txt */
				x86_prefix (code, X86_LOCK_PREFIX);
				amd64_alu_membase_imm (code, X86_ADD, AMD64_RSP, 0, 0);
				break;
			}
			break;
		}
		case OP_ATOMIC_ADD_I4:
		case OP_ATOMIC_ADD_I8: {
			int dreg = ins->dreg;
			guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;

			if (dreg == ins->inst_basereg)
				dreg = AMD64_R11;

			if (dreg != ins->sreg2)
				amd64_mov_reg_reg (code, ins->dreg, ins->sreg2, size);

			x86_prefix (code, X86_LOCK_PREFIX);
			amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);

			if (dreg != ins->dreg)
				amd64_mov_reg_reg (code, ins->dreg, dreg, size);

			break;
		}
		case OP_ATOMIC_ADD_NEW_I4:
		case OP_ATOMIC_ADD_NEW_I8: {
			int dreg = ins->dreg;
			guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;

			if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
				dreg = AMD64_R11;

			amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
			amd64_prefix (code, X86_LOCK_PREFIX);
			amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
			/* dreg contains the old value, add with sreg2 value */
			amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);

			if (ins->dreg != dreg)
				amd64_mov_reg_reg (code, ins->dreg, dreg, size);

			break;
		}
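		/*
		 * Sketch of the atomic-add pattern above: `lock xadd [mem], reg`
		 * atomically stores mem + reg and leaves the old value of mem in reg.
		 * The plain ATOMIC_ADD variant therefore returns the previous value,
		 * while the _NEW variant re-adds sreg2 afterwards to produce the
		 * updated value.
		 */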
		case OP_ATOMIC_EXCHANGE_I4:
		case OP_ATOMIC_EXCHANGE_I8: {
			guint32 size;
			guchar *br [2];
			int sreg2 = ins->sreg2;
			int breg = ins->inst_basereg;
			gboolean need_push = FALSE, rdx_pushed = FALSE;

			if (ins->opcode == OP_ATOMIC_EXCHANGE_I8)
				size = 8;
			else
				size = 4;

			/*
			 * See http://msdn.microsoft.com/en-us/magazine/cc302329.aspx for
			 * an explanation of how this works.
			 */

			/* cmpxchg uses eax as comperand, need to make sure we can use it
			 * hack to overcome limits in x86 reg allocator
			 * (req: dreg == eax and sreg2 != eax and breg != eax)
			 */
			g_assert (ins->dreg == AMD64_RAX);

			if (breg == AMD64_RAX && ins->sreg2 == AMD64_RAX)
				/* Highly unlikely, but possible */
				need_push = TRUE;

			/* The pushes invalidate rsp */
			if ((breg == AMD64_RAX) || need_push) {
				amd64_mov_reg_reg (code, AMD64_R11, breg, 8);
				breg = AMD64_R11;
			}

			/* We need the EAX reg for the comparand */
			if (ins->sreg2 == AMD64_RAX) {
				if (breg != AMD64_R11) {
					amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
					sreg2 = AMD64_R11;
				} else {
					g_assert (need_push);
					amd64_push_reg (code, AMD64_RDX);
					amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RAX, size);
					sreg2 = AMD64_RDX;
					rdx_pushed = TRUE;
				}
			}

			amd64_mov_reg_membase (code, AMD64_RAX, breg, ins->inst_offset, size);

			br [0] = code; amd64_prefix (code, X86_LOCK_PREFIX);
			amd64_cmpxchg_membase_reg_size (code, breg, ins->inst_offset, sreg2, size);
			br [1] = code; amd64_branch8 (code, X86_CC_NE, -1, FALSE);
			amd64_patch (br [1], br [0]);

			if (rdx_pushed)
				amd64_pop_reg (code, AMD64_RDX);

			break;
		}
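		/*
		 * The exchange above is the classic compare-and-swap loop (C sketch,
		 * assuming a word-sized location):
		 *
		 *   do {
		 *       old = *addr;                    // mov rax, [breg + offset]
		 *   } while (!CAS (addr, old, new));    // lock cmpxchg; jne back to the load
		 *   return old;                         // left in rax (ins->dreg)
		 *
		 * The register shuffling before the loop only exists to keep rax free
		 * for cmpxchg's implicit comparand.
		 */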
		case OP_ATOMIC_CAS_I4:
		case OP_ATOMIC_CAS_I8: {
			guint32 size;

			if (ins->opcode == OP_ATOMIC_CAS_I8)
				size = 8;
			else
				size = 4;

			/*
			 * See http://msdn.microsoft.com/en-us/magazine/cc302329.aspx for
			 * an explanation of how this works.
			 */
			g_assert (ins->sreg3 == AMD64_RAX);
			g_assert (ins->sreg1 != AMD64_RAX);
			g_assert (ins->sreg1 != ins->sreg2);

			amd64_prefix (code, X86_LOCK_PREFIX);
			amd64_cmpxchg_membase_reg_size (code, ins->sreg1, ins->inst_offset, ins->sreg2, size);

			if (ins->dreg != AMD64_RAX)
				amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);

			break;
		}
		case OP_CARD_TABLE_WBARRIER: {
			int ptr = ins->sreg1;
			int value = ins->sreg2;
			guchar *br;
			int nursery_shift, card_table_shift;
			gpointer card_table_mask;
			size_t nursery_size;

			gpointer card_table = mono_gc_get_card_table (&card_table_shift, &card_table_mask);
			guint64 nursery_start = (guint64)mono_gc_get_nursery (&nursery_shift, &nursery_size);
			guint64 shifted_nursery_start = nursery_start >> nursery_shift;

			/*If either point to the stack we can simply avoid the WB. This happens due to
			 * optimizations revealing a stack store that was not visible when op_cardtable was emited.
			 */
			if (ins->sreg1 == AMD64_RSP || ins->sreg2 == AMD64_RSP)
				continue;

			/*
			 * We need one register we can clobber, we choose EDX and make sreg1
			 * fixed EAX to work around limitations in the local register allocator.
			 * sreg2 might get allocated to EDX, but that is not a problem since
			 * we use it before clobbering EDX.
			 */
			g_assert (ins->sreg1 == AMD64_RAX);

			/*
			 * This is the code we produce:
			 *
			 *   edx = value
			 *   edx >>= nursery_shift
			 *   cmp edx, (nursery_start >> nursery_shift)
			 *   jne done
			 *   edx = ptr
			 *   edx >>= card_table_shift
			 *   edx += cardtable
			 *   mov byte [edx], 1
			 * done:
			 */
			if (mono_gc_card_table_nursery_check ()) {
				if (value != AMD64_RDX)
					amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
				amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
				if (shifted_nursery_start >> 31) {
					/*
					 * The value we need to compare against is 64 bits, so we need
					 * another spare register. We use RBX, which we save and
					 * restore.
					 */
					amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
					amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
					amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
					amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
				} else {
					amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
				}
				br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
			}
			amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8);
			amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift);
			if (card_table_mask)
				amd64_alu_reg_imm (code, X86_AND, AMD64_RDX, (guint32)(guint64)card_table_mask);

			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, card_table);
			amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0);

			amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1);
			if (mono_gc_card_table_nursery_check ())
				x86_patch (br, code);

			break;
		}
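		/*
		 * Equivalent C for the barrier emitted above (sketch only; the real
		 * card table base is patched in via MONO_PATCH_INFO_GC_CARD_TABLE_ADDR):
		 *
		 *   if (!nursery_check || (value >> nursery_shift) == shifted_nursery_start)
		 *       *(card_table + ((ptr >> card_table_shift) & card_table_mask)) = 1;
		 */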
5776 #ifdef MONO_ARCH_SIMD_INTRINSICS
5777 /* TODO: Some of these IR opcodes are marked as no clobber when they indeed do. */
5779 amd64_sse_addps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5782 amd64_sse_divps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5785 amd64_sse_mulps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5788 amd64_sse_subps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5791 amd64_sse_maxps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5794 amd64_sse_minps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5797 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 7);
5798 amd64_sse_cmpps_reg_reg_imm (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
5801 amd64_sse_andps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5804 amd64_sse_andnps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5807 amd64_sse_orps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5810 amd64_sse_xorps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5813 amd64_sse_sqrtps_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
5816 amd64_sse_rsqrtps_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
5819 amd64_sse_rcpps_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
5822 amd64_sse_addsubps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5825 amd64_sse_haddps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5828 amd64_sse_hsubps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5831 amd64_sse_movshdup_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
5834 amd64_sse_movsldup_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
5837 case OP_PSHUFLEW_HIGH
:
5838 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 0xFF);
5839 amd64_sse_pshufhw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_c0
);
5841 case OP_PSHUFLEW_LOW
:
5842 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 0xFF);
5843 amd64_sse_pshuflw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_c0
);
5846 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 0xFF);
5847 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_c0
);
5850 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 0xFF);
5851 amd64_sse_shufps_reg_reg_imm (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
5854 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 0x3);
5855 amd64_sse_shufpd_reg_reg_imm (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
5859 amd64_sse_addpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5862 amd64_sse_divpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5865 amd64_sse_mulpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5868 amd64_sse_subpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5871 amd64_sse_maxpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5874 amd64_sse_minpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5877 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 7);
5878 amd64_sse_cmppd_reg_reg_imm (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
5881 amd64_sse_andpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5884 amd64_sse_andnpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5887 amd64_sse_orpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5890 amd64_sse_xorpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5893 amd64_sse_sqrtpd_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
5896 amd64_sse_addsubpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5899 amd64_sse_haddpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5902 amd64_sse_hsubpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5905 amd64_sse_movddup_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
5908 case OP_EXTRACT_MASK
:
5909 amd64_sse_pmovmskb_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
5913 amd64_sse_pand_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5916 amd64_sse_por_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5919 amd64_sse_pxor_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5923 amd64_sse_paddb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5926 amd64_sse_paddw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5929 amd64_sse_paddd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5932 amd64_sse_paddq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5936 amd64_sse_psubb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5939 amd64_sse_psubw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5942 amd64_sse_psubd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5945 amd64_sse_psubq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5949 amd64_sse_pmaxub_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5952 amd64_sse_pmaxuw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5955 amd64_sse_pmaxud_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5959 amd64_sse_pmaxsb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5962 amd64_sse_pmaxsw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5965 amd64_sse_pmaxsd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5969 amd64_sse_pavgb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5972 amd64_sse_pavgw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5976 amd64_sse_pminub_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5979 amd64_sse_pminuw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5982 amd64_sse_pminud_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5986 amd64_sse_pminsb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5989 amd64_sse_pminsw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5992 amd64_sse_pminsd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5996 amd64_sse_pcmpeqb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
5999 amd64_sse_pcmpeqw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6002 amd64_sse_pcmpeqd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6005 amd64_sse_pcmpeqq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6009 amd64_sse_pcmpgtb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6012 amd64_sse_pcmpgtw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6015 amd64_sse_pcmpgtd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6018 amd64_sse_pcmpgtq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6021 case OP_PSUM_ABS_DIFF
:
6022 amd64_sse_psadbw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6025 case OP_UNPACK_LOWB
:
6026 amd64_sse_punpcklbw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6028 case OP_UNPACK_LOWW
:
6029 amd64_sse_punpcklwd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6031 case OP_UNPACK_LOWD
:
6032 amd64_sse_punpckldq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6034 case OP_UNPACK_LOWQ
:
6035 amd64_sse_punpcklqdq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6037 case OP_UNPACK_LOWPS
:
6038 amd64_sse_unpcklps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6040 case OP_UNPACK_LOWPD
:
6041 amd64_sse_unpcklpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6044 case OP_UNPACK_HIGHB
:
6045 amd64_sse_punpckhbw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6047 case OP_UNPACK_HIGHW
:
6048 amd64_sse_punpckhwd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6050 case OP_UNPACK_HIGHD
:
6051 amd64_sse_punpckhdq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6053 case OP_UNPACK_HIGHQ
:
6054 amd64_sse_punpckhqdq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6056 case OP_UNPACK_HIGHPS
:
6057 amd64_sse_unpckhps_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6059 case OP_UNPACK_HIGHPD
:
6060 amd64_sse_unpckhpd_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6064 amd64_sse_packsswb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6067 amd64_sse_packssdw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6070 amd64_sse_packuswb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6073 amd64_sse_packusdw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6076 case OP_PADDB_SAT_UN
:
6077 amd64_sse_paddusb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6079 case OP_PSUBB_SAT_UN
:
6080 amd64_sse_psubusb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6082 case OP_PADDW_SAT_UN
:
6083 amd64_sse_paddusw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6085 case OP_PSUBW_SAT_UN
:
6086 amd64_sse_psubusw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6090 amd64_sse_paddsb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6093 amd64_sse_psubsb_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6096 amd64_sse_paddsw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6099 amd64_sse_psubsw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6103 amd64_sse_pmullw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6106 amd64_sse_pmulld_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6109 amd64_sse_pmuludq_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6111 case OP_PMULW_HIGH_UN
:
6112 amd64_sse_pmulhuw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6115 amd64_sse_pmulhw_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
6119 amd64_sse_psrlw_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
6122 amd64_sse_psrlw_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6126 amd64_sse_psraw_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
6129 amd64_sse_psraw_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6133 amd64_sse_psllw_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
6136 amd64_sse_psllw_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6140 amd64_sse_psrld_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
6143 amd64_sse_psrld_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6147 amd64_sse_psrad_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
6150 amd64_sse_psrad_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6154 amd64_sse_pslld_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
6157 amd64_sse_pslld_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6161 amd64_sse_psrlq_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
6164 amd64_sse_psrlq_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6167 /*TODO: This is appart of the sse spec but not added
6169 amd64_sse_psraq_reg_imm (code, ins->dreg, ins->inst_imm);
6172 amd64_sse_psraq_reg_reg (code, ins->dreg, ins->sreg2);
6177 amd64_sse_psllq_reg_imm (code
, ins
->dreg
, ins
->inst_imm
);
6180 amd64_sse_psllq_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6183 amd64_sse_cvtdq2pd_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6186 amd64_sse_cvtdq2ps_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6189 amd64_sse_cvtpd2dq_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6192 amd64_sse_cvtpd2ps_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6195 amd64_sse_cvtps2dq_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6198 amd64_sse_cvtps2pd_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6201 amd64_sse_cvttpd2dq_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6204 amd64_sse_cvttps2dq_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6208 amd64_movd_xreg_reg_size (code
, ins
->dreg
, ins
->sreg1
, 4);
6211 amd64_movd_reg_xreg_size (code
, ins
->dreg
, ins
->sreg1
, 4);
6215 amd64_movhlps_reg_reg (code
, AMD64_XMM15
, ins
->sreg1
);
6216 amd64_movd_reg_xreg_size (code
, ins
->dreg
, AMD64_XMM15
, 8);
6218 amd64_movd_reg_xreg_size (code
, ins
->dreg
, ins
->sreg1
, 8);
6223 amd64_movd_reg_xreg_size (code
, ins
->dreg
, ins
->sreg1
, 4);
6225 amd64_shift_reg_imm (code
, X86_SHR
, ins
->dreg
, ins
->inst_c0
* 8);
6226 amd64_widen_reg (code
, ins
->dreg
, ins
->dreg
, ins
->opcode
== OP_EXTRACT_I1
, FALSE
);
6230 /*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
6232 amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/
6233 amd64_sse_pextrw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_c0
);
6234 amd64_widen_reg_size (code
, ins
->dreg
, ins
->dreg
, ins
->opcode
== OP_EXTRACT_I2
, TRUE
, 4);
6238 amd64_movhlps_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6240 amd64_sse_movsd_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6243 amd64_sse_pinsrw_reg_reg_imm (code
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
6245 case OP_EXTRACTX_U2
:
6246 amd64_sse_pextrw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_c0
);
6248 case OP_INSERTX_U1_SLOW
:
/* sreg1 is the extracted ireg (scratch),
   sreg2 is the ireg to be inserted (scratch),
   dreg is the xreg that receives the value */
6253 /*clear the bits from the extracted word*/
6254 amd64_alu_reg_imm (code
, X86_AND
, ins
->sreg1
, ins
->inst_c0
& 1 ? 0x00FF : 0xFF00);
6255 /*shift the value to insert if needed*/
6256 if (ins
->inst_c0
& 1)
6257 amd64_shift_reg_imm_size (code
, X86_SHL
, ins
->sreg2
, 8, 4);
6258 /*join them together*/
6259 amd64_alu_reg_reg (code
, X86_OR
, ins
->sreg1
, ins
->sreg2
);
6260 amd64_sse_pinsrw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_c0
/ 2);
6262 case OP_INSERTX_I4_SLOW
:
6263 amd64_sse_pinsrw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg2
, ins
->inst_c0
* 2);
6264 amd64_shift_reg_imm (code
, X86_SHR
, ins
->sreg2
, 16);
6265 amd64_sse_pinsrw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg2
, ins
->inst_c0
* 2 + 1);
6267 case OP_INSERTX_I8_SLOW
:
6268 amd64_movd_xreg_reg_size(code
, AMD64_XMM15
, ins
->sreg2
, 8);
6270 amd64_movlhps_reg_reg (code
, ins
->dreg
, AMD64_XMM15
);
6272 amd64_sse_movsd_reg_reg (code
, ins
->dreg
, AMD64_XMM15
);
6275 case OP_INSERTX_R4_SLOW
:
6276 switch (ins
->inst_c0
) {
6278 amd64_sse_cvtsd2ss_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6281 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, mono_simd_shuffle_mask(1, 0, 2, 3));
6282 amd64_sse_cvtsd2ss_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6283 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, mono_simd_shuffle_mask(1, 0, 2, 3));
6286 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, mono_simd_shuffle_mask(2, 1, 0, 3));
6287 amd64_sse_cvtsd2ss_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6288 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, mono_simd_shuffle_mask(2, 1, 0, 3));
6291 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, mono_simd_shuffle_mask(3, 1, 2, 0));
6292 amd64_sse_cvtsd2ss_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6293 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, mono_simd_shuffle_mask(3, 1, 2, 0));
6297 case OP_INSERTX_R8_SLOW
:
6299 amd64_movlhps_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6301 amd64_sse_movsd_reg_reg (code
, ins
->dreg
, ins
->sreg2
);
6303 case OP_STOREX_MEMBASE_REG
:
6304 case OP_STOREX_MEMBASE
:
6305 amd64_sse_movups_membase_reg (code
, ins
->dreg
, ins
->inst_offset
, ins
->sreg1
);
6307 case OP_LOADX_MEMBASE
:
6308 amd64_sse_movups_reg_membase (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_offset
);
6310 case OP_LOADX_ALIGNED_MEMBASE
:
6311 amd64_sse_movaps_reg_membase (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_offset
);
6313 case OP_STOREX_ALIGNED_MEMBASE_REG
:
6314 amd64_sse_movaps_membase_reg (code
, ins
->dreg
, ins
->inst_offset
, ins
->sreg1
);
6316 case OP_STOREX_NTA_MEMBASE_REG
:
6317 amd64_sse_movntps_reg_membase (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_offset
);
6319 case OP_PREFETCH_MEMBASE
:
6320 amd64_sse_prefetch_reg_membase (code
, ins
->backend
.arg_info
, ins
->sreg1
, ins
->inst_offset
);
6324 /*FIXME the peephole pass should have killed this*/
6325 if (ins
->dreg
!= ins
->sreg1
)
6326 amd64_sse_movaps_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6329 amd64_sse_pxor_reg_reg (code
, ins
->dreg
, ins
->dreg
);
6331 case OP_ICONV_TO_R8_RAW
:
6332 amd64_movd_xreg_reg_size (code
, ins
->dreg
, ins
->sreg1
, 4);
6333 amd64_sse_cvtss2sd_reg_reg (code
, ins
->dreg
, ins
->dreg
);
6336 case OP_FCONV_TO_R8_X
:
6337 amd64_sse_movsd_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6340 case OP_XCONV_R8_TO_I4
:
6341 amd64_sse_cvttsd2si_reg_xreg_size (code
, ins
->dreg
, ins
->sreg1
, 4);
6342 switch (ins
->backend
.source_opcode
) {
6343 case OP_FCONV_TO_I1
:
6344 amd64_widen_reg (code
, ins
->dreg
, ins
->dreg
, TRUE
, FALSE
);
6346 case OP_FCONV_TO_U1
:
6347 amd64_widen_reg (code
, ins
->dreg
, ins
->dreg
, FALSE
, FALSE
);
6349 case OP_FCONV_TO_I2
:
6350 amd64_widen_reg (code
, ins
->dreg
, ins
->dreg
, TRUE
, TRUE
);
6352 case OP_FCONV_TO_U2
:
6353 amd64_widen_reg (code
, ins
->dreg
, ins
->dreg
, FALSE
, TRUE
);
6359 amd64_sse_pinsrw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, 0);
6360 amd64_sse_pinsrw_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, 1);
6361 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, 0);
6364 amd64_movd_xreg_reg_size (code
, ins
->dreg
, ins
->sreg1
, 4);
6365 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, 0);
6368 amd64_movd_xreg_reg_size (code
, ins
->dreg
, ins
->sreg1
, 8);
6369 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, 0x44);
6372 amd64_sse_movsd_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6373 amd64_sse_cvtsd2ss_reg_reg (code
, ins
->dreg
, ins
->dreg
);
6374 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, 0);
6377 amd64_sse_movsd_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
6378 amd64_sse_pshufd_reg_reg_imm (code
, ins
->dreg
, ins
->dreg
, 0x44);
6381 case OP_LIVERANGE_START
: {
6382 if (cfg
->verbose_level
> 1)
6383 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6384 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
6387 case OP_LIVERANGE_END
: {
6388 if (cfg
->verbose_level
> 1)
6389 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6390 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
6393 case OP_NACL_GC_SAFE_POINT
: {
6394 #if defined(__native_client_codegen__)
6395 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, (gpointer
)mono_nacl_gc
, TRUE
);
6399 case OP_GC_LIVENESS_DEF
:
6400 case OP_GC_LIVENESS_USE
:
6401 case OP_GC_PARAM_SLOT_LIVENESS_DEF
:
6402 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6404 case OP_GC_SPILL_SLOT_LIVENESS_DEF
:
6405 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6406 bb
->spill_slot_defs
= g_slist_prepend_mempool (cfg
->mempool
, bb
->spill_slot_defs
, ins
);
6409 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
6410 g_assert_not_reached ();
6413 if ((code
- cfg
->native_code
- offset
) > max_len
) {
6414 #if !defined(__native_client_codegen__)
6415 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %ld)",
6416 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
6417 g_assert_not_reached ();
6422 last_offset
= offset
;
6425 cfg
->code_len
= code
- cfg
->native_code
;
#endif /* DISABLE_JIT */

mono_arch_register_lowlevel_calls (void)
    /* The signature doesn't matter */
    mono_register_jit_icall (mono_amd64_throw_exception, "mono_amd64_throw_exception", mono_create_icall_signature ("void"), TRUE);
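/*
 * The registration presumably exists so that generated throw sequences and AOT
 * images can resolve mono_amd64_throw_exception by name; as noted above, the
 * "void" signature is only a placeholder, since the helper is reached through
 * hand-built code rather than a normal managed call.
 */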
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
    MonoJumpInfo *patch_info;
    gboolean compile_aot = !run_cctors;

    for (patch_info = ji; patch_info; patch_info = patch_info->next) {
        unsigned char *ip = patch_info->ip.i + code;
        unsigned char *target;

        target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);

        switch (patch_info->type) {
        case MONO_PATCH_INFO_BB:
        case MONO_PATCH_INFO_LABEL:
            /* No need to patch these */

        switch (patch_info->type) {
        case MONO_PATCH_INFO_NONE:
        case MONO_PATCH_INFO_METHOD_REL:
        case MONO_PATCH_INFO_R8:
        case MONO_PATCH_INFO_R4:
            g_assert_not_reached ();
        case MONO_PATCH_INFO_BB:
         * Debug code to help track down problems where the target of a near call is
        if (amd64_is_near_call (ip)) {
            gint64 disp = (guint8*)target - (guint8*)ip;

            if (!amd64_is_imm32 (disp)) {
                printf ("TYPE: %d\n", patch_info->type);
                switch (patch_info->type) {
                case MONO_PATCH_INFO_INTERNAL_METHOD:
                    printf ("V: %s\n", patch_info->data.name);
                case MONO_PATCH_INFO_METHOD_JUMP:
                case MONO_PATCH_INFO_METHOD:
                    printf ("V: %s\n", patch_info->data.method->name);
        amd64_patch (ip, (gpointer)target);
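/*
 * amd64_patch () rewrites the displacement/immediate at IP with the resolved
 * target. The debug block above exists because a near call on AMD64 only has a
 * signed 32-bit displacement: if target - ip does not fit in 32 bits the patch
 * cannot succeed, and the printfs help identify which patch type produced the
 * out-of-range target.
 */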
get_max_epilog_size (MonoCompile *cfg)
    int max_epilog_size = 16;

    if (cfg->method->save_lmf)
        max_epilog_size += 256;

    if (mono_jit_trace_calls != NULL)
        max_epilog_size += 50;

    if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
        max_epilog_size += 50;

    max_epilog_size += (AMD64_NREG * 2);

    return max_epilog_size;

 * This macro is used for testing whether the unwinder works correctly at every point
 * where an async exception can happen.
/* This will generate a SIGSEGV at the given point in the code */
#define async_exc_point(code) do { \
    if (mono_inject_async_exc_method && mono_method_desc_full_match (mono_inject_async_exc_method, cfg->method)) { \
        if (cfg->arch.async_point_count == mono_inject_async_exc_pos) \
            amd64_mov_reg_mem (code, AMD64_RAX, 0, 4); \
        cfg->arch.async_point_count ++; \
6536 mono_arch_emit_prolog (MonoCompile
*cfg
)
6538 MonoMethod
*method
= cfg
->method
;
6540 MonoMethodSignature
*sig
;
6542 int alloc_size
, pos
, i
, cfa_offset
, quad
, max_epilog_size
;
6545 MonoInst
*lmf_var
= cfg
->arch
.lmf_var
;
6546 gboolean args_clobbered
= FALSE
;
6547 gboolean trace
= FALSE
;
6548 #ifdef __native_client_codegen__
6549 guint alignment_check
;
6552 cfg
->code_size
= MAX (cfg
->header
->code_size
* 4, 10240);
6554 #if defined(__default_codegen__)
6555 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
6556 #elif defined(__native_client_codegen__)
6557 /* native_code_alloc is not 32-byte aligned, native_code is. */
6558 cfg
->native_code_alloc
= g_malloc (cfg
->code_size
+ kNaClAlignment
);
6560 /* Align native_code to next nearest kNaclAlignment byte. */
6561 cfg
->native_code
= (uintptr_t)cfg
->native_code_alloc
+ kNaClAlignment
;
6562 cfg
->native_code
= (uintptr_t)cfg
->native_code
& ~kNaClAlignmentMask
;
6564 code
= cfg
->native_code
;
6566 alignment_check
= (guint
)cfg
->native_code
& kNaClAlignmentMask
;
6567 g_assert (alignment_check
== 0);
6570 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
6573 /* Amount of stack space allocated by register saving code */
6576 /* Offset between RSP and the CFA */
6580 * The prolog consists of the following parts:
6582 * - push rbp, mov rbp, rsp
6583 * - save callee saved regs using pushes
6585 * - save rgctx if needed
6586 * - save lmf if needed
6589 * - save rgctx if needed
6590 * - save lmf if needed
6591 * - save callee saved regs using moves
6596 mono_emit_unwind_op_def_cfa (cfg
, code
, AMD64_RSP
, 8);
6597 // IP saved at CFA - 8
6598 mono_emit_unwind_op_offset (cfg
, code
, AMD64_RIP
, -cfa_offset
);
6599 async_exc_point (code
);
6600 mini_gc_set_slot_type_from_cfa (cfg
, -cfa_offset
, SLOT_NOREF
);
6602 if (!cfg
->arch
.omit_fp
) {
6603 amd64_push_reg (code
, AMD64_RBP
);
6605 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, cfa_offset
);
6606 mono_emit_unwind_op_offset (cfg
, code
, AMD64_RBP
, - cfa_offset
);
6607 async_exc_point (code
);
6609 mono_arch_unwindinfo_add_push_nonvol (&cfg
->arch
.unwindinfo
, cfg
->native_code
, code
, AMD64_RBP
);
6611 /* These are handled automatically by the stack marking code */
6612 mini_gc_set_slot_type_from_cfa (cfg
, -cfa_offset
, SLOT_NOREF
);
6614 amd64_mov_reg_reg (code
, AMD64_RBP
, AMD64_RSP
, sizeof(mgreg_t
));
6615 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, AMD64_RBP
);
6616 async_exc_point (code
);
6618 mono_arch_unwindinfo_add_set_fpreg (&cfg
->arch
.unwindinfo
, cfg
->native_code
, code
, AMD64_RBP
);
6622 /* Save callee saved registers */
6623 if (!cfg
->arch
.omit_fp
&& !method
->save_lmf
) {
6624 int offset
= cfa_offset
;
6626 for (i
= 0; i
< AMD64_NREG
; ++i
)
6627 if (AMD64_IS_CALLEE_SAVED_REG (i
) && (cfg
->used_int_regs
& (1 << i
))) {
6628 amd64_push_reg (code
, i
);
6629 pos
+= 8; /* AMD64 push inst is always 8 bytes, no way to change it */
6631 mono_emit_unwind_op_offset (cfg
, code
, i
, - offset
);
6632 async_exc_point (code
);
6634 /* These are handled automatically by the stack marking code */
6635 mini_gc_set_slot_type_from_cfa (cfg
, - offset
, SLOT_NOREF
);
6639 /* The param area is always at offset 0 from sp */
6640 /* This needs to be allocated here, since it has to come after the spill area */
6641 if (cfg
->arch
.no_pushes
&& cfg
->param_area
) {
6642 if (cfg
->arch
.omit_fp
)
6644 g_assert_not_reached ();
6645 cfg
->stack_offset
+= ALIGN_TO (cfg
->param_area
, sizeof(mgreg_t
));
6648 if (cfg
->arch
.omit_fp
) {
6650 * On enter, the stack is misaligned by the pushing of the return
6651 * address. It is either made aligned by the pushing of %rbp, or by
6654 alloc_size
= ALIGN_TO (cfg
->stack_offset
, 8);
6655 if ((alloc_size
% 16) == 0) {
6657 /* Mark the padding slot as NOREF */
6658 mini_gc_set_slot_type_from_cfa (cfg
, -cfa_offset
- sizeof (mgreg_t
), SLOT_NOREF
);
6661 alloc_size
= ALIGN_TO (cfg
->stack_offset
, MONO_ARCH_FRAME_ALIGNMENT
);
6662 if (cfg
->stack_offset
!= alloc_size
) {
6663 /* Mark the padding slot as NOREF */
6664 mini_gc_set_slot_type_from_fp (cfg
, -alloc_size
+ cfg
->param_area
, SLOT_NOREF
);
6666 cfg
->arch
.sp_fp_offset
= alloc_size
;
6670 cfg
->arch
.stack_alloc_size
= alloc_size
;
6672 /* Allocate stack frame */
/* See mono_emit_stack_alloc */
#if defined(HOST_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
/*FIXME handle unbounded code expansion, we should use a loop in case of more than X iterations*/
guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 10; /*10 is the max size of amd64_alu_reg_imm + amd64_test_membase_reg*/
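/*
 * The loop below touches the newly allocated stack area one page (0x1000 bytes)
 * at a time: each amd64_test_membase_reg acts as a probe so the guard page is
 * hit in order, letting the OS (or the SIGSEGV-on-altstack handler) grow the
 * stack safely instead of faulting far below the guard page.
 */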
6679 guint32 offset
= code
- cfg
->native_code
;
6680 if (G_UNLIKELY (required_code_size
>= (cfg
->code_size
- offset
))) {
6681 while (required_code_size
>= (cfg
->code_size
- offset
))
6682 cfg
->code_size
*= 2;
6683 cfg
->native_code
= mono_realloc_native_code (cfg
);
6684 code
= cfg
->native_code
+ offset
;
6685 cfg
->stat_code_reallocs
++;
6688 while (remaining_size
>= 0x1000) {
6689 amd64_alu_reg_imm (code
, X86_SUB
, AMD64_RSP
, 0x1000);
6690 if (cfg
->arch
.omit_fp
) {
6691 cfa_offset
+= 0x1000;
6692 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, cfa_offset
);
6694 async_exc_point (code
);
6696 if (cfg
->arch
.omit_fp
)
6697 mono_arch_unwindinfo_add_alloc_stack (&cfg
->arch
.unwindinfo
, cfg
->native_code
, code
, 0x1000);
6700 amd64_test_membase_reg (code
, AMD64_RSP
, 0, AMD64_RSP
);
6701 remaining_size
-= 0x1000;
6703 if (remaining_size
) {
6704 amd64_alu_reg_imm (code
, X86_SUB
, AMD64_RSP
, remaining_size
);
6705 if (cfg
->arch
.omit_fp
) {
6706 cfa_offset
+= remaining_size
;
6707 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, cfa_offset
);
6708 async_exc_point (code
);
6711 if (cfg
->arch
.omit_fp
)
6712 mono_arch_unwindinfo_add_alloc_stack (&cfg
->arch
.unwindinfo
, cfg
->native_code
, code
, remaining_size
);
6716 amd64_alu_reg_imm (code
, X86_SUB
, AMD64_RSP
, alloc_size
);
6717 if (cfg
->arch
.omit_fp
) {
6718 cfa_offset
+= alloc_size
;
6719 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, cfa_offset
);
6720 async_exc_point (code
);
6725 /* Stack alignment check */
6728 amd64_mov_reg_reg (code
, AMD64_RAX
, AMD64_RSP
, 8);
6729 amd64_alu_reg_imm (code
, X86_AND
, AMD64_RAX
, 0xf);
6730 amd64_alu_reg_imm (code
, X86_CMP
, AMD64_RAX
, 0);
6731 x86_branch8 (code
, X86_CC_EQ
, 2, FALSE
);
6732 amd64_breakpoint (code
);
6736 #ifndef TARGET_WIN32
6737 if (mini_get_debug_options ()->init_stacks
) {
6738 /* Fill the stack frame with a dummy value to force deterministic behavior */
6740 /* Save registers to the red zone */
6741 amd64_mov_membase_reg (code
, AMD64_RSP
, -8, AMD64_RDI
, 8);
6742 amd64_mov_membase_reg (code
, AMD64_RSP
, -16, AMD64_RCX
, 8);
6744 amd64_mov_reg_imm (code
, AMD64_RAX
, 0x2a2a2a2a2a2a2a2a);
6745 amd64_mov_reg_imm (code
, AMD64_RCX
, alloc_size
/ 8);
6746 amd64_mov_reg_reg (code
, AMD64_RDI
, AMD64_RSP
, 8);
6749 #if defined(__default_codegen__)
6750 amd64_prefix (code
, X86_REP_PREFIX
);
6752 #elif defined(__native_client_codegen__)
6753 /* NaCl stos pseudo-instruction */
6754 amd64_codegen_pre (code
);
6755 /* First, clear the upper 32 bits of RDI (mov %edi, %edi) */
6756 amd64_mov_reg_reg (code
, AMD64_RDI
, AMD64_RDI
, 4);
6757 /* Add %r15 to %rdi using lea, condition flags unaffected. */
6758 amd64_lea_memindex_size (code
, AMD64_RDI
, AMD64_R15
, 0, AMD64_RDI
, 0, 8);
6759 amd64_prefix (code
, X86_REP_PREFIX
);
6761 amd64_codegen_post (code
);
6762 #endif /* __native_client_codegen__ */
6764 amd64_mov_reg_membase (code
, AMD64_RDI
, AMD64_RSP
, -8, 8);
6765 amd64_mov_reg_membase (code
, AMD64_RCX
, AMD64_RSP
, -16, 8);
6770 if (method
->save_lmf
) {
6771 code
= emit_setup_lmf (cfg
, code
, lmf_var
->inst_offset
, cfa_offset
);
6774 /* Save callee saved registers */
6775 if (cfg
->arch
.omit_fp
&& !method
->save_lmf
) {
6776 gint32 save_area_offset
= cfg
->arch
.reg_save_area_offset
;
6778 /* Save caller saved registers after sp is adjusted */
6779 /* The registers are saved at the bottom of the frame */
6780 /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
6781 for (i
= 0; i
< AMD64_NREG
; ++i
)
6782 if (AMD64_IS_CALLEE_SAVED_REG (i
) && (cfg
->used_int_regs
& (1 << i
))) {
6783 amd64_mov_membase_reg (code
, AMD64_RSP
, save_area_offset
, i
, 8);
6784 mono_emit_unwind_op_offset (cfg
, code
, i
, - (cfa_offset
- save_area_offset
));
6786 /* These are handled automatically by the stack marking code */
6787 mini_gc_set_slot_type_from_cfa (cfg
, - (cfa_offset
- save_area_offset
), SLOT_NOREF
);
6789 save_area_offset
+= 8;
6790 async_exc_point (code
);
6794 /* store runtime generic context */
6795 if (cfg
->rgctx_var
) {
6796 g_assert (cfg
->rgctx_var
->opcode
== OP_REGOFFSET
&&
6797 (cfg
->rgctx_var
->inst_basereg
== AMD64_RBP
|| cfg
->rgctx_var
->inst_basereg
== AMD64_RSP
));
6799 amd64_mov_membase_reg (code
, cfg
->rgctx_var
->inst_basereg
, cfg
->rgctx_var
->inst_offset
, MONO_ARCH_RGCTX_REG
, sizeof(gpointer
));
6801 mono_add_var_location (cfg
, cfg
->rgctx_var
, TRUE
, MONO_ARCH_RGCTX_REG
, 0, 0, code
- cfg
->native_code
);
6802 mono_add_var_location (cfg
, cfg
->rgctx_var
, FALSE
, cfg
->rgctx_var
->inst_basereg
, cfg
->rgctx_var
->inst_offset
, code
- cfg
->native_code
, 0);
6805 /* compute max_length in order to use short forward jumps */
6806 max_epilog_size
= get_max_epilog_size (cfg
);
6807 if (cfg
->opt
& MONO_OPT_BRANCH
) {
6808 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
6812 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
6814 /* max alignment for loops */
6815 if ((cfg
->opt
& MONO_OPT_LOOP
) && bb_is_loop_start (bb
))
6816 max_length
+= LOOP_ALIGNMENT
;
6817 #ifdef __native_client_codegen__
6818 /* max alignment for native client */
6819 max_length
+= kNaClAlignment
;
6822 MONO_BB_FOR_EACH_INS (bb
, ins
) {
6823 #ifdef __native_client_codegen__
6825 int space_in_block
= kNaClAlignment
-
6826 ((max_length
+ cfg
->code_len
) & kNaClAlignmentMask
);
6827 int max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
6828 if (space_in_block
< max_len
&& max_len
< kNaClAlignment
) {
6829 max_length
+= space_in_block
;
6832 #endif /*__native_client_codegen__*/
6833 max_length
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
6836 /* Take prolog and epilog instrumentation into account */
6837 if (bb
== cfg
->bb_entry
|| bb
== cfg
->bb_exit
)
6838 max_length
+= max_epilog_size
;
6840 bb
->max_length
= max_length
;
6844 sig
= mono_method_signature (method
);
6847 cinfo
= cfg
->arch
.cinfo
;
6849 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
6850 /* Save volatile arguments to the stack */
6851 if (cfg
->vret_addr
&& (cfg
->vret_addr
->opcode
!= OP_REGVAR
))
6852 amd64_mov_membase_reg (code
, cfg
->vret_addr
->inst_basereg
, cfg
->vret_addr
->inst_offset
, cinfo
->ret
.reg
, 8);
6855 /* Keep this in sync with emit_load_volatile_arguments */
6856 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
6857 ArgInfo
*ainfo
= cinfo
->args
+ i
;
6858 gint32 stack_offset
;
6861 ins
= cfg
->args
[i
];
6863 if ((ins
->flags
& MONO_INST_IS_DEAD
) && !trace
)
6864 /* Unused arguments */
6867 if (sig
->hasthis
&& (i
== 0))
6868 arg_type
= &mono_defaults
.object_class
->byval_arg
;
6870 arg_type
= sig
->params
[i
- sig
->hasthis
];
6872 stack_offset
= ainfo
->offset
+ ARGS_OFFSET
;
6874 if (cfg
->globalra
) {
6875 /* All the other moves are done by the register allocator */
6876 switch (ainfo
->storage
) {
6877 case ArgInFloatSSEReg
:
6878 amd64_sse_cvtss2sd_reg_reg (code
, ainfo
->reg
, ainfo
->reg
);
6880 case ArgValuetypeInReg
:
6881 for (quad
= 0; quad
< 2; quad
++) {
6882 switch (ainfo
->pair_storage
[quad
]) {
6884 amd64_mov_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
+ (quad
* sizeof(mgreg_t
)), ainfo
->pair_regs
[quad
], sizeof(mgreg_t
));
6886 case ArgInFloatSSEReg
:
6887 amd64_movss_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
+ (quad
* sizeof(mgreg_t
)), ainfo
->pair_regs
[quad
]);
6889 case ArgInDoubleSSEReg
:
6890 amd64_movsd_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
+ (quad
* sizeof(mgreg_t
)), ainfo
->pair_regs
[quad
]);
6895 g_assert_not_reached ();
6906 /* Save volatile arguments to the stack */
6907 if (ins
->opcode
!= OP_REGVAR
) {
6908 switch (ainfo
->storage
) {
6914 if (stack_offset & 0x1)
6916 else if (stack_offset & 0x2)
6918 else if (stack_offset & 0x4)
6923 amd64_mov_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
, ainfo
->reg
, size
);
6926 * Save the original location of 'this',
6927 * get_generic_info_from_stack_frame () needs this to properly look up
6928 * the argument value during the handling of async exceptions.
6930 if (ins
== cfg
->args
[0]) {
6931 mono_add_var_location (cfg
, ins
, TRUE
, ainfo
->reg
, 0, 0, code
- cfg
->native_code
);
6932 mono_add_var_location (cfg
, ins
, FALSE
, ins
->inst_basereg
, ins
->inst_offset
, code
- cfg
->native_code
, 0);
6936 case ArgInFloatSSEReg
:
6937 amd64_movss_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
, ainfo
->reg
);
6939 case ArgInDoubleSSEReg
:
6940 amd64_movsd_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
, ainfo
->reg
);
6942 case ArgValuetypeInReg
:
6943 for (quad
= 0; quad
< 2; quad
++) {
6944 switch (ainfo
->pair_storage
[quad
]) {
6946 amd64_mov_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
+ (quad
* sizeof(mgreg_t
)), ainfo
->pair_regs
[quad
], sizeof(mgreg_t
));
6948 case ArgInFloatSSEReg
:
6949 amd64_movss_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
+ (quad
* sizeof(mgreg_t
)), ainfo
->pair_regs
[quad
]);
6951 case ArgInDoubleSSEReg
:
6952 amd64_movsd_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
+ (quad
* sizeof(mgreg_t
)), ainfo
->pair_regs
[quad
]);
6957 g_assert_not_reached ();
6961 case ArgValuetypeAddrInIReg
:
6962 if (ainfo
->pair_storage
[0] == ArgInIReg
)
6963 amd64_mov_membase_reg (code
, ins
->inst_left
->inst_basereg
, ins
->inst_left
->inst_offset
, ainfo
->pair_regs
[0], sizeof (gpointer
));
6969 /* Argument allocated to (non-volatile) register */
6970 switch (ainfo
->storage
) {
6972 amd64_mov_reg_reg (code
, ins
->dreg
, ainfo
->reg
, 8);
6975 amd64_mov_reg_membase (code
, ins
->dreg
, AMD64_RBP
, ARGS_OFFSET
+ ainfo
->offset
, 8);
6978 g_assert_not_reached ();
6981 if (ins
== cfg
->args
[0]) {
6982 mono_add_var_location (cfg
, ins
, TRUE
, ainfo
->reg
, 0, 0, code
- cfg
->native_code
);
6983 mono_add_var_location (cfg
, ins
, TRUE
, ins
->dreg
, 0, code
- cfg
->native_code
, 0);
6988 if (method
->save_lmf
) {
6989 code
= emit_save_lmf (cfg
, code
, lmf_var
->inst_offset
, &args_clobbered
);
6993 args_clobbered
= TRUE
;
6994 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
6997 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
6998 args_clobbered
= TRUE
;
7001 * Optimize the common case of the first bblock making a call with the same
7002 * arguments as the method. This works because the arguments are still in their
7003 * original argument registers.
7004 * FIXME: Generalize this
7006 if (!args_clobbered
) {
7007 MonoBasicBlock
*first_bb
= cfg
->bb_entry
;
7010 next
= mono_bb_first_ins (first_bb
);
7011 if (!next
&& first_bb
->next_bb
) {
7012 first_bb
= first_bb
->next_bb
;
7013 next
= mono_bb_first_ins (first_bb
);
7016 if (first_bb
->in_count
> 1)
7019 for (i
= 0; next
&& i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
7020 ArgInfo
*ainfo
= cinfo
->args
+ i
;
7021 gboolean match
= FALSE
;
7023 ins
= cfg
->args
[i
];
7024 if (ins
->opcode
!= OP_REGVAR
) {
7025 switch (ainfo
->storage
) {
7027 if (((next
->opcode
== OP_LOAD_MEMBASE
) || (next
->opcode
== OP_LOADI4_MEMBASE
)) && next
->inst_basereg
== ins
->inst_basereg
&& next
->inst_offset
== ins
->inst_offset
) {
7028 if (next
->dreg
== ainfo
->reg
) {
7032 next
->opcode
= OP_MOVE
;
7033 next
->sreg1
= ainfo
->reg
;
7034 /* Only continue if the instruction doesn't change argument regs */
7035 if (next
->dreg
== ainfo
->reg
|| next
->dreg
== AMD64_RAX
)
7045 /* Argument allocated to (non-volatile) register */
7046 switch (ainfo
->storage
) {
7048 if (next
->opcode
== OP_MOVE
&& next
->sreg1
== ins
->dreg
&& next
->dreg
== ainfo
->reg
) {
7060 //next = mono_inst_list_next (&next->node, &first_bb->ins_list);
7067 if (cfg
->gen_seq_points
) {
7068 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
7070 /* Initialize seq_point_info_var */
7071 if (cfg
->compile_aot
) {
7072 /* Initialize the variable from a GOT slot */
7073 /* Same as OP_AOTCONST */
7074 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
7075 amd64_mov_reg_membase (code
, AMD64_R11
, AMD64_RIP
, 0, sizeof(gpointer
));
7076 g_assert (info_var
->opcode
== OP_REGOFFSET
);
7077 amd64_mov_membase_reg (code
, info_var
->inst_basereg
, info_var
->inst_offset
, AMD64_R11
, 8);
7080 /* Initialize ss_trigger_page_var */
7081 ins
= cfg
->arch
.ss_trigger_page_var
;
7083 g_assert (ins
->opcode
== OP_REGOFFSET
);
7085 if (cfg
->compile_aot
) {
7086 amd64_mov_reg_membase (code
, AMD64_R11
, info_var
->inst_basereg
, info_var
->inst_offset
, 8);
7087 amd64_mov_reg_membase (code
, AMD64_R11
, AMD64_R11
, G_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
), 8);
7089 amd64_mov_reg_imm (code
, AMD64_R11
, (guint64
)ss_trigger_page
);
7091 amd64_mov_membase_reg (code
, ins
->inst_basereg
, ins
->inst_offset
, AMD64_R11
, 8);
7094 cfg
->code_len
= code
- cfg
->native_code
;
7096 g_assert (cfg
->code_len
< cfg
->code_size
);
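/*
 * The epilog below undoes the work of the prolog: it runs the leave-method
 * instrumentation when tracing is enabled, restores the callee-saved registers
 * (from the LMF, from the register save area, or by popping them), loads
 * valuetype return values back into the return registers, and finally frees
 * the frame before returning.
 */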
7102 mono_arch_emit_epilog (MonoCompile
*cfg
)
7104 MonoMethod
*method
= cfg
->method
;
7107 int max_epilog_size
;
7109 gint32 lmf_offset
= cfg
->arch
.lmf_var
? ((MonoInst
*)cfg
->arch
.lmf_var
)->inst_offset
: -1;
7111 max_epilog_size
= get_max_epilog_size (cfg
);
7113 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
7114 cfg
->code_size
*= 2;
7115 cfg
->native_code
= mono_realloc_native_code (cfg
);
7116 cfg
->stat_code_reallocs
++;
7119 code
= cfg
->native_code
+ cfg
->code_len
;
7121 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
7122 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
7124 /* the code restoring the registers must be kept in sync with OP_JMP */
7127 if (method
->save_lmf
) {
7128 /* check if we need to restore protection of the stack after a stack overflow */
7129 if (mono_get_jit_tls_offset () != -1) {
7131 code
= mono_amd64_emit_tls_get (code
, AMD64_RCX
, mono_get_jit_tls_offset ());
7132 /* we load the value in a separate instruction: this mechanism may be
7133 * used later as a safer way to do thread interruption
7135 amd64_mov_reg_membase (code
, AMD64_RCX
, AMD64_RCX
, G_STRUCT_OFFSET (MonoJitTlsData
, restore_stack_prot
), 8);
7136 x86_alu_reg_imm (code
, X86_CMP
, X86_ECX
, 0);
7138 x86_branch8 (code
, X86_CC_Z
, 0, FALSE
);
7139 /* note that the call trampoline will preserve eax/edx */
7140 x86_call_reg (code
, X86_ECX
);
7141 x86_patch (patch
, code
);
7143 /* FIXME: maybe save the jit tls in the prolog */
7146 code
= emit_restore_lmf (cfg
, code
, lmf_offset
);
7148 /* Restore caller saved regs */
7149 if (cfg
->used_int_regs
& (1 << AMD64_RBP
)) {
7150 amd64_mov_reg_membase (code
, AMD64_RBP
, cfg
->frame_reg
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, rbp
), 8);
7152 if (cfg
->used_int_regs
& (1 << AMD64_RBX
)) {
7153 amd64_mov_reg_membase (code
, AMD64_RBX
, cfg
->frame_reg
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, rbx
), 8);
7155 if (cfg
->used_int_regs
& (1 << AMD64_R12
)) {
7156 amd64_mov_reg_membase (code
, AMD64_R12
, cfg
->frame_reg
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, r12
), 8);
7158 if (cfg
->used_int_regs
& (1 << AMD64_R13
)) {
7159 amd64_mov_reg_membase (code
, AMD64_R13
, cfg
->frame_reg
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, r13
), 8);
7161 if (cfg
->used_int_regs
& (1 << AMD64_R14
)) {
7162 amd64_mov_reg_membase (code
, AMD64_R14
, cfg
->frame_reg
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, r14
), 8);
7164 if (cfg
->used_int_regs
& (1 << AMD64_R15
)) {
7165 #if defined(__default_codegen__)
7166 amd64_mov_reg_membase (code
, AMD64_R15
, cfg
->frame_reg
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, r15
), 8);
7167 #elif defined(__native_client_codegen__)
7168 g_assert_not_reached();
7172 if (cfg
->used_int_regs
& (1 << AMD64_RDI
)) {
7173 amd64_mov_reg_membase (code
, AMD64_RDI
, cfg
->frame_reg
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, rdi
), 8);
7175 if (cfg
->used_int_regs
& (1 << AMD64_RSI
)) {
7176 amd64_mov_reg_membase (code
, AMD64_RSI
, cfg
->frame_reg
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, rsi
), 8);
7181 if (cfg
->arch
.omit_fp
) {
7182 gint32 save_area_offset
= cfg
->arch
.reg_save_area_offset
;
7184 for (i
= 0; i
< AMD64_NREG
; ++i
)
7185 if (AMD64_IS_CALLEE_SAVED_REG (i
) && (cfg
->used_int_regs
& (1 << i
))) {
7186 amd64_mov_reg_membase (code
, i
, AMD64_RSP
, save_area_offset
, 8);
7187 save_area_offset
+= 8;
7191 for (i
= 0; i
< AMD64_NREG
; ++i
)
7192 if (AMD64_IS_CALLEE_SAVED_REG (i
) && (cfg
->used_int_regs
& (1 << i
)))
7193 pos
-= sizeof(mgreg_t
);
7196 if (pos
== - sizeof(mgreg_t
)) {
7197 /* Only one register, so avoid lea */
7198 for (i
= AMD64_NREG
- 1; i
> 0; --i
)
7199 if (AMD64_IS_CALLEE_SAVED_REG (i
) && (cfg
->used_int_regs
& (1 << i
))) {
7200 amd64_mov_reg_membase (code
, i
, AMD64_RBP
, pos
, 8);
7204 amd64_lea_membase (code
, AMD64_RSP
, AMD64_RBP
, pos
);
7206 /* Pop registers in reverse order */
7207 for (i
= AMD64_NREG
- 1; i
> 0; --i
)
7208 if (AMD64_IS_CALLEE_SAVED_REG (i
) && (cfg
->used_int_regs
& (1 << i
))) {
7209 amd64_pop_reg (code
, i
);
7216 /* Load returned vtypes into registers if needed */
7217 cinfo
= cfg
->arch
.cinfo
;
7218 if (cinfo
->ret
.storage
== ArgValuetypeInReg
) {
7219 ArgInfo
*ainfo
= &cinfo
->ret
;
7220 MonoInst
*inst
= cfg
->ret
;
7222 for (quad
= 0; quad
< 2; quad
++) {
7223 switch (ainfo
->pair_storage
[quad
]) {
7225 amd64_mov_reg_membase (code
, ainfo
->pair_regs
[quad
], inst
->inst_basereg
, inst
->inst_offset
+ (quad
* sizeof(mgreg_t
)), sizeof(mgreg_t
));
7227 case ArgInFloatSSEReg
:
7228 amd64_movss_reg_membase (code
, ainfo
->pair_regs
[quad
], inst
->inst_basereg
, inst
->inst_offset
+ (quad
* sizeof(mgreg_t
)));
7230 case ArgInDoubleSSEReg
:
7231 amd64_movsd_reg_membase (code
, ainfo
->pair_regs
[quad
], inst
->inst_basereg
, inst
->inst_offset
+ (quad
* sizeof(mgreg_t
)));
7236 g_assert_not_reached ();
7241 if (cfg
->arch
.omit_fp
) {
7242 if (cfg
->arch
.stack_alloc_size
)
7243 amd64_alu_reg_imm (code
, X86_ADD
, AMD64_RSP
, cfg
->arch
.stack_alloc_size
);
7247 async_exc_point (code
);
7250 cfg
->code_len
= code
- cfg
->native_code
;
7252 g_assert (cfg
->code_len
< cfg
->code_size
);
7256 mono_arch_emit_exceptions (MonoCompile
*cfg
)
7258 MonoJumpInfo
*patch_info
;
7261 MonoClass
*exc_classes
[16];
7262 guint8
*exc_throw_start
[16], *exc_throw_end
[16];
7263 guint32 code_size
= 0;
7265 /* Compute needed space */
7266 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
7267 if (patch_info
->type
== MONO_PATCH_INFO_EXC
)
7269 if (patch_info
->type
== MONO_PATCH_INFO_R8
)
7270 code_size
+= 8 + 15; /* sizeof (double) + alignment */
7271 if (patch_info
->type
== MONO_PATCH_INFO_R4
)
7272 code_size
+= 4 + 15; /* sizeof (float) + alignment */
7273 if (patch_info
->type
== MONO_PATCH_INFO_GC_CARD_TABLE_ADDR
)
7274 code_size
+= 8 + 7; /*sizeof (void*) + alignment */
7277 #ifdef __native_client_codegen__
7278 /* Give us extra room on Native Client. This could be */
7279 /* more carefully calculated, but bundle alignment makes */
7280 /* it much trickier, so *2 like other places is good. */
7284 while (cfg
->code_len
+ code_size
> (cfg
->code_size
- 16)) {
7285 cfg
->code_size
*= 2;
7286 cfg
->native_code
= mono_realloc_native_code (cfg
);
7287 cfg
->stat_code_reallocs
++;
7290 code
= cfg
->native_code
+ cfg
->code_len
;
7292 /* add code to raise exceptions */
7294 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
7295 switch (patch_info
->type
) {
7296 case MONO_PATCH_INFO_EXC
: {
7297 MonoClass
*exc_class
;
7301 amd64_patch (patch_info
->ip
.i
+ cfg
->native_code
, code
);
7303 exc_class
= mono_class_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
7304 g_assert (exc_class
);
7305 throw_ip
= patch_info
->ip
.i
;
7307 //x86_breakpoint (code);
7308 /* Find a throw sequence for the same exception class */
7309 for (i
= 0; i
< nthrows
; ++i
)
7310 if (exc_classes
[i
] == exc_class
)
7313 amd64_mov_reg_imm (code
, AMD64_ARG_REG2
, (exc_throw_end
[i
] - cfg
->native_code
) - throw_ip
);
7314 x86_jump_code (code
, exc_throw_start
[i
]);
7315 patch_info
->type
= MONO_PATCH_INFO_NONE
;
7319 amd64_mov_reg_imm_size (code
, AMD64_ARG_REG2
, 0xf0f0f0f0, 4);
7323 exc_classes
[nthrows
] = exc_class
;
7324 exc_throw_start
[nthrows
] = code
;
7326 amd64_mov_reg_imm (code
, AMD64_ARG_REG1
, exc_class
->type_token
- MONO_TOKEN_TYPE_DEF
);
7328 patch_info
->type
= MONO_PATCH_INFO_NONE
;
7330 code
= emit_call_body (cfg
, code
, MONO_PATCH_INFO_INTERNAL_METHOD
, "mono_arch_throw_corlib_exception");
7332 amd64_mov_reg_imm (buf
, AMD64_ARG_REG2
, (code
- cfg
->native_code
) - throw_ip
);
7337 exc_throw_end
[nthrows
] = code
;
7347 g_assert(code
< cfg
->native_code
+ cfg
->code_size
);
7350 /* Handle relocations with RIP relative addressing */
7351 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
7352 gboolean remove
= FALSE
;
7353 guint8
*orig_code
= code
;
7355 switch (patch_info
->type
) {
7356 case MONO_PATCH_INFO_R8
:
7357 case MONO_PATCH_INFO_R4
: {
7358 guint8
*pos
, *patch_pos
;
7361 /* The SSE opcodes require a 16 byte alignment */
7362 #if defined(__default_codegen__)
7363 code
= (guint8
*)ALIGN_TO (code
, 16);
7364 #elif defined(__native_client_codegen__)
7366 /* Pad this out with HLT instructions */
7367 /* or we can get garbage bytes emitted */
7368 /* which will fail validation */
7369 guint8
*aligned_code
;
7370 /* extra align to make room for */
7371 /* mov/push below */
7372 int extra_align
= patch_info
->type
== MONO_PATCH_INFO_R8
? 2 : 1;
7373 aligned_code
= (guint8
*)ALIGN_TO (code
+ extra_align
, 16);
7374 /* The technique of hiding data in an */
7375 /* instruction has a problem here: we */
7376 /* need the data aligned to a 16-byte */
7377 /* boundary but the instruction cannot */
7378 /* cross the bundle boundary. so only */
7379 /* odd multiples of 16 can be used */
7380 if ((intptr_t)aligned_code
% kNaClAlignment
== 0) {
7383 while (code
< aligned_code
) {
7384 *(code
++) = 0xf4; /* hlt */
7389 pos
= cfg
->native_code
+ patch_info
->ip
.i
;
7390 if (IS_REX (pos
[1])) {
7391 patch_pos
= pos
+ 5;
7392 target_pos
= code
- pos
- 9;
7395 patch_pos
= pos
+ 4;
7396 target_pos
= code
- pos
- 8;
7399 if (patch_info
->type
== MONO_PATCH_INFO_R8
) {
7400 #ifdef __native_client_codegen__
7401 /* Hide 64-bit data in a */
7402 /* "mov imm64, r11" instruction. */
7403 /* write it before the start of */
7405 *(code
-2) = 0x49; /* prefix */
7406 *(code
-1) = 0xbb; /* mov X, %r11 */
7408 *(double*)code
= *(double*)patch_info
->data
.target
;
7409 code
+= sizeof (double);
7411 #ifdef __native_client_codegen__
7412 /* Hide 32-bit data in a */
7413 /* "push imm32" instruction. */
7414 *(code
-1) = 0x68; /* push */
7416 *(float*)code
= *(float*)patch_info
->data
.target
;
7417 code
+= sizeof (float);
7420 *(guint32
*)(patch_pos
) = target_pos
;
7425 case MONO_PATCH_INFO_GC_CARD_TABLE_ADDR
: {
7428 if (cfg
->compile_aot
)
7431 /*loading is faster against aligned addresses.*/
7432 code
= (guint8
*)ALIGN_TO (code
, 8);
7433 memset (orig_code
, 0, code
- orig_code
);
7435 pos
= cfg
->native_code
+ patch_info
->ip
.i
;
7437 /*alu_op [rex] modr/m imm32 - 7 or 8 bytes */
7438 if (IS_REX (pos
[1]))
7439 *(guint32
*)(pos
+ 4) = (guint8
*)code
- pos
- 8;
7441 *(guint32
*)(pos
+ 3) = (guint8
*)code
- pos
- 7;
7443 *(gpointer
*)code
= (gpointer
)patch_info
->data
.target
;
7444 code
+= sizeof (gpointer
);
7454 if (patch_info
== cfg
->patch_info
)
7455 cfg
->patch_info
= patch_info
->next
;
7459 for (tmp
= cfg
->patch_info
; tmp
->next
!= patch_info
; tmp
= tmp
->next
)
7461 tmp
->next
= patch_info
->next
;
7464 g_assert (code
< cfg
->native_code
+ cfg
->code_size
);
7467 cfg
->code_len
= code
- cfg
->native_code
;
7469 g_assert (cfg
->code_len
< cfg
->code_size
);
7473 #endif /* DISABLE_JIT */
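/*
 * The two instrument_* helpers below back the --trace support: the prolog
 * variant spills the incoming arguments to a temporary stack area and calls
 * the trace enter function with the method and a pointer to that area, while
 * the epilog variant saves the return value (integer in RAX, floating point
 * in XMM0), calls the trace leave function, and then restores it.
 */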
7476 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
7479 CallInfo
*cinfo
= NULL
;
7480 MonoMethodSignature
*sig
;
7482 int i
, n
, stack_area
= 0;
7484 /* Keep this in sync with mono_arch_get_argument_info */
7486 if (enable_arguments
) {
7487 /* Allocate a new area on the stack and save arguments there */
7488 sig
= mono_method_signature (cfg
->method
);
7490 cinfo
= get_call_info (cfg
->generic_sharing_context
, cfg
->mempool
, sig
);
7492 n
= sig
->param_count
+ sig
->hasthis
;
7494 stack_area
= ALIGN_TO (n
* 8, 16);
7496 amd64_alu_reg_imm (code
, X86_SUB
, AMD64_RSP
, stack_area
);
7498 for (i
= 0; i
< n
; ++i
) {
7499 inst
= cfg
->args
[i
];
7501 if (inst
->opcode
== OP_REGVAR
)
7502 amd64_mov_membase_reg (code
, AMD64_RSP
, (i
* 8), inst
->dreg
, 8);
7504 amd64_mov_reg_membase (code
, AMD64_R11
, inst
->inst_basereg
, inst
->inst_offset
, 8);
7505 amd64_mov_membase_reg (code
, AMD64_RSP
, (i
* 8), AMD64_R11
, 8);
7510 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_METHODCONST
, cfg
->method
);
7511 amd64_set_reg_template (code
, AMD64_ARG_REG1
);
7512 amd64_mov_reg_reg (code
, AMD64_ARG_REG2
, AMD64_RSP
, 8);
7513 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, (gpointer
)func
, TRUE
);
7515 if (enable_arguments
)
7516 amd64_alu_reg_imm (code
, X86_ADD
, AMD64_RSP
, stack_area
);
7530 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
7533 int save_mode
= SAVE_NONE
;
7534 MonoMethod
*method
= cfg
->method
;
7535 MonoType
*ret_type
= mini_type_get_underlying_type (NULL
, mono_method_signature (method
)->ret
);
7538 switch (ret_type
->type
) {
7539 case MONO_TYPE_VOID
:
7540 /* special case string .ctor icall */
7541 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
7542 save_mode
= SAVE_EAX
;
7544 save_mode
= SAVE_NONE
;
7548 save_mode
= SAVE_EAX
;
7552 save_mode
= SAVE_XMM
;
7554 case MONO_TYPE_GENERICINST
:
7555 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
7556 save_mode
= SAVE_EAX
;
7560 case MONO_TYPE_VALUETYPE
:
7561 save_mode
= SAVE_STRUCT
;
7564 save_mode
= SAVE_EAX
;
7568 /* Save the result and copy it into the proper argument register */
7569 switch (save_mode
) {
7571 amd64_push_reg (code
, AMD64_RAX
);
7573 amd64_alu_reg_imm (code
, X86_SUB
, AMD64_RSP
, 8);
7574 if (enable_arguments
)
7575 amd64_mov_reg_reg (code
, AMD64_ARG_REG2
, AMD64_RAX
, 8);
7579 if (enable_arguments
)
7580 amd64_mov_reg_imm (code
, AMD64_ARG_REG2
, 0);
7583 amd64_alu_reg_imm (code
, X86_SUB
, AMD64_RSP
, 8);
7584 amd64_movsd_membase_reg (code
, AMD64_RSP
, 0, AMD64_XMM0
);
7586 amd64_alu_reg_imm (code
, X86_SUB
, AMD64_RSP
, 8);
7588 * The result is already in the proper argument register so no copying
7595 g_assert_not_reached ();
7598 /* Set %al since this is a varargs call */
7599 if (save_mode
== SAVE_XMM
)
7600 amd64_mov_reg_imm (code
, AMD64_RAX
, 1);
7602 amd64_mov_reg_imm (code
, AMD64_RAX
, 0);
7604 if (preserve_argument_registers
) {
7605 for (i
= 0; i
< PARAM_REGS
; ++i
)
7606 amd64_push_reg (code
, param_regs
[i
]);
7609 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_METHODCONST
, method
);
7610 amd64_set_reg_template (code
, AMD64_ARG_REG1
);
7611 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, (gpointer
)func
, TRUE
);
7613 if (preserve_argument_registers
) {
7614 for (i
= PARAM_REGS
- 1; i
>= 0; --i
)
7615 amd64_pop_reg (code
, param_regs
[i
]);
7618 /* Restore result */
7619 switch (save_mode
) {
7621 amd64_alu_reg_imm (code
, X86_ADD
, AMD64_RSP
, 8);
7622 amd64_pop_reg (code
, AMD64_RAX
);
7628 amd64_alu_reg_imm (code
, X86_ADD
, AMD64_RSP
, 8);
7629 amd64_movsd_reg_membase (code
, AMD64_XMM0
, AMD64_RSP
, 0);
7630 amd64_alu_reg_imm (code
, X86_ADD
, AMD64_RSP
, 8);
7635 g_assert_not_reached ();
mono_arch_flush_icache (guint8 *code, gint size)

mono_arch_flush_register_windows (void)

mono_arch_is_inst_imm (gint64 imm)
    return amd64_is_imm32 (imm);

 * Determine whether the trap whose info is in SIGINFO is caused by
mono_arch_is_int_overflow (void *sigctx, void *info)
    mono_arch_sigctx_to_monoctx (sigctx, &ctx);
    rip = (guint8*)ctx.rip;
    if (IS_REX (rip [0])) {
        reg = amd64_rex_b (rip [0]);
    if ((rip [0] == 0xf7) && (x86_modrm_mod (rip [1]) == 0x3) && (x86_modrm_reg (rip [1]) == 0x7)) {
        reg += x86_modrm_rm (rip [1]);
    g_assert_not_reached ();
mono_arch_get_patch_offset (guint8 *code)

 * mono_breakpoint_clean_code:
 * Copy @size bytes from @code - @offset to the buffer @buf. If the debugger inserted software
 * breakpoints in the original code, they are removed in the copy.
 * Returns TRUE if no sw breakpoint was present.
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
    gboolean can_write = TRUE;
     * If method_start is non-NULL we need to perform bound checks, since accessing memory
     * at code - offset could go before the start of the method and end up in a different
     * page of memory that is not mapped, or read incorrect data anyway. We zero-fill the bytes
    if (!method_start || code - offset >= method_start) {
        memcpy (buf, code - offset, size);
        int diff = code - method_start;
        memset (buf, 0, size);
        memcpy (buf + offset - diff, method_start, diff + size - offset);
    for (i = 0; i < MONO_BREAKPOINT_ARRAY_SIZE; ++i) {
        int idx = mono_breakpoint_info_index [i];
        ptr = mono_breakpoint_info [idx].address;
        if (ptr >= code && ptr < code + size) {
            guint8 saved_byte = mono_breakpoint_info [idx].saved_byte;
            /*g_print ("patching %p with 0x%02x (was: 0x%02x)\n", ptr, saved_byte, buf [ptr - code]);*/
            buf [ptr - code] = saved_byte;
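/*
 * Illustrative usage sketch (not taken from the original source): a caller
 * that wants to decode the original instruction bytes at `ip` rather than the
 * live, possibly-breakpointed bytes could do something like
 *
 *   guint8 buf [16];
 *   mono_breakpoint_clean_code (method_start, ip, 0, buf, sizeof (buf));
 *
 * and then disassemble from buf, where any bytes overwritten by the debugger
 * have been replaced with the saved originals.
 */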
#if defined(__native_client_codegen__)
/* For membase calls, we want the base register. For Native Client, */
/* all indirect calls have the following sequence with the given sizes: */
/* mov %eXX,%eXX [2-3] */
/* mov disp(%r15,%rXX,scale),%r11d [4-8] */
/* and $0xffffffffffffffe0,%r11d [4] */
/* add %r15,%r11 [3] */
/* callq *%r11 [3] */

/* Determine if code points to a NaCl call-through-register sequence, */
/* (i.e., the last 3 instructions listed above) */
is_nacl_call_reg_sequence(guint8* code)
    const char *sequence = "\x41\x83\xe3\xe0" /* and */
                           "\x4d\x03\xdf"     /* add */
                           "\x41\xff\xd3";    /* call */
    return memcmp(code, sequence, 10) == 0;

/* Determine if code points to the first opcode of the mov membase component */
/* of an indirect call sequence (i.e. the first 2 instructions listed above) */
/* (there could be a REX prefix before the opcode but it is ignored) */
is_nacl_indirect_call_membase_sequence(guint8* code)
    /* Check for mov opcode, reg-reg addressing mode (mod = 3), */
    return code[0] == 0x8b && amd64_modrm_mod(code[1]) == 3 &&
           /* and that src reg = dest reg */
           amd64_modrm_reg(code[1]) == amd64_modrm_rm(code[1]) &&
           /* Check that next inst is mov, uses SIB byte (rm = 4), */
           code[3] == 0x8b && amd64_modrm_rm(code[4]) == 4 &&
           /* and has dst of r11 and base of r15 */
           (amd64_modrm_reg(code[4]) + amd64_rex_r(code[2])) == AMD64_R11 &&
           (amd64_sib_base(code[5]) + amd64_rex_b(code[2])) == AMD64_R15;
#endif /* __native_client_codegen__ */
mono_arch_get_this_arg_reg (guint8 *code)
    return AMD64_ARG_REG1;

mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
    return (gpointer)regs [mono_arch_get_this_arg_reg (code)];

#define MAX_ARCH_DELEGATE_PARAMS 10
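/*
 * The delegate invoke trampolines built below come in two flavours: the
 * has_target version loads delegate->target into the first argument register
 * and tail-jumps to delegate->method_ptr, while the no-target version shifts
 * every argument register down by one slot (on SysV presumably RDI <- RSI,
 * RSI <- RDX, and so on, with an argument reloaded from the caller's stack
 * when the shift runs past the last register) so that the unused "this" slot
 * is dropped before jumping to method_ptr.
 */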
get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
    guint8 *code, *start;

    start = code = mono_global_codeman_reserve (64);

    /* Replace the this argument with the target */
    amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
    amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, target), 8);
    amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));

    g_assert ((code - start) < 64);

    start = code = mono_global_codeman_reserve (64);

    if (param_count == 0) {
        amd64_jump_membase (code, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
        /* We have to shift the arguments left */
        amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
        for (i = 0; i < param_count; ++i) {
            amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
            amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, 0x28, 8);
            amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
        amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));

    g_assert ((code - start) < 64);

    nacl_global_codeman_validate(&start, 64, &code);

    mono_debug_add_delegate_trampoline (start, code - start);

    *code_len = code - start;

    if (mono_jit_map_is_enabled ()) {
        buff = (char*)"delegate_invoke_has_target";
        buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count);
        mono_emit_jit_tramp (start, code - start, buff);
 * mono_arch_get_delegate_invoke_impls:
 * Return a list of MonoTrampInfo structures for the delegate invoke impl
mono_arch_get_delegate_invoke_impls (void)
    code = get_delegate_invoke_impl (TRUE, 0, &code_len);
    res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));

    for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
        code = get_delegate_invoke_impl (FALSE, i, &code_len);
        res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
    guint8 *code, *start;

    if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
    /* FIXME: Support more cases */
    if (MONO_TYPE_ISSTRUCT (sig->ret))

    static guint8* cached = NULL;
    start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
    start = get_delegate_invoke_impl (TRUE, 0, NULL);
    mono_memory_barrier ();

    static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
    for (i = 0; i < sig->param_count; ++i)
        if (!mono_is_regsize_var (sig->params [i]))
    if (sig->param_count > 4)
    code = cache [sig->param_count];

    if (mono_aot_only) {
        char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
        start = mono_aot_get_trampoline (name);
    start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
    mono_memory_barrier ();
    cache [sig->param_count] = start;
mono_arch_finish_init (void)
     * We need to init this multiple times, since when we are first called, the key might not
     * be initialized yet.
    appdomain_tls_offset = mono_domain_get_tls_key ();
    lmf_tls_offset = mono_get_jit_tls_key ();
    lmf_addr_tls_offset = mono_get_jit_tls_key ();

    /* Only 64 tls entries can be accessed using inline code */
    if (appdomain_tls_offset >= 64)
        appdomain_tls_offset = -1;
    if (lmf_tls_offset >= 64)
        lmf_tls_offset = -1;
    if (lmf_addr_tls_offset >= 64)
        lmf_addr_tls_offset = -1;

    optimize_for_xen = access ("/proc/xen", F_OK) == 0;

    appdomain_tls_offset = mono_domain_get_tls_offset ();
    lmf_tls_offset = mono_get_lmf_tls_offset ();
    lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();

mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
8011 #ifdef MONO_ARCH_HAVE_IMT
8013 #if defined(__default_codegen__)
8014 #define CMP_SIZE (6 + 1)
8015 #define CMP_REG_REG_SIZE (4 + 1)
8016 #define BR_SMALL_SIZE 2
8017 #define BR_LARGE_SIZE 6
8018 #define MOV_REG_IMM_SIZE 10
8019 #define MOV_REG_IMM_32BIT_SIZE 6
8020 #define JUMP_REG_SIZE (2 + 1)
8021 #elif defined(__native_client_codegen__)
8022 /* NaCl N-byte instructions can be padded up to N-1 bytes */
8023 #define CMP_SIZE ((6 + 1) * 2 - 1)
8024 #define CMP_REG_REG_SIZE ((4 + 1) * 2 - 1)
8025 #define BR_SMALL_SIZE (2 * 2 - 1)
8026 #define BR_LARGE_SIZE (6 * 2 - 1)
8027 #define MOV_REG_IMM_SIZE (10 * 2 - 1)
8028 #define MOV_REG_IMM_32BIT_SIZE (6 * 2 - 1)
8029 /* Jump reg for NaCl adds a mask (+4) and add (+3) */
8030 #define JUMP_REG_SIZE ((2 + 1 + 4 + 3) * 2 - 1)
8031 /* Jump membase's size is large and unpredictable */
8032 /* in native client, just pad it out a whole bundle. */
8033 #define JUMP_MEMBASE_SIZE (kNaClAlignment)
8037 imt_branch_distance (MonoIMTCheckItem
**imt_entries
, int start
, int target
)
8039 int i
, distance
= 0;
8040 for (i
= start
; i
< target
; ++i
)
8041 distance
+= imt_entries
[i
]->chunk_size
;
8046 * LOCKING: called with the domain lock held
8049 mono_arch_build_imt_thunk (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
8050 gpointer fail_tramp
)
8054 guint8
*code
, *start
;
8055 gboolean vtable_is_32bit
= ((gsize
)(vtable
) == (gsize
)(int)(gsize
)(vtable
));
8057 for (i
= 0; i
< count
; ++i
) {
8058 MonoIMTCheckItem
*item
= imt_entries
[i
];
8059 if (item
->is_equals
) {
8060 if (item
->check_target_idx
) {
8061 if (!item
->compare_done
) {
8062 if (amd64_is_imm32 (item
->key
))
8063 item
->chunk_size
+= CMP_SIZE
;
8065 item
->chunk_size
+= MOV_REG_IMM_SIZE
+ CMP_REG_REG_SIZE
;
8067 if (item
->has_target_code
) {
8068 item
->chunk_size
+= MOV_REG_IMM_SIZE
;
8070 if (vtable_is_32bit
)
8071 item
->chunk_size
+= MOV_REG_IMM_32BIT_SIZE
;
8073 item
->chunk_size
+= MOV_REG_IMM_SIZE
;
8074 #ifdef __native_client_codegen__
8075 item
->chunk_size
+= JUMP_MEMBASE_SIZE
;
8078 item
->chunk_size
+= BR_SMALL_SIZE
+ JUMP_REG_SIZE
;
8081 item
->chunk_size
+= MOV_REG_IMM_SIZE
* 3 + CMP_REG_REG_SIZE
+
8082 BR_SMALL_SIZE
+ JUMP_REG_SIZE
* 2;
8084 if (vtable_is_32bit
)
8085 item
->chunk_size
+= MOV_REG_IMM_32BIT_SIZE
;
8087 item
->chunk_size
+= MOV_REG_IMM_SIZE
;
8088 item
->chunk_size
+= JUMP_REG_SIZE
;
8089 /* with assert below:
8090 * item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
8092 #ifdef __native_client_codegen__
8093 item
->chunk_size
+= JUMP_MEMBASE_SIZE
;
8098 if (amd64_is_imm32 (item
->key
))
8099 item
->chunk_size
+= CMP_SIZE
;
8101 item
->chunk_size
+= MOV_REG_IMM_SIZE
+ CMP_REG_REG_SIZE
;
8102 item
->chunk_size
+= BR_LARGE_SIZE
;
8103 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
8105 size
+= item
->chunk_size
;
8107 #if defined(__native_client__) && defined(__native_client_codegen__)
8108 /* In Native Client, we don't re-use thunks, allocate from the */
8109 /* normal code manager paths. */
8110 code
= mono_domain_code_reserve (domain
, size
);
8113 code
= mono_method_alloc_generic_virtual_thunk (domain
, size
);
8115 code
= mono_domain_code_reserve (domain
, size
);
8118 for (i
= 0; i
< count
; ++i
) {
8119 MonoIMTCheckItem
*item
= imt_entries
[i
];
8120 item
->code_target
= code
;
8121 if (item
->is_equals
) {
8122 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
8124 if (item
->check_target_idx
|| fail_case
) {
8125 if (!item
->compare_done
|| fail_case
) {
8126 if (amd64_is_imm32 (item
->key
))
8127 amd64_alu_reg_imm (code
, X86_CMP
, MONO_ARCH_IMT_REG
, (guint32
)(gssize
)item
->key
);
8129 amd64_mov_reg_imm (code
, MONO_ARCH_IMT_SCRATCH_REG
, item
->key
);
8130 amd64_alu_reg_reg (code
, X86_CMP
, MONO_ARCH_IMT_REG
, MONO_ARCH_IMT_SCRATCH_REG
);
8133 item
->jmp_code
= code
;
8134 amd64_branch8 (code
, X86_CC_NE
, 0, FALSE
);
8135 if (item
->has_target_code
) {
8136 amd64_mov_reg_imm (code
, MONO_ARCH_IMT_SCRATCH_REG
, item
->value
.target_code
);
8137 amd64_jump_reg (code
, MONO_ARCH_IMT_SCRATCH_REG
);
8139 amd64_mov_reg_imm (code
, MONO_ARCH_IMT_SCRATCH_REG
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
8140 amd64_jump_membase (code
, MONO_ARCH_IMT_SCRATCH_REG
, 0);
8144 amd64_patch (item
->jmp_code
, code
);
8145 amd64_mov_reg_imm (code
, MONO_ARCH_IMT_SCRATCH_REG
, fail_tramp
);
8146 amd64_jump_reg (code
, MONO_ARCH_IMT_SCRATCH_REG
);
8147 item
->jmp_code
= NULL
;
8150 /* enable the commented code to assert on wrong method */
8152 if (amd64_is_imm32 (item
->key
))
8153 amd64_alu_reg_imm (code
, X86_CMP
, MONO_ARCH_IMT_REG
, (guint32
)(gssize
)item
->key
);
8155 amd64_mov_reg_imm (code
, MONO_ARCH_IMT_SCRATCH_REG
, item
->key
);
8156 amd64_alu_reg_reg (code
, X86_CMP
, MONO_ARCH_IMT_REG
, MONO_ARCH_IMT_SCRATCH_REG
);
8158 item
->jmp_code
= code
;
8159 amd64_branch8 (code
, X86_CC_NE
, 0, FALSE
);
8160 /* See the comment below about R10 */
8161 amd64_mov_reg_imm (code
, MONO_ARCH_IMT_SCRATCH_REG
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
8162 amd64_jump_membase (code
, MONO_ARCH_IMT_SCRATCH_REG
, 0);
8163 amd64_patch (item
->jmp_code
, code
);
8164 amd64_breakpoint (code
);
8165 item
->jmp_code
= NULL
;
8167 /* We're using R10 (MONO_ARCH_IMT_SCRATCH_REG) here because R11 (MONO_ARCH_IMT_REG)
8168 needs to be preserved. R10 needs
8169 to be preserved for calls which
8170 require a runtime generic context,
8171 but interface calls don't. */
8172 amd64_mov_reg_imm (code
, MONO_ARCH_IMT_SCRATCH_REG
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
8173 amd64_jump_membase (code
, MONO_ARCH_IMT_SCRATCH_REG
, 0);
8177 if (amd64_is_imm32 (item
->key
))
8178 amd64_alu_reg_imm (code
, X86_CMP
, MONO_ARCH_IMT_REG
, (guint32
)(gssize
)item
->key
);
8180 amd64_mov_reg_imm (code
, MONO_ARCH_IMT_SCRATCH_REG
, item
->key
);
8181 amd64_alu_reg_reg (code
, X86_CMP
, MONO_ARCH_IMT_REG
, MONO_ARCH_IMT_SCRATCH_REG
);
8183 item
->jmp_code
= code
;
8184 if (x86_is_imm8 (imt_branch_distance (imt_entries
, i
, item
->check_target_idx
)))
8185 x86_branch8 (code
, X86_CC_GE
, 0, FALSE
);
8187 x86_branch32 (code
, X86_CC_GE
, 0, FALSE
);
8189 g_assert (code
- item
->code_target
<= item
->chunk_size
);
	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				amd64_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	if (!fail_tramp)
		mono_stats.imt_thunks_size += code - start;
	g_assert (code - start <= size);

	nacl_domain_code_validate (domain, &start, size, &code);

	return start;
}

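/*
 * Rough shape of one generated IMT entry (illustrative sketch, not emitted
 * verbatim; register names follow MONO_ARCH_IMT_REG / MONO_ARCH_IMT_SCRATCH_REG):
 *
 *     cmp  IMT_REG, <item->key>       ; imm32 form, or via IMT_SCRATCH_REG when the key doesn't fit
 *     jne  <next check item / fail path>
 *     jmp  [IMT_SCRATCH_REG]          ; IMT_SCRATCH_REG = &vtable->vtable [slot], or a direct jmp to target_code
 *
 * The "patch the branches" loop above fixes up only the jne of items that chain
 * to another check item; fail-case branches were already patched to the
 * fail_tramp path while the entry was emitted.
 */
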
MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
	return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}

MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}

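/*
 * The two helpers above just read a fixed register out of the saved register
 * state passed in REGS (MONO_ARCH_IMT_REG for the IMT method,
 * MONO_ARCH_RGCTX_REG for the static call vtable); no instruction decoding is
 * involved.
 */
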
GSList*
mono_arch_get_cie_program (void)
{
	GSList *l = NULL;

	mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, AMD64_RSP, 8);
	mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, AMD64_RIP, -8);

	return l;
}

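/*
 * The CIE program above encodes the state at function entry on AMD64: the CFA
 * is %rsp + 8 (the return address has just been pushed), and the return
 * address itself lives at CFA - 8 (expressed here as AMD64_RIP at offset -8).
 */
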
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;
	int opcode = 0;

	if (cmethod->klass == mono_defaults.math_class) {
		if (strcmp (cmethod->name, "Sin") == 0) {
			opcode = OP_SIN;
		} else if (strcmp (cmethod->name, "Cos") == 0) {
			opcode = OP_COS;
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
			opcode = OP_SQRT;
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
			opcode = OP_ABS;
		}

		if (opcode) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_freg (cfg);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		opcode = 0;
		if (cfg->opt & MONO_OPT_CMOV) {
			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMIN;
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMIN_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_LMIN;
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMIN_UN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMAX;
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMAX_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
					opcode = OP_LMAX;
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMAX_UN;
			}
		}

		if (opcode) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
			ins->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = args [0]->dreg;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

#if 0
		/* OP_FREM is not IEEE compatible */
		else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
			MONO_INST_NEW (cfg, ins, OP_FREM);
			ins->inst_i0 = args [0];
			ins->inst_i1 = args [1];
		}
#endif
	}

	/*
	 * Can't implement CompareExchange methods this way since they have
	 * explicit null checks.
	 */

	return ins;
}

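/*
 * Example (sketch): with MONO_OPT_CMOV enabled, a call to Math.Min(int, int)
 * is replaced by a single OP_IMIN whose sreg1/sreg2 are the two argument
 * vregs, letting the backend later emit a compare plus conditional move
 * instead of a call. Math.Sin/Cos/Sqrt/Abs(double) are mapped to the
 * corresponding floating point opcodes the same way. The opcode constants
 * filled in above (OP_SIN, OP_IMIN, OP_LMAX, ...) follow the usual mini
 * opcode naming from mini-ops.h.
 */
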
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}

MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	MonoInst* ins;

	if (appdomain_tls_offset == -1)
		return NULL;

	MONO_INST_NEW (cfg, ins, OP_TLS_GET);
	ins->inst_offset = appdomain_tls_offset;
	return ins;
}

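/*
 * The intrinsic above turns AppDomain lookups into a single OP_TLS_GET at a
 * known TLS offset; when appdomain_tls_offset is still -1 the caller falls
 * back to the ordinary, call-based path.
 */
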
#define _CTX_REG(ctx,fld,i) ((&ctx->fld)[i])

mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	switch (reg) {
	case AMD64_RCX: return ctx->rcx;
	case AMD64_RDX: return ctx->rdx;
	case AMD64_RBX: return ctx->rbx;
	case AMD64_RBP: return ctx->rbp;
	case AMD64_RSP: return ctx->rsp;
	default:
		if (reg < 12)
			return _CTX_REG (ctx, rax, reg);
		else if (reg >= 12)
			return _CTX_REG (ctx, r12, reg - 12);
		else
			g_assert_not_reached ();
	}
}

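/*
 * The _CTX_REG trick above assumes MonoContext lays out rax..r11 and r12..r15
 * as two contiguous runs of mgreg_t fields, so a hardware register number maps
 * to a simple index off &ctx->rax (or off &ctx->r12 for r12-r15).
 */
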
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
{
	switch (reg) {
	case AMD64_RCX:
		ctx->rcx = val;
		break;
	case AMD64_RDX:
		ctx->rdx = val;
		break;
	case AMD64_RBX:
		ctx->rbx = val;
		break;
	case AMD64_RBP:
		ctx->rbp = val;
		break;
	case AMD64_RSP:
		ctx->rsp = val;
		break;
	default:
		if (reg < 12)
			_CTX_REG (ctx, rax, reg) = val;
		else if (reg >= 12)
			_CTX_REG (ctx, r12, reg - 12) = val;
		else
			g_assert_not_reached ();
	}
}

/*MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD*/
gpointer
mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
{
	int offset;
	gpointer *sp, old_value;
	char *bp;
	const unsigned char *handler;

	/*Decode the first instruction to figure out where did we store the spvar*/
	/*Our jit MUST generate the following:
	 mov %rsp, imm(%rbp)

	 Which is encoded as: REX.W 0x89 mod_rm
	 mod_rm (rsp, rbp, imm) which can be: (imm will never be zero)
		mod (reg + imm8):  01 reg(rsp): 100 rm(rbp): 101 -> 01100101 (0x65)
		mod (reg + imm32): 10 reg(rsp): 100 rm(rbp): 101 -> 10100101 (0xA5)

	FIXME can we generate frameless methods on this case?
	*/
	handler = clause->handler_start;

	/*REX.W*/
	if (*handler != 0x48)
		return NULL;
	++handler;

	/*mov r, r/m */
	if (*handler != 0x89)
		return NULL;
	++handler;

	if (*handler == 0x65)
		offset = *(signed char*)(handler + 1);
	else if (*handler == 0xA5)
		offset = *(int*)(handler + 1);
	else
		return NULL;

	bp = MONO_CONTEXT_GET_BP (ctx);
	sp = *(gpointer*)(bp + offset);

	old_value = *sp;
	if (old_value < ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
		return old_value;

	*sp = new_value;

	return old_value;
}

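/*
 * Illustrative byte patterns accepted by the decoder above (REX.W, opcode
 * 0x89, then the mod_rm byte described in the comment):
 *
 *     48 89 65 f8              mov %rsp, -0x8(%rbp)     ; imm8  form (mod_rm 0x65)
 *     48 89 a5 78 ff ff ff     mov %rsp, -0x88(%rbp)    ; imm32 form (mod_rm 0xa5)
 *
 * The displacements shown are made up for the example; only the first three
 * bytes and the immediate are actually inspected.
 */
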
/*
 * mono_arch_emit_load_aotconst:
 *
 *   Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
 * TARGET from the mscorlib GOT in full-aot code.
 * On AMD64, the result is placed into R11.
 */
guint8*
mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
{
	*ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);

	return code;
}

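/*
 * The instruction emitted above is an 8-byte RIP-relative load into R11 with a
 * zero displacement; the MonoJumpInfo recorded just before it is what later
 * lets the displacement be patched to point at the requested GOT slot.
 */
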
/*
 * mono_arch_get_trampolines:
 *
 *   Return a list of MonoTrampInfo structures describing arch specific trampolines
 * for AOT.
 */
GSList *
mono_arch_get_trampolines (gboolean aot)
{
	return mono_amd64_get_exception_trampolines (aot);
}

/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED

/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint8 *orig_code = code;

	if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (info->bp_addrs [native_offset] == 0);
		info->bp_addrs [native_offset] = bp_trigger_page;
	} else {
		/*
		 * In production, we will use int3 (has to fix the size in the md
		 * file). But that could confuse gdb, so during development, we emit
		 * a SIGSEGV instead.
		 */
		g_assert (code [0] == 0x90);
		if (breakpoint_size == 8) {
			amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
		} else {
			amd64_mov_reg_imm_size (code, AMD64_R11, (guint64)bp_trigger_page, 8);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 4);
		}

		g_assert (code - orig_code == breakpoint_size);
	}
}

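/*
 * In the JITted (non-AOT) case the breakpoint is armed by overwriting the nop
 * pad emitted for OP_SEQ_POINT with a read of bp_trigger_page, so reaching the
 * sequence point faults and is later recognized as a breakpoint event.
 * Roughly:
 *
 *     breakpoint_size == 8 :  mov bp_trigger_page, %r11d                     (absolute 32-bit load)
 *     otherwise            :  mov $bp_trigger_page, %r11 ; mov (%r11), %r11d
 *
 * (sketch only; the exact bytes are whatever amd64_mov_reg_mem /
 * amd64_mov_reg_imm_size / amd64_mov_reg_membase emit).
 */
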
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (info->bp_addrs [native_offset] == 0);
		info->bp_addrs [native_offset] = info;
	} else {
		for (i = 0; i < breakpoint_size; ++i)
			x86_nop (code);
	}
}

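/*
 * Clearing mirrors setting: JITted code gets its nop pad restored
 * (breakpoint_size nops), while AOTed code gets bp_addrs [native_offset]
 * pointed back at a harmlessly readable location (the SeqPointInfo itself,
 * the same value mono_arch_get_seq_point_info initializes the table with).
 */
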
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
#ifdef HOST_WIN32
	EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;
	return FALSE;
#else
	siginfo_t* sinfo = (siginfo_t*) info;
	/* Sometimes the address is off by 4 */
	if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
		return TRUE;
	else
		return FALSE;
#endif
}

/*
 * mono_arch_skip_breakpoint:
 *
 *   Modify CTX so the ip is placed after the breakpoint instruction, so when
 * we resume, the instruction is not executed again.
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	if (ji->from_aot) {
		/* amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8) */
		MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 3);
	} else {
		MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size);
	}
}

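/*
 * The +3 in the AOT branch above skips the 3-byte mov (%r11), %r11 load shown
 * in the comment (REX prefix + opcode + mod_rm); JITted code advances by
 * breakpoint_fault_size instead.
 */
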
/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
}

/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
}

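/*
 * Single stepping is toggled purely through page protection: starting removes
 * all access to ss_trigger_page so the load emitted at each sequence point
 * faults, stopping makes the page readable again so the same load becomes a
 * harmless no-op.
 */
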
/*
 * mono_arch_is_single_step_event:
 *
 *   Return whether the machine state in SIGCTX corresponds to a single
 * step event.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
#ifdef HOST_WIN32
	EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;
	return FALSE;
#else
	siginfo_t* sinfo = (siginfo_t*) info;
	/* Sometimes the address is off by 4 */
	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
		return TRUE;
	else
		return FALSE;
#endif
}

/*
 * mono_arch_skip_single_step:
 *
 *   Modify CTX so the ip is placed after the single step trigger instruction,
 * so when we resume, the instruction is not executed again.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + single_step_fault_size);
}

/*
 * mono_arch_get_seq_point_info:
 *
 *   Return a pointer to a data structure which is used by the sequence
 * point implementation in AOTed code.
 */
gpointer
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;
	int i;

	// FIXME: Add a free function

	mono_domain_lock (domain);
	info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
								code);
	mono_domain_unlock (domain);

	if (!info) {
		ji = mono_jit_info_table_find (domain, (char*)code);
		g_assert (ji);

		// FIXME: Optimize the size
		info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));

		info->ss_trigger_page = ss_trigger_page;
		info->bp_trigger_page = bp_trigger_page;
		/* Initialize to a valid address */
		for (i = 0; i < ji->code_size; ++i)
			info->bp_addrs [i] = info;

		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}

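/*
 * For AOTed code the per-method SeqPointInfo acts as the breakpoint table:
 * every bp_addrs slot starts out pointing at the (always readable) info block,
 * and mono_arch_set_breakpoint swaps a slot for bp_trigger_page so the load
 * the AOTed sequence point performs through it starts faulting. The table is
 * allocated with one gpointer slot per native code byte, hence the
 * "Optimize the size" FIXME above.
 */
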