/*
 * mini-x86.c: x86 backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * Copyright 2003 Ximian, Inc.
 * Copyright 2003-2011 Novell Inc.
 * Copyright 2011 Xamarin Inc.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include "mini.h"
#include <string.h>
#include <math.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internals.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-hwcap.h>
#include <mono/utils/mono-threads.h>

#include "trace.h"
#include "mini-x86.h"
#include "cpu-x86.h"
#include "ir-emit.h"
#include "mini-gc.h"
#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
#else
#define optimize_for_xen 0
#endif
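/*
 * Rationale (sketch, not stated in the original source): under a Xen
 * paravirtualized kernel some %gs-prefixed TLS access patterns are slow,
 * so when optimize_for_xen is set the TLS fast paths below (see
 * mono_x86_emit_tls_get) first load the thread block via %gs:0 and then do a
 * plain indexed load, instead of a direct %gs:offset access.
 */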
/* The single step trampoline */
static gpointer ss_trampoline;

/* The breakpoint trampoline */
static gpointer bp_trampoline;

/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
#define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
static mono_mutex_t mini_arch_mutex;

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
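/*
 * ALIGN_TO rounds VAL up to the next multiple of ALIGN (ALIGN must be a power
 * of two). For example, ALIGN_TO (10, 4) = (10 + 3) & ~3 = 12, while an
 * already-aligned value is unchanged: ALIGN_TO (8, 4) = 8.
 */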
#ifdef TARGET_WIN32
/* Under windows, the default pinvoke calling convention is stdcall */
#define CALLCONV_IS_STDCALL(sig) ((sig)->pinvoke && ((sig)->call_convention == MONO_CALL_STDCALL || (sig)->call_convention == MONO_CALL_DEFAULT || (sig)->call_convention == MONO_CALL_THISCALL))
#else
#define CALLCONV_IS_STDCALL(sig) ((sig)->pinvoke && ((sig)->call_convention == MONO_CALL_STDCALL || (sig)->call_convention == MONO_CALL_THISCALL))
#endif
#define X86_IS_CALLEE_SAVED_REG(reg) (((reg) == X86_EBX) || ((reg) == X86_EDI) || ((reg) == X86_ESI))

#define OP_SEQ_POINT_BP_OFFSET 7
static guint8*
emit_load_aotconst (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji, int dreg, int tramp_type, gconstpointer target);
const char*
mono_arch_regname (int reg)
{
	switch (reg) {
	case X86_EAX: return "%eax";
	case X86_EBX: return "%ebx";
	case X86_ECX: return "%ecx";
	case X86_EDX: return "%edx";
	case X86_ESP: return "%esp";
	case X86_EBP: return "%ebp";
	case X86_EDI: return "%edi";
	case X86_ESI: return "%esi";
	}
	return "unknown";
}
const char*
mono_arch_fregname (int reg)
{
	/* "%fr0" .. "%fr7" */
}

const char*
mono_arch_xregname (int reg)
{
	/* "%xmm0" .. "%xmm7" */
}
void
mono_x86_patch (unsigned char* code, gpointer target)
{
	x86_patch (code, (unsigned char*)target);
}
#define FLOAT_PARAM_REGS 0

static const guint32 thiscall_param_regs [] = { X86_ECX, X86_NREG };
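/*
 * Under the native "thiscall" convention the implicit 'this' pointer is
 * passed in ECX and all remaining arguments go on the stack. The array is
 * terminated with X86_NREG, which add_general () below treats as "no more
 * parameter registers".
 */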
static const guint32*
callconv_param_regs (MonoMethodSignature *sig)
{
	switch (sig->call_convention) {
	case MONO_CALL_THISCALL:
		return thiscall_param_regs;
	default:
		return NULL;
	}
}
#if defined(TARGET_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
#define SMALL_STRUCTS_IN_REGS
static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
#endif
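/*
 * On these platforms small structs (1, 2, 4 or 8 bytes) returned from pinvoke
 * calls come back in registers rather than through a hidden return buffer:
 * the low word in EAX and, for 8 byte structs, the high word in EDX (see
 * add_valuetype () below).
 */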
static void inline
add_general (guint32 *gr, const guint32 *param_regs, guint32 *stack_size, ArgInfo *ainfo)
{
	ainfo->offset = *stack_size;

	if (!param_regs || param_regs [*gr] == X86_NREG) {
		ainfo->storage = ArgOnStack;
		ainfo->nslots = 1;
		(*stack_size) += sizeof (gpointer);
	}
	else {
		ainfo->storage = ArgInIReg;
		ainfo->reg = param_regs [*gr];
		(*gr) ++;
	}
}
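/*
 * Illustrative example: for a cdecl-like signature with no parameter
 * registers, three consecutive int arguments run through add_general () all
 * end up as ArgOnStack with offsets 0, 4 and 8, and *stack_size grows by
 * sizeof (gpointer) for each of them.
 */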
static void inline
add_general_pair (guint32 *gr, const guint32 *param_regs, guint32 *stack_size, ArgInfo *ainfo)
{
	ainfo->offset = *stack_size;

	g_assert (!param_regs || param_regs [*gr] == X86_NREG);

	ainfo->storage = ArgOnStack;
	(*stack_size) += sizeof (gpointer) * 2;
	ainfo->nslots = 2;
}
static void inline
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
	ainfo->offset = *stack_size;

	if (*gr >= FLOAT_PARAM_REGS) {
		ainfo->storage = ArgOnStack;
		(*stack_size) += is_double ? 8 : 4;
		ainfo->nslots = is_double ? 2 : 1;
	}
	else {
		/* A double register */
		if (is_double)
			ainfo->storage = ArgInDoubleSSEReg;
		else
			ainfo->storage = ArgInFloatSSEReg;
		ainfo->reg = *gr;
		(*gr) ++;
	}
}
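/*
 * Note that FLOAT_PARAM_REGS is 0 on x86, so the *gr >= FLOAT_PARAM_REGS test
 * above is always true and floating point arguments are always passed on the
 * stack here: 4 bytes (one slot) for R4, 8 bytes (two slots) for R8.
 */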
static void
add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
	       gboolean is_return,
	       guint32 *gr, const guint32 *param_regs, guint32 *fr, guint32 *stack_size)
{
	guint32 size;
	MonoClass *klass;
	klass = mono_class_from_mono_type (type);
	size = mini_type_stack_size_full (&klass->byval_arg, NULL, sig->pinvoke);
#if defined(TARGET_WIN32)
	/*
	 * Standard C and C++ don't allow empty structs; an empty struct will always have a size of 1 byte.
	 * GCC has an extension that allows empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html.
	 * This causes a little dilemma, since a runtime built with a non-GCC compiler will not be compatible with
	 * GCC-built C libraries and the other way around. On platforms where empty structs have a size of 1 byte
	 * they must be represented in the call and cannot be dropped.
	 */
	if (size == 0 && MONO_TYPE_ISSTRUCT (type) && sig->pinvoke) {
		/* Empty structs (1 byte size) need to be represented in a stack slot */
		ainfo->pass_empty_struct = TRUE;
		size = 1;
	}
#endif
#ifdef SMALL_STRUCTS_IN_REGS
	if (sig->pinvoke && is_return) {
		MonoMarshalType *info;

		info = mono_marshal_load_type_info (klass);
		g_assert (info);

		ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;

		/* Ignore empty struct return value, if used. */
		if (info->num_fields == 0 && ainfo->pass_empty_struct) {
			ainfo->storage = ArgValuetypeInReg;
			return;
		}
		/*
		 * The Windows x86 ABI for returning structs of size 4 or 8 bytes (regardless of type) dictates that
		 * values are passed in EDX:EAX register pairs, https://msdn.microsoft.com/en-us/library/984x0h58.aspx.
		 * This is different compared to, for example, float or double return types (not in a struct), which will be returned
		 * in ST(0), https://msdn.microsoft.com/en-us/library/ha59cbfz.aspx.
		 *
		 * Apple's OSX x86 ABI for returning structs of size 4 or 8 bytes uses a slightly different approach.
		 * If a struct includes only one scalar value, it will be handled with the same rules as scalar values.
		 * This means that structs with one float or double will be returned in ST(0). For more details, see
		 * https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/LowLevelABI/130-IA-32_Function_Calling_Conventions/IA32.html.
		 */
#if !defined(TARGET_WIN32)
		/* Special case structs with only a float member */
		if (info->num_fields == 1) {
			int ftype = mini_get_underlying_type (info->fields [0].field->type)->type;
			if ((info->native_size == 8) && (ftype == MONO_TYPE_R8)) {
				ainfo->storage = ArgValuetypeInReg;
				ainfo->pair_storage [0] = ArgOnDoubleFpStack;
				return;
			}
			if ((info->native_size == 4) && (ftype == MONO_TYPE_R4)) {
				ainfo->storage = ArgValuetypeInReg;
				ainfo->pair_storage [0] = ArgOnFloatFpStack;
				return;
			}
		}
#endif
		if ((info->native_size == 1) || (info->native_size == 2) || (info->native_size == 4) || (info->native_size == 8)) {
			ainfo->storage = ArgValuetypeInReg;
			ainfo->pair_storage [0] = ArgInIReg;
			ainfo->pair_regs [0] = return_regs [0];
			if (info->native_size > 4) {
				ainfo->pair_storage [1] = ArgInIReg;
				ainfo->pair_regs [1] = return_regs [1];
			}
			return;
		}
	}
#endif
	if (param_regs && param_regs [*gr] != X86_NREG && !is_return) {
		g_assert (size <= 4);
		ainfo->storage = ArgValuetypeInReg;
		ainfo->reg = param_regs [*gr];
		(*gr)++;
		return;
	}
	ainfo->offset = *stack_size;
	ainfo->storage = ArgOnStack;
	*stack_size += ALIGN_TO (size, sizeof (gpointer));
	ainfo->nslots = ALIGN_TO (size, sizeof (gpointer)) / sizeof (gpointer);
}
/*
 * get_call_info:
 *
 * Obtain information about a call according to the calling convention.
 * For x86 ELF, see the "System V Application Binary Interface Intel386
 * Architecture Processor Supplement, Fourth Edition" document for more
 * information.
 * For x86 win32, see https://msdn.microsoft.com/en-us/library/984x0h58.aspx.
 */
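/*
 * Rough picture of the argument area this code computes for a plain managed
 * call (a sketch only; offsets are relative to ESP at the call site, and the
 * exact layout depends on the signature and on vtype_retaddr/vret_arg_index):
 *
 *   [argN]            <- highest offset
 *   ...
 *   [arg1]
 *   [vret address]    <- only when a vtype result is returned through memory
 *   [arg0 / this]     <- offset 0
 */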
static CallInfo*
get_call_info_internal (CallInfo *cinfo, MonoMethodSignature *sig)
{
	guint32 i, gr, fr, pstart;
	const guint32 *param_regs;
	MonoType *ret_type;
	int n = sig->hasthis + sig->param_count;
	guint32 stack_size = 0;
	gboolean is_pinvoke = sig->pinvoke;

	param_regs = callconv_param_regs (sig);

	ret_type = mini_get_underlying_type (sig->ret);
	switch (ret_type->type) {
358 case MONO_TYPE_FNPTR
:
359 case MONO_TYPE_CLASS
:
360 case MONO_TYPE_OBJECT
:
361 case MONO_TYPE_SZARRAY
:
362 case MONO_TYPE_ARRAY
:
363 case MONO_TYPE_STRING
:
364 cinfo
->ret
.storage
= ArgInIReg
;
365 cinfo
->ret
.reg
= X86_EAX
;
369 cinfo
->ret
.storage
= ArgInIReg
;
370 cinfo
->ret
.reg
= X86_EAX
;
371 cinfo
->ret
.is_pair
= TRUE
;
374 cinfo
->ret
.storage
= ArgOnFloatFpStack
;
377 cinfo
->ret
.storage
= ArgOnDoubleFpStack
;
379 case MONO_TYPE_GENERICINST
:
380 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
381 cinfo
->ret
.storage
= ArgInIReg
;
382 cinfo
->ret
.reg
= X86_EAX
;
385 if (mini_is_gsharedvt_type (ret_type
)) {
386 cinfo
->ret
.storage
= ArgOnStack
;
387 cinfo
->vtype_retaddr
= TRUE
;
391 case MONO_TYPE_VALUETYPE
:
392 case MONO_TYPE_TYPEDBYREF
: {
393 guint32 tmp_gr
= 0, tmp_fr
= 0, tmp_stacksize
= 0;
395 add_valuetype (sig
, &cinfo
->ret
, ret_type
, TRUE
, &tmp_gr
, NULL
, &tmp_fr
, &tmp_stacksize
);
396 if (cinfo
->ret
.storage
== ArgOnStack
) {
397 cinfo
->vtype_retaddr
= TRUE
;
398 /* The caller passes the address where the value is stored */
404 g_assert (mini_is_gsharedvt_type (ret_type
));
405 cinfo
->ret
.storage
= ArgOnStack
;
406 cinfo
->vtype_retaddr
= TRUE
;
409 cinfo
->ret
.storage
= ArgNone
;
412 g_error ("Can't handle as return value 0x%x", ret_type
->type
);
	/*
	 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
	 * the first argument, allowing 'this' to be always passed in the first arg reg.
	 * Also do this if the first argument is a reference type, since virtual calls
	 * are sometimes made using calli without sig->hasthis set, like in the delegate
	 * invoke wrappers.
	 */
424 if (cinfo
->vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
426 add_general (&gr
, param_regs
, &stack_size
, cinfo
->args
+ 0);
428 add_general (&gr
, param_regs
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0]);
431 cinfo
->vret_arg_offset
= stack_size
;
432 add_general (&gr
, NULL
, &stack_size
, &cinfo
->ret
);
433 cinfo
->vret_arg_index
= 1;
437 add_general (&gr
, param_regs
, &stack_size
, cinfo
->args
+ 0);
439 if (cinfo
->vtype_retaddr
)
440 add_general (&gr
, NULL
, &stack_size
, &cinfo
->ret
);
443 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== 0)) {
444 fr
= FLOAT_PARAM_REGS
;
446 /* Emit the signature cookie just before the implicit arguments */
447 add_general (&gr
, param_regs
, &stack_size
, &cinfo
->sig_cookie
);
450 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
451 ArgInfo
*ainfo
= &cinfo
->args
[sig
->hasthis
+ i
];
454 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
			/* We always pass the sig cookie on the stack for simplicity */
			/*
			 * Prevent implicit arguments + the sig cookie from being passed
			 * in registers.
			 */
460 fr
= FLOAT_PARAM_REGS
;
462 /* Emit the signature cookie just before the implicit arguments */
463 add_general (&gr
, param_regs
, &stack_size
, &cinfo
->sig_cookie
);
466 if (sig
->params
[i
]->byref
) {
467 add_general (&gr
, param_regs
, &stack_size
, ainfo
);
470 ptype
= mini_get_underlying_type (sig
->params
[i
]);
471 switch (ptype
->type
) {
474 add_general (&gr
, param_regs
, &stack_size
, ainfo
);
478 add_general (&gr
, param_regs
, &stack_size
, ainfo
);
482 add_general (&gr
, param_regs
, &stack_size
, ainfo
);
487 case MONO_TYPE_FNPTR
:
488 case MONO_TYPE_CLASS
:
489 case MONO_TYPE_OBJECT
:
490 case MONO_TYPE_STRING
:
491 case MONO_TYPE_SZARRAY
:
492 case MONO_TYPE_ARRAY
:
493 add_general (&gr
, param_regs
, &stack_size
, ainfo
);
495 case MONO_TYPE_GENERICINST
:
496 if (!mono_type_generic_inst_is_valuetype (ptype
)) {
497 add_general (&gr
, param_regs
, &stack_size
, ainfo
);
500 if (mini_is_gsharedvt_type (ptype
)) {
501 /* gsharedvt arguments are passed by ref */
502 add_general (&gr
, param_regs
, &stack_size
, ainfo
);
503 g_assert (ainfo
->storage
== ArgOnStack
);
504 ainfo
->storage
= ArgGSharedVt
;
508 case MONO_TYPE_VALUETYPE
:
509 case MONO_TYPE_TYPEDBYREF
:
510 add_valuetype (sig
, ainfo
, ptype
, FALSE
, &gr
, param_regs
, &fr
, &stack_size
);
514 add_general_pair (&gr
, param_regs
, &stack_size
, ainfo
);
517 add_float (&fr
, &stack_size
, ainfo
, FALSE
);
520 add_float (&fr
, &stack_size
, ainfo
, TRUE
);
524 /* gsharedvt arguments are passed by ref */
525 g_assert (mini_is_gsharedvt_type (ptype
));
526 add_general (&gr
, param_regs
, &stack_size
, ainfo
);
527 g_assert (ainfo
->storage
== ArgOnStack
);
528 ainfo
->storage
= ArgGSharedVt
;
531 g_error ("unexpected type 0x%x", ptype
->type
);
532 g_assert_not_reached ();
536 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
> 0) && (sig
->sentinelpos
== sig
->param_count
)) {
537 fr
= FLOAT_PARAM_REGS
;
539 /* Emit the signature cookie just before the implicit arguments */
540 add_general (&gr
, param_regs
, &stack_size
, &cinfo
->sig_cookie
);
543 if (cinfo
->vtype_retaddr
) {
544 /* if the function returns a struct on stack, the called method already does a ret $0x4 */
545 cinfo
->callee_stack_pop
= 4;
546 } else if (CALLCONV_IS_STDCALL (sig
)) {
547 /* Have to compensate for the stack space popped by the native callee */
548 cinfo
->callee_stack_pop
= stack_size
;
551 if (mono_do_x86_stack_align
&& (stack_size
% MONO_ARCH_FRAME_ALIGNMENT
) != 0) {
552 cinfo
->need_stack_align
= TRUE
;
553 cinfo
->stack_align_amount
= MONO_ARCH_FRAME_ALIGNMENT
- (stack_size
% MONO_ARCH_FRAME_ALIGNMENT
);
554 stack_size
+= cinfo
->stack_align_amount
;
557 cinfo
->stack_usage
= stack_size
;
558 cinfo
->reg_usage
= gr
;
559 cinfo
->freg_usage
= fr
;
564 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
566 int n
= sig
->hasthis
+ sig
->param_count
;
570 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
572 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
574 return get_call_info_internal (cinfo
, sig
);
/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the argument area on the stack.
 * This should be signal safe, since it is called from
 * mono_arch_unwind_frame ().
 * FIXME: The metadata calls might not be signal safe.
 */
592 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
594 int len
, k
, args_size
= 0;
600 /* Avoid g_malloc as it is not signal safe */
601 len
= sizeof (CallInfo
) + (sizeof (ArgInfo
) * (csig
->param_count
+ 1));
602 cinfo
= (CallInfo
*)g_newa (guint8
*, len
);
603 memset (cinfo
, 0, len
);
605 cinfo
= get_call_info_internal (cinfo
, csig
);
607 arg_info
[0].offset
= offset
;
609 if (cinfo
->vtype_retaddr
&& cinfo
->vret_arg_index
== 0) {
610 args_size
+= sizeof (gpointer
);
615 args_size
+= sizeof (gpointer
);
619 if (cinfo
->vtype_retaddr
&& cinfo
->vret_arg_index
== 1 && csig
->hasthis
) {
620 /* Emitted after this */
621 args_size
+= sizeof (gpointer
);
625 arg_info
[0].size
= args_size
;
627 for (k
= 0; k
< param_count
; k
++) {
628 size
= mini_type_stack_size_full (csig
->params
[k
], &align
, csig
->pinvoke
);
630 /* ignore alignment for now */
633 args_size
+= pad
= (align
- (args_size
& (align
- 1))) & (align
- 1);
634 arg_info
[k
].pad
= pad
;
636 arg_info
[k
+ 1].pad
= 0;
637 arg_info
[k
+ 1].size
= size
;
639 arg_info
[k
+ 1].offset
= offset
;
642 if (k
== 0 && cinfo
->vtype_retaddr
&& cinfo
->vret_arg_index
== 1 && !csig
->hasthis
) {
643 /* Emitted after the first arg */
644 args_size
+= sizeof (gpointer
);
649 if (mono_do_x86_stack_align
&& !CALLCONV_IS_STDCALL (csig
))
650 align
= MONO_ARCH_FRAME_ALIGNMENT
;
653 args_size
+= pad
= (align
- (args_size
& (align
- 1))) & (align
- 1);
654 arg_info
[k
].pad
= pad
;
660 mono_arch_tail_call_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
)
662 MonoType
*callee_ret
;
666 if (cfg
->compile_aot
&& !cfg
->full_aot
)
667 /* OP_TAILCALL doesn't work with AOT */
670 c1
= get_call_info (NULL
, caller_sig
);
671 c2
= get_call_info (NULL
, callee_sig
);
673 * Tail calls with more callee stack usage than the caller cannot be supported, since
674 * the extra stack space would be left on the stack after the tail call.
676 res
= c1
->stack_usage
>= c2
->stack_usage
;
677 callee_ret
= mini_get_underlying_type (callee_sig
->ret
);
678 if (callee_ret
&& MONO_TYPE_ISSTRUCT (callee_ret
) && c2
->ret
.storage
!= ArgValuetypeInReg
)
679 /* An address on the callee's stack is passed as the first argument */
/*
 * Initialize the cpu to execute managed code.
 */
692 mono_arch_cpu_init (void)
694 /* spec compliance requires running with double precision */
698 __asm__
__volatile__ ("fnstcw %0\n": "=m" (fpcw
));
699 fpcw
&= ~X86_FPCW_PRECC_MASK
;
700 fpcw
|= X86_FPCW_PREC_DOUBLE
;
701 __asm__
__volatile__ ("fldcw %0\n": : "m" (fpcw
));
702 __asm__
__volatile__ ("fnstcw %0\n": "=m" (fpcw
));
704 _control87 (_PC_53
, MCW_PC
);
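/*
 * The fnstcw/fldcw sequence above rewrites the x87 control word: the
 * precision-control bits are cleared and set to X86_FPCW_PREC_DOUBLE, so the
 * FPU rounds results to 53-bit (double) precision rather than 80-bit extended
 * precision. _control87 (_PC_53, MCW_PC) is the equivalent on MSVC builds,
 * matching the "spec compliance requires double precision" comment above.
 */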
/*
 * Initialize architecture specific code.
 */
712 mono_arch_init (void)
714 mono_os_mutex_init_recursive (&mini_arch_mutex
);
717 bp_trampoline
= mini_get_breakpoint_trampoline ();
719 mono_aot_register_jit_icall ("mono_x86_throw_exception", mono_x86_throw_exception
);
720 mono_aot_register_jit_icall ("mono_x86_throw_corlib_exception", mono_x86_throw_corlib_exception
);
721 #if defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
722 mono_aot_register_jit_icall ("mono_x86_start_gsharedvt_call", mono_x86_start_gsharedvt_call
);
/*
 * Cleanup architecture specific code.
 */
730 mono_arch_cleanup (void)
732 mono_os_mutex_destroy (&mini_arch_mutex
);
/*
 * This function returns the optimizations supported on this cpu.
 */
739 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
745 if (mono_hwcap_x86_has_cmov
) {
746 opts
|= MONO_OPT_CMOV
;
748 if (mono_hwcap_x86_has_fcmov
)
749 opts
|= MONO_OPT_FCMOV
;
751 *exclude_mask
|= MONO_OPT_FCMOV
;
753 *exclude_mask
|= MONO_OPT_CMOV
;
756 if (mono_hwcap_x86_has_sse2
)
757 opts
|= MONO_OPT_SSE2
;
759 *exclude_mask
|= MONO_OPT_SSE2
;
761 #ifdef MONO_ARCH_SIMD_INTRINSICS
762 /*SIMD intrinsics require at least SSE2.*/
763 if (!mono_hwcap_x86_has_sse2
)
764 *exclude_mask
|= MONO_OPT_SIMD
;
/*
 * This function tests for all SSE functions supported.
 *
 * Returns a bitmask corresponding to all supported versions.
 */
777 mono_arch_cpu_enumerate_simd_versions (void)
779 guint32 sse_opts
= 0;
781 if (mono_hwcap_x86_has_sse1
)
782 sse_opts
|= SIMD_VERSION_SSE1
;
784 if (mono_hwcap_x86_has_sse2
)
785 sse_opts
|= SIMD_VERSION_SSE2
;
787 if (mono_hwcap_x86_has_sse3
)
788 sse_opts
|= SIMD_VERSION_SSE3
;
790 if (mono_hwcap_x86_has_ssse3
)
791 sse_opts
|= SIMD_VERSION_SSSE3
;
793 if (mono_hwcap_x86_has_sse41
)
794 sse_opts
|= SIMD_VERSION_SSE41
;
796 if (mono_hwcap_x86_has_sse42
)
797 sse_opts
|= SIMD_VERSION_SSE42
;
799 if (mono_hwcap_x86_has_sse4a
)
800 sse_opts
|= SIMD_VERSION_SSE4a
;
/*
 * Determine whether the trap whose info is in SIGINFO is caused by
 * integer overflow.
 */
810 mono_arch_is_int_overflow (void *sigctx
, void *info
)
815 mono_sigctx_to_monoctx (sigctx
, &ctx
);
817 ip
= (guint8
*)ctx
.eip
;
819 if ((ip
[0] == 0xf7) && (x86_modrm_mod (ip
[1]) == 0x3) && (x86_modrm_reg (ip
[1]) == 0x7)) {
823 switch (x86_modrm_rm (ip
[1])) {
843 g_assert_not_reached ();
855 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
860 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
861 MonoInst
*ins
= cfg
->varinfo
[i
];
862 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
865 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
868 if ((ins
->flags
& (MONO_INST_IS_DEAD
|MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) ||
869 (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
		/* we don't allocate I1 to registers because there is no simple way to sign extend
		 * 8-bit quantities in caller saved registers on x86 */
874 if (mono_is_regsize_var (ins
->inst_vtype
) && (ins
->inst_vtype
->type
!= MONO_TYPE_I1
)) {
875 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
876 g_assert (i
== vmv
->idx
);
877 vars
= g_list_prepend (vars
, vmv
);
881 vars
= mono_varlist_sort (cfg
, vars
, 0);
887 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
891 /* we can use 3 registers for global allocation */
892 regs
= g_list_prepend (regs
, (gpointer
)X86_EBX
);
893 regs
= g_list_prepend (regs
, (gpointer
)X86_ESI
);
894 regs
= g_list_prepend (regs
, (gpointer
)X86_EDI
);
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
907 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
909 MonoInst
*ins
= cfg
->varinfo
[vmv
->idx
];
911 if (cfg
->method
->save_lmf
)
912 /* The register is already saved */
913 return (ins
->opcode
== OP_ARG
) ? 1 : 0;
915 /* push+pop+possible load if it is an argument */
916 return (ins
->opcode
== OP_ARG
) ? 3 : 2;
920 set_needs_stack_frame (MonoCompile
*cfg
, gboolean flag
)
922 static int inited
= FALSE
;
923 static int count
= 0;
925 if (cfg
->arch
.need_stack_frame_inited
) {
926 g_assert (cfg
->arch
.need_stack_frame
== flag
);
930 cfg
->arch
.need_stack_frame
= flag
;
931 cfg
->arch
.need_stack_frame_inited
= TRUE
;
937 mono_counters_register ("Could eliminate stack frame", MONO_COUNTER_INT
|MONO_COUNTER_JIT
, &count
);
942 //g_print ("will eliminate %s.%s.%s\n", cfg->method->klass->name_space, cfg->method->klass->name, cfg->method->name);
946 needs_stack_frame (MonoCompile
*cfg
)
948 MonoMethodSignature
*sig
;
949 MonoMethodHeader
*header
;
950 gboolean result
= FALSE
;
952 #if defined(__APPLE__)
953 /*OSX requires stack frame code to have the correct alignment. */
957 if (cfg
->arch
.need_stack_frame_inited
)
958 return cfg
->arch
.need_stack_frame
;
960 header
= cfg
->header
;
961 sig
= mono_method_signature (cfg
->method
);
963 if (cfg
->disable_omit_fp
)
965 else if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
967 else if (cfg
->method
->save_lmf
)
969 else if (cfg
->stack_offset
)
971 else if (cfg
->param_area
)
973 else if (cfg
->flags
& (MONO_CFG_HAS_CALLS
| MONO_CFG_HAS_ALLOCA
| MONO_CFG_HAS_TAIL
))
975 else if (header
->num_clauses
)
977 else if (sig
->param_count
+ sig
->hasthis
)
979 else if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
981 else if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)) ||
982 (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
))
985 set_needs_stack_frame (cfg
, result
);
987 return cfg
->arch
.need_stack_frame
;
/*
 * Set var information according to the calling convention. X86 version.
 * The locals var stuff should most likely be split in another method.
 */
995 mono_arch_allocate_vars (MonoCompile
*cfg
)
997 MonoMethodSignature
*sig
;
998 MonoMethodHeader
*header
;
1000 guint32 locals_stack_size
, locals_stack_align
;
1005 header
= cfg
->header
;
1006 sig
= mono_method_signature (cfg
->method
);
1008 if (!cfg
->arch
.cinfo
)
1009 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1010 cinfo
= (CallInfo
*)cfg
->arch
.cinfo
;
1012 cfg
->frame_reg
= X86_EBP
;
1015 if (cfg
->has_atomic_add_i4
|| cfg
->has_atomic_exchange_i4
) {
1016 /* The opcode implementations use callee-saved regs as scratch regs by pushing and pop-ing them, but that is not async safe */
1017 cfg
->used_int_regs
|= (1 << X86_EBX
) | (1 << X86_EDI
) | (1 << X86_ESI
);
1020 /* Reserve space to save LMF and caller saved registers */
1022 if (cfg
->method
->save_lmf
) {
1023 /* The LMF var is allocated normally */
1025 if (cfg
->used_int_regs
& (1 << X86_EBX
)) {
1029 if (cfg
->used_int_regs
& (1 << X86_EDI
)) {
1033 if (cfg
->used_int_regs
& (1 << X86_ESI
)) {
1038 switch (cinfo
->ret
.storage
) {
1039 case ArgValuetypeInReg
:
1040 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1042 cfg
->ret
->opcode
= OP_REGOFFSET
;
1043 cfg
->ret
->inst_basereg
= X86_EBP
;
1044 cfg
->ret
->inst_offset
= - offset
;
1050 /* Allocate locals */
1051 offsets
= mono_allocate_stack_slots (cfg
, TRUE
, &locals_stack_size
, &locals_stack_align
);
1052 if (locals_stack_size
> MONO_ARCH_MAX_FRAME_SIZE
) {
1053 char *mname
= mono_method_full_name (cfg
->method
, TRUE
);
1054 mono_cfg_set_exception_invalid_program (cfg
, g_strdup_printf ("Method %s stack is too big.", mname
));
1058 if (locals_stack_align
) {
1059 int prev_offset
= offset
;
1061 offset
+= (locals_stack_align
- 1);
1062 offset
&= ~(locals_stack_align
- 1);
1064 while (prev_offset
< offset
) {
1066 mini_gc_set_slot_type_from_fp (cfg
, - prev_offset
, SLOT_NOREF
);
1069 cfg
->locals_min_stack_offset
= - (offset
+ locals_stack_size
);
1070 cfg
->locals_max_stack_offset
= - offset
;
	/*
	 * EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
	 * have locals larger than 8 bytes we need to make sure that
	 * they have the appropriate offset.
	 */
1076 if (MONO_ARCH_FRAME_ALIGNMENT
> 8 && locals_stack_align
> 8)
1077 offset
+= MONO_ARCH_FRAME_ALIGNMENT
- sizeof (gpointer
) * 2;
1078 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1079 if (offsets
[i
] != -1) {
1080 MonoInst
*inst
= cfg
->varinfo
[i
];
1081 inst
->opcode
= OP_REGOFFSET
;
1082 inst
->inst_basereg
= X86_EBP
;
1083 inst
->inst_offset
= - (offset
+ offsets
[i
]);
1084 //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
1087 offset
+= locals_stack_size
;
	/*
	 * Allocate arguments+return value
	 */
1094 switch (cinfo
->ret
.storage
) {
1096 if (cfg
->vret_addr
) {
			/*
			 * In the new IR, the cfg->vret_addr variable represents the
			 * vtype return value.
			 */
1101 cfg
->vret_addr
->opcode
= OP_REGOFFSET
;
1102 cfg
->vret_addr
->inst_basereg
= cfg
->frame_reg
;
1103 cfg
->vret_addr
->inst_offset
= cinfo
->ret
.offset
+ ARGS_OFFSET
;
1104 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1105 printf ("vret_addr =");
1106 mono_print_ins (cfg
->vret_addr
);
1109 cfg
->ret
->opcode
= OP_REGOFFSET
;
1110 cfg
->ret
->inst_basereg
= X86_EBP
;
1111 cfg
->ret
->inst_offset
= cinfo
->ret
.offset
+ ARGS_OFFSET
;
1114 case ArgValuetypeInReg
:
1117 cfg
->ret
->opcode
= OP_REGVAR
;
1118 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
1119 cfg
->ret
->dreg
= cinfo
->ret
.reg
;
1122 case ArgOnFloatFpStack
:
1123 case ArgOnDoubleFpStack
:
1126 g_assert_not_reached ();
1129 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1130 g_assert (cinfo
->sig_cookie
.storage
== ArgOnStack
);
1131 cfg
->sig_cookie
= cinfo
->sig_cookie
.offset
+ ARGS_OFFSET
;
1134 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1135 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1136 inst
= cfg
->args
[i
];
1137 if (inst
->opcode
!= OP_REGVAR
) {
1138 inst
->opcode
= OP_REGOFFSET
;
1139 inst
->inst_basereg
= X86_EBP
;
1140 inst
->inst_offset
= ainfo
->offset
+ ARGS_OFFSET
;
1144 cfg
->stack_offset
= offset
;
1148 mono_arch_create_vars (MonoCompile
*cfg
)
1151 MonoMethodSignature
*sig
;
1154 sig
= mono_method_signature (cfg
->method
);
1156 if (!cfg
->arch
.cinfo
)
1157 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1158 cinfo
= (CallInfo
*)cfg
->arch
.cinfo
;
1160 sig_ret
= mini_get_underlying_type (sig
->ret
);
1162 if (cinfo
->ret
.storage
== ArgValuetypeInReg
)
1163 cfg
->ret_var_is_local
= TRUE
;
1164 if ((cinfo
->ret
.storage
!= ArgValuetypeInReg
) && (MONO_TYPE_ISSTRUCT (sig_ret
) || mini_is_gsharedvt_variable_type (sig_ret
))) {
1165 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1168 if (cfg
->gen_sdb_seq_points
) {
1171 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1172 ins
->flags
|= MONO_INST_VOLATILE
;
1173 cfg
->arch
.ss_tramp_var
= ins
;
1175 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1176 ins
->flags
|= MONO_INST_VOLATILE
;
1177 cfg
->arch
.bp_tramp_var
= ins
;
1180 if (cfg
->method
->save_lmf
) {
1181 cfg
->create_lmf_var
= TRUE
;
1184 cfg
->lmf_ir_mono_lmf
= TRUE
;
1188 cfg
->arch_eh_jit_info
= 1;
/*
 * It is expensive to adjust esp for each individual fp argument pushed on the stack
 * so we try to do it just once when we have multiple fp arguments in a row.
 * We don't use this mechanism generally because for int arguments the generated code
 * is slightly bigger and new generation cpus optimize away the dependency chains
 * created by push instructions on the esp value.
 * fp_arg_setup is the first argument in the execution sequence where the esp register
 * is modified.
 */
1200 static G_GNUC_UNUSED
int
1201 collect_fp_stack_space (MonoMethodSignature
*sig
, int start_arg
, int *fp_arg_setup
)
1206 for (; start_arg
< sig
->param_count
; ++start_arg
) {
1207 t
= mini_get_underlying_type (sig
->params
[start_arg
]);
1208 if (!t
->byref
&& t
->type
== MONO_TYPE_R8
) {
1209 fp_space
+= sizeof (double);
1210 *fp_arg_setup
= start_arg
;
1219 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1221 MonoMethodSignature
*tmp_sig
;
	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
1230 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
1231 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
1232 tmp_sig
->sentinelpos
= 0;
1233 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
1235 if (cfg
->compile_aot
) {
1236 sig_reg
= mono_alloc_ireg (cfg
);
1237 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
1238 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, X86_ESP
, cinfo
->sig_cookie
.offset
, sig_reg
);
1240 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, X86_ESP
, cinfo
->sig_cookie
.offset
, tmp_sig
);
1246 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
1251 LLVMCallInfo
*linfo
;
1252 MonoType
*t
, *sig_ret
;
1254 n
= sig
->param_count
+ sig
->hasthis
;
1256 cinfo
= get_call_info (cfg
->mempool
, sig
);
1259 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
	/*
	 * LLVM always uses the native ABI while we use our own ABI, the
	 * only difference is the handling of vtypes:
	 * - we only pass/receive them in registers in some cases, and only
	 *   in 1 or 2 integer registers.
	 */
1267 if (cinfo
->ret
.storage
== ArgValuetypeInReg
) {
1269 cfg
->exception_message
= g_strdup ("pinvoke + vtypes");
1270 cfg
->disable_llvm
= TRUE
;
1274 cfg
->exception_message
= g_strdup ("vtype ret in call");
1275 cfg
->disable_llvm
= TRUE
;
1277 linfo->ret.storage = LLVMArgVtypeInReg;
1278 for (j = 0; j < 2; ++j)
1279 linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, cinfo->ret.pair_storage [j]);
1283 if (mini_type_is_vtype (sig_ret
) && cinfo
->ret
.storage
== ArgInIReg
) {
1284 /* Vtype returned using a hidden argument */
1285 linfo
->ret
.storage
= LLVMArgVtypeRetAddr
;
1286 linfo
->vret_arg_index
= cinfo
->vret_arg_index
;
1289 if (mini_type_is_vtype (sig_ret
) && cinfo
->ret
.storage
!= ArgInIReg
) {
1291 cfg
->exception_message
= g_strdup ("vtype ret in call");
1292 cfg
->disable_llvm
= TRUE
;
1295 for (i
= 0; i
< n
; ++i
) {
1296 ainfo
= cinfo
->args
+ i
;
1298 if (i
>= sig
->hasthis
)
1299 t
= sig
->params
[i
- sig
->hasthis
];
1301 t
= &mono_defaults
.int_class
->byval_arg
;
1303 linfo
->args
[i
].storage
= LLVMArgNone
;
1305 switch (ainfo
->storage
) {
1307 linfo
->args
[i
].storage
= LLVMArgNormal
;
1309 case ArgInDoubleSSEReg
:
1310 case ArgInFloatSSEReg
:
1311 linfo
->args
[i
].storage
= LLVMArgNormal
;
1314 if (mini_type_is_vtype (t
)) {
1315 if (mono_class_value_size (mono_class_from_mono_type (t
), NULL
) == 0)
1316 /* LLVM seems to allocate argument space for empty structures too */
1317 linfo
->args
[i
].storage
= LLVMArgNone
;
1319 linfo
->args
[i
].storage
= LLVMArgVtypeByVal
;
1321 linfo
->args
[i
].storage
= LLVMArgNormal
;
1324 case ArgValuetypeInReg
:
1326 cfg
->exception_message
= g_strdup ("pinvoke + vtypes");
1327 cfg
->disable_llvm
= TRUE
;
1331 cfg
->exception_message
= g_strdup ("vtype arg");
1332 cfg
->disable_llvm
= TRUE
;
1334 linfo->args [i].storage = LLVMArgVtypeInReg;
1335 for (j = 0; j < 2; ++j)
1336 linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
1340 linfo
->args
[i
].storage
= LLVMArgGSharedVt
;
1343 cfg
->exception_message
= g_strdup ("ainfo->storage");
1344 cfg
->disable_llvm
= TRUE
;
1354 emit_gc_param_slot_def (MonoCompile
*cfg
, int sp_offset
, MonoType
*t
)
1356 if (cfg
->compute_gc_maps
) {
1359 /* Needs checking if the feature will be enabled again */
1360 g_assert_not_reached ();
1362 /* On x86, the offsets are from the sp value before the start of the call sequence */
1364 t
= &mono_defaults
.int_class
->byval_arg
;
1365 EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg
, def
, sp_offset
, t
);
1370 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1374 MonoMethodSignature
*sig
;
1377 int sentinelpos
= 0, sp_offset
= 0;
1379 sig
= call
->signature
;
1380 n
= sig
->param_count
+ sig
->hasthis
;
1381 sig_ret
= mini_get_underlying_type (sig
->ret
);
1383 cinfo
= get_call_info (cfg
->mempool
, sig
);
1384 call
->call_info
= cinfo
;
1386 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1387 sentinelpos
= sig
->sentinelpos
+ (sig
->hasthis
? 1 : 0);
1389 if (sig_ret
&& MONO_TYPE_ISSTRUCT (sig_ret
)) {
1390 if (cinfo
->ret
.storage
== ArgValuetypeInReg
&& cinfo
->ret
.pair_storage
[0] != ArgNone
) {
			/*
			 * Tell the JIT to use a more efficient calling convention: call using
			 * OP_CALL, compute the result location after the call, and save the
			 * result there.
			 */
1396 call
->vret_in_reg
= TRUE
;
1397 #if defined(__APPLE__)
1398 if (cinfo
->ret
.pair_storage
[0] == ArgOnDoubleFpStack
|| cinfo
->ret
.pair_storage
[0] == ArgOnFloatFpStack
)
1399 call
->vret_in_reg_fp
= TRUE
;
1402 NULLIFY_INS (call
->vret_var
);
1406 // FIXME: Emit EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF everywhere
1408 /* Handle the case where there are no implicit arguments */
1409 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sentinelpos
)) {
1410 emit_sig_cookie (cfg
, call
, cinfo
);
1411 sp_offset
= cinfo
->sig_cookie
.offset
;
1412 emit_gc_param_slot_def (cfg
, sp_offset
, NULL
);
1415 /* Arguments are pushed in the reverse order */
1416 for (i
= n
- 1; i
>= 0; i
--) {
1417 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1418 MonoType
*orig_type
, *t
;
1421 if (cinfo
->vtype_retaddr
&& cinfo
->vret_arg_index
== 1 && i
== 0) {
1424 /* Push the vret arg before the first argument */
1425 MONO_INST_NEW (cfg
, vtarg
, OP_STORE_MEMBASE_REG
);
1426 vtarg
->type
= STACK_MP
;
1427 vtarg
->inst_destbasereg
= X86_ESP
;
1428 vtarg
->sreg1
= call
->vret_var
->dreg
;
1429 vtarg
->inst_offset
= cinfo
->ret
.offset
;
1430 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1431 emit_gc_param_slot_def (cfg
, cinfo
->ret
.offset
, NULL
);
1434 if (i
>= sig
->hasthis
)
1435 t
= sig
->params
[i
- sig
->hasthis
];
1437 t
= &mono_defaults
.int_class
->byval_arg
;
1439 t
= mini_get_underlying_type (t
);
1441 MONO_INST_NEW (cfg
, arg
, OP_X86_PUSH
);
1443 in
= call
->args
[i
];
1444 arg
->cil_code
= in
->cil_code
;
1445 arg
->sreg1
= in
->dreg
;
1446 arg
->type
= in
->type
;
1448 g_assert (in
->dreg
!= -1);
1450 if (ainfo
->storage
== ArgGSharedVt
) {
1451 arg
->opcode
= OP_OUTARG_VT
;
1452 arg
->sreg1
= in
->dreg
;
1453 arg
->klass
= in
->klass
;
1454 arg
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1455 memcpy (arg
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1457 MONO_ADD_INS (cfg
->cbb
, arg
);
1458 } else if ((i
>= sig
->hasthis
) && (MONO_TYPE_ISSTRUCT(t
))) {
1462 g_assert (in
->klass
);
1464 if (t
->type
== MONO_TYPE_TYPEDBYREF
) {
1465 size
= sizeof (MonoTypedRef
);
1466 align
= sizeof (gpointer
);
1469 size
= mini_type_stack_size_full (&in
->klass
->byval_arg
, &align
, sig
->pinvoke
);
1472 if (size
> 0 || ainfo
->pass_empty_struct
) {
1473 arg
->opcode
= OP_OUTARG_VT
;
1474 arg
->sreg1
= in
->dreg
;
1475 arg
->klass
= in
->klass
;
1476 arg
->backend
.size
= size
;
1477 arg
->inst_p0
= call
;
1478 arg
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1479 memcpy (arg
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1481 MONO_ADD_INS (cfg
->cbb
, arg
);
1482 if (ainfo
->storage
!= ArgValuetypeInReg
) {
1483 emit_gc_param_slot_def (cfg
, ainfo
->offset
, orig_type
);
1487 switch (ainfo
->storage
) {
1490 if (t
->type
== MONO_TYPE_R4
) {
1491 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, X86_ESP
, ainfo
->offset
, in
->dreg
);
1493 } else if (t
->type
== MONO_TYPE_R8
) {
1494 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, X86_ESP
, ainfo
->offset
, in
->dreg
);
1496 } else if (t
->type
== MONO_TYPE_I8
|| t
->type
== MONO_TYPE_U8
) {
1497 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, X86_ESP
, ainfo
->offset
+ 4, MONO_LVREG_MS (in
->dreg
));
1498 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, X86_ESP
, ainfo
->offset
, MONO_LVREG_LS (in
->dreg
));
1501 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, X86_ESP
, ainfo
->offset
, in
->dreg
);
1505 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, X86_ESP
, ainfo
->offset
, in
->dreg
);
1510 arg
->opcode
= OP_MOVE
;
1511 arg
->dreg
= ainfo
->reg
;
1512 MONO_ADD_INS (cfg
->cbb
, arg
);
1516 g_assert_not_reached ();
1519 if (cfg
->compute_gc_maps
) {
1521 /* FIXME: The == STACK_OBJ check might be fragile ? */
1522 if (sig
->hasthis
&& i
== 0 && call
->args
[i
]->type
== STACK_OBJ
) {
1524 if (call
->need_unbox_trampoline
)
1525 /* The unbox trampoline transforms this into a managed pointer */
1526 emit_gc_param_slot_def (cfg
, ainfo
->offset
, &mono_defaults
.int_class
->this_arg
);
1528 emit_gc_param_slot_def (cfg
, ainfo
->offset
, &mono_defaults
.object_class
->byval_arg
);
1530 emit_gc_param_slot_def (cfg
, ainfo
->offset
, orig_type
);
1534 for (j
= 0; j
< argsize
; j
+= 4)
1535 emit_gc_param_slot_def (cfg
, ainfo
->offset
+ j
, NULL
);
1540 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sentinelpos
)) {
1541 /* Emit the signature cookie just before the implicit arguments */
1542 emit_sig_cookie (cfg
, call
, cinfo
);
1543 emit_gc_param_slot_def (cfg
, cinfo
->sig_cookie
.offset
, NULL
);
1547 if (sig_ret
&& (MONO_TYPE_ISSTRUCT (sig_ret
) || cinfo
->vtype_retaddr
)) {
1550 if (cinfo
->ret
.storage
== ArgValuetypeInReg
) {
1553 else if (cinfo
->ret
.storage
== ArgInIReg
) {
1555 /* The return address is passed in a register */
1556 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1557 vtarg
->sreg1
= call
->inst
.dreg
;
1558 vtarg
->dreg
= mono_alloc_ireg (cfg
);
1559 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1561 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
1562 } else if (cinfo
->vtype_retaddr
&& cinfo
->vret_arg_index
== 0) {
1563 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, X86_ESP
, cinfo
->ret
.offset
, call
->vret_var
->dreg
);
1564 emit_gc_param_slot_def (cfg
, cinfo
->ret
.offset
, NULL
);
1568 call
->stack_usage
= cinfo
->stack_usage
;
1569 call
->stack_align_amount
= cinfo
->stack_align_amount
;
1573 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1575 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1576 ArgInfo
*ainfo
= ins
->inst_p1
;
1577 int size
= ins
->backend
.size
;
1579 if (ainfo
->storage
== ArgValuetypeInReg
) {
1580 int dreg
= mono_alloc_ireg (cfg
);
1583 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, 0);
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, dreg
, src
->dreg
, 0);
1589 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, 0);
1593 g_assert_not_reached ();
1595 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
, FALSE
);
1598 if (cfg
->gsharedvt
&& mini_is_gsharedvt_klass (ins
->klass
)) {
1600 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, X86_ESP
, ainfo
->offset
, src
->dreg
);
1601 } else if (size
<= 4) {
1602 int dreg
= mono_alloc_ireg (cfg
);
1603 if (ainfo
->pass_empty_struct
) {
1604 //Pass empty struct value as 0 on platforms representing empty structs as 1 byte.
1605 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
1607 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, 0);
1609 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, X86_ESP
, ainfo
->offset
, dreg
);
1610 } else if (size
<= 20) {
1611 mini_emit_memcpy (cfg
, X86_ESP
, ainfo
->offset
, src
->dreg
, 0, size
, 4);
1613 // FIXME: Code growth
1614 mini_emit_memcpy (cfg
, X86_ESP
, ainfo
->offset
, src
->dreg
, 0, size
, 4);
1620 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1622 MonoType
*ret
= mini_get_underlying_type (mono_method_signature (method
)->ret
);
1625 if (ret
->type
== MONO_TYPE_R4
) {
1626 if (COMPILE_LLVM (cfg
))
1627 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1630 } else if (ret
->type
== MONO_TYPE_R8
) {
1631 if (COMPILE_LLVM (cfg
))
1632 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1635 } else if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1636 if (COMPILE_LLVM (cfg
))
1637 MONO_EMIT_NEW_UNALU (cfg
, OP_LMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1639 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, X86_EAX
, MONO_LVREG_LS (val
->dreg
));
1640 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, X86_EDX
, MONO_LVREG_MS (val
->dreg
));
1646 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
/*
 * Allow tracing to work with this interface (with an optional argument)
 */
1653 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
1657 g_assert (MONO_ARCH_FRAME_ALIGNMENT
>= 8);
1658 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- 8);
1660 /* if some args are passed in registers, we need to save them here */
1661 x86_push_reg (code
, X86_EBP
);
1663 if (cfg
->compile_aot
) {
1664 x86_push_imm (code
, cfg
->method
);
1665 x86_mov_reg_imm (code
, X86_EAX
, func
);
1666 x86_call_reg (code
, X86_EAX
);
1668 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_METHODCONST
, cfg
->method
);
1669 x86_push_imm (code
, cfg
->method
);
1670 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
1671 x86_call_code (code
, 0);
1673 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
);
1687 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
1690 int arg_size
= 0, stack_usage
= 0, save_mode
= SAVE_NONE
;
1691 MonoMethod
*method
= cfg
->method
;
1692 MonoType
*ret_type
= mini_get_underlying_type (mono_method_signature (method
)->ret
);
1694 switch (ret_type
->type
) {
1695 case MONO_TYPE_VOID
:
1696 /* special case string .ctor icall */
1697 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
) {
1698 save_mode
= SAVE_EAX
;
1699 stack_usage
= enable_arguments
? 8 : 4;
1701 save_mode
= SAVE_NONE
;
1705 save_mode
= SAVE_EAX_EDX
;
1706 stack_usage
= enable_arguments
? 16 : 8;
1710 save_mode
= SAVE_FP
;
1711 stack_usage
= enable_arguments
? 16 : 8;
1713 case MONO_TYPE_GENERICINST
:
1714 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
1715 save_mode
= SAVE_EAX
;
1716 stack_usage
= enable_arguments
? 8 : 4;
1720 case MONO_TYPE_VALUETYPE
:
1721 // FIXME: Handle SMALL_STRUCT_IN_REG here for proper alignment on darwin-x86
1722 save_mode
= SAVE_STRUCT
;
1723 stack_usage
= enable_arguments
? 4 : 0;
1726 save_mode
= SAVE_EAX
;
1727 stack_usage
= enable_arguments
? 8 : 4;
1731 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- stack_usage
- 4);
1733 switch (save_mode
) {
1735 x86_push_reg (code
, X86_EDX
);
1736 x86_push_reg (code
, X86_EAX
);
1737 if (enable_arguments
) {
1738 x86_push_reg (code
, X86_EDX
);
1739 x86_push_reg (code
, X86_EAX
);
1744 x86_push_reg (code
, X86_EAX
);
1745 if (enable_arguments
) {
1746 x86_push_reg (code
, X86_EAX
);
1751 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
1752 x86_fst_membase (code
, X86_ESP
, 0, TRUE
, TRUE
);
1753 if (enable_arguments
) {
1754 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
1755 x86_fst_membase (code
, X86_ESP
, 0, TRUE
, TRUE
);
1760 if (enable_arguments
) {
1761 x86_push_membase (code
, X86_EBP
, 8);
1770 if (cfg
->compile_aot
) {
1771 x86_push_imm (code
, method
);
1772 x86_mov_reg_imm (code
, X86_EAX
, func
);
1773 x86_call_reg (code
, X86_EAX
);
1775 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_METHODCONST
, method
);
1776 x86_push_imm (code
, method
);
1777 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
1778 x86_call_code (code
, 0);
1781 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, arg_size
+ 4);
1783 switch (save_mode
) {
1785 x86_pop_reg (code
, X86_EAX
);
1786 x86_pop_reg (code
, X86_EDX
);
1789 x86_pop_reg (code
, X86_EAX
);
1792 x86_fld_membase (code
, X86_ESP
, 0, TRUE
);
1793 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 8);
1800 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- stack_usage
);
1805 #define EMIT_COND_BRANCH(ins,cond,sign) \
1806 if (ins->inst_true_bb->native_offset) { \
1807 x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
1809 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1810 if ((cfg->opt & MONO_OPT_BRANCH) && \
1811 x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
1812 x86_branch8 (code, cond, 0, sign); \
1814 x86_branch32 (code, cond, 0, sign); \
/*
 * Emit an exception if the condition fails and,
 * if possible, branch directly to the target.
 */
1821 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
1823 MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
1824 if (tins == NULL) { \
1825 mono_add_patch_info (cfg, code - cfg->native_code, \
1826 MONO_PATCH_INFO_EXC, exc_name); \
1827 x86_branch32 (code, cond, 0, signed); \
1829 EMIT_COND_BRANCH (tins, cond, signed); \
1833 #define EMIT_FPCOMPARE(code) do { \
1834 x86_fcompp (code); \
1835 x86_fnstsw (code); \
1840 emit_call (MonoCompile
*cfg
, guint8
*code
, guint32 patch_type
, gconstpointer data
)
1842 gboolean needs_paddings
= TRUE
;
1844 MonoJumpInfo
*jinfo
= NULL
;
1846 if (cfg
->abs_patches
) {
1847 jinfo
= g_hash_table_lookup (cfg
->abs_patches
, data
);
1848 if (jinfo
&& jinfo
->type
== MONO_PATCH_INFO_JIT_ICALL_ADDR
)
1849 needs_paddings
= FALSE
;
1852 if (cfg
->compile_aot
)
1853 needs_paddings
= FALSE
;
	/* The address must be 4-byte aligned to avoid spanning multiple cache lines.
	   This is required for code patching to be safe on SMP machines. */
1857 pad_size
= (guint32
)(code
+ 1 - cfg
->native_code
) & 0x3;
1858 if (needs_paddings
&& pad_size
)
1859 x86_padding (code
, 4 - pad_size
);
1861 mono_add_patch_info (cfg
, code
- cfg
->native_code
, patch_type
, data
);
1862 x86_call_code (code
, 0);
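/*
 * The padding above keeps the 4-byte displacement of the emitted call
 * instruction from straddling a cache line boundary (code + 1 is where the
 * displacement starts, after the opcode byte). A displacement contained in a
 * single aligned 4-byte slot can later be patched with one atomic write,
 * which is what makes runtime call patching safe on SMP machines.
 */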
1867 #define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM)))
/*
 * mono_peephole_pass_1:
 *
 * Perform peephole opts which should/can be performed before local regalloc
 */
1875 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1879 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1880 MonoInst
*last_ins
= mono_inst_prev (ins
, FILTER_IL_SEQ_POINT
);
1882 switch (ins
->opcode
) {
1885 if ((ins
->sreg1
< MONO_MAX_IREGS
) && (ins
->dreg
>= MONO_MAX_IREGS
)) {
1887 * X86_LEA is like ADD, but doesn't have the
1888 * sreg1==dreg restriction.
1890 ins
->opcode
= OP_X86_LEA_MEMBASE
;
1891 ins
->inst_basereg
= ins
->sreg1
;
1892 } else if ((ins
->inst_imm
== 1) && (ins
->dreg
== ins
->sreg1
))
1893 ins
->opcode
= OP_X86_INC_REG
;
1897 if ((ins
->sreg1
< MONO_MAX_IREGS
) && (ins
->dreg
>= MONO_MAX_IREGS
)) {
1898 ins
->opcode
= OP_X86_LEA_MEMBASE
;
1899 ins
->inst_basereg
= ins
->sreg1
;
1900 ins
->inst_imm
= -ins
->inst_imm
;
1901 } else if ((ins
->inst_imm
== 1) && (ins
->dreg
== ins
->sreg1
))
1902 ins
->opcode
= OP_X86_DEC_REG
;
1904 case OP_COMPARE_IMM
:
1905 case OP_ICOMPARE_IMM
:
			/* OP_COMPARE_IMM (reg, 0)
			 * -->
			 * OP_X86_TEST_NULL (reg)
			 */
1911 ins
->opcode
= OP_X86_TEST_NULL
;
1913 case OP_X86_COMPARE_MEMBASE_IMM
:
			/*
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
			 * -->
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_COMPARE_IMM reg, imm
			 *
			 * Note: if imm = 0 then OP_COMPARE_IMM replaced with OP_X86_TEST_NULL
			 */
1923 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
) &&
1924 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1925 ins
->inst_offset
== last_ins
->inst_offset
) {
1926 ins
->opcode
= OP_COMPARE_IMM
;
1927 ins
->sreg1
= last_ins
->sreg1
;
1929 /* check if we can remove cmp reg,0 with test null */
1931 ins
->opcode
= OP_X86_TEST_NULL
;
1935 case OP_X86_PUSH_MEMBASE
:
1936 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
||
1937 last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
1938 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1939 ins
->inst_offset
== last_ins
->inst_offset
) {
1940 ins
->opcode
= OP_X86_PUSH
;
1941 ins
->sreg1
= last_ins
->sreg1
;
1946 mono_peephole_ins (bb
, ins
);
1951 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1955 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1956 switch (ins
->opcode
) {
1958 /* reg = 0 -> XOR (reg, reg) */
			/* XOR sets cflags on x86, so we can't do it always */
1960 if (ins
->inst_c0
== 0 && (!ins
->next
|| (ins
->next
&& INST_IGNORES_CFLAGS (ins
->next
->opcode
)))) {
1963 ins
->opcode
= OP_IXOR
;
1964 ins
->sreg1
= ins
->dreg
;
1965 ins
->sreg2
= ins
->dreg
;
			/*
			 * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG
			 * since it takes 3 bytes instead of 7.
			 */
1971 for (ins2
= mono_inst_next (ins
, FILTER_IL_SEQ_POINT
); ins2
; ins2
= ins2
->next
) {
1972 if ((ins2
->opcode
== OP_STORE_MEMBASE_IMM
) && (ins2
->inst_imm
== 0)) {
1973 ins2
->opcode
= OP_STORE_MEMBASE_REG
;
1974 ins2
->sreg1
= ins
->dreg
;
1976 else if ((ins2
->opcode
== OP_STOREI4_MEMBASE_IMM
) && (ins2
->inst_imm
== 0)) {
1977 ins2
->opcode
= OP_STOREI4_MEMBASE_REG
;
1978 ins2
->sreg1
= ins
->dreg
;
1980 else if ((ins2
->opcode
== OP_STOREI1_MEMBASE_IMM
) || (ins2
->opcode
== OP_STOREI2_MEMBASE_IMM
)) {
1981 /* Continue iteration */
1990 if ((ins
->inst_imm
== 1) && (ins
->dreg
== ins
->sreg1
))
1991 ins
->opcode
= OP_X86_INC_REG
;
1995 if ((ins
->inst_imm
== 1) && (ins
->dreg
== ins
->sreg1
))
1996 ins
->opcode
= OP_X86_DEC_REG
;
2000 mono_peephole_ins (bb
, ins
);
/*
 * mono_arch_lowering_pass:
 *
 * Converts complex opcodes into simpler ones so that each IR instruction
 * corresponds to one machine instruction.
 */
2011 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2013 MonoInst
*ins
, *next
;
	/*
	 * FIXME: Need to add more instructions, but the current machine
	 * description can't model some parts of the composite instructions like
	 * cdq.
	 */
2020 MONO_BB_FOR_EACH_INS_SAFE (bb
, next
, ins
) {
2021 switch (ins
->opcode
) {
2024 case OP_IDIV_UN_IMM
:
2025 case OP_IREM_UN_IMM
:
			/*
			 * Keep the cases where we could generate optimized code, otherwise convert
			 * to the non-imm variant.
			 */
2030 if ((ins
->opcode
== OP_IREM_IMM
) && mono_is_power_of_two (ins
->inst_imm
) >= 0)
2032 mono_decompose_op_imm (cfg
, bb
, ins
);
2039 bb
->max_vreg
= cfg
->next_vreg
;
2043 branch_cc_table
[] = {
2044 X86_CC_EQ
, X86_CC_GE
, X86_CC_GT
, X86_CC_LE
, X86_CC_LT
,
2045 X86_CC_NE
, X86_CC_GE
, X86_CC_GT
, X86_CC_LE
, X86_CC_LT
,
2046 X86_CC_O
, X86_CC_NO
, X86_CC_C
, X86_CC_NC
2049 /* Maps CMP_... constants to X86_CC_... constants */
2052 X86_CC_EQ
, X86_CC_NE
, X86_CC_LE
, X86_CC_GE
, X86_CC_LT
, X86_CC_GT
,
2053 X86_CC_LE
, X86_CC_GE
, X86_CC_LT
, X86_CC_GT
2057 cc_signed_table
[] = {
2058 TRUE
, TRUE
, TRUE
, TRUE
, TRUE
, TRUE
,
2059 FALSE
, FALSE
, FALSE
, FALSE
2062 static unsigned char*
2063 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int size
, gboolean is_signed
)
2065 #define XMM_TEMP_REG 0
	/* This SSE2 optimization must not be done with OPT_SIMD in place as it clobbers xmm0. */
	/* The xmm pass decomposes OP_FCONV_ ops anyway. */
2068 if (cfg
->opt
& MONO_OPT_SSE2
&& size
< 8 && !(cfg
->opt
& MONO_OPT_SIMD
)) {
2069 /* optimize by assigning a local var for this use so we avoid
2070 * the stack manipulations */
2071 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
2072 x86_fst_membase (code
, X86_ESP
, 0, TRUE
, TRUE
);
2073 x86_movsd_reg_membase (code
, XMM_TEMP_REG
, X86_ESP
, 0);
2074 x86_cvttsd2si (code
, dreg
, XMM_TEMP_REG
);
2075 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 8);
2077 x86_widen_reg (code
, dreg
, dreg
, is_signed
, FALSE
);
2079 x86_widen_reg (code
, dreg
, dreg
, is_signed
, TRUE
);
2082 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 4);
2083 x86_fnstcw_membase(code
, X86_ESP
, 0);
2084 x86_mov_reg_membase (code
, dreg
, X86_ESP
, 0, 2);
2085 x86_alu_reg_imm (code
, X86_OR
, dreg
, 0xc00);
2086 x86_mov_membase_reg (code
, X86_ESP
, 2, dreg
, 2);
2087 x86_fldcw_membase (code
, X86_ESP
, 2);
2089 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
2090 x86_fist_pop_membase (code
, X86_ESP
, 0, TRUE
);
2091 x86_pop_reg (code
, dreg
);
2092 /* FIXME: need the high register
2093 * x86_pop_reg (code, dreg_high);
2096 x86_push_reg (code
, X86_EAX
); // SP = SP - 4
2097 x86_fist_pop_membase (code
, X86_ESP
, 0, FALSE
);
2098 x86_pop_reg (code
, dreg
);
2100 x86_fldcw_membase (code
, X86_ESP
, 0);
2101 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 4);
2104 x86_widen_reg (code
, dreg
, dreg
, is_signed
, FALSE
);
2106 x86_widen_reg (code
, dreg
, dreg
, is_signed
, TRUE
);
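/*
 * The non-SSE2 path above works by temporarily forcing the x87 rounding mode
 * to "round toward zero": the current control word is stored with fnstcw, the
 * two rounding-control bits are set (the 0xc00 mask), the modified word is
 * loaded with fldcw, fistp then performs the truncating conversion, and the
 * original control word is restored afterwards. The final x86_widen_reg ()
 * calls sign- or zero-extend the result for 1/2 byte destinations.
 */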
2110 static unsigned char*
2111 mono_emit_stack_alloc (MonoCompile
*cfg
, guchar
*code
, MonoInst
* tree
)
2113 int sreg
= tree
->sreg1
;
2114 int need_touch
= FALSE
;
2116 #if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
	/*
	 * If requested stack size is larger than one page,
	 * perform stack-touch operation
	 */
	/*
	 * Generate stack probe code.
	 * Under Windows, it is necessary to allocate one page at a time,
	 * "touching" the stack after each successful sub-allocation. This is
	 * because of the way stack growth is implemented - there is a
	 * guard page before the lowest stack page that is currently committed.
	 * The stack normally grows sequentially so the OS traps access to the
	 * guard page and commits more pages when needed.
	 */
2137 x86_test_reg_imm (code
, sreg
, ~0xFFF);
2138 br
[0] = code
; x86_branch8 (code
, X86_CC_Z
, 0, FALSE
);
2140 br
[2] = code
; /* loop */
2141 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 0x1000);
2142 x86_test_membase_reg (code
, X86_ESP
, 0, X86_ESP
);
			/*
			 * By the end of the loop, sreg2 is smaller than 0x1000, so the init routine
			 * that follows only initializes the last part of the area.
			 */
			/* Same as the init code below with size==0x1000 */
2149 if (tree
->flags
& MONO_INST_INIT
) {
2150 x86_push_reg (code
, X86_EAX
);
2151 x86_push_reg (code
, X86_ECX
);
2152 x86_push_reg (code
, X86_EDI
);
2153 x86_mov_reg_imm (code
, X86_ECX
, (0x1000 >> 2));
2154 x86_alu_reg_reg (code
, X86_XOR
, X86_EAX
, X86_EAX
);
2155 if (cfg
->param_area
)
2156 x86_lea_membase (code
, X86_EDI
, X86_ESP
, 12 + ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
2158 x86_lea_membase (code
, X86_EDI
, X86_ESP
, 12);
2160 x86_prefix (code
, X86_REP_PREFIX
);
2162 x86_pop_reg (code
, X86_EDI
);
2163 x86_pop_reg (code
, X86_ECX
);
2164 x86_pop_reg (code
, X86_EAX
);
2167 x86_alu_reg_imm (code
, X86_SUB
, sreg
, 0x1000);
2168 x86_alu_reg_imm (code
, X86_CMP
, sreg
, 0x1000);
2169 br
[3] = code
; x86_branch8 (code
, X86_CC_AE
, 0, FALSE
);
2170 x86_patch (br
[3], br
[2]);
2171 x86_test_reg_reg (code
, sreg
, sreg
);
2172 br
[4] = code
; x86_branch8 (code
, X86_CC_Z
, 0, FALSE
);
2173 x86_alu_reg_reg (code
, X86_SUB
, X86_ESP
, sreg
);
2175 br
[1] = code
; x86_jump8 (code
, 0);
2177 x86_patch (br
[0], code
);
2178 x86_alu_reg_reg (code
, X86_SUB
, X86_ESP
, sreg
);
2179 x86_patch (br
[1], code
);
2180 x86_patch (br
[4], code
);
2183 x86_alu_reg_reg (code
, X86_SUB
, X86_ESP
, tree
->sreg1
);
2185 if (tree
->flags
& MONO_INST_INIT
) {
2187 if (tree
->dreg
!= X86_EAX
&& sreg
!= X86_EAX
) {
2188 x86_push_reg (code
, X86_EAX
);
2191 if (tree
->dreg
!= X86_ECX
&& sreg
!= X86_ECX
) {
2192 x86_push_reg (code
, X86_ECX
);
2195 if (tree
->dreg
!= X86_EDI
&& sreg
!= X86_EDI
) {
2196 x86_push_reg (code
, X86_EDI
);
2200 x86_shift_reg_imm (code
, X86_SHR
, sreg
, 2);
2201 if (sreg
!= X86_ECX
)
2202 x86_mov_reg_reg (code
, X86_ECX
, sreg
, 4);
2203 x86_alu_reg_reg (code
, X86_XOR
, X86_EAX
, X86_EAX
);
2205 if (cfg
->param_area
)
2206 x86_lea_membase (code
, X86_EDI
, X86_ESP
, offset
+ ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
2208 x86_lea_membase (code
, X86_EDI
, X86_ESP
, offset
);
2210 x86_prefix (code
, X86_REP_PREFIX
);
2213 if (tree
->dreg
!= X86_EDI
&& sreg
!= X86_EDI
)
2214 x86_pop_reg (code
, X86_EDI
);
2215 if (tree
->dreg
!= X86_ECX
&& sreg
!= X86_ECX
)
2216 x86_pop_reg (code
, X86_ECX
);
2217 if (tree
->dreg
!= X86_EAX
&& sreg
!= X86_EAX
)
2218 x86_pop_reg (code
, X86_EAX
);
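/*
 * Worked example (a sketch) for the probe loop in mono_emit_stack_alloc above:
 * for a 10 KB (0x2800 byte) localloc the loop runs twice, each iteration
 * lowering ESP by 0x1000 and touching the freshly exposed page (committing it
 * via the guard-page fault) before the rep/stosl clears it; the remaining
 * 0x800 bytes are then allocated by the fall-through "sub esp, sreg" path.
 */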
static guint8*
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
	/* Move return value to the target register */
	switch (ins->opcode) {
	case OP_CALL:
	case OP_CALL_REG:
	case OP_CALL_MEMBASE:
		if (ins->dreg != X86_EAX)
			x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
		break;
	default:
		break;
	}

	return code;
}
#ifdef TARGET_MACH
static int tls_gs_offset;
#endif

gboolean
mono_arch_have_fast_tls (void)
{
#ifdef TARGET_MACH
	static gboolean have_fast_tls = FALSE;
	static gboolean inited = FALSE;
	guint32 *ins;

	if (mini_get_debug_options ()->use_fallback_tls)
		return FALSE;
	if (inited)
		return have_fast_tls;

	ins = (guint32*)pthread_getspecific;
	/*
	 * We're looking for these two instructions:
	 *
	 * mov    0x4(%esp),%eax
	 * mov    %gs:[offset](,%eax,4),%eax
	 */
	have_fast_tls = ins [0] == 0x0424448b && ins [1] == 0x85048b65;
	tls_gs_offset = ins [2];
	inited = TRUE;

	return have_fast_tls;
#elif defined(TARGET_ANDROID)
	return FALSE;
#else
	if (mini_get_debug_options ()->use_fallback_tls)
		return FALSE;
	return TRUE;
#endif
}
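/*
 * The two 32-bit words checked above are the little-endian encodings of the
 * expected prologue of pthread_getspecific on 32-bit OSX:
 *   8b 44 24 04       mov 0x4(%esp),%eax
 *   65 8b 04 85 ...   mov %gs:OFFSET(,%eax,4),%eax
 * ins [2] then holds the %gs displacement of the thread's TLS slot array,
 * which is cached in tls_gs_offset for mono_x86_emit_tls_get/set below.
 */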
guint8*
mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
#if defined(TARGET_MACH)
	x86_prefix (code, X86_GS_PREFIX);
	x86_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 4), 4);
#elif defined(TARGET_WIN32)
	/*
	 * See the Under the Hood article in the May 1996 issue of Microsoft Systems
	 * Journal and/or a disassembly of the TlsGet () function.
	 */
	x86_prefix (code, X86_FS_PREFIX);
	x86_mov_reg_mem (code, dreg, 0x18, 4);
	if (tls_offset < 64) {
		x86_mov_reg_membase (code, dreg, dreg, 3600 + (tls_offset * 4), 4);
	} else {
		guint8 *buf [16];

		g_assert (tls_offset < 0x440);
		/* Load TEB->TlsExpansionSlots */
		x86_mov_reg_membase (code, dreg, dreg, 0xf94, 4);
		x86_test_reg_reg (code, dreg, dreg);
		buf [0] = code;
		x86_branch (code, X86_CC_EQ, code, TRUE);
		x86_mov_reg_membase (code, dreg, dreg, (tls_offset * 4) - 0x100, 4);
		x86_patch (buf [0], code);
	}
#else
	if (optimize_for_xen) {
		x86_prefix (code, X86_GS_PREFIX);
		x86_mov_reg_mem (code, dreg, 0, 4);
		x86_mov_reg_membase (code, dreg, dreg, tls_offset, 4);
	} else {
		x86_prefix (code, X86_GS_PREFIX);
		x86_mov_reg_mem (code, dreg, tls_offset, 4);
	}
#endif
	return code;
}
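/*
 * Win32 notes (assumptions based on the public TEB layout): fs:[0x18] is the
 * TEB self pointer, 3600 (0xE10) is the offset of TEB->TlsSlots (the first 64
 * slots), and 0xF94 is TEB->TlsExpansionSlots for slots 64..1087; the -0x100
 * adjustment turns (tls_offset * 4) into an index into the expansion array
 * (slot 64 maps to expansion index 0).
 */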
guint8*
mono_x86_emit_tls_set (guint8* code, int sreg, int tls_offset)
{
#if defined(TARGET_MACH)
	x86_prefix (code, X86_GS_PREFIX);
	x86_mov_mem_reg (code, tls_gs_offset + (tls_offset * 4), sreg, 4);
#elif defined(TARGET_WIN32)
	g_assert_not_reached ();
#else
	x86_prefix (code, X86_GS_PREFIX);
	x86_mov_mem_reg (code, tls_offset, sreg, 4);
#endif
	return code;
}
/*
 * emit_setup_lmf:
 *
 *   Emit code to initialize an LMF structure at LMF_OFFSET.
 */
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
	/* save all caller saved regs */
	x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx), X86_EBX, sizeof (mgreg_t));
	mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx));
	x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi), X86_EDI, sizeof (mgreg_t));
	mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi));
	x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi), X86_ESI, sizeof (mgreg_t));
	mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi));
	x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebp), X86_EBP, sizeof (mgreg_t));

	/* save the current IP */
	if (cfg->compile_aot) {
		/* This pushes the current ip */
		x86_call_imm (code, 0);
		x86_pop_reg (code, X86_EAX);
	} else {
		mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
		x86_mov_reg_imm (code, X86_EAX, 0);
	}
	x86_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (mgreg_t));

	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, eip), SLOT_NOREF);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebp), SLOT_NOREF);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esi), SLOT_NOREF);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, edi), SLOT_NOREF);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, ebx), SLOT_NOREF);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, esp), SLOT_NOREF);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
	mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset + lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);

	return code;
}
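/*
 * The LMF words saved above never hold managed references, so they are marked
 * SLOT_NOREF in the GC map; this keeps the precise stack scan from treating
 * stale register values stored in the LMF as live object pointers.
 */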
/* benchmark and set based on cpu */
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)

void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;
	MonoCallInst *call;
	guint offset;
	guint8 *code = cfg->native_code + cfg->code_len;
	int max_len, cpos;

	if (cfg->opt & MONO_OPT_LOOP) {
		int pad, align = LOOP_ALIGNMENT;
		/* set alignment depending on cpu */
		if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
			pad = align - pad;
			/*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
			x86_padding (code, pad);
			cfg->code_len += pad;
			bb->native_offset = cfg->code_len;
		}
	}

	if (cfg->verbose_level > 2)
		g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);

	cpos = bb->max_offset;

	if ((cfg->prof_options & MONO_PROFILE_COVERAGE) && cfg->coverage_info) {
		MonoProfileCoverageInfo *cov = cfg->coverage_info;
		g_assert (!cfg->compile_aot);
		cpos += 6;

		cov->data [bb->dfn].cil_code = bb->cil_code;
		/* this is not thread save, but good enough */
		x86_inc_mem (code, &cov->data [bb->dfn].count);
	}

	offset = code - cfg->native_code;

	mono_debug_open_block (cfg, bb, offset);

	if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num)
		x86_breakpoint (code);

	MONO_BB_FOR_EACH_INS (bb, ins) {
		offset = code - cfg->native_code;

		max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];

#define EXTRA_CODE_SPACE (16)

		if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
			cfg->code_size *= 2;
			cfg->native_code = mono_realloc_native_code(cfg);
			code = cfg->native_code + offset;
			cfg->stat_code_reallocs++;
		}

		if (cfg->debug_info)
			mono_debug_record_line_number (cfg, ins, offset);

		switch (ins->opcode) {
		case OP_BIGMUL:
			x86_mul_reg (code, ins->sreg2, TRUE);
			break;
		case OP_BIGMUL_UN:
			x86_mul_reg (code, ins->sreg2, FALSE);
			break;
		case OP_X86_SETEQ_MEMBASE:
		case OP_X86_SETNE_MEMBASE:
			x86_set_membase (code, ins->opcode == OP_X86_SETEQ_MEMBASE ? X86_CC_EQ : X86_CC_NE,
			                 ins->inst_basereg, ins->inst_offset, TRUE);
			break;
		case OP_STOREI1_MEMBASE_IMM:
			x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
			break;
		case OP_STOREI2_MEMBASE_IMM:
			x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
			break;
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
			x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
			break;
		case OP_STOREI1_MEMBASE_REG:
			x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 1);
			break;
		case OP_STOREI2_MEMBASE_REG:
			x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 2);
			break;
		case OP_STORE_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
			x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, 4);
			break;
		case OP_STORE_MEM_IMM:
			x86_mov_mem_imm (code, ins->inst_p0, ins->inst_c0, 4);
			break;
		case OP_LOADU4_MEM:
			x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_LOAD_MEM:
		case OP_LOADI4_MEM:
			/* These are created by the cprop pass so they use inst_imm as the source */
			x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
			break;
		case OP_LOADU1_MEM:
			x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, FALSE);
			break;
		case OP_LOADU2_MEM:
			x86_widen_mem (code, ins->dreg, ins->inst_imm, FALSE, TRUE);
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
			x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
			break;
		case OP_LOADU1_MEMBASE:
			x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
			break;
		case OP_LOADI1_MEMBASE:
			x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
			break;
		case OP_LOADU2_MEMBASE:
			x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
			break;
		case OP_LOADI2_MEMBASE:
			x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
			break;
		case OP_ICONV_TO_I1:
		case OP_SEXT_I1:
			x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
			break;
		case OP_ICONV_TO_I2:
		case OP_SEXT_I2:
			x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
			break;
		case OP_ICONV_TO_U1:
			x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, FALSE);
			break;
		case OP_ICONV_TO_U2:
			x86_widen_reg (code, ins->dreg, ins->sreg1, FALSE, TRUE);
			break;
		case OP_COMPARE:
		case OP_ICOMPARE:
			x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			break;
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
			x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
			break;
		case OP_X86_COMPARE_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_COMPARE_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_COMPARE_MEMBASE8_IMM:
			x86_alu_membase8_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_COMPARE_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_COMPARE_MEM_IMM:
			x86_alu_mem_imm (code, X86_CMP, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_TEST_NULL:
			x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
			break;
		case OP_X86_ADD_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_ADD_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_SUB_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_SUB_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_AND_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_OR_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_XOR_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_ADD_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_SUB_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_AND_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_OR_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_XOR_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_INC_MEMBASE:
			x86_inc_membase (code, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_X86_INC_REG:
			x86_inc_reg (code, ins->dreg);
			break;
		case OP_X86_DEC_MEMBASE:
			x86_dec_membase (code, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_X86_DEC_REG:
			x86_dec_reg (code, ins->dreg);
			break;
		case OP_X86_MUL_REG_MEMBASE:
			x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_AND_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_OR_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_XOR_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_BREAK:
			x86_breakpoint (code);
			break;
		case OP_RELAXED_NOP:
			x86_prefix (code, X86_REP_PREFIX);
			x86_nop (code);
			break;
		case OP_HARD_NOP:
			x86_nop (code);
			break;
		case OP_NOP:
		case OP_DUMMY_USE:
		case OP_DUMMY_STORE:
		case OP_DUMMY_ICONST:
		case OP_DUMMY_R8CONST:
		case OP_NOT_REACHED:
		case OP_NOT_NULL:
			break;
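		/*
		 * Note: "rep; nop" emitted for OP_RELAXED_NOP above is the PAUSE
		 * instruction (F3 90); on hyper-threaded parts it hints that the
		 * thread is in a spin-wait loop, which is why it is preferred over a
		 * plain nop for SpinWait-style code.
		 */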
		case OP_IL_SEQ_POINT:
			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
			break;
		case OP_SEQ_POINT: {
			int i;

			if (cfg->compile_aot)
				NOT_IMPLEMENTED;

			/* Have to use ecx as a temp reg since this can occur after OP_SETRET */

			/*
			 * We do this _before_ the breakpoint, so single stepping after
			 * a breakpoint is hit will step to the next IL offset.
			 */
			if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
				MonoInst *var = cfg->arch.ss_tramp_var;
				guint8 *br [1];

				g_assert (var);
				g_assert (var->opcode == OP_REGOFFSET);
				/* Load ss_tramp_var */
				/* This is equal to &ss_trampoline */
				x86_mov_reg_membase (code, X86_ECX, var->inst_basereg, var->inst_offset, sizeof (mgreg_t));
				x86_alu_membase_imm (code, X86_CMP, X86_ECX, 0, 0);
				br [0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
				x86_call_membase (code, X86_ECX, 0);
				x86_patch (br [0], code);
			}

			/*
			 * Many parts of sdb depend on the ip after the single step trampoline call to be equal to the seq point offset.
			 * This means we have to put the loading of bp_tramp_var after the offset.
			 */

			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);

			MonoInst *var = cfg->arch.bp_tramp_var;

			g_assert (var);
			g_assert (var->opcode == OP_REGOFFSET);
			/* Load the address of the bp trampoline */
			/* This needs to be constant size */
			guint8 *start = code;
			x86_mov_reg_membase (code, X86_ECX, var->inst_basereg, var->inst_offset, 4);
			if (code < start + OP_SEQ_POINT_BP_OFFSET) {
				int size = start + OP_SEQ_POINT_BP_OFFSET - code;
				x86_padding (code, size);
			}
			/*
			 * A placeholder for a possible breakpoint inserted by
			 * mono_arch_set_breakpoint ().
			 */
			for (i = 0; i < 2; ++i)
				x86_nop (code);
			/*
			 * Add an additional nop so skipping the bp doesn't cause the ip to point
			 * to another IL offset.
			 */
			x86_nop (code);
			break;
		}
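		/*
		 * Padding up to OP_SEQ_POINT_BP_OFFSET keeps the distance between the
		 * seq point address and the bp_tramp_var load constant, so (as an
		 * assumption about the companion code) mono_arch_set_breakpoint () can
		 * overwrite the two-byte nop placeholder with an indirect call through
		 * %ecx at a fixed offset from every seq point.
		 */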
		case OP_ADDCC:
		case OP_IADDCC:
		case OP_IADD:
			x86_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
			break;
		case OP_ADC:
		case OP_IADC:
			x86_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
			break;
		case OP_ADDCC_IMM:
		case OP_ADD_IMM:
		case OP_IADD_IMM:
			x86_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
			break;
		case OP_ADC_IMM:
		case OP_IADC_IMM:
			x86_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
			break;
		case OP_SUBCC:
		case OP_ISUBCC:
		case OP_ISUB:
			x86_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
			break;
		case OP_SBB:
		case OP_ISBB:
			x86_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
			break;
		case OP_SUBCC_IMM:
		case OP_SUB_IMM:
		case OP_ISUB_IMM:
			x86_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
			break;
		case OP_SBB_IMM:
		case OP_ISBB_IMM:
			x86_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
			break;
		case OP_IAND:
			x86_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
			break;
		case OP_AND_IMM:
		case OP_IAND_IMM:
			x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
			break;
		case OP_IDIV:
		case OP_IREM:
			/*
			 * The code is the same for div/rem, the allocator will allocate dreg
			 * to EAX/EDX as appropriate.
			 */
			if (ins->sreg2 == X86_EDX) {
				/* cdq clobbers this */
				x86_push_reg (code, ins->sreg2);
				x86_cdq (code);
				x86_div_membase (code, X86_ESP, 0, TRUE);
				x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			} else {
				x86_cdq (code);
				x86_div_reg (code, ins->sreg2, TRUE);
			}
			break;
		case OP_IDIV_UN:
		case OP_IREM_UN:
			if (ins->sreg2 == X86_EDX) {
				x86_push_reg (code, ins->sreg2);
				x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
				x86_div_membase (code, X86_ESP, 0, FALSE);
				x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			} else {
				x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
				x86_div_reg (code, ins->sreg2, FALSE);
			}
			break;
		case OP_DIV_IMM:
			x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
			x86_cdq (code);
			x86_div_reg (code, ins->sreg2, TRUE);
			break;
		case OP_IREM_IMM: {
			int power = mono_is_power_of_two (ins->inst_imm);

			g_assert (ins->sreg1 == X86_EAX);
			g_assert (ins->dreg == X86_EAX);
			g_assert (power >= 0);

			if (power == 1) {
				/* Based on http://compilers.iecc.com/comparch/article/93-04-079 */
				x86_cdq (code);
				x86_alu_reg_imm (code, X86_AND, X86_EAX, 1);
				/*
				 * If the dividend is >= 0, %edx is zero after cdq and this does nothing.
				 * If it is negative, it transforms %eax=0 into %eax=0 and %eax=1 into %eax=-1.
				 */
				x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
				x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
			} else if (power == 0) {
				x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
			} else {
				/* Based on gcc code */

				/* Add compensation for negative dividends */
				x86_cdq (code);
				x86_shift_reg_imm (code, X86_SHR, X86_EDX, 32 - power);
				x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EDX);
				/* Compute remainder */
				x86_alu_reg_imm (code, X86_AND, X86_EAX, (1 << power) - 1);
				/* Remove compensation */
				x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
			}
			break;
		}
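		/*
		 * Worked example for the gcc-style path above with inst_imm == 8
		 * (power == 3) and a negative dividend, %eax = -13:
		 *   cdq             %edx = 0xffffffff
		 *   shr $29, %edx   %edx = 7      (compensation = divisor - 1)
		 *   add %edx, %eax  %eax = -6
		 *   and $7,  %eax   %eax = 2
		 *   sub %edx, %eax  %eax = -5     == -13 rem 8 under CIL semantics
		 * For a non-negative dividend cdq gives %edx = 0, so the and alone
		 * produces the remainder.
		 */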
		case OP_IOR:
			x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
			break;
		case OP_OR_IMM:
		case OP_IOR_IMM:
			x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
			break;
		case OP_IXOR:
			x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
			break;
		case OP_XOR_IMM:
		case OP_IXOR_IMM:
			x86_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
			break;
		case OP_ISHL:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SHL, ins->dreg);
			break;
		case OP_ISHR:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SAR, ins->dreg);
			break;
		case OP_SHR_IMM:
		case OP_ISHR_IMM:
			x86_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
			break;
		case OP_SHR_UN_IMM:
		case OP_ISHR_UN_IMM:
			x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
			break;
		case OP_ISHR_UN:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SHR, ins->dreg);
			break;
		case OP_SHL_IMM:
		case OP_ISHL_IMM:
			x86_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
			break;
2836 guint8
*jump_to_end
;
2838 /* handle shifts below 32 bits */
2839 x86_shld_reg (code
, ins
->backend
.reg3
, ins
->sreg1
);
2840 x86_shift_reg (code
, X86_SHL
, ins
->sreg1
);
2842 x86_test_reg_imm (code
, X86_ECX
, 32);
2843 jump_to_end
= code
; x86_branch8 (code
, X86_CC_EQ
, 0, TRUE
);
2845 /* handle shift over 32 bit */
2846 x86_mov_reg_reg (code
, ins
->backend
.reg3
, ins
->sreg1
, 4);
2847 x86_clear_reg (code
, ins
->sreg1
);
2849 x86_patch (jump_to_end
, code
);
2853 guint8
*jump_to_end
;
2855 /* handle shifts below 32 bits */
2856 x86_shrd_reg (code
, ins
->sreg1
, ins
->backend
.reg3
);
2857 x86_shift_reg (code
, X86_SAR
, ins
->backend
.reg3
);
2859 x86_test_reg_imm (code
, X86_ECX
, 32);
2860 jump_to_end
= code
; x86_branch8 (code
, X86_CC_EQ
, 0, FALSE
);
2862 /* handle shifts over 31 bits */
2863 x86_mov_reg_reg (code
, ins
->sreg1
, ins
->backend
.reg3
, 4);
2864 x86_shift_reg_imm (code
, X86_SAR
, ins
->backend
.reg3
, 31);
2866 x86_patch (jump_to_end
, code
);
2870 guint8
*jump_to_end
;
2872 /* handle shifts below 32 bits */
2873 x86_shrd_reg (code
, ins
->sreg1
, ins
->backend
.reg3
);
2874 x86_shift_reg (code
, X86_SHR
, ins
->backend
.reg3
);
2876 x86_test_reg_imm (code
, X86_ECX
, 32);
2877 jump_to_end
= code
; x86_branch8 (code
, X86_CC_EQ
, 0, FALSE
);
2879 /* handle shifts over 31 bits */
2880 x86_mov_reg_reg (code
, ins
->sreg1
, ins
->backend
.reg3
, 4);
2881 x86_clear_reg (code
, ins
->backend
.reg3
);
2883 x86_patch (jump_to_end
, code
);
2887 if (ins
->inst_imm
>= 32) {
2888 x86_mov_reg_reg (code
, ins
->backend
.reg3
, ins
->sreg1
, 4);
2889 x86_clear_reg (code
, ins
->sreg1
);
2890 x86_shift_reg_imm (code
, X86_SHL
, ins
->backend
.reg3
, ins
->inst_imm
- 32);
2892 x86_shld_reg_imm (code
, ins
->backend
.reg3
, ins
->sreg1
, ins
->inst_imm
);
2893 x86_shift_reg_imm (code
, X86_SHL
, ins
->sreg1
, ins
->inst_imm
);
2897 if (ins
->inst_imm
>= 32) {
2898 x86_mov_reg_reg (code
, ins
->sreg1
, ins
->backend
.reg3
, 4);
2899 x86_shift_reg_imm (code
, X86_SAR
, ins
->backend
.reg3
, 0x1f);
2900 x86_shift_reg_imm (code
, X86_SAR
, ins
->sreg1
, ins
->inst_imm
- 32);
2902 x86_shrd_reg_imm (code
, ins
->sreg1
, ins
->backend
.reg3
, ins
->inst_imm
);
2903 x86_shift_reg_imm (code
, X86_SAR
, ins
->backend
.reg3
, ins
->inst_imm
);
2906 case OP_LSHR_UN_IMM
:
2907 if (ins
->inst_imm
>= 32) {
2908 x86_mov_reg_reg (code
, ins
->sreg1
, ins
->backend
.reg3
, 4);
2909 x86_clear_reg (code
, ins
->backend
.reg3
);
2910 x86_shift_reg_imm (code
, X86_SHR
, ins
->sreg1
, ins
->inst_imm
- 32);
2912 x86_shrd_reg_imm (code
, ins
->sreg1
, ins
->backend
.reg3
, ins
->inst_imm
);
2913 x86_shift_reg_imm (code
, X86_SHR
, ins
->backend
.reg3
, ins
->inst_imm
);
2917 x86_not_reg (code
, ins
->sreg1
);
2920 x86_neg_reg (code
, ins
->sreg1
);
2924 x86_imul_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
2928 switch (ins
->inst_imm
) {
2932 if (ins
->dreg
!= ins
->sreg1
)
2933 x86_mov_reg_reg (code
, ins
->dreg
, ins
->sreg1
, 4);
2934 x86_alu_reg_reg (code
, X86_ADD
, ins
->dreg
, ins
->dreg
);
2937 /* LEA r1, [r2 + r2*2] */
2938 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, 0, ins
->sreg1
, 1);
2941 /* LEA r1, [r2 + r2*4] */
2942 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, 0, ins
->sreg1
, 2);
2945 /* LEA r1, [r2 + r2*2] */
2947 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, 0, ins
->sreg1
, 1);
2948 x86_alu_reg_reg (code
, X86_ADD
, ins
->dreg
, ins
->dreg
);
2951 /* LEA r1, [r2 + r2*8] */
2952 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, 0, ins
->sreg1
, 3);
2955 /* LEA r1, [r2 + r2*4] */
2957 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, 0, ins
->sreg1
, 2);
2958 x86_alu_reg_reg (code
, X86_ADD
, ins
->dreg
, ins
->dreg
);
2961 /* LEA r1, [r2 + r2*2] */
2963 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, 0, ins
->sreg1
, 1);
2964 x86_shift_reg_imm (code
, X86_SHL
, ins
->dreg
, 2);
2967 /* LEA r1, [r2 + r2*4] */
2968 /* LEA r1, [r1 + r1*4] */
2969 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, 0, ins
->sreg1
, 2);
2970 x86_lea_memindex (code
, ins
->dreg
, ins
->dreg
, 0, ins
->dreg
, 2);
2973 /* LEA r1, [r2 + r2*4] */
2975 /* LEA r1, [r1 + r1*4] */
2976 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, 0, ins
->sreg1
, 2);
2977 x86_shift_reg_imm (code
, X86_SHL
, ins
->dreg
, 2);
2978 x86_lea_memindex (code
, ins
->dreg
, ins
->dreg
, 0, ins
->dreg
, 2);
2981 x86_imul_reg_reg_imm (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
2986 x86_imul_reg_reg (code
, ins
->sreg1
, ins
->sreg2
);
2987 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O
, FALSE
, "OverflowException");
2989 case OP_IMUL_OVF_UN
: {
2990 /* the mul operation and the exception check should most likely be split */
2991 int non_eax_reg
, saved_eax
= FALSE
, saved_edx
= FALSE
;
2992 /*g_assert (ins->sreg2 == X86_EAX);
2993 g_assert (ins->dreg == X86_EAX);*/
2994 if (ins
->sreg2
== X86_EAX
) {
2995 non_eax_reg
= ins
->sreg1
;
2996 } else if (ins
->sreg1
== X86_EAX
) {
2997 non_eax_reg
= ins
->sreg2
;
2999 /* no need to save since we're going to store to it anyway */
3000 if (ins
->dreg
!= X86_EAX
) {
3002 x86_push_reg (code
, X86_EAX
);
3004 x86_mov_reg_reg (code
, X86_EAX
, ins
->sreg1
, 4);
3005 non_eax_reg
= ins
->sreg2
;
3007 if (ins
->dreg
== X86_EDX
) {
3010 x86_push_reg (code
, X86_EAX
);
3014 x86_push_reg (code
, X86_EDX
);
3016 x86_mul_reg (code
, non_eax_reg
, FALSE
);
3017 /* save before the check since pop and mov don't change the flags */
3018 if (ins
->dreg
!= X86_EAX
)
3019 x86_mov_reg_reg (code
, ins
->dreg
, X86_EAX
, 4);
3021 x86_pop_reg (code
, X86_EDX
);
3023 x86_pop_reg (code
, X86_EAX
);
3024 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O
, FALSE
, "OverflowException");
3028 x86_mov_reg_imm (code
, ins
->dreg
, ins
->inst_c0
);
3031 g_assert_not_reached ();
3032 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
3033 x86_mov_reg_imm (code
, ins
->dreg
, 0);
3036 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
3037 x86_mov_reg_imm (code
, ins
->dreg
, 0);
3039 case OP_LOAD_GOTADDR
:
3040 g_assert (ins
->dreg
== MONO_ARCH_GOT_REG
);
3041 code
= mono_arch_emit_load_got_addr (cfg
->native_code
, code
, cfg
, NULL
);
3044 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_right
->inst_i1
, ins
->inst_right
->inst_p0
);
3045 x86_mov_reg_membase (code
, ins
->dreg
, ins
->inst_basereg
, 0xf0f0f0f0, 4);
3047 case OP_X86_PUSH_GOT_ENTRY
:
3048 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_right
->inst_i1
, ins
->inst_right
->inst_p0
);
3049 x86_push_membase (code
, ins
->inst_basereg
, 0xf0f0f0f0);
3052 if (ins
->dreg
!= ins
->sreg1
)
3053 x86_mov_reg_reg (code
, ins
->dreg
, ins
->sreg1
, 4);
3056 MonoCallInst
*call
= (MonoCallInst
*)ins
;
3059 ins
->flags
|= MONO_INST_GC_CALLSITE
;
3060 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
3062 /* reset offset to make max_len work */
3063 offset
= code
- cfg
->native_code
;
3065 g_assert (!cfg
->method
->save_lmf
);
3067 /* restore callee saved registers */
3068 for (i
= 0; i
< X86_NREG
; ++i
)
3069 if (X86_IS_CALLEE_SAVED_REG (i
) && cfg
->used_int_regs
& (1 << i
))
3071 if (cfg
->used_int_regs
& (1 << X86_ESI
)) {
3072 x86_mov_reg_membase (code
, X86_ESI
, X86_EBP
, pos
, 4);
3075 if (cfg
->used_int_regs
& (1 << X86_EDI
)) {
3076 x86_mov_reg_membase (code
, X86_EDI
, X86_EBP
, pos
, 4);
3079 if (cfg
->used_int_regs
& (1 << X86_EBX
)) {
3080 x86_mov_reg_membase (code
, X86_EBX
, X86_EBP
, pos
, 4);
3084 /* Copy arguments on the stack to our argument area */
3085 for (i
= 0; i
< call
->stack_usage
- call
->stack_align_amount
; i
+= 4) {
3086 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, i
, 4);
3087 x86_mov_membase_reg (code
, X86_EBP
, 8 + i
, X86_EAX
, 4);
3090 /* restore ESP/EBP */
3092 offset
= code
- cfg
->native_code
;
3093 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
3094 x86_jump32 (code
, 0);
3096 ins
->flags
|= MONO_INST_GC_CALLSITE
;
3097 cfg
->disable_aot
= TRUE
;
3101 /* ensure ins->sreg1 is not NULL
3102 * note that cmp DWORD PTR [eax], eax is one byte shorter than
3103 * cmp DWORD PTR [eax], 0
3105 x86_alu_membase_reg (code
, X86_CMP
, ins
->sreg1
, 0, ins
->sreg1
);
3108 int hreg
= ins
->sreg1
== X86_EAX
? X86_ECX
: X86_EAX
;
3109 x86_push_reg (code
, hreg
);
3110 x86_lea_membase (code
, hreg
, X86_EBP
, cfg
->sig_cookie
);
3111 x86_mov_membase_reg (code
, ins
->sreg1
, 0, hreg
, 4);
3112 x86_pop_reg (code
, hreg
);
3125 case OP_VOIDCALL_REG
:
3127 case OP_FCALL_MEMBASE
:
3128 case OP_LCALL_MEMBASE
:
3129 case OP_VCALL_MEMBASE
:
3130 case OP_VCALL2_MEMBASE
:
3131 case OP_VOIDCALL_MEMBASE
:
3132 case OP_CALL_MEMBASE
: {
3135 call
= (MonoCallInst
*)ins
;
3136 cinfo
= (CallInfo
*)call
->call_info
;
3138 switch (ins
->opcode
) {
3145 if (ins
->flags
& MONO_INST_HAS_METHOD
)
3146 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_METHOD
, call
->method
);
3148 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_ABS
, call
->fptr
);
3154 case OP_VOIDCALL_REG
:
3156 x86_call_reg (code
, ins
->sreg1
);
3158 case OP_FCALL_MEMBASE
:
3159 case OP_LCALL_MEMBASE
:
3160 case OP_VCALL_MEMBASE
:
3161 case OP_VCALL2_MEMBASE
:
3162 case OP_VOIDCALL_MEMBASE
:
3163 case OP_CALL_MEMBASE
:
3164 x86_call_membase (code
, ins
->sreg1
, ins
->inst_offset
);
3167 g_assert_not_reached ();
3170 ins
->flags
|= MONO_INST_GC_CALLSITE
;
3171 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
3172 if (cinfo
->callee_stack_pop
) {
3173 /* Have to compensate for the stack space popped by the callee */
3174 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, cinfo
->callee_stack_pop
);
3176 code
= emit_move_return_value (cfg
, ins
, code
);
3180 x86_lea_memindex (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
, ins
->sreg2
, ins
->backend
.shift_amount
);
3182 case OP_X86_LEA_MEMBASE
:
3183 x86_lea_membase (code
, ins
->dreg
, ins
->sreg1
, ins
->inst_imm
);
3186 x86_xchg_reg_reg (code
, ins
->sreg1
, ins
->sreg2
, 4);
3189 /* keep alignment */
3190 x86_alu_reg_imm (code
, X86_ADD
, ins
->sreg1
, MONO_ARCH_LOCALLOC_ALIGNMENT
- 1);
3191 x86_alu_reg_imm (code
, X86_AND
, ins
->sreg1
, ~(MONO_ARCH_LOCALLOC_ALIGNMENT
- 1));
3192 code
= mono_emit_stack_alloc (cfg
, code
, ins
);
3193 x86_mov_reg_reg (code
, ins
->dreg
, X86_ESP
, 4);
3194 if (cfg
->param_area
)
3195 x86_alu_reg_imm (code
, X86_ADD
, ins
->dreg
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
3197 case OP_LOCALLOC_IMM
: {
3198 guint32 size
= ins
->inst_imm
;
3199 size
= (size
+ (MONO_ARCH_FRAME_ALIGNMENT
- 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT
- 1);
3201 if (ins
->flags
& MONO_INST_INIT
) {
3202 /* FIXME: Optimize this */
3203 x86_mov_reg_imm (code
, ins
->dreg
, size
);
3204 ins
->sreg1
= ins
->dreg
;
3206 code
= mono_emit_stack_alloc (cfg
, code
, ins
);
3207 x86_mov_reg_reg (code
, ins
->dreg
, X86_ESP
, 4);
3209 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, size
);
3210 x86_mov_reg_reg (code
, ins
->dreg
, X86_ESP
, 4);
3212 if (cfg
->param_area
)
3213 x86_alu_reg_imm (code
, X86_ADD
, ins
->dreg
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
3217 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- 4);
3218 x86_push_reg (code
, ins
->sreg1
);
3219 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3220 (gpointer
)"mono_arch_throw_exception");
3221 ins
->flags
|= MONO_INST_GC_CALLSITE
;
3222 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
3226 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- 4);
3227 x86_push_reg (code
, ins
->sreg1
);
3228 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3229 (gpointer
)"mono_arch_rethrow_exception");
3230 ins
->flags
|= MONO_INST_GC_CALLSITE
;
3231 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
3234 case OP_CALL_HANDLER
:
3235 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- 4);
3236 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3237 x86_call_imm (code
, 0);
3238 mono_cfg_add_try_hole (cfg
, ins
->inst_eh_block
, code
, bb
);
3239 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- 4);
3241 case OP_START_HANDLER
: {
3242 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3243 x86_mov_membase_reg (code
, spvar
->inst_basereg
, spvar
->inst_offset
, X86_ESP
, 4);
3244 if (cfg
->param_area
)
3245 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
3248 case OP_ENDFINALLY
: {
3249 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3250 x86_mov_reg_membase (code
, X86_ESP
, spvar
->inst_basereg
, spvar
->inst_offset
, 4);
3254 case OP_ENDFILTER
: {
3255 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3256 x86_mov_reg_membase (code
, X86_ESP
, spvar
->inst_basereg
, spvar
->inst_offset
, 4);
3257 /* The local allocator will put the result into EAX */
3262 if (ins
->dreg
!= X86_EAX
)
3263 x86_mov_reg_reg (code
, ins
->dreg
, X86_EAX
, sizeof (gpointer
));
3267 ins
->inst_c0
= code
- cfg
->native_code
;
3270 if (ins
->inst_target_bb
->native_offset
) {
3271 x86_jump_code (code
, cfg
->native_code
+ ins
->inst_target_bb
->native_offset
);
3273 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3274 if ((cfg
->opt
& MONO_OPT_BRANCH
) &&
3275 x86_is_imm8 (ins
->inst_target_bb
->max_offset
- cpos
))
3276 x86_jump8 (code
, 0);
3278 x86_jump32 (code
, 0);
3282 x86_jump_reg (code
, ins
->sreg1
);
3301 x86_set_reg (code
, cc_table
[mono_opcode_to_cond (ins
->opcode
)], ins
->dreg
, cc_signed_table
[mono_opcode_to_cond (ins
->opcode
)]);
3302 x86_widen_reg (code
, ins
->dreg
, ins
->dreg
, FALSE
, FALSE
);
3304 case OP_COND_EXC_EQ
:
3305 case OP_COND_EXC_NE_UN
:
3306 case OP_COND_EXC_LT
:
3307 case OP_COND_EXC_LT_UN
:
3308 case OP_COND_EXC_GT
:
3309 case OP_COND_EXC_GT_UN
:
3310 case OP_COND_EXC_GE
:
3311 case OP_COND_EXC_GE_UN
:
3312 case OP_COND_EXC_LE
:
3313 case OP_COND_EXC_LE_UN
:
3314 case OP_COND_EXC_IEQ
:
3315 case OP_COND_EXC_INE_UN
:
3316 case OP_COND_EXC_ILT
:
3317 case OP_COND_EXC_ILT_UN
:
3318 case OP_COND_EXC_IGT
:
3319 case OP_COND_EXC_IGT_UN
:
3320 case OP_COND_EXC_IGE
:
3321 case OP_COND_EXC_IGE_UN
:
3322 case OP_COND_EXC_ILE
:
3323 case OP_COND_EXC_ILE_UN
:
3324 EMIT_COND_SYSTEM_EXCEPTION (cc_table
[mono_opcode_to_cond (ins
->opcode
)], cc_signed_table
[mono_opcode_to_cond (ins
->opcode
)], ins
->inst_p1
);
3326 case OP_COND_EXC_OV
:
3327 case OP_COND_EXC_NO
:
3329 case OP_COND_EXC_NC
:
3330 EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table
[ins
->opcode
- OP_COND_EXC_EQ
], (ins
->opcode
< OP_COND_EXC_NE_UN
), ins
->inst_p1
);
3332 case OP_COND_EXC_IOV
:
3333 case OP_COND_EXC_INO
:
3334 case OP_COND_EXC_IC
:
3335 case OP_COND_EXC_INC
:
3336 EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table
[ins
->opcode
- OP_COND_EXC_IEQ
], (ins
->opcode
< OP_COND_EXC_INE_UN
), ins
->inst_p1
);
3348 EMIT_COND_BRANCH (ins
, cc_table
[mono_opcode_to_cond (ins
->opcode
)], cc_signed_table
[mono_opcode_to_cond (ins
->opcode
)]);
3356 case OP_CMOV_INE_UN
:
3357 case OP_CMOV_IGE_UN
:
3358 case OP_CMOV_IGT_UN
:
3359 case OP_CMOV_ILE_UN
:
3360 case OP_CMOV_ILT_UN
:
3361 g_assert (ins
->dreg
== ins
->sreg1
);
3362 x86_cmov_reg (code
, cc_table
[mono_opcode_to_cond (ins
->opcode
)], cc_signed_table
[mono_opcode_to_cond (ins
->opcode
)], ins
->dreg
, ins
->sreg2
);
3365 /* floating point opcodes */
3367 double d
= *(double *)ins
->inst_p0
;
3369 if ((d
== 0.0) && (mono_signbit (d
) == 0)) {
3371 } else if (d
== 1.0) {
3374 if (cfg
->compile_aot
) {
3375 guint32
*val
= (guint32
*)&d
;
3376 x86_push_imm (code
, val
[1]);
3377 x86_push_imm (code
, val
[0]);
3378 x86_fld_membase (code
, X86_ESP
, 0, TRUE
);
3379 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 8);
3382 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_R8
, ins
->inst_p0
);
3383 x86_fld (code
, NULL
, TRUE
);
3389 float f
= *(float *)ins
->inst_p0
;
3391 if ((f
== 0.0) && (mono_signbit (f
) == 0)) {
3393 } else if (f
== 1.0) {
3396 if (cfg
->compile_aot
) {
3397 guint32 val
= *(guint32
*)&f
;
3398 x86_push_imm (code
, val
);
3399 x86_fld_membase (code
, X86_ESP
, 0, FALSE
);
3400 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 4);
3403 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_R4
, ins
->inst_p0
);
3404 x86_fld (code
, NULL
, FALSE
);
3409 case OP_STORER8_MEMBASE_REG
:
3410 x86_fst_membase (code
, ins
->inst_destbasereg
, ins
->inst_offset
, TRUE
, TRUE
);
3412 case OP_LOADR8_MEMBASE
:
3413 x86_fld_membase (code
, ins
->inst_basereg
, ins
->inst_offset
, TRUE
);
3415 case OP_STORER4_MEMBASE_REG
:
3416 x86_fst_membase (code
, ins
->inst_destbasereg
, ins
->inst_offset
, FALSE
, TRUE
);
3418 case OP_LOADR4_MEMBASE
:
3419 x86_fld_membase (code
, ins
->inst_basereg
, ins
->inst_offset
, FALSE
);
3421 case OP_ICONV_TO_R4
:
3422 x86_push_reg (code
, ins
->sreg1
);
3423 x86_fild_membase (code
, X86_ESP
, 0, FALSE
);
3424 /* Change precision */
3425 x86_fst_membase (code
, X86_ESP
, 0, FALSE
, TRUE
);
3426 x86_fld_membase (code
, X86_ESP
, 0, FALSE
);
3427 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 4);
3429 case OP_ICONV_TO_R8
:
3430 x86_push_reg (code
, ins
->sreg1
);
3431 x86_fild_membase (code
, X86_ESP
, 0, FALSE
);
3432 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 4);
3434 case OP_ICONV_TO_R_UN
:
3435 x86_push_imm (code
, 0);
3436 x86_push_reg (code
, ins
->sreg1
);
3437 x86_fild_membase (code
, X86_ESP
, 0, TRUE
);
3438 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 8);
3440 case OP_X86_FP_LOAD_I8
:
3441 x86_fild_membase (code
, ins
->inst_basereg
, ins
->inst_offset
, TRUE
);
3443 case OP_X86_FP_LOAD_I4
:
3444 x86_fild_membase (code
, ins
->inst_basereg
, ins
->inst_offset
, FALSE
);
3446 case OP_FCONV_TO_R4
:
3447 /* Change precision */
3448 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 4);
3449 x86_fst_membase (code
, X86_ESP
, 0, FALSE
, TRUE
);
3450 x86_fld_membase (code
, X86_ESP
, 0, FALSE
);
3451 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 4);
3453 case OP_FCONV_TO_I1
:
3454 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, 1, TRUE
);
3456 case OP_FCONV_TO_U1
:
3457 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, 1, FALSE
);
3459 case OP_FCONV_TO_I2
:
3460 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, 2, TRUE
);
3462 case OP_FCONV_TO_U2
:
3463 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, 2, FALSE
);
3465 case OP_FCONV_TO_I4
:
3467 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, 4, TRUE
);
3469 case OP_FCONV_TO_I8
:
3470 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 4);
3471 x86_fnstcw_membase(code
, X86_ESP
, 0);
3472 x86_mov_reg_membase (code
, ins
->dreg
, X86_ESP
, 0, 2);
3473 x86_alu_reg_imm (code
, X86_OR
, ins
->dreg
, 0xc00);
3474 x86_mov_membase_reg (code
, X86_ESP
, 2, ins
->dreg
, 2);
3475 x86_fldcw_membase (code
, X86_ESP
, 2);
3476 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
3477 x86_fist_pop_membase (code
, X86_ESP
, 0, TRUE
);
3478 x86_pop_reg (code
, ins
->dreg
);
3479 x86_pop_reg (code
, ins
->backend
.reg3
);
3480 x86_fldcw_membase (code
, X86_ESP
, 0);
3481 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 4);
3483 case OP_LCONV_TO_R8_2
:
3484 x86_push_reg (code
, ins
->sreg2
);
3485 x86_push_reg (code
, ins
->sreg1
);
3486 x86_fild_membase (code
, X86_ESP
, 0, TRUE
);
3487 /* Change precision */
3488 x86_fst_membase (code
, X86_ESP
, 0, TRUE
, TRUE
);
3489 x86_fld_membase (code
, X86_ESP
, 0, TRUE
);
3490 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 8);
3492 case OP_LCONV_TO_R4_2
:
3493 x86_push_reg (code
, ins
->sreg2
);
3494 x86_push_reg (code
, ins
->sreg1
);
3495 x86_fild_membase (code
, X86_ESP
, 0, TRUE
);
3496 /* Change precision */
3497 x86_fst_membase (code
, X86_ESP
, 0, FALSE
, TRUE
);
3498 x86_fld_membase (code
, X86_ESP
, 0, FALSE
);
3499 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 8);
		case OP_LCONV_TO_R_UN_2: {
			static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
			guint8 *br;

			/* load 64bit integer to FP stack */
			x86_push_reg (code, ins->sreg2);
			x86_push_reg (code, ins->sreg1);
			x86_fild_membase (code, X86_ESP, 0, TRUE);

			/* test if lreg is negative */
			x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
			br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);

			/* add correction constant mn */
			if (cfg->compile_aot) {
				x86_push_imm (code, (((guint32)mn [9]) << 24) | ((guint32)mn [8] << 16) | ((guint32)mn [7] << 8) | ((guint32)mn [6]));
				x86_push_imm (code, (((guint32)mn [5]) << 24) | ((guint32)mn [4] << 16) | ((guint32)mn [3] << 8) | ((guint32)mn [2]));
				x86_push_imm (code, (((guint32)mn [1]) << 24) | ((guint32)mn [0] << 16));
				x86_fld80_membase (code, X86_ESP, 2);
				x86_alu_reg_imm (code, X86_ADD, X86_ESP, 12);
			} else {
				x86_fld80_mem (code, mn);
			}
			x86_fp_op_reg (code, X86_FADD, 1, TRUE);

			x86_patch (br, code);

			/* Change precision */
			x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
			x86_fld_membase (code, X86_ESP, 0, TRUE);

			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);

			break;
		}
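		/*
		 * The ten bytes of mn above are the little-endian 80-bit extended
		 * encoding of 2^64 (biased exponent 0x403f, mantissa 0x8000000000000000).
		 * fild treats the 64-bit value as signed, so when the high word is
		 * negative the loaded value is off by exactly 2^64; adding the constant
		 * restores the unsigned result, e.g. 0xffffffffffffffff loads as -1.0
		 * and becomes 2^64 - 1 after the fadd.
		 */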
3536 case OP_LCONV_TO_OVF_I
:
3537 case OP_LCONV_TO_OVF_I4_2
: {
3538 guint8
*br
[3], *label
[1];
3542 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3544 x86_test_reg_reg (code
, ins
->sreg1
, ins
->sreg1
);
3546 /* If the low word top bit is set, see if we are negative */
3547 br
[0] = code
; x86_branch8 (code
, X86_CC_LT
, 0, TRUE
);
3548 /* We are not negative (no top bit set, check for our top word to be zero */
3549 x86_test_reg_reg (code
, ins
->sreg2
, ins
->sreg2
);
3550 br
[1] = code
; x86_branch8 (code
, X86_CC_EQ
, 0, TRUE
);
3553 /* throw exception */
3554 tins
= mono_branch_optimize_exception_target (cfg
, bb
, "OverflowException");
3556 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, tins
->inst_true_bb
);
3557 if ((cfg
->opt
& MONO_OPT_BRANCH
) && x86_is_imm8 (tins
->inst_true_bb
->max_offset
- cpos
))
3558 x86_jump8 (code
, 0);
3560 x86_jump32 (code
, 0);
3562 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_EXC
, "OverflowException");
3563 x86_jump32 (code
, 0);
3567 x86_patch (br
[0], code
);
3568 /* our top bit is set, check that top word is 0xfffffff */
3569 x86_alu_reg_imm (code
, X86_CMP
, ins
->sreg2
, 0xffffffff);
3571 x86_patch (br
[1], code
);
3572 /* nope, emit exception */
3573 br
[2] = code
; x86_branch8 (code
, X86_CC_NE
, 0, TRUE
);
3574 x86_patch (br
[2], label
[0]);
3576 if (ins
->dreg
!= ins
->sreg1
)
3577 x86_mov_reg_reg (code
, ins
->dreg
, ins
->sreg1
, 4);
3581 /* Not needed on the fp stack */
3583 case OP_MOVE_F_TO_I4
:
3584 x86_fst_membase (code
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, FALSE
, TRUE
);
3585 x86_mov_reg_membase (code
, ins
->dreg
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, 4);
3587 case OP_MOVE_I4_TO_F
:
3588 x86_mov_membase_reg (code
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, ins
->sreg1
, 4);
3589 x86_fld_membase (code
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, FALSE
);
3592 x86_fp_op_reg (code
, X86_FADD
, 1, TRUE
);
3595 x86_fp_op_reg (code
, X86_FSUB
, 1, TRUE
);
3598 x86_fp_op_reg (code
, X86_FMUL
, 1, TRUE
);
3601 x86_fp_op_reg (code
, X86_FDIV
, 1, TRUE
);
3609 x86_fp_op_reg (code
, X86_FADD
, 1, TRUE
);
3614 x86_fp_op_reg (code
, X86_FADD
, 1, TRUE
);
3621 * it really doesn't make sense to inline all this code,
3622 * it's here just to show that things may not be as simple
3625 guchar
*check_pos
, *end_tan
, *pop_jump
;
3626 x86_push_reg (code
, X86_EAX
);
3629 x86_test_reg_imm (code
, X86_EAX
, X86_FP_C2
);
3631 x86_branch8 (code
, X86_CC_NE
, 0, FALSE
);
3632 x86_fstp (code
, 0); /* pop the 1.0 */
3634 x86_jump8 (code
, 0);
3636 x86_fp_op (code
, X86_FADD
, 0);
3640 x86_test_reg_imm (code
, X86_EAX
, X86_FP_C2
);
3642 x86_branch8 (code
, X86_CC_NE
, 0, FALSE
);
3645 x86_patch (pop_jump
, code
);
3646 x86_fstp (code
, 0); /* pop the 1.0 */
3647 x86_patch (check_pos
, code
);
3648 x86_patch (end_tan
, code
);
3650 x86_fp_op_reg (code
, X86_FADD
, 1, TRUE
);
3651 x86_pop_reg (code
, X86_EAX
);
3658 x86_fp_op_reg (code
, X86_FADD
, 1, TRUE
);
3667 g_assert (cfg
->opt
& MONO_OPT_CMOV
);
3668 g_assert (ins
->dreg
== ins
->sreg1
);
3669 x86_alu_reg_reg (code
, X86_CMP
, ins
->sreg1
, ins
->sreg2
);
3670 x86_cmov_reg (code
, X86_CC_GT
, TRUE
, ins
->dreg
, ins
->sreg2
);
3673 g_assert (cfg
->opt
& MONO_OPT_CMOV
);
3674 g_assert (ins
->dreg
== ins
->sreg1
);
3675 x86_alu_reg_reg (code
, X86_CMP
, ins
->sreg1
, ins
->sreg2
);
3676 x86_cmov_reg (code
, X86_CC_GT
, FALSE
, ins
->dreg
, ins
->sreg2
);
3679 g_assert (cfg
->opt
& MONO_OPT_CMOV
);
3680 g_assert (ins
->dreg
== ins
->sreg1
);
3681 x86_alu_reg_reg (code
, X86_CMP
, ins
->sreg1
, ins
->sreg2
);
3682 x86_cmov_reg (code
, X86_CC_LT
, TRUE
, ins
->dreg
, ins
->sreg2
);
3685 g_assert (cfg
->opt
& MONO_OPT_CMOV
);
3686 g_assert (ins
->dreg
== ins
->sreg1
);
3687 x86_alu_reg_reg (code
, X86_CMP
, ins
->sreg1
, ins
->sreg2
);
3688 x86_cmov_reg (code
, X86_CC_LT
, FALSE
, ins
->dreg
, ins
->sreg2
);
3694 x86_fxch (code
, ins
->inst_imm
);
3699 x86_push_reg (code
, X86_EAX
);
3700 /* we need to exchange ST(0) with ST(1) */
3703 /* this requires a loop, because fprem somtimes
3704 * returns a partial remainder */
3706 /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
3707 /* x86_fprem1 (code); */
3710 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, X86_FP_C2
);
3712 x86_branch8 (code
, X86_CC_NE
, 0, FALSE
);
3718 x86_pop_reg (code
, X86_EAX
);
3722 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3723 x86_fcomip (code
, 1);
3727 /* this overwrites EAX */
3728 EMIT_FPCOMPARE(code
);
3729 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, X86_FP_CC_MASK
);
3733 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3734 /* zeroing the register at the start results in
3735 * shorter and faster code (we can also remove the widening op)
3737 guchar
*unordered_check
;
3738 x86_alu_reg_reg (code
, X86_XOR
, ins
->dreg
, ins
->dreg
);
3739 x86_fcomip (code
, 1);
3741 unordered_check
= code
;
3742 x86_branch8 (code
, X86_CC_P
, 0, FALSE
);
3743 if (ins
->opcode
== OP_FCEQ
) {
3744 x86_set_reg (code
, X86_CC_EQ
, ins
->dreg
, FALSE
);
3745 x86_patch (unordered_check
, code
);
3747 guchar
*jump_to_end
;
3748 x86_set_reg (code
, X86_CC_NE
, ins
->dreg
, FALSE
);
3750 x86_jump8 (code
, 0);
3751 x86_patch (unordered_check
, code
);
3752 x86_inc_reg (code
, ins
->dreg
);
3753 x86_patch (jump_to_end
, code
);
3758 if (ins
->dreg
!= X86_EAX
)
3759 x86_push_reg (code
, X86_EAX
);
3761 EMIT_FPCOMPARE(code
);
3762 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, X86_FP_CC_MASK
);
3763 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, 0x4000);
3764 x86_set_reg (code
, ins
->opcode
== OP_FCEQ
? X86_CC_EQ
: X86_CC_NE
, ins
->dreg
, TRUE
);
3765 x86_widen_reg (code
, ins
->dreg
, ins
->dreg
, FALSE
, FALSE
);
3767 if (ins
->dreg
!= X86_EAX
)
3768 x86_pop_reg (code
, X86_EAX
);
3772 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3773 /* zeroing the register at the start results in
3774 * shorter and faster code (we can also remove the widening op)
3776 x86_alu_reg_reg (code
, X86_XOR
, ins
->dreg
, ins
->dreg
);
3777 x86_fcomip (code
, 1);
3779 if (ins
->opcode
== OP_FCLT_UN
) {
3780 guchar
*unordered_check
= code
;
3781 guchar
*jump_to_end
;
3782 x86_branch8 (code
, X86_CC_P
, 0, FALSE
);
3783 x86_set_reg (code
, X86_CC_GT
, ins
->dreg
, FALSE
);
3785 x86_jump8 (code
, 0);
3786 x86_patch (unordered_check
, code
);
3787 x86_inc_reg (code
, ins
->dreg
);
3788 x86_patch (jump_to_end
, code
);
3790 x86_set_reg (code
, X86_CC_GT
, ins
->dreg
, FALSE
);
3794 if (ins
->dreg
!= X86_EAX
)
3795 x86_push_reg (code
, X86_EAX
);
3797 EMIT_FPCOMPARE(code
);
3798 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, X86_FP_CC_MASK
);
3799 if (ins
->opcode
== OP_FCLT_UN
) {
3800 guchar
*is_not_zero_check
, *end_jump
;
3801 is_not_zero_check
= code
;
3802 x86_branch8 (code
, X86_CC_NZ
, 0, TRUE
);
3804 x86_jump8 (code
, 0);
3805 x86_patch (is_not_zero_check
, code
);
3806 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_CC_MASK
);
3808 x86_patch (end_jump
, code
);
3810 x86_set_reg (code
, X86_CC_EQ
, ins
->dreg
, TRUE
);
3811 x86_widen_reg (code
, ins
->dreg
, ins
->dreg
, FALSE
, FALSE
);
3813 if (ins
->dreg
!= X86_EAX
)
3814 x86_pop_reg (code
, X86_EAX
);
3817 guchar
*unordered_check
;
3818 guchar
*jump_to_end
;
3819 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3820 /* zeroing the register at the start results in
3821 * shorter and faster code (we can also remove the widening op)
3823 x86_alu_reg_reg (code
, X86_XOR
, ins
->dreg
, ins
->dreg
);
3824 x86_fcomip (code
, 1);
3826 unordered_check
= code
;
3827 x86_branch8 (code
, X86_CC_P
, 0, FALSE
);
3828 x86_set_reg (code
, X86_CC_NB
, ins
->dreg
, FALSE
);
3829 x86_patch (unordered_check
, code
);
3832 if (ins
->dreg
!= X86_EAX
)
3833 x86_push_reg (code
, X86_EAX
);
3835 EMIT_FPCOMPARE(code
);
3836 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, X86_FP_CC_MASK
);
3837 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, 0x4500);
3838 unordered_check
= code
;
3839 x86_branch8 (code
, X86_CC_EQ
, 0, FALSE
);
3841 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C0
);
3842 x86_set_reg (code
, X86_CC_NE
, ins
->dreg
, TRUE
);
3843 x86_widen_reg (code
, ins
->dreg
, ins
->dreg
, FALSE
, FALSE
);
3845 x86_jump8 (code
, 0);
3846 x86_patch (unordered_check
, code
);
3847 x86_alu_reg_reg (code
, X86_XOR
, ins
->dreg
, ins
->dreg
);
3848 x86_patch (jump_to_end
, code
);
3850 if (ins
->dreg
!= X86_EAX
)
3851 x86_pop_reg (code
, X86_EAX
);
3856 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3857 /* zeroing the register at the start results in
3858 * shorter and faster code (we can also remove the widening op)
3860 guchar
*unordered_check
;
3861 x86_alu_reg_reg (code
, X86_XOR
, ins
->dreg
, ins
->dreg
);
3862 x86_fcomip (code
, 1);
3864 if (ins
->opcode
== OP_FCGT
) {
3865 unordered_check
= code
;
3866 x86_branch8 (code
, X86_CC_P
, 0, FALSE
);
3867 x86_set_reg (code
, X86_CC_LT
, ins
->dreg
, FALSE
);
3868 x86_patch (unordered_check
, code
);
3870 x86_set_reg (code
, X86_CC_LT
, ins
->dreg
, FALSE
);
3874 if (ins
->dreg
!= X86_EAX
)
3875 x86_push_reg (code
, X86_EAX
);
3877 EMIT_FPCOMPARE(code
);
3878 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, X86_FP_CC_MASK
);
3879 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C0
);
3880 if (ins
->opcode
== OP_FCGT_UN
) {
3881 guchar
*is_not_zero_check
, *end_jump
;
3882 is_not_zero_check
= code
;
3883 x86_branch8 (code
, X86_CC_NZ
, 0, TRUE
);
3885 x86_jump8 (code
, 0);
3886 x86_patch (is_not_zero_check
, code
);
3887 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_CC_MASK
);
3889 x86_patch (end_jump
, code
);
3891 x86_set_reg (code
, X86_CC_EQ
, ins
->dreg
, TRUE
);
3892 x86_widen_reg (code
, ins
->dreg
, ins
->dreg
, FALSE
, FALSE
);
3894 if (ins
->dreg
!= X86_EAX
)
3895 x86_pop_reg (code
, X86_EAX
);
3898 guchar
*unordered_check
;
3899 guchar
*jump_to_end
;
3900 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3901 /* zeroing the register at the start results in
3902 * shorter and faster code (we can also remove the widening op)
3904 x86_alu_reg_reg (code
, X86_XOR
, ins
->dreg
, ins
->dreg
);
3905 x86_fcomip (code
, 1);
3907 unordered_check
= code
;
3908 x86_branch8 (code
, X86_CC_P
, 0, FALSE
);
3909 x86_set_reg (code
, X86_CC_NA
, ins
->dreg
, FALSE
);
3910 x86_patch (unordered_check
, code
);
3913 if (ins
->dreg
!= X86_EAX
)
3914 x86_push_reg (code
, X86_EAX
);
3916 EMIT_FPCOMPARE(code
);
3917 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, X86_FP_CC_MASK
);
3918 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, 0x4500);
3919 unordered_check
= code
;
3920 x86_branch8 (code
, X86_CC_EQ
, 0, FALSE
);
3922 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C0
);
3923 x86_set_reg (code
, X86_CC_GE
, ins
->dreg
, TRUE
);
3924 x86_widen_reg (code
, ins
->dreg
, ins
->dreg
, FALSE
, FALSE
);
3926 x86_jump8 (code
, 0);
3927 x86_patch (unordered_check
, code
);
3928 x86_alu_reg_reg (code
, X86_XOR
, ins
->dreg
, ins
->dreg
);
3929 x86_patch (jump_to_end
, code
);
3931 if (ins
->dreg
!= X86_EAX
)
3932 x86_pop_reg (code
, X86_EAX
);
3936 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3937 guchar
*jump
= code
;
3938 x86_branch8 (code
, X86_CC_P
, 0, TRUE
);
3939 EMIT_COND_BRANCH (ins
, X86_CC_EQ
, FALSE
);
3940 x86_patch (jump
, code
);
3943 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, 0x4000);
3944 EMIT_COND_BRANCH (ins
, X86_CC_EQ
, TRUE
);
3947 /* Branch if C013 != 100 */
3948 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3949 /* branch if !ZF or (PF|CF) */
3950 EMIT_COND_BRANCH (ins
, X86_CC_NE
, FALSE
);
3951 EMIT_COND_BRANCH (ins
, X86_CC_P
, FALSE
);
3952 EMIT_COND_BRANCH (ins
, X86_CC_B
, FALSE
);
3955 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C3
);
3956 EMIT_COND_BRANCH (ins
, X86_CC_NE
, FALSE
);
3959 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3960 EMIT_COND_BRANCH (ins
, X86_CC_GT
, FALSE
);
3963 EMIT_COND_BRANCH (ins
, X86_CC_EQ
, FALSE
);
3966 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3967 EMIT_COND_BRANCH (ins
, X86_CC_P
, FALSE
);
3968 EMIT_COND_BRANCH (ins
, X86_CC_GT
, FALSE
);
3971 if (ins
->opcode
== OP_FBLT_UN
) {
3972 guchar
*is_not_zero_check
, *end_jump
;
3973 is_not_zero_check
= code
;
3974 x86_branch8 (code
, X86_CC_NZ
, 0, TRUE
);
3976 x86_jump8 (code
, 0);
3977 x86_patch (is_not_zero_check
, code
);
3978 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_CC_MASK
);
3980 x86_patch (end_jump
, code
);
3982 EMIT_COND_BRANCH (ins
, X86_CC_EQ
, FALSE
);
3986 if (cfg
->opt
& MONO_OPT_FCMOV
) {
3987 if (ins
->opcode
== OP_FBGT
) {
3990 /* skip branch if C1=1 */
3992 x86_branch8 (code
, X86_CC_P
, 0, FALSE
);
3993 /* branch if (C0 | C3) = 1 */
3994 EMIT_COND_BRANCH (ins
, X86_CC_LT
, FALSE
);
3995 x86_patch (br1
, code
);
3997 EMIT_COND_BRANCH (ins
, X86_CC_LT
, FALSE
);
4001 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C0
);
4002 if (ins
->opcode
== OP_FBGT_UN
) {
4003 guchar
*is_not_zero_check
, *end_jump
;
4004 is_not_zero_check
= code
;
4005 x86_branch8 (code
, X86_CC_NZ
, 0, TRUE
);
4007 x86_jump8 (code
, 0);
4008 x86_patch (is_not_zero_check
, code
);
4009 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_CC_MASK
);
4011 x86_patch (end_jump
, code
);
4013 EMIT_COND_BRANCH (ins
, X86_CC_EQ
, FALSE
);
4016 /* Branch if C013 == 100 or 001 */
4017 if (cfg
->opt
& MONO_OPT_FCMOV
) {
4020 /* skip branch if C1=1 */
4022 x86_branch8 (code
, X86_CC_P
, 0, FALSE
);
4023 /* branch if (C0 | C3) = 1 */
4024 EMIT_COND_BRANCH (ins
, X86_CC_BE
, FALSE
);
4025 x86_patch (br1
, code
);
4028 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C0
);
4029 EMIT_COND_BRANCH (ins
, X86_CC_EQ
, FALSE
);
4030 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C3
);
4031 EMIT_COND_BRANCH (ins
, X86_CC_EQ
, FALSE
);
4034 /* Branch if C013 == 000 */
4035 if (cfg
->opt
& MONO_OPT_FCMOV
) {
4036 EMIT_COND_BRANCH (ins
, X86_CC_LE
, FALSE
);
4039 EMIT_COND_BRANCH (ins
, X86_CC_NE
, FALSE
);
4042 /* Branch if C013=000 or 100 */
4043 if (cfg
->opt
& MONO_OPT_FCMOV
) {
4046 /* skip branch if C1=1 */
4048 x86_branch8 (code
, X86_CC_P
, 0, FALSE
);
4049 /* branch if C0=0 */
4050 EMIT_COND_BRANCH (ins
, X86_CC_NB
, FALSE
);
4051 x86_patch (br1
, code
);
4054 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, (X86_FP_C0
|X86_FP_C1
));
4055 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, 0);
4056 EMIT_COND_BRANCH (ins
, X86_CC_EQ
, FALSE
);
4059 /* Branch if C013 != 001 */
4060 if (cfg
->opt
& MONO_OPT_FCMOV
) {
4061 EMIT_COND_BRANCH (ins
, X86_CC_P
, FALSE
);
4062 EMIT_COND_BRANCH (ins
, X86_CC_GE
, FALSE
);
4065 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C0
);
4066 EMIT_COND_BRANCH (ins
, X86_CC_NE
, FALSE
);
4070 x86_push_reg (code
, X86_EAX
);
4073 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, 0x4100);
4074 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, X86_FP_C0
);
4075 x86_pop_reg (code
, X86_EAX
);
4077 /* Have to clean up the fp stack before throwing the exception */
4079 x86_branch8 (code
, X86_CC_NE
, 0, FALSE
);
4082 EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ
, FALSE
, "OverflowException");
4084 x86_patch (br1
, code
);
4088 code
= mono_x86_emit_tls_get (code
, ins
->dreg
, ins
->inst_offset
);
4092 code
= mono_x86_emit_tls_set (code
, ins
->sreg1
, ins
->inst_offset
);
		case OP_MEMORY_BARRIER: {
			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ) {
				x86_prefix (code, X86_LOCK_PREFIX);
				x86_alu_membase_imm (code, X86_ADD, X86_ESP, 0, 0);
			}
			break;
		}
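		/*
		 * A locked read-modify-write of the stack slot ("lock add [esp], 0")
		 * acts as a full StoreLoad barrier on x86 and is typically cheaper
		 * than mfence; ordinary loads and stores are already ordered strongly
		 * enough for the weaker barrier kinds, so nothing is emitted for them.
		 */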
		case OP_ATOMIC_ADD_I4: {
			int dreg = ins->dreg;

			g_assert (cfg->has_atomic_add_i4);

			/* hack: limit in regalloc, dreg != sreg1 && dreg != sreg2 */
			if (ins->sreg2 == dreg) {
				if (dreg == X86_EBX) {
					dreg = X86_EDI;
					if (ins->inst_basereg == X86_EDI)
						dreg = X86_ESI;
				} else {
					dreg = X86_EBX;
					if (ins->inst_basereg == X86_EBX)
						dreg = X86_EDI;
				}
			} else if (ins->inst_basereg == dreg) {
				if (dreg == X86_EBX) {
					dreg = X86_EDI;
					if (ins->sreg2 == X86_EDI)
						dreg = X86_ESI;
				} else {
					dreg = X86_EBX;
					if (ins->sreg2 == X86_EBX)
						dreg = X86_EDI;
				}
			}

			if (dreg != ins->dreg) {
				x86_push_reg (code, dreg);
			}

			x86_mov_reg_reg (code, dreg, ins->sreg2, 4);
			x86_prefix (code, X86_LOCK_PREFIX);
			x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
			/* dreg contains the old value, add with sreg2 value */
			x86_alu_reg_reg (code, X86_ADD, dreg, ins->sreg2);

			if (ins->dreg != dreg) {
				x86_mov_reg_reg (code, ins->dreg, dreg, 4);
				x86_pop_reg (code, dreg);
			}
			break;
		}
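		/*
		 * The exchange below is built as a cmpxchg retry loop: the current value is
		 * loaded into EAX, then "lock cmpxchg" stores sreg2 only if memory still
		 * holds that value, looping back otherwise; on exit the old value is left
		 * in EAX, which is also the destination register.
		 */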
		case OP_ATOMIC_EXCHANGE_I4: {
			guchar *br [2];
			int sreg2 = ins->sreg2;
			int breg = ins->inst_basereg;

			g_assert (cfg->has_atomic_exchange_i4);

			/* cmpxchg uses eax as the comparand, need to make sure we can use it
			 * hack to overcome limits in x86 reg allocator
			 * (req: dreg == eax and sreg2 != eax and breg != eax)
			 */
			g_assert (ins->dreg == X86_EAX);

			/* We need the EAX reg for the cmpxchg */
			if (ins->sreg2 == X86_EAX) {
				sreg2 = (breg == X86_EDX) ? X86_EBX : X86_EDX;
				x86_push_reg (code, sreg2);
				x86_mov_reg_reg (code, sreg2, X86_EAX, 4);
			}

			if (breg == X86_EAX) {
				breg = (sreg2 == X86_ESI) ? X86_EDI : X86_ESI;
				x86_push_reg (code, breg);
				x86_mov_reg_reg (code, breg, X86_EAX, 4);
			}

			x86_mov_reg_membase (code, X86_EAX, breg, ins->inst_offset, 4);

			br [0] = code; x86_prefix (code, X86_LOCK_PREFIX);
			x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
			br [1] = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
			x86_patch (br [1], br [0]);

			if (breg != ins->inst_basereg)
				x86_pop_reg (code, breg);

			if (ins->sreg2 != sreg2)
				x86_pop_reg (code, sreg2);
			break;
		}
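		/*
		 * cmpxchg always compares against and returns the old value in EAX, so the
		 * constraints asserted below pin both the comparand (sreg3) and the result
		 * (dreg) to EAX, while the memory base register (sreg1) must stay out of
		 * EAX and be distinct from the new value (sreg2).
		 */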
		case OP_ATOMIC_CAS_I4: {
			g_assert (ins->dreg == X86_EAX);
			g_assert (ins->sreg3 == X86_EAX);
			g_assert (ins->sreg1 != X86_EAX);
			g_assert (ins->sreg1 != ins->sreg2);

			x86_prefix (code, X86_LOCK_PREFIX);
			x86_cmpxchg_membase_reg (code, ins->sreg1, ins->inst_offset, ins->sreg2);
			break;
		}
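		/*
		 * Naturally aligned loads of up to 4 bytes are atomic on x86 and acquire
		 * ordering comes for free from the memory model, so the atomic loads below
		 * compile to plain mov/movzx/movsx (or fld for the FP variants).
		 */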
		case OP_ATOMIC_LOAD_I1: {
			x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
			break;
		}
		case OP_ATOMIC_LOAD_U1: {
			x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
			break;
		}
		case OP_ATOMIC_LOAD_I2: {
			x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
			break;
		}
		case OP_ATOMIC_LOAD_U2: {
			x86_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
			break;
		}
		case OP_ATOMIC_LOAD_I4:
		case OP_ATOMIC_LOAD_U4: {
			x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
			break;
		}
		case OP_ATOMIC_LOAD_R4:
		case OP_ATOMIC_LOAD_R8: {
			x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, ins->opcode == OP_ATOMIC_LOAD_R8);
			break;
		}
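		/*
		 * Plain stores already have release semantics on x86; only sequentially
		 * consistent stores need a trailing full fence, which is what the
		 * memory_barrier_kind check after each store below is for.
		 */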
		case OP_ATOMIC_STORE_I1:
		case OP_ATOMIC_STORE_U1:
		case OP_ATOMIC_STORE_I2:
		case OP_ATOMIC_STORE_U2:
		case OP_ATOMIC_STORE_I4:
		case OP_ATOMIC_STORE_U4: {
			int size;

			switch (ins->opcode) {
			case OP_ATOMIC_STORE_I1:
			case OP_ATOMIC_STORE_U1:
				size = 1;
				break;
			case OP_ATOMIC_STORE_I2:
			case OP_ATOMIC_STORE_U2:
				size = 2;
				break;
			case OP_ATOMIC_STORE_I4:
			case OP_ATOMIC_STORE_U4:
				size = 4;
				break;
			}

			x86_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, ins->sreg1, size);

			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				x86_mfence (code);
			break;
		}
		case OP_ATOMIC_STORE_R4:
		case OP_ATOMIC_STORE_R8: {
			x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, ins->opcode == OP_ATOMIC_STORE_R8, TRUE);

			if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
				x86_mfence (code);
			break;
		}
		case OP_CARD_TABLE_WBARRIER: {
			int ptr = ins->sreg1;
			int value = ins->sreg2;
			guchar *br = NULL;
			int nursery_shift, card_table_shift;
			gpointer card_table_mask;
			size_t nursery_size;
			gulong card_table = (gulong)mono_gc_get_card_table (&card_table_shift, &card_table_mask);
			gulong nursery_start = (gulong)mono_gc_get_nursery (&nursery_shift, &nursery_size);
			gboolean card_table_nursery_check = mono_gc_card_table_nursery_check ();

			/*
			 * We need one register we can clobber, we choose EDX and make sreg1
			 * fixed EAX to work around limitations in the local register allocator.
			 * sreg2 might get allocated to EDX, but that is not a problem since
			 * we use it before clobbering EDX.
			 */
			g_assert (ins->sreg1 == X86_EAX);

			/*
			 * This is the code we produce:
			 *
			 *   edx = value
			 *   edx >>= nursery_shift
			 *   cmp edx, (nursery_start >> nursery_shift)
			 *   jne done
			 *   edx = ptr
			 *   edx >>= card_table_shift
			 *   card_table[edx] = 1
			 * done:
			 */

			if (card_table_nursery_check) {
				if (value != X86_EDX)
					x86_mov_reg_reg (code, X86_EDX, value, 4);
				x86_shift_reg_imm (code, X86_SHR, X86_EDX, nursery_shift);
				x86_alu_reg_imm (code, X86_CMP, X86_EDX, nursery_start >> nursery_shift);
				br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
			}
			x86_mov_reg_reg (code, X86_EDX, ptr, 4);
			x86_shift_reg_imm (code, X86_SHR, X86_EDX, card_table_shift);
			if (card_table_mask)
				x86_alu_reg_imm (code, X86_AND, X86_EDX, (int)card_table_mask);
			x86_mov_membase_imm (code, X86_EDX, card_table, 1, 1);
			if (card_table_nursery_check)
				x86_patch (br, code);
			break;
		}
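		/*
		 * Rough C equivalent of the barrier emitted above (a sketch, not code the
		 * JIT runs): when the nursery check is enabled the card is only dirtied if
		 * the stored value points into the nursery:
		 *
		 *   if (!check || (value >> nursery_shift) == (nursery_start >> nursery_shift))
		 *       ((guint8 *) card_table) [((gulong) ptr >> card_table_shift) & (gulong) card_table_mask] = 1;
		 */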
4309 #ifdef MONO_ARCH_SIMD_INTRINSICS
4311 x86_sse_alu_ps_reg_reg (code
, X86_SSE_ADD
, ins
->sreg1
, ins
->sreg2
);
4314 x86_sse_alu_ps_reg_reg (code
, X86_SSE_DIV
, ins
->sreg1
, ins
->sreg2
);
4317 x86_sse_alu_ps_reg_reg (code
, X86_SSE_MUL
, ins
->sreg1
, ins
->sreg2
);
4320 x86_sse_alu_ps_reg_reg (code
, X86_SSE_SUB
, ins
->sreg1
, ins
->sreg2
);
4323 x86_sse_alu_ps_reg_reg (code
, X86_SSE_MAX
, ins
->sreg1
, ins
->sreg2
);
4326 x86_sse_alu_ps_reg_reg (code
, X86_SSE_MIN
, ins
->sreg1
, ins
->sreg2
);
4329 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 7);
4330 x86_sse_alu_ps_reg_reg_imm (code
, X86_SSE_COMP
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
4333 x86_sse_alu_ps_reg_reg (code
, X86_SSE_AND
, ins
->sreg1
, ins
->sreg2
);
4336 x86_sse_alu_ps_reg_reg (code
, X86_SSE_ANDN
, ins
->sreg1
, ins
->sreg2
);
4339 x86_sse_alu_ps_reg_reg (code
, X86_SSE_OR
, ins
->sreg1
, ins
->sreg2
);
4342 x86_sse_alu_ps_reg_reg (code
, X86_SSE_XOR
, ins
->sreg1
, ins
->sreg2
);
4345 x86_sse_alu_ps_reg_reg (code
, X86_SSE_SQRT
, ins
->dreg
, ins
->sreg1
);
4348 x86_sse_alu_ps_reg_reg (code
, X86_SSE_RSQRT
, ins
->dreg
, ins
->sreg1
);
4351 x86_sse_alu_ps_reg_reg (code
, X86_SSE_RCP
, ins
->dreg
, ins
->sreg1
);
4354 x86_sse_alu_sd_reg_reg (code
, X86_SSE_ADDSUB
, ins
->sreg1
, ins
->sreg2
);
4357 x86_sse_alu_sd_reg_reg (code
, X86_SSE_HADD
, ins
->sreg1
, ins
->sreg2
);
4360 x86_sse_alu_sd_reg_reg (code
, X86_SSE_HSUB
, ins
->sreg1
, ins
->sreg2
);
4363 x86_sse_alu_ss_reg_reg (code
, X86_SSE_MOVSHDUP
, ins
->dreg
, ins
->sreg1
);
4366 x86_sse_alu_ss_reg_reg (code
, X86_SSE_MOVSLDUP
, ins
->dreg
, ins
->sreg1
);
		case OP_PSHUFLEW_HIGH:
			g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
			x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 1);
			break;
		case OP_PSHUFLEW_LOW:
			g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
			x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 0);
			break;
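		/*
		 * The last argument of x86_pshufw_reg_reg selects which encoding is used:
		 * 1 shuffles the four high 16-bit lanes (pshufhw), 0 the four low lanes
		 * (pshuflw); the remaining lanes are copied through unchanged.
		 */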
4378 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 0xFF);
4379 x86_sse_shift_reg_imm (code
, X86_SSE_PSHUFD
, ins
->dreg
, ins
->sreg1
, ins
->inst_c0
);
4382 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 0xFF);
4383 x86_sse_alu_reg_reg_imm8 (code
, X86_SSE_SHUFP
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
4386 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 0x3);
4387 x86_sse_alu_pd_reg_reg_imm8 (code
, X86_SSE_SHUFP
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
4391 x86_sse_alu_pd_reg_reg (code
, X86_SSE_ADD
, ins
->sreg1
, ins
->sreg2
);
4394 x86_sse_alu_pd_reg_reg (code
, X86_SSE_DIV
, ins
->sreg1
, ins
->sreg2
);
4397 x86_sse_alu_pd_reg_reg (code
, X86_SSE_MUL
, ins
->sreg1
, ins
->sreg2
);
4400 x86_sse_alu_pd_reg_reg (code
, X86_SSE_SUB
, ins
->sreg1
, ins
->sreg2
);
4403 x86_sse_alu_pd_reg_reg (code
, X86_SSE_MAX
, ins
->sreg1
, ins
->sreg2
);
4406 x86_sse_alu_pd_reg_reg (code
, X86_SSE_MIN
, ins
->sreg1
, ins
->sreg2
);
4409 g_assert (ins
->inst_c0
>= 0 && ins
->inst_c0
<= 7);
4410 x86_sse_alu_pd_reg_reg_imm (code
, X86_SSE_COMP
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
4413 x86_sse_alu_pd_reg_reg (code
, X86_SSE_AND
, ins
->sreg1
, ins
->sreg2
);
4416 x86_sse_alu_pd_reg_reg (code
, X86_SSE_ANDN
, ins
->sreg1
, ins
->sreg2
);
4419 x86_sse_alu_pd_reg_reg (code
, X86_SSE_OR
, ins
->sreg1
, ins
->sreg2
);
4422 x86_sse_alu_pd_reg_reg (code
, X86_SSE_XOR
, ins
->sreg1
, ins
->sreg2
);
4425 x86_sse_alu_pd_reg_reg (code
, X86_SSE_SQRT
, ins
->dreg
, ins
->sreg1
);
4428 x86_sse_alu_pd_reg_reg (code
, X86_SSE_ADDSUB
, ins
->sreg1
, ins
->sreg2
);
4431 x86_sse_alu_pd_reg_reg (code
, X86_SSE_HADD
, ins
->sreg1
, ins
->sreg2
);
4434 x86_sse_alu_pd_reg_reg (code
, X86_SSE_HSUB
, ins
->sreg1
, ins
->sreg2
);
4437 x86_sse_alu_sd_reg_reg (code
, X86_SSE_MOVDDUP
, ins
->dreg
, ins
->sreg1
);
4440 case OP_EXTRACT_MASK
:
4441 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMOVMSKB
, ins
->dreg
, ins
->sreg1
);
4445 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PAND
, ins
->sreg1
, ins
->sreg2
);
4448 x86_sse_alu_pd_reg_reg (code
, X86_SSE_POR
, ins
->sreg1
, ins
->sreg2
);
4451 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PXOR
, ins
->sreg1
, ins
->sreg2
);
4455 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PADDB
, ins
->sreg1
, ins
->sreg2
);
4458 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PADDW
, ins
->sreg1
, ins
->sreg2
);
4461 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PADDD
, ins
->sreg1
, ins
->sreg2
);
4464 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PADDQ
, ins
->sreg1
, ins
->sreg2
);
4468 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PSUBB
, ins
->sreg1
, ins
->sreg2
);
4471 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PSUBW
, ins
->sreg1
, ins
->sreg2
);
4474 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PSUBD
, ins
->sreg1
, ins
->sreg2
);
4477 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PSUBQ
, ins
->sreg1
, ins
->sreg2
);
4481 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMAXUB
, ins
->sreg1
, ins
->sreg2
);
4484 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMAXUW
, ins
->sreg1
, ins
->sreg2
);
4487 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMAXUD
, ins
->sreg1
, ins
->sreg2
);
4491 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMAXSB
, ins
->sreg1
, ins
->sreg2
);
4494 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMAXSW
, ins
->sreg1
, ins
->sreg2
);
4497 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMAXSD
, ins
->sreg1
, ins
->sreg2
);
4501 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PAVGB
, ins
->sreg1
, ins
->sreg2
);
4504 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PAVGW
, ins
->sreg1
, ins
->sreg2
);
4508 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMINUB
, ins
->sreg1
, ins
->sreg2
);
4511 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMINUW
, ins
->sreg1
, ins
->sreg2
);
4514 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMINUD
, ins
->sreg1
, ins
->sreg2
);
4518 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMINSB
, ins
->sreg1
, ins
->sreg2
);
4521 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMINSW
, ins
->sreg1
, ins
->sreg2
);
4524 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMINSD
, ins
->sreg1
, ins
->sreg2
);
4528 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PCMPEQB
, ins
->sreg1
, ins
->sreg2
);
4531 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PCMPEQW
, ins
->sreg1
, ins
->sreg2
);
4534 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PCMPEQD
, ins
->sreg1
, ins
->sreg2
);
4537 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PCMPEQQ
, ins
->sreg1
, ins
->sreg2
);
4541 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PCMPGTB
, ins
->sreg1
, ins
->sreg2
);
4544 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PCMPGTW
, ins
->sreg1
, ins
->sreg2
);
4547 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PCMPGTD
, ins
->sreg1
, ins
->sreg2
);
4550 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PCMPGTQ
, ins
->sreg1
, ins
->sreg2
);
		case OP_PSUM_ABS_DIFF:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSADBW, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWB:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLBW, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLWD, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLQDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHB:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHBW, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHWD, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHQDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
			break;
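		/*
		 * The PUNPCKL and PUNPCKH families interleave the low and high halves of
		 * the two source vectors lane by lane; the UNPCKL/UNPCKH PS and PD variants
		 * do the same for packed single and double precision floats.
		 */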
4596 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PACKSSWB
, ins
->sreg1
, ins
->sreg2
);
4599 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PACKSSDW
, ins
->sreg1
, ins
->sreg2
);
4602 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PACKUSWB
, ins
->sreg1
, ins
->sreg2
);
4605 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PACKUSDW
, ins
->sreg1
, ins
->sreg2
);
		case OP_PADDB_SAT_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSB, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBB_SAT_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSB, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDW_SAT_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSW, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBW_SAT_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSW, ins->sreg1, ins->sreg2);
			break;
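		/*
		 * PADDUS/PSUBUS are saturating: results clamp to the unsigned range of the
		 * lane (0..0xFF or 0..0xFFFF) instead of wrapping around on overflow.
		 */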
4622 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PADDSB
, ins
->sreg1
, ins
->sreg2
);
4625 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PSUBSB
, ins
->sreg1
, ins
->sreg2
);
4628 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PADDSW
, ins
->sreg1
, ins
->sreg2
);
4631 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PSUBSW
, ins
->sreg1
, ins
->sreg2
);
4635 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMULLW
, ins
->sreg1
, ins
->sreg2
);
4638 x86_sse_alu_sse41_reg_reg (code
, X86_SSE_PMULLD
, ins
->sreg1
, ins
->sreg2
);
4641 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMULUDQ
, ins
->sreg1
, ins
->sreg2
);
4643 case OP_PMULW_HIGH_UN
:
4644 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMULHUW
, ins
->sreg1
, ins
->sreg2
);
4647 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PMULHW
, ins
->sreg1
, ins
->sreg2
);
4651 x86_sse_shift_reg_imm (code
, X86_SSE_PSHIFTW
, X86_SSE_SHR
, ins
->dreg
, ins
->inst_imm
);
4654 x86_sse_shift_reg_reg (code
, X86_SSE_PSRLW_REG
, ins
->dreg
, ins
->sreg2
);
4658 x86_sse_shift_reg_imm (code
, X86_SSE_PSHIFTW
, X86_SSE_SAR
, ins
->dreg
, ins
->inst_imm
);
4661 x86_sse_shift_reg_reg (code
, X86_SSE_PSRAW_REG
, ins
->dreg
, ins
->sreg2
);
4665 x86_sse_shift_reg_imm (code
, X86_SSE_PSHIFTW
, X86_SSE_SHL
, ins
->dreg
, ins
->inst_imm
);
4668 x86_sse_shift_reg_reg (code
, X86_SSE_PSLLW_REG
, ins
->dreg
, ins
->sreg2
);
4672 x86_sse_shift_reg_imm (code
, X86_SSE_PSHIFTD
, X86_SSE_SHR
, ins
->dreg
, ins
->inst_imm
);
4675 x86_sse_shift_reg_reg (code
, X86_SSE_PSRLD_REG
, ins
->dreg
, ins
->sreg2
);
4679 x86_sse_shift_reg_imm (code
, X86_SSE_PSHIFTD
, X86_SSE_SAR
, ins
->dreg
, ins
->inst_imm
);
4682 x86_sse_shift_reg_reg (code
, X86_SSE_PSRAD_REG
, ins
->dreg
, ins
->sreg2
);
4686 x86_sse_shift_reg_imm (code
, X86_SSE_PSHIFTD
, X86_SSE_SHL
, ins
->dreg
, ins
->inst_imm
);
4689 x86_sse_shift_reg_reg (code
, X86_SSE_PSLLD_REG
, ins
->dreg
, ins
->sreg2
);
4693 x86_sse_shift_reg_imm (code
, X86_SSE_PSHIFTQ
, X86_SSE_SHR
, ins
->dreg
, ins
->inst_imm
);
4696 x86_sse_shift_reg_reg (code
, X86_SSE_PSRLQ_REG
, ins
->dreg
, ins
->sreg2
);
4700 x86_sse_shift_reg_imm (code
, X86_SSE_PSHIFTQ
, X86_SSE_SHL
, ins
->dreg
, ins
->inst_imm
);
4703 x86_sse_shift_reg_reg (code
, X86_SSE_PSLLQ_REG
, ins
->dreg
, ins
->sreg2
);
4707 x86_movd_xreg_reg (code
, ins
->dreg
, ins
->sreg1
);
4710 x86_movd_reg_xreg (code
, ins
->dreg
, ins
->sreg1
);
4714 x86_movd_reg_xreg (code
, ins
->dreg
, ins
->sreg1
);
4716 x86_shift_reg_imm (code
, X86_SHR
, ins
->dreg
, ins
->inst_c0
* 8);
4717 x86_widen_reg (code
, ins
->dreg
, ins
->dreg
, ins
->opcode
== OP_EXTRACT_I1
, FALSE
);
4721 x86_movd_reg_xreg (code
, ins
->dreg
, ins
->sreg1
);
4723 x86_shift_reg_imm (code
, X86_SHR
, ins
->dreg
, 16);
4724 x86_widen_reg (code
, ins
->dreg
, ins
->dreg
, ins
->opcode
== OP_EXTRACT_I2
, TRUE
);
4728 x86_sse_alu_pd_membase_reg (code
, X86_SSE_MOVHPD_MEMBASE_REG
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, ins
->sreg1
);
4730 x86_sse_alu_sd_membase_reg (code
, X86_SSE_MOVSD_MEMBASE_REG
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, ins
->sreg1
);
4731 x86_fld_membase (code
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, TRUE
);
4735 x86_sse_alu_pd_reg_reg_imm (code
, X86_SSE_PINSRW
, ins
->sreg1
, ins
->sreg2
, ins
->inst_c0
);
		case OP_EXTRACTX_U2:
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PEXTRW, ins->dreg, ins->sreg1, ins->inst_c0);
			break;
		case OP_INSERTX_U1_SLOW:
			/* sreg1 is the extracted ireg (scratch)
			 * sreg2 is the to-be-inserted ireg (scratch)
			 * dreg is the xreg to receive the value */

			/* clear the bits from the extracted word */
			x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
			/* shift the value to insert if needed */
			if (ins->inst_c0 & 1)
				x86_shift_reg_imm (code, X86_SHL, ins->sreg2, 8);
			/* join them together */
			x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
			break;
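		/*
		 * PINSRW can only replace a whole 16-bit lane, so inserting a single byte
		 * goes through this slow path: the containing word was extracted into sreg1,
		 * the unwanted byte is masked out, the new byte is shifted into place and
		 * OR-ed in, and the merged word is re-inserted at lane inst_c0 / 2.
		 */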
		case OP_INSERTX_I4_SLOW:
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2);
			x86_shift_reg_imm (code, X86_SHR, ins->sreg2, 16);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
			break;
		case OP_INSERTX_R4_SLOW:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
			/*TODO if inst_c0 == 0 use movss*/
			x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 0, ins->inst_c0 * 2);
			x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 2, ins->inst_c0 * 2 + 1);
			break;
		case OP_INSERTX_R8_SLOW:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
			if (cfg->verbose_level)
				printf ("CONVERTING a OP_INSERTX_R8_SLOW %d offset %x\n", ins->inst_c0, offset);
			if (ins->inst_c0)
				x86_sse_alu_pd_reg_membase (code, X86_SSE_MOVHPD_REG_MEMBASE, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			else
				x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			break;
		case OP_STOREX_MEMBASE_REG:
		case OP_STOREX_MEMBASE:
			x86_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
			break;
		case OP_LOADX_MEMBASE:
			x86_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
			break;
		case OP_LOADX_ALIGNED_MEMBASE:
			x86_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
			break;
		case OP_STOREX_ALIGNED_MEMBASE_REG:
			x86_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
			break;
		case OP_STOREX_NTA_MEMBASE_REG:
			x86_sse_alu_reg_membase (code, X86_SSE_MOVNTPS, ins->dreg, ins->sreg1, ins->inst_offset);
			break;
		case OP_PREFETCH_MEMBASE:
			x86_sse_alu_reg_membase (code, X86_SSE_PREFETCH, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
			break;
4797 /*FIXME the peephole pass should have killed this*/
4798 if (ins
->dreg
!= ins
->sreg1
)
4799 x86_movaps_reg_reg (code
, ins
->dreg
, ins
->sreg1
);
4802 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PXOR
, ins
->dreg
, ins
->dreg
);
4805 x86_sse_alu_pd_reg_reg (code
, X86_SSE_PCMPEQB
, ins
->dreg
, ins
->dreg
);
		case OP_FCONV_TO_R8_X:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
			x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			break;
		case OP_XCONV_R8_TO_I4:
			x86_cvttsd2si (code, ins->dreg, ins->sreg1);
			switch (ins->backend.source_opcode) {
			case OP_FCONV_TO_I1:
				x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
				break;
			case OP_FCONV_TO_U1:
				x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
				break;
			case OP_FCONV_TO_I2:
				x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
				break;
			case OP_FCONV_TO_U2:
				x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
				break;
			}
			break;
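		/*
		 * cvttsd2si converts with truncation toward zero into a 32-bit register;
		 * the widen that follows re-applies the sign/zero extension expected by the
		 * original narrow FCONV opcode recorded in backend.source_opcode.
		 */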
4832 /*FIXME this causes a partial register stall, maybe it would not be that bad to use shift + mask + or*/
4833 /*The +4 is to get a mov ?h, ?l over the same reg.*/
4834 x86_mov_reg_reg (code
, ins
->dreg
+ 4, ins
->dreg
, 1);
4835 x86_sse_alu_pd_reg_reg_imm (code
, X86_SSE_PINSRW
, ins
->dreg
, ins
->sreg1
, 0);
4836 x86_sse_alu_pd_reg_reg_imm (code
, X86_SSE_PINSRW
, ins
->dreg
, ins
->sreg1
, 1);
4837 x86_sse_shift_reg_imm (code
, X86_SSE_PSHUFD
, ins
->dreg
, ins
->dreg
, 0);
4840 x86_sse_alu_pd_reg_reg_imm (code
, X86_SSE_PINSRW
, ins
->dreg
, ins
->sreg1
, 0);
4841 x86_sse_alu_pd_reg_reg_imm (code
, X86_SSE_PINSRW
, ins
->dreg
, ins
->sreg1
, 1);
4842 x86_sse_shift_reg_imm (code
, X86_SSE_PSHUFD
, ins
->dreg
, ins
->dreg
, 0);
4845 x86_movd_xreg_reg (code
, ins
->dreg
, ins
->sreg1
);
4846 x86_sse_shift_reg_imm (code
, X86_SSE_PSHUFD
, ins
->dreg
, ins
->dreg
, 0);
4849 x86_fst_membase (code
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, FALSE
, TRUE
);
4850 x86_movd_xreg_membase (code
, ins
->dreg
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
);
4851 x86_sse_shift_reg_imm (code
, X86_SSE_PSHUFD
, ins
->dreg
, ins
->dreg
, 0);
4854 x86_fst_membase (code
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
, TRUE
, TRUE
);
4855 x86_movsd_reg_membase (code
, ins
->dreg
, ins
->backend
.spill_var
->inst_basereg
, ins
->backend
.spill_var
->inst_offset
);
4856 x86_sse_shift_reg_imm (code
, X86_SSE_PSHUFD
, ins
->dreg
, ins
->dreg
, 0x44);
4860 x86_sse_alu_ss_reg_reg (code
, X86_SSE_CVTDQ2PD
, ins
->dreg
, ins
->sreg1
);
4863 x86_sse_alu_ps_reg_reg (code
, X86_SSE_CVTDQ2PS
, ins
->dreg
, ins
->sreg1
);
4866 x86_sse_alu_sd_reg_reg (code
, X86_SSE_CVTPD2DQ
, ins
->dreg
, ins
->sreg1
);
4869 x86_sse_alu_pd_reg_reg (code
, X86_SSE_CVTPD2PS
, ins
->dreg
, ins
->sreg1
);
4872 x86_sse_alu_pd_reg_reg (code
, X86_SSE_CVTPS2DQ
, ins
->dreg
, ins
->sreg1
);
4875 x86_sse_alu_ps_reg_reg (code
, X86_SSE_CVTPS2PD
, ins
->dreg
, ins
->sreg1
);
4878 x86_sse_alu_pd_reg_reg (code
, X86_SSE_CVTTPD2DQ
, ins
->dreg
, ins
->sreg1
);
4881 x86_sse_alu_ss_reg_reg (code
, X86_SSE_CVTTPS2DQ
, ins
->dreg
, ins
->sreg1
);
		case OP_LIVERANGE_START: {
			if (cfg->verbose_level > 1)
				printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
			break;
		}
		case OP_LIVERANGE_END: {
			if (cfg->verbose_level > 1)
				printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
			break;
		}
		case OP_GC_SAFE_POINT: {
			guint8 *br [1];

			g_assert (mono_threads_is_coop_enabled ());

			x86_test_membase_imm (code, ins->sreg1, 0, 1);
			br [0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_threads_state_poll");
			x86_patch (br [0], code);
			break;
		}
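		/*
		 * The safe point tests the low bit of the per-thread state flag that sreg1
		 * points to and only calls mono_threads_state_poll when it is set, so the
		 * fast path is a single test plus an untaken forward branch.
		 */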
		case OP_GC_LIVENESS_DEF:
		case OP_GC_LIVENESS_USE:
		case OP_GC_PARAM_SLOT_LIVENESS_DEF:
			ins->backend.pc_offset = code - cfg->native_code;
			break;
		case OP_GC_SPILL_SLOT_LIVENESS_DEF:
			ins->backend.pc_offset = code - cfg->native_code;
			bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
			break;
		case OP_GET_SP:
			x86_mov_reg_reg (code, ins->dreg, X86_ESP, sizeof (mgreg_t));
			break;
		case OP_SET_SP:
			x86_mov_reg_reg (code, X86_ESP, ins->sreg1, sizeof (mgreg_t));
			break;
		default:
			g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
			g_assert_not_reached ();
		}
		if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
				   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
			g_assert_not_reached ();
		}
	}

	cfg->code_len = code - cfg->native_code;
}

#endif /* DISABLE_JIT */
void
mono_arch_register_lowlevel_calls (void)
{
}

void
mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
	unsigned char *ip = ji->ip.i + code;

	switch (ji->type) {
	case MONO_PATCH_INFO_IP:
		*((gconstpointer *)(ip)) = target;
		break;
	case MONO_PATCH_INFO_ABS:
	case MONO_PATCH_INFO_METHOD:
	case MONO_PATCH_INFO_METHOD_JUMP:
	case MONO_PATCH_INFO_INTERNAL_METHOD:
	case MONO_PATCH_INFO_BB:
	case MONO_PATCH_INFO_LABEL:
	case MONO_PATCH_INFO_RGCTX_FETCH:
	case MONO_PATCH_INFO_JIT_ICALL_ADDR:
		x86_patch (ip, (unsigned char*)target);
		break;
	case MONO_PATCH_INFO_NONE:
		break;
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8: {
		guint32 offset = mono_arch_get_patch_offset (ip);
		*((gconstpointer *)(ip + offset)) = target;
		break;
	}
	default: {
		guint32 offset = mono_arch_get_patch_offset (ip);
		*((gconstpointer *)(ip + offset)) = target;
		break;
	}
	}
}

static G_GNUC_UNUSED void
stack_unaligned (MonoMethod *m, gpointer caller)
{
	printf ("%s\n", mono_method_full_name (m, TRUE));
	g_assert_not_reached ();
}
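/*
 * The prologue below opens with a stack-alignment self-check (when enabled): it
 * calls the stack_unaligned helper above whenever ESP does not have the expected
 * alignment at method entry, printing the offending method before aborting.
 */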
4991 mono_arch_emit_prolog (MonoCompile
*cfg
)
4993 MonoMethod
*method
= cfg
->method
;
4995 MonoMethodSignature
*sig
;
4999 int alloc_size
, pos
, max_offset
, i
, cfa_offset
;
5001 gboolean need_stack_frame
;
5003 cfg
->code_size
= MAX (cfg
->header
->code_size
* 4, 10240);
5005 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
5006 cfg
->code_size
+= 512;
5008 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
5014 /* Check that the stack is aligned on osx */
5015 x86_mov_reg_reg (code
, X86_EAX
, X86_ESP
, sizeof (mgreg_t
));
5016 x86_alu_reg_imm (code
, X86_AND
, X86_EAX
, 15);
5017 x86_alu_reg_imm (code
, X86_CMP
, X86_EAX
, 0xc);
5019 x86_branch_disp (code
, X86_CC_Z
, 0, FALSE
);
5020 x86_push_membase (code
, X86_ESP
, 0);
5021 x86_push_imm (code
, cfg
->method
);
5022 x86_mov_reg_imm (code
, X86_EAX
, stack_unaligned
);
5023 x86_call_reg (code
, X86_EAX
);
5024 x86_patch (br
[0], code
);
5028 /* Offset between RSP and the CFA */
5032 cfa_offset
= sizeof (gpointer
);
5033 mono_emit_unwind_op_def_cfa (cfg
, code
, X86_ESP
, sizeof (gpointer
));
5034 // IP saved at CFA - 4
5035 /* There is no IP reg on x86 */
5036 mono_emit_unwind_op_offset (cfg
, code
, X86_NREG
, -cfa_offset
);
5037 mini_gc_set_slot_type_from_cfa (cfg
, -cfa_offset
, SLOT_NOREF
);
5039 need_stack_frame
= needs_stack_frame (cfg
);
5041 if (need_stack_frame
) {
5042 x86_push_reg (code
, X86_EBP
);
5043 cfa_offset
+= sizeof (gpointer
);
5044 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, cfa_offset
);
5045 mono_emit_unwind_op_offset (cfg
, code
, X86_EBP
, - cfa_offset
);
5046 x86_mov_reg_reg (code
, X86_EBP
, X86_ESP
, 4);
5047 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, X86_EBP
);
5048 /* These are handled automatically by the stack marking code */
5049 mini_gc_set_slot_type_from_cfa (cfg
, -cfa_offset
, SLOT_NOREF
);
5051 cfg
->frame_reg
= X86_ESP
;
5054 cfg
->stack_offset
+= cfg
->param_area
;
5055 cfg
->stack_offset
= ALIGN_TO (cfg
->stack_offset
, MONO_ARCH_FRAME_ALIGNMENT
);
5057 alloc_size
= cfg
->stack_offset
;
5060 if (!method
->save_lmf
) {
5061 if (cfg
->used_int_regs
& (1 << X86_EBX
)) {
5062 x86_push_reg (code
, X86_EBX
);
5064 cfa_offset
+= sizeof (gpointer
);
5065 mono_emit_unwind_op_offset (cfg
, code
, X86_EBX
, - cfa_offset
);
5066 /* These are handled automatically by the stack marking code */
5067 mini_gc_set_slot_type_from_cfa (cfg
, - cfa_offset
, SLOT_NOREF
);
5070 if (cfg
->used_int_regs
& (1 << X86_EDI
)) {
5071 x86_push_reg (code
, X86_EDI
);
5073 cfa_offset
+= sizeof (gpointer
);
5074 mono_emit_unwind_op_offset (cfg
, code
, X86_EDI
, - cfa_offset
);
5075 mini_gc_set_slot_type_from_cfa (cfg
, - cfa_offset
, SLOT_NOREF
);
5078 if (cfg
->used_int_regs
& (1 << X86_ESI
)) {
5079 x86_push_reg (code
, X86_ESI
);
5081 cfa_offset
+= sizeof (gpointer
);
5082 mono_emit_unwind_op_offset (cfg
, code
, X86_ESI
, - cfa_offset
);
5083 mini_gc_set_slot_type_from_cfa (cfg
, - cfa_offset
, SLOT_NOREF
);
5089 /* the original alloc_size is already aligned: there is %ebp and retip pushed, so realign */
5090 if (mono_do_x86_stack_align
&& need_stack_frame
) {
5091 int tot
= alloc_size
+ pos
+ 4; /* ret ip */
5092 if (need_stack_frame
)
5094 tot
&= MONO_ARCH_FRAME_ALIGNMENT
- 1;
5096 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- tot
;
5097 for (i
= 0; i
< MONO_ARCH_FRAME_ALIGNMENT
- tot
; i
+= sizeof (mgreg_t
))
5098 mini_gc_set_slot_type_from_fp (cfg
, - (alloc_size
+ pos
- i
), SLOT_NOREF
);
5102 cfg
->arch
.sp_fp_offset
= alloc_size
+ pos
;
5105 /* See mono_emit_stack_alloc */
5106 #if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
5107 guint32 remaining_size
= alloc_size
;
5108 /*FIXME handle unbounded code expansion, we should use a loop in case of more than X interactions*/
5109 guint32 required_code_size
= ((remaining_size
/ 0x1000) + 1) * 8; /*8 is the max size of x86_alu_reg_imm + x86_test_membase_reg*/
5110 guint32 offset
= code
- cfg
->native_code
;
5111 if (G_UNLIKELY (required_code_size
>= (cfg
->code_size
- offset
))) {
5112 while (required_code_size
>= (cfg
->code_size
- offset
))
5113 cfg
->code_size
*= 2;
5114 cfg
->native_code
= mono_realloc_native_code(cfg
);
5115 code
= cfg
->native_code
+ offset
;
5116 cfg
->stat_code_reallocs
++;
5118 while (remaining_size
>= 0x1000) {
5119 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 0x1000);
5120 x86_test_membase_reg (code
, X86_ESP
, 0, X86_ESP
);
5121 remaining_size
-= 0x1000;
5124 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, remaining_size
);
5126 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, alloc_size
);
5129 g_assert (need_stack_frame
);
5132 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
||
5133 cfg
->method
->wrapper_type
== MONO_WRAPPER_RUNTIME_INVOKE
) {
5134 x86_alu_reg_imm (code
, X86_AND
, X86_ESP
, -MONO_ARCH_FRAME_ALIGNMENT
);
5137 #if DEBUG_STACK_ALIGNMENT
5138 /* check the stack is aligned */
5139 if (need_stack_frame
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
5140 x86_mov_reg_reg (code
, X86_ECX
, X86_ESP
, 4);
5141 x86_alu_reg_imm (code
, X86_AND
, X86_ECX
, MONO_ARCH_FRAME_ALIGNMENT
- 1);
5142 x86_alu_reg_imm (code
, X86_CMP
, X86_ECX
, 0);
5143 x86_branch_disp (code
, X86_CC_EQ
, 3, FALSE
);
5144 x86_breakpoint (code
);
5148 /* compute max_offset in order to use short forward jumps */
5150 if (cfg
->opt
& MONO_OPT_BRANCH
) {
5151 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
5153 bb
->max_offset
= max_offset
;
5155 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
5157 /* max alignment for loops */
5158 if ((cfg
->opt
& MONO_OPT_LOOP
) && bb_is_loop_start (bb
))
5159 max_offset
+= LOOP_ALIGNMENT
;
5160 MONO_BB_FOR_EACH_INS (bb
, ins
) {
5161 if (ins
->opcode
== OP_LABEL
)
5162 ins
->inst_c1
= max_offset
;
5163 max_offset
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
5168 /* store runtime generic context */
5169 if (cfg
->rgctx_var
) {
5170 g_assert (cfg
->rgctx_var
->opcode
== OP_REGOFFSET
&& cfg
->rgctx_var
->inst_basereg
== X86_EBP
);
5172 x86_mov_membase_reg (code
, X86_EBP
, cfg
->rgctx_var
->inst_offset
, MONO_ARCH_RGCTX_REG
, 4);
5175 if (method
->save_lmf
)
5176 code
= emit_setup_lmf (cfg
, code
, cfg
->lmf_var
->inst_offset
, cfa_offset
);
5178 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
5179 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
5184 if (cfg
->arch
.ss_tramp_var
) {
5185 /* Initialize ss_tramp_var */
5186 ins
= cfg
->arch
.ss_tramp_var
;
5187 g_assert (ins
->opcode
== OP_REGOFFSET
);
5189 g_assert (!cfg
->compile_aot
);
5190 x86_mov_membase_imm (code
, ins
->inst_basereg
, ins
->inst_offset
, (guint32
)&ss_trampoline
, 4);
5193 if (cfg
->arch
.bp_tramp_var
) {
5194 /* Initialize bp_tramp_var */
5195 ins
= cfg
->arch
.bp_tramp_var
;
5196 g_assert (ins
->opcode
== OP_REGOFFSET
);
5198 g_assert (!cfg
->compile_aot
);
5199 x86_mov_membase_imm (code
, ins
->inst_basereg
, ins
->inst_offset
, (guint32
)&bp_trampoline
, 4);
5203 /* load arguments allocated to register from the stack */
5204 sig
= mono_method_signature (method
);
5207 cinfo
= (CallInfo
*)cfg
->arch
.cinfo
;
5209 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
5210 inst
= cfg
->args
[pos
];
5211 ainfo
= &cinfo
->args
[pos
];
5212 if (inst
->opcode
== OP_REGVAR
) {
5213 g_assert (need_stack_frame
);
5214 x86_mov_reg_membase (code
, inst
->dreg
, X86_EBP
, ainfo
->offset
+ ARGS_OFFSET
, 4);
5215 if (cfg
->verbose_level
> 2)
5216 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
5221 cfg
->code_len
= code
- cfg
->native_code
;
5223 g_assert (cfg
->code_len
< cfg
->code_size
);
5229 mono_arch_emit_epilog (MonoCompile
*cfg
)
5231 MonoMethod
*method
= cfg
->method
;
5232 MonoMethodSignature
*sig
= mono_method_signature (method
);
5234 guint32 stack_to_pop
;
5236 int max_epilog_size
= 16;
5238 gboolean need_stack_frame
= needs_stack_frame (cfg
);
5240 if (cfg
->method
->save_lmf
)
5241 max_epilog_size
+= 128;
5243 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
5244 cfg
->code_size
*= 2;
5245 cfg
->native_code
= mono_realloc_native_code(cfg
);
5246 cfg
->stat_code_reallocs
++;
5249 code
= cfg
->native_code
+ cfg
->code_len
;
5251 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
5252 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
5254 /* the code restoring the registers must be kept in sync with OP_TAILCALL */
5257 if (method
->save_lmf
) {
5258 gint32 lmf_offset
= cfg
->lmf_var
->inst_offset
;
5261 /* check if we need to restore protection of the stack after a stack overflow */
5262 if (!cfg
->compile_aot
&& mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_JIT_TLS
) != -1) {
5263 code
= mono_x86_emit_tls_get (code
, X86_ECX
, mono_tls_get_tls_offset (TLS_KEY_JIT_TLS
));
5265 gpointer func
= mono_tls_get_tls_getter (TLS_KEY_JIT_TLS
, TRUE
);
5266 /* FIXME use tls only from IR level */
5267 x86_xchg_reg_reg (code
, X86_EAX
, X86_ECX
, 4);
5268 code
= emit_call (cfg
, code
, MONO_PATCH_INFO_INTERNAL_METHOD
, func
);
5269 x86_xchg_reg_reg (code
, X86_EAX
, X86_ECX
, 4);
5272 /* we load the value in a separate instruction: this mechanism may be
5273 * used later as a safer way to do thread interruption
5275 x86_mov_reg_membase (code
, X86_ECX
, X86_ECX
, MONO_STRUCT_OFFSET (MonoJitTlsData
, restore_stack_prot
), 4);
5276 x86_alu_reg_imm (code
, X86_CMP
, X86_ECX
, 0);
5278 x86_branch8 (code
, X86_CC_Z
, 0, FALSE
);
5279 /* note that the call trampoline will preserve eax/edx */
5280 x86_call_reg (code
, X86_ECX
);
5281 x86_patch (patch
, code
);
5283 /* restore caller saved regs */
5284 if (cfg
->used_int_regs
& (1 << X86_EBX
)) {
5285 x86_mov_reg_membase (code
, X86_EBX
, cfg
->frame_reg
, lmf_offset
+ MONO_STRUCT_OFFSET (MonoLMF
, ebx
), 4);
5288 if (cfg
->used_int_regs
& (1 << X86_EDI
)) {
5289 x86_mov_reg_membase (code
, X86_EDI
, cfg
->frame_reg
, lmf_offset
+ MONO_STRUCT_OFFSET (MonoLMF
, edi
), 4);
5291 if (cfg
->used_int_regs
& (1 << X86_ESI
)) {
5292 x86_mov_reg_membase (code
, X86_ESI
, cfg
->frame_reg
, lmf_offset
+ MONO_STRUCT_OFFSET (MonoLMF
, esi
), 4);
5295 /* EBP is restored by LEAVE */
5297 for (i
= 0; i
< X86_NREG
; ++i
) {
5298 if ((cfg
->used_int_regs
& X86_CALLER_REGS
& (1 << i
)) && (i
!= X86_EBP
)) {
5304 g_assert (need_stack_frame
);
5305 x86_lea_membase (code
, X86_ESP
, X86_EBP
, pos
);
5309 g_assert (need_stack_frame
);
5310 x86_lea_membase (code
, X86_ESP
, X86_EBP
, pos
);
5313 if (cfg
->used_int_regs
& (1 << X86_ESI
)) {
5314 x86_pop_reg (code
, X86_ESI
);
5316 if (cfg
->used_int_regs
& (1 << X86_EDI
)) {
5317 x86_pop_reg (code
, X86_EDI
);
5319 if (cfg
->used_int_regs
& (1 << X86_EBX
)) {
5320 x86_pop_reg (code
, X86_EBX
);
5324 /* Load returned vtypes into registers if needed */
5325 cinfo
= (CallInfo
*)cfg
->arch
.cinfo
;
5326 if (cinfo
->ret
.storage
== ArgValuetypeInReg
) {
5327 for (quad
= 0; quad
< 2; quad
++) {
5328 switch (cinfo
->ret
.pair_storage
[quad
]) {
5330 x86_mov_reg_membase (code
, cinfo
->ret
.pair_regs
[quad
], cfg
->ret
->inst_basereg
, cfg
->ret
->inst_offset
+ (quad
* sizeof (gpointer
)), 4);
5332 case ArgOnFloatFpStack
:
5333 x86_fld_membase (code
, cfg
->ret
->inst_basereg
, cfg
->ret
->inst_offset
+ (quad
* sizeof (gpointer
)), FALSE
);
5335 case ArgOnDoubleFpStack
:
5336 x86_fld_membase (code
, cfg
->ret
->inst_basereg
, cfg
->ret
->inst_offset
+ (quad
* sizeof (gpointer
)), TRUE
);
5341 g_assert_not_reached ();
5346 if (need_stack_frame
)
5349 if (CALLCONV_IS_STDCALL (sig
)) {
5350 MonoJitArgumentInfo
*arg_info
= alloca (sizeof (MonoJitArgumentInfo
) * (sig
->param_count
+ 1));
5352 stack_to_pop
= mono_arch_get_argument_info (sig
, sig
->param_count
, arg_info
);
5353 } else if (cinfo
->callee_stack_pop
)
5354 stack_to_pop
= cinfo
->callee_stack_pop
;
5359 g_assert (need_stack_frame
);
5360 x86_ret_imm (code
, stack_to_pop
);
5365 cfg
->code_len
= code
- cfg
->native_code
;
5367 g_assert (cfg
->code_len
< cfg
->code_size
);
5371 mono_arch_emit_exceptions (MonoCompile
*cfg
)
5373 MonoJumpInfo
*patch_info
;
5376 MonoClass
*exc_classes
[16];
5377 guint8
*exc_throw_start
[16], *exc_throw_end
[16];
5381 /* Compute needed space */
5382 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5383 if (patch_info
->type
== MONO_PATCH_INFO_EXC
)
5388 * make sure we have enough space for exceptions
5389 * 16 is the size of two push_imm instructions and a call
5391 if (cfg
->compile_aot
)
5392 code_size
= exc_count
* 32;
5394 code_size
= exc_count
* 16;
5396 while (cfg
->code_len
+ code_size
> (cfg
->code_size
- 16)) {
5397 cfg
->code_size
*= 2;
5398 cfg
->native_code
= mono_realloc_native_code(cfg
);
5399 cfg
->stat_code_reallocs
++;
5402 code
= cfg
->native_code
+ cfg
->code_len
;
5405 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5406 switch (patch_info
->type
) {
5407 case MONO_PATCH_INFO_EXC
: {
5408 MonoClass
*exc_class
;
5412 x86_patch (patch_info
->ip
.i
+ cfg
->native_code
, code
);
5414 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
5415 throw_ip
= patch_info
->ip
.i
;
5417 /* Find a throw sequence for the same exception class */
5418 for (i
= 0; i
< nthrows
; ++i
)
5419 if (exc_classes
[i
] == exc_class
)
5422 x86_push_imm (code
, (exc_throw_end
[i
] - cfg
->native_code
) - throw_ip
);
5423 x86_jump_code (code
, exc_throw_start
[i
]);
5424 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5429 /* Compute size of code following the push <OFFSET> */
5432 /*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/
5434 if ((code
- cfg
->native_code
) - throw_ip
< 126 - size
) {
5435 /* Use the shorter form */
5437 x86_push_imm (code
, 0);
5441 x86_push_imm (code
, 0xf0f0f0f0);
5446 exc_classes
[nthrows
] = exc_class
;
5447 exc_throw_start
[nthrows
] = code
;
5450 x86_push_imm (code
, exc_class
->type_token
- MONO_TOKEN_TYPE_DEF
);
5451 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
5452 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
5453 patch_info
->ip
.i
= code
- cfg
->native_code
;
5454 x86_call_code (code
, 0);
5455 x86_push_imm (buf
, (code
- cfg
->native_code
) - throw_ip
);
5460 exc_throw_end
[nthrows
] = code
;
5472 cfg
->code_len
= code
- cfg
->native_code
;
5474 g_assert (cfg
->code_len
< cfg
->code_size
);
void
mono_arch_flush_icache (guint8 *code, gint size)
{
}

void
mono_arch_flush_register_windows (void)
{
}

gboolean
mono_arch_is_inst_imm (gint64 imm)
{
	return TRUE;
}

void
mono_arch_finish_init (void)
{
	if (!g_getenv ("MONO_NO_TLS")) {
#ifndef TARGET_WIN32
		optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
	}
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}
// Linear handler, the bsearch head compare is shorter
//[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
//[1 + 1] x86_branch8(inst,cond,imm,is_signed)
//        x86_patch(ins,target)
//[1 + 5] x86_jump_mem(inst,mem)

#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 5
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0

static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
{
	int i, distance = 0;
	for (i = start; i < target; ++i)
		distance += imt_entries [i]->chunk_size;
	return distance;
}
5534 * LOCKING: called with the domain lock held
5537 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
5538 gpointer fail_tramp
)
5542 guint8
*code
, *start
;
5545 for (i
= 0; i
< count
; ++i
) {
5546 MonoIMTCheckItem
*item
= imt_entries
[i
];
5547 if (item
->is_equals
) {
5548 if (item
->check_target_idx
) {
5549 if (!item
->compare_done
)
5550 item
->chunk_size
+= CMP_SIZE
;
5551 item
->chunk_size
+= BR_SMALL_SIZE
+ JUMP_IMM_SIZE
;
5554 item
->chunk_size
+= CMP_SIZE
+ BR_SMALL_SIZE
+ JUMP_IMM_SIZE
* 2;
5556 item
->chunk_size
+= JUMP_IMM_SIZE
;
5557 #if ENABLE_WRONG_METHOD_CHECK
5558 item
->chunk_size
+= CMP_SIZE
+ BR_SMALL_SIZE
+ 1;
5563 item
->chunk_size
+= CMP_SIZE
+ BR_LARGE_SIZE
;
5564 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
5566 size
+= item
->chunk_size
;
5569 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
);
5571 code
= mono_domain_code_reserve (domain
, size
);
5574 unwind_ops
= mono_arch_get_cie_program ();
5576 for (i
= 0; i
< count
; ++i
) {
5577 MonoIMTCheckItem
*item
= imt_entries
[i
];
5578 item
->code_target
= code
;
5579 if (item
->is_equals
) {
5580 if (item
->check_target_idx
) {
5581 if (!item
->compare_done
)
5582 x86_alu_reg_imm (code
, X86_CMP
, MONO_ARCH_IMT_REG
, (guint32
)item
->key
);
5583 item
->jmp_code
= code
;
5584 x86_branch8 (code
, X86_CC_NE
, 0, FALSE
);
5585 if (item
->has_target_code
)
5586 x86_jump_code (code
, item
->value
.target_code
);
5588 x86_jump_mem (code
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
5591 x86_alu_reg_imm (code
, X86_CMP
, MONO_ARCH_IMT_REG
, (guint32
)item
->key
);
5592 item
->jmp_code
= code
;
5593 x86_branch8 (code
, X86_CC_NE
, 0, FALSE
);
5594 if (item
->has_target_code
)
5595 x86_jump_code (code
, item
->value
.target_code
);
5597 x86_jump_mem (code
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
5598 x86_patch (item
->jmp_code
, code
);
5599 x86_jump_code (code
, fail_tramp
);
5600 item
->jmp_code
= NULL
;
5602 /* enable the commented code to assert on wrong method */
5603 #if ENABLE_WRONG_METHOD_CHECK
5604 x86_alu_reg_imm (code
, X86_CMP
, MONO_ARCH_IMT_REG
, (guint32
)item
->key
);
5605 item
->jmp_code
= code
;
5606 x86_branch8 (code
, X86_CC_NE
, 0, FALSE
);
5608 if (item
->has_target_code
)
5609 x86_jump_code (code
, item
->value
.target_code
);
5611 x86_jump_mem (code
, & (vtable
->vtable
[item
->value
.vtable_slot
]));
5612 #if ENABLE_WRONG_METHOD_CHECK
5613 x86_patch (item
->jmp_code
, code
);
5614 x86_breakpoint (code
);
5615 item
->jmp_code
= NULL
;
5620 x86_alu_reg_imm (code
, X86_CMP
, MONO_ARCH_IMT_REG
, (guint32
)item
->key
);
5621 item
->jmp_code
= code
;
5622 if (x86_is_imm8 (imt_branch_distance (imt_entries
, i
, item
->check_target_idx
)))
5623 x86_branch8 (code
, X86_CC_GE
, 0, FALSE
);
5625 x86_branch32 (code
, X86_CC_GE
, 0, FALSE
);
5628 /* patch the branches to get to the target items */
5629 for (i
= 0; i
< count
; ++i
) {
5630 MonoIMTCheckItem
*item
= imt_entries
[i
];
5631 if (item
->jmp_code
) {
5632 if (item
->check_target_idx
) {
5633 x86_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
5639 mono_stats
.imt_trampolines_size
+= code
- start
;
5640 g_assert (code
- start
<= size
);
5644 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable
->klass
->name_space
, vtable
->klass
->name
, count
);
5645 mono_disassemble_code (NULL
, (guint8
*)start
, code
- start
, buff
);
5649 if (mono_jit_map_is_enabled ()) {
5652 buff
= g_strdup_printf ("imt_%s_%s_entries_%d", vtable
->klass
->name_space
, vtable
->klass
->name
, count
);
5654 buff
= g_strdup_printf ("imt_trampoline_entries_%d", count
);
5655 mono_emit_jit_tramp (start
, code
- start
, buff
);
5659 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
);
5661 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, unwind_ops
), domain
);
MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
	return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}

MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}

GSList*
mono_arch_get_cie_program (void)
{
	GSList *l = NULL;

	mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
	mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);

	return l;
}
5690 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
5692 MonoInst
*ins
= NULL
;
5695 if (cmethod
->klass
== mono_defaults
.math_class
) {
5696 if (strcmp (cmethod
->name
, "Sin") == 0) {
5698 } else if (strcmp (cmethod
->name
, "Cos") == 0) {
5700 } else if (strcmp (cmethod
->name
, "Tan") == 0) {
5702 } else if (strcmp (cmethod
->name
, "Atan") == 0) {
5704 } else if (strcmp (cmethod
->name
, "Sqrt") == 0) {
5706 } else if (strcmp (cmethod
->name
, "Abs") == 0 && fsig
->params
[0]->type
== MONO_TYPE_R8
) {
5708 } else if (strcmp (cmethod
->name
, "Round") == 0 && fsig
->param_count
== 1 && fsig
->params
[0]->type
== MONO_TYPE_R8
) {
5712 if (opcode
&& fsig
->param_count
== 1) {
5713 MONO_INST_NEW (cfg
, ins
, opcode
);
5714 ins
->type
= STACK_R8
;
5715 ins
->dreg
= mono_alloc_freg (cfg
);
5716 ins
->sreg1
= args
[0]->dreg
;
5717 MONO_ADD_INS (cfg
->cbb
, ins
);
5720 if (cfg
->opt
& MONO_OPT_CMOV
) {
5723 if (strcmp (cmethod
->name
, "Min") == 0) {
5724 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
5726 } else if (strcmp (cmethod
->name
, "Max") == 0) {
5727 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
5731 if (opcode
&& fsig
->param_count
== 2) {
5732 MONO_INST_NEW (cfg
, ins
, opcode
);
5733 ins
->type
= STACK_I4
;
5734 ins
->dreg
= mono_alloc_ireg (cfg
);
5735 ins
->sreg1
= args
[0]->dreg
;
5736 ins
->sreg2
= args
[1]->dreg
;
5737 MONO_ADD_INS (cfg
->cbb
, ins
);
5742 /* OP_FREM is not IEEE compatible */
5743 else if (strcmp (cmethod
->name
, "IEEERemainder") == 0 && fsig
->param_count
== 2) {
5744 MONO_INST_NEW (cfg
, ins
, OP_FREM
);
5745 ins
->inst_i0
= args
[0];
5746 ins
->inst_i1
= args
[1];
5755 mono_arch_print_tree (MonoInst
*tree
, int arity
)
5761 mono_arch_get_patch_offset (guint8
*code
)
5763 if ((code
[0] == 0x8b) && (x86_modrm_mod (code
[1]) == 0x2))
5765 else if (code
[0] == 0xba)
5767 else if (code
[0] == 0x68)
5770 else if ((code
[0] == 0xff) && (x86_modrm_reg (code
[1]) == 0x6))
5771 /* push <OFFSET>(<REG>) */
5773 else if ((code
[0] == 0xff) && (x86_modrm_reg (code
[1]) == 0x2))
5774 /* call *<OFFSET>(<REG>) */
5776 else if ((code
[0] == 0xdd) || (code
[0] == 0xd9))
5779 else if ((code
[0] == 0x58) && (code
[1] == 0x05))
5780 /* pop %eax; add <OFFSET>, %eax */
5782 else if ((code
[0] >= 0x58) && (code
[0] <= 0x58 + X86_NREG
) && (code
[1] == 0x81))
5783 /* pop <REG>; add <OFFSET>, <REG> */
5785 else if ((code
[0] >= 0xb8) && (code
[0] < 0xb8 + 8))
5786 /* mov <REG>, imm */
5789 g_assert_not_reached ();
/*
 * mono_breakpoint_clean_code:
 *
 * Copy @size bytes from @code - @offset to the buffer @buf. If the debugger inserted software
 * breakpoints in the original code, they are removed in the copy.
 *
 * Returns TRUE if no sw breakpoint was present.
 */
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
	/*
	 * If method_start is non-NULL we need to perform bound checks, since we access memory
	 * at code - offset we could go before the start of the method and end up in a different
	 * page of memory that is not mapped or read incorrect data anyway. We zero-fill the bytes
	 * we cannot read safely.
	 */
	if (!method_start || code - offset >= method_start) {
		memcpy (buf, code - offset, size);
	} else {
		int diff = code - method_start;
		memset (buf, 0, size);
		memcpy (buf + offset - diff, method_start, diff + size - offset);
	}
	return TRUE;
}
/*
 * mono_x86_get_this_arg_offset:
 *
 *   Return the offset of the stack location where this is passed during a virtual
 * call.
 */
guint32
mono_x86_get_this_arg_offset (MonoMethodSignature *sig)
{
	return 0;
}

gpointer
mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
	guint32 esp = regs [X86_ESP];
	gpointer res;

	/*
	 * The stack looks like:
	 * <other args>
	 * <this=delegate>
	 */
	res = ((MonoObject**)esp) [0];
	return res;
}
5851 #define MAX_ARCH_DELEGATE_PARAMS 10
5854 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, guint32 param_count
)
5856 guint8
*code
, *start
;
5857 int code_reserve
= 64;
5860 unwind_ops
= mono_arch_get_cie_program ();
5863 * The stack contains:
5869 start
= code
= mono_global_codeman_reserve (code_reserve
);
5871 /* Replace the this argument with the target */
5872 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, 4, 4);
5873 x86_mov_reg_membase (code
, X86_ECX
, X86_EAX
, MONO_STRUCT_OFFSET (MonoDelegate
, target
), 4);
5874 x86_mov_membase_reg (code
, X86_ESP
, 4, X86_ECX
, 4);
5875 x86_jump_membase (code
, X86_EAX
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
5877 g_assert ((code
- start
) < code_reserve
);
5880 /* 8 for mov_reg and jump, plus 8 for each parameter */
5881 code_reserve
= 8 + (param_count
* 8);
5883 * The stack contains:
5884 * <args in reverse order>
5889 * <args in reverse order>
5892 * without unbalancing the stack.
5893 * So move each arg up a spot in the stack (overwriting un-needed 'this' arg)
5894 * and leaving original spot of first arg as placeholder in stack so
5895 * when callee pops stack everything works.
5898 start
= code
= mono_global_codeman_reserve (code_reserve
);
5900 /* store delegate for access to method_ptr */
5901 x86_mov_reg_membase (code
, X86_ECX
, X86_ESP
, 4, 4);
5904 for (i
= 0; i
< param_count
; ++i
) {
5905 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, (i
+2)*4, 4);
5906 x86_mov_membase_reg (code
, X86_ESP
, (i
+1)*4, X86_EAX
, 4);
5909 x86_jump_membase (code
, X86_ECX
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
5911 g_assert ((code
- start
) < code_reserve
);
5915 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, unwind_ops
);
5917 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
5918 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, unwind_ops
);
5922 if (mono_jit_map_is_enabled ()) {
5925 buff
= (char*)"delegate_invoke_has_target";
5927 buff
= g_strdup_printf ("delegate_invoke_no_target_%d", param_count
);
5928 mono_emit_jit_tramp (start
, code
- start
, buff
);
5932 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
);
5937 #define MAX_VIRTUAL_DELEGATE_OFFSET 32
5940 get_delegate_virtual_invoke_impl (MonoTrampInfo
**info
, gboolean load_imt_reg
, int offset
)
5942 guint8
*code
, *start
;
5947 if (offset
/ (int)sizeof (gpointer
) > MAX_VIRTUAL_DELEGATE_OFFSET
)
5951 * The stack contains:
5955 start
= code
= mono_global_codeman_reserve (size
);
5957 unwind_ops
= mono_arch_get_cie_program ();
5959 /* Replace the this argument with the target */
5960 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, 4, 4);
5961 x86_mov_reg_membase (code
, X86_ECX
, X86_EAX
, MONO_STRUCT_OFFSET (MonoDelegate
, target
), 4);
5962 x86_mov_membase_reg (code
, X86_ESP
, 4, X86_ECX
, 4);
5965 /* Load the IMT reg */
5966 x86_mov_reg_membase (code
, MONO_ARCH_IMT_REG
, X86_EAX
, MONO_STRUCT_OFFSET (MonoDelegate
, method
), 4);
5969 /* Load the vtable */
5970 x86_mov_reg_membase (code
, X86_EAX
, X86_ECX
, MONO_STRUCT_OFFSET (MonoObject
, vtable
), 4);
5971 x86_jump_membase (code
, X86_EAX
, offset
);
5972 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
);
5974 tramp_name
= mono_get_delegate_virtual_invoke_impl_name (load_imt_reg
, offset
);
5975 *info
= mono_tramp_info_create (tramp_name
, start
, code
- start
, NULL
, unwind_ops
);
5976 g_free (tramp_name
);
5983 mono_arch_get_delegate_invoke_impls (void)
5986 MonoTrampInfo
*info
;
5989 get_delegate_invoke_impl (&info
, TRUE
, 0);
5990 res
= g_slist_prepend (res
, info
);
5992 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
5993 get_delegate_invoke_impl (&info
, FALSE
, i
);
5994 res
= g_slist_prepend (res
, info
);
5997 for (i
= 0; i
<= MAX_VIRTUAL_DELEGATE_OFFSET
; ++i
) {
5998 get_delegate_virtual_invoke_impl (&info
, TRUE
, - i
* SIZEOF_VOID_P
);
5999 res
= g_slist_prepend (res
, info
);
6001 get_delegate_virtual_invoke_impl (&info
, FALSE
, i
* SIZEOF_VOID_P
);
6002 res
= g_slist_prepend (res
, info
);
6009 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
6011 guint8
*code
, *start
;
6013 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
6016 /* FIXME: Support more cases */
6017 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
6021 * The stack contains:
6027 static guint8
* cached
= NULL
;
6031 if (mono_aot_only
) {
6032 start
= mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
6034 MonoTrampInfo
*info
;
6035 start
= get_delegate_invoke_impl (&info
, TRUE
, 0);
6036 mono_tramp_info_register (info
, NULL
);
6039 mono_memory_barrier ();
6043 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
6046 for (i
= 0; i
< sig
->param_count
; ++i
)
6047 if (!mono_is_regsize_var (sig
->params
[i
]))
6050 code
= cache
[sig
->param_count
];
6054 if (mono_aot_only
) {
6055 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
6056 start
= mono_aot_get_trampoline (name
);
6059 MonoTrampInfo
*info
;
6060 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
);
6061 mono_tramp_info_register (info
, NULL
);
6064 mono_memory_barrier ();
6066 cache
[sig
->param_count
] = start
;
6073 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
6075 MonoTrampInfo
*info
;
6078 code
= get_delegate_virtual_invoke_impl (&info
, load_imt_reg
, offset
);
6080 mono_tramp_info_register (info
, NULL
);
mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	switch (reg) {
	case X86_EAX: return ctx->eax;
	case X86_EBX: return ctx->ebx;
	case X86_ECX: return ctx->ecx;
	case X86_EDX: return ctx->edx;
	case X86_ESP: return ctx->esp;
	case X86_EBP: return ctx->ebp;
	case X86_ESI: return ctx->esi;
	case X86_EDI: return ctx->edi;
	default:
		g_assert_not_reached ();
		return 0;
	}
}

void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
{
	switch (reg) {
	case X86_EAX: ctx->eax = val; break;
	case X86_EBX: ctx->ebx = val; break;
	case X86_ECX: ctx->ecx = val; break;
	case X86_EDX: ctx->edx = val; break;
	case X86_ESP: ctx->esp = val; break;
	case X86_EBP: ctx->ebp = val; break;
	case X86_ESI: ctx->esi = val; break;
	case X86_EDI: ctx->edi = val; break;
	default:
		g_assert_not_reached ();
	}
}
#ifdef MONO_ARCH_SIMD_INTRINSICS

static MonoInst*
get_float_to_x_spill_area (MonoCompile *cfg)
{
	if (!cfg->fconv_to_r8_x_var) {
		cfg->fconv_to_r8_x_var = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
		cfg->fconv_to_r8_x_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
	}
	return cfg->fconv_to_r8_x_var;
}
6148 * Convert all fconv opts that MONO_OPT_SSE2 would get wrong.

void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
	MonoInst *fconv;
	int dreg, src_opcode;

	if (!(cfg->opt & MONO_OPT_SSE2) || !(cfg->opt & MONO_OPT_SIMD) || COMPILE_LLVM (cfg))
		return;

	switch (src_opcode = ins->opcode) {
	case OP_FCONV_TO_I1:
	case OP_FCONV_TO_U1:
	case OP_FCONV_TO_I2:
	case OP_FCONV_TO_U2:
	case OP_FCONV_TO_I4:
		break;
	default:
		return;
	}

	/* dreg is the IREG and sreg1 is the FREG */
	MONO_INST_NEW (cfg, fconv, OP_FCONV_TO_R8_X);
	fconv->klass = NULL; /*FIXME, what can I use here as the Mono.Simd lib might not be loaded yet*/
	fconv->sreg1 = ins->sreg1;
	fconv->dreg = mono_alloc_ireg (cfg);
	fconv->type = STACK_VTYPE;
	fconv->backend.spill_var = get_float_to_x_spill_area (cfg);

	mono_bblock_insert_before_ins (cfg->cbb, ins, fconv);

	dreg = ins->dreg;
	NULLIFY_INS (ins);
	ins->opcode = OP_XCONV_R8_TO_I4;

	ins->klass = mono_defaults.int32_class;
	ins->sreg1 = fconv->dreg;
	ins->dreg = dreg;
	ins->type = STACK_I4;
	ins->backend.source_opcode = src_opcode;
}

#endif /* #ifdef MONO_ARCH_SIMD_INTRINSICS */

void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
	MonoInst *ins;
	int vreg;

	if (long_ins->opcode == OP_LNEG) {
		ins = long_ins;
		MONO_EMIT_NEW_UNALU (cfg, OP_INEG, MONO_LVREG_LS (ins->dreg), MONO_LVREG_LS (ins->sreg1));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADC_IMM, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->sreg1), 0);
		MONO_EMIT_NEW_UNALU (cfg, OP_INEG, MONO_LVREG_MS (ins->dreg), MONO_LVREG_MS (ins->dreg));
		NULLIFY_INS (ins);
		return;
	}
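
	/*
	 * The OP_LNEG decomposition above is the usual two's complement negation of
	 * a 64-bit value kept in a register pair; roughly, in x86 terms:
	 *
	 *   neg  low          ; low = -low, sets the carry if low != 0
	 *   adc  high, 0      ; fold the borrow into the high word
	 *   neg  high         ; high = -high
	 */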

#ifdef MONO_ARCH_SIMD_INTRINSICS

	if (!(cfg->opt & MONO_OPT_SIMD))
		return;

	/*TODO move this to simd-intrinsic.c once we support sse 4.1 dword extractors since we need the runtime caps info */
	switch (long_ins->opcode) {
	case OP_EXTRACT_I8:
		vreg = long_ins->sreg1;

		if (long_ins->inst_c0) {
			MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
			ins->klass = long_ins->klass;
			ins->sreg1 = long_ins->sreg1;
			ins->inst_c0 = 2;
			ins->type = STACK_VTYPE;
			ins->dreg = vreg = alloc_ireg (cfg);
			MONO_ADD_INS (cfg->cbb, ins);
		}

		MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
		ins->klass = mono_defaults.int32_class;
		ins->sreg1 = vreg;
		ins->type = STACK_I4;
		ins->dreg = MONO_LVREG_LS (long_ins->dreg);
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
		ins->klass = long_ins->klass;
		ins->sreg1 = long_ins->sreg1;
		ins->inst_c0 = long_ins->inst_c0 ? 3 : 1;
		ins->type = STACK_VTYPE;
		ins->dreg = vreg = alloc_ireg (cfg);
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
		ins->klass = mono_defaults.int32_class;
		ins->sreg1 = vreg;
		ins->type = STACK_I4;
		ins->dreg = MONO_LVREG_MS (long_ins->dreg);
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	case OP_INSERTX_I8_SLOW:
		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = MONO_LVREG_LS (long_ins->sreg2);
		ins->inst_c0 = long_ins->inst_c0 * 2;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = MONO_LVREG_MS (long_ins->sreg2);
		ins->inst_c0 = long_ins->inst_c0 * 2 + 1;
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	case OP_EXPAND_I8:
		MONO_INST_NEW (cfg, ins, OP_ICONV_TO_X);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = MONO_LVREG_LS (long_ins->sreg1);
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = MONO_LVREG_MS (long_ins->sreg1);
		ins->inst_c0 = 1;
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->inst_c0 = 0x44; /*Magic number for swizzling (X,Y,X,Y)*/
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	}
#endif /* MONO_ARCH_SIMD_INTRINSICS */
}
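
/*
 * Sketch of the idea behind the decompositions above: on x86 a 64-bit SIMD lane
 * has to be handled as two 32-bit halves, so OP_EXTRACT_I8 becomes two
 * OP_EXTRACT_I4 ops (with OP_PSHUFLED used to rotate the wanted dwords into
 * place), and OP_INSERTX_I8_SLOW / OP_EXPAND_I8 are built out of the
 * corresponding 32-bit insert/expand ops on the low and high words.
 */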

/*MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD*/
gpointer
mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
{
	int offset;
	gpointer *sp, old_value;
	char *bp;

	offset = clause->exvar_offset;

	bp = MONO_CONTEXT_GET_BP (ctx);
	sp = *(gpointer*)(bp + offset);

	old_value = *sp;
	if (old_value < ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
		return old_value;

	*sp = new_value;

	return old_value;
}

/*
 * mono_arch_emit_load_got_addr:
 *
 *   Emit code to load the got address.
 * On x86, the result is placed into EBX.
 */
guint8*
mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
{
	x86_call_imm (code, 0);
	/*
	 * The patch needs to point to the pop, since the GOT offset needs
	 * to be added to that address.
	 */
	if (cfg)
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
	else
		*ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
	x86_pop_reg (code, MONO_ARCH_GOT_REG);
	x86_alu_reg_imm (code, X86_ADD, MONO_ARCH_GOT_REG, 0xf0f0f0f0);

	return code;
}
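
/*
 * Illustrative only: the sequence emitted above is the classic x86 PIC idiom
 *
 *     call 1f            ; pushes the address of the following pop
 *  1: pop  %ebx          ; EBX = runtime address of this instruction
 *     add  $<got_offset>, %ebx
 *
 * where the add's immediate (0xf0f0f0f0 here) is a placeholder that the
 * AOT/patching code replaces with the real displacement from the pop to the
 * GOT, using the MONO_PATCH_INFO_GOT_OFFSET records created above.
 */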

static guint8*
emit_load_aotconst (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji, int dreg, int tramp_type, gconstpointer target)
{
	if (cfg)
		mono_add_patch_info (cfg, code - cfg->native_code, tramp_type, target);
	else
		g_assert_not_reached ();
	x86_mov_reg_membase (code, dreg, MONO_ARCH_GOT_REG, 0xf0f0f0f0, 4);

	return code;
}

/*
 * mono_arch_emit_load_aotconst:
 *
 *   Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
 * TARGET from the mscorlib GOT in full-aot code.
 * On x86, the GOT address is assumed to be in EBX, and the result is placed into
 * EAX.
 */
guint8*
mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, MonoJumpInfoType tramp_type, gconstpointer target)
{
	/* Load the mscorlib got address */
	x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_GOT_REG, sizeof (gpointer), 4);
	*ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
	/* arch_emit_got_access () patches this */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0xf0f0f0f0, 4);

	return code;
}
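
/*
 * A rough reading of the two loads above: EBX (MONO_ARCH_GOT_REG) points at the
 * current image's GOT, whose slot at offset sizeof (gpointer) holds the address
 * of the mscorlib GOT; the second load then fetches the wanted constant from
 * that GOT, with the 0xf0f0f0f0 displacement standing in for the real slot
 * offset until arch_emit_got_access () patches it.
 */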

/* Can't put this into mini-x86.h */
guint8*
mono_x86_get_signal_exception_trampoline (MonoTrampInfo **info, gboolean aot);

GSList*
mono_arch_get_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	mono_x86_get_signal_exception_trampoline (&info, aot);

	tramps = g_slist_append (tramps, info);

	return tramps;
}

/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED

/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip + OP_SEQ_POINT_BP_OFFSET;

	g_assert (code [0] == 0x90);
	x86_call_membase (code, X86_ECX, 0);
}
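
/*
 * Illustrative note: OP_SEQ_POINT leaves a nop placeholder (0x90) at
 * OP_SEQ_POINT_BP_OFFSET bytes into the sequence point code, which is what the
 * assert above checks.  Arming a breakpoint overwrites that placeholder with an
 * indirect "call [ECX]"; ECX is assumed to have been loaded by the sequence
 * point code with the address of the slot holding bp_trampoline, so the armed
 * site calls into the breakpoint trampoline.  mono_arch_clear_breakpoint ()
 * below undoes this.
 */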

/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip + OP_SEQ_POINT_BP_OFFSET;
	int i;

	for (i = 0; i < 2; ++i)
		x86_nop (code);
}

/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	ss_trampoline = mini_get_single_step_trampoline ();
}

/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	ss_trampoline = NULL;
}

/*
 * mono_arch_is_single_step_event:
 *
 *   Return whether the machine state in SIGCTX corresponds to a single step event.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
	/* We use soft breakpoints */
	return FALSE;
}

gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
	/* We use soft breakpoints */
	return FALSE;
}

#define BREAKPOINT_SIZE 2

/*
 * mono_arch_skip_breakpoint:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	g_assert_not_reached ();
}

/*
 * mono_arch_skip_single_step:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
	g_assert_not_reached ();
}

/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)

void
mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
{
	ext->lmf.previous_lmf = (gsize)prev_lmf;
	/* Mark that this is a MonoLMFExt */
	ext->lmf.previous_lmf = (gsize)(gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
	ext->lmf.ebp = (gssize)ext;
}
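
/*
 * Illustrative note on the tagging above: previous_lmf is a pointer-sized field
 * and LMF frames are pointer-aligned, so the low bits are free.  Setting bit 1
 * (the "| 2") marks this entry as a MonoLMFExt rather than a plain MonoLMF, so
 * the stack walker can test that bit and cast accordingly before following the
 * chain.
 */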

gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return TRUE;
	default:
		return FALSE;
	}
}

CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	return get_call_info (mp, sig);
}