2 * mini-x86.c: x86 backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/debug-helpers.h>
20 #include <mono/metadata/threads.h>
21 #include <mono/metadata/profiler-private.h>
22 #include <mono/metadata/mono-debug.h>
23 #include <mono/utils/mono-math.h>
24 #include <mono/utils/mono-counters.h>
25 #include <mono/utils/mono-mmap.h>
32 /* On windows, these hold the key returned by TlsAlloc () */
33 static gint lmf_tls_offset
= -1;
34 static gint lmf_addr_tls_offset
= -1;
35 static gint appdomain_tls_offset
= -1;
38 static gboolean optimize_for_xen
= TRUE
;
40 #define optimize_for_xen 0
44 static gboolean is_win32
= TRUE
;
46 static gboolean is_win32
= FALSE
;
49 /* This mutex protects architecture specific caches */
50 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
51 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
52 static CRITICAL_SECTION mini_arch_mutex
;
54 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
59 /* Under windows, the default pinvoke calling convention is stdcall */
60 #define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT))
62 #define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL)
66 mono_breakpoint_info
[MONO_BREAKPOINT_ARRAY_SIZE
];
69 * The code generated for sequence points reads from this location, which is
70 * made read-only when single stepping is enabled.
72 static gpointer ss_trigger_page
;
74 /* Enabled breakpoints read from this trigger page */
75 static gpointer bp_trigger_page
;
78 mono_arch_regname (int reg
)
81 case X86_EAX
: return "%eax";
82 case X86_EBX
: return "%ebx";
83 case X86_ECX
: return "%ecx";
84 case X86_EDX
: return "%edx";
85 case X86_ESP
: return "%esp";
86 case X86_EBP
: return "%ebp";
87 case X86_EDI
: return "%edi";
88 case X86_ESI
: return "%esi";
94 mono_arch_fregname (int reg
)
119 mono_arch_xregname (int reg
)
160 /* Only if storage == ArgValuetypeInReg */
161 ArgStorage pair_storage
[2];
170 gboolean need_stack_align
;
171 guint32 stack_align_amount
;
179 #define FLOAT_PARAM_REGS 0
181 static X86_Reg_No param_regs
[] = { 0 };
183 #if defined(TARGET_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
184 #define SMALL_STRUCTS_IN_REGS
185 static X86_Reg_No return_regs
[] = { X86_EAX
, X86_EDX
};
189 add_general (guint32
*gr
, guint32
*stack_size
, ArgInfo
*ainfo
)
191 ainfo
->offset
= *stack_size
;
193 if (*gr
>= PARAM_REGS
) {
194 ainfo
->storage
= ArgOnStack
;
195 (*stack_size
) += sizeof (gpointer
);
198 ainfo
->storage
= ArgInIReg
;
199 ainfo
->reg
= param_regs
[*gr
];
205 add_general_pair (guint32
*gr
, guint32
*stack_size
, ArgInfo
*ainfo
)
207 ainfo
->offset
= *stack_size
;
209 g_assert (PARAM_REGS
== 0);
211 ainfo
->storage
= ArgOnStack
;
212 (*stack_size
) += sizeof (gpointer
) * 2;
216 add_float (guint32
*gr
, guint32
*stack_size
, ArgInfo
*ainfo
, gboolean is_double
)
218 ainfo
->offset
= *stack_size
;
220 if (*gr
>= FLOAT_PARAM_REGS
) {
221 ainfo
->storage
= ArgOnStack
;
222 (*stack_size
) += is_double
? 8 : 4;
225 /* A double register */
227 ainfo
->storage
= ArgInDoubleSSEReg
;
229 ainfo
->storage
= ArgInFloatSSEReg
;
237 add_valuetype (MonoGenericSharingContext
*gsctx
, MonoMethodSignature
*sig
, ArgInfo
*ainfo
, MonoType
*type
,
239 guint32
*gr
, guint32
*fr
, guint32
*stack_size
)
244 klass
= mono_class_from_mono_type (type
);
245 size
= mini_type_stack_size_full (gsctx
, &klass
->byval_arg
, NULL
, sig
->pinvoke
);
247 #ifdef SMALL_STRUCTS_IN_REGS
248 if (sig
->pinvoke
&& is_return
) {
249 MonoMarshalType
*info
;
252 * the exact rules are not very well documented, the code below seems to work with the
253 * code generated by gcc 3.3.3 -mno-cygwin.
255 info
= mono_marshal_load_type_info (klass
);
258 ainfo
->pair_storage
[0] = ainfo
->pair_storage
[1] = ArgNone
;
260 /* Special case structs with only a float member */
261 if ((info
->native_size
== 8) && (info
->num_fields
== 1) && (info
->fields
[0].field
->type
->type
== MONO_TYPE_R8
)) {
262 ainfo
->storage
= ArgValuetypeInReg
;
263 ainfo
->pair_storage
[0] = ArgOnDoubleFpStack
;
266 if ((info
->native_size
== 4) && (info
->num_fields
== 1) && (info
->fields
[0].field
->type
->type
== MONO_TYPE_R4
)) {
267 ainfo
->storage
= ArgValuetypeInReg
;
268 ainfo
->pair_storage
[0] = ArgOnFloatFpStack
;
271 if ((info
->native_size
== 1) || (info
->native_size
== 2) || (info
->native_size
== 4) || (info
->native_size
== 8)) {
272 ainfo
->storage
= ArgValuetypeInReg
;
273 ainfo
->pair_storage
[0] = ArgInIReg
;
274 ainfo
->pair_regs
[0] = return_regs
[0];
275 if (info
->native_size
> 4) {
276 ainfo
->pair_storage
[1] = ArgInIReg
;
277 ainfo
->pair_regs
[1] = return_regs
[1];
284 ainfo
->offset
= *stack_size
;
285 ainfo
->storage
= ArgOnStack
;
286 *stack_size
+= ALIGN_TO (size
, sizeof (gpointer
));
292 * Obtain information about a call according to the calling convention.
293 * For x86 ELF, see the "System V Application Binary Interface Intel386
294 * Architecture Processor Supplment, Fourth Edition" document for more
296 * For x86 win32, see ???.
299 get_call_info_internal (MonoGenericSharingContext
*gsctx
, CallInfo
*cinfo
, MonoMethodSignature
*sig
, gboolean is_pinvoke
)
303 int n
= sig
->hasthis
+ sig
->param_count
;
304 guint32 stack_size
= 0;
311 ret_type
= mini_type_get_underlying_type (gsctx
, sig
->ret
);
312 switch (ret_type
->type
) {
313 case MONO_TYPE_BOOLEAN
:
324 case MONO_TYPE_FNPTR
:
325 case MONO_TYPE_CLASS
:
326 case MONO_TYPE_OBJECT
:
327 case MONO_TYPE_SZARRAY
:
328 case MONO_TYPE_ARRAY
:
329 case MONO_TYPE_STRING
:
330 cinfo
->ret
.storage
= ArgInIReg
;
331 cinfo
->ret
.reg
= X86_EAX
;
335 cinfo
->ret
.storage
= ArgInIReg
;
336 cinfo
->ret
.reg
= X86_EAX
;
339 cinfo
->ret
.storage
= ArgOnFloatFpStack
;
342 cinfo
->ret
.storage
= ArgOnDoubleFpStack
;
344 case MONO_TYPE_GENERICINST
:
345 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
346 cinfo
->ret
.storage
= ArgInIReg
;
347 cinfo
->ret
.reg
= X86_EAX
;
351 case MONO_TYPE_VALUETYPE
: {
352 guint32 tmp_gr
= 0, tmp_fr
= 0, tmp_stacksize
= 0;
354 add_valuetype (gsctx
, sig
, &cinfo
->ret
, sig
->ret
, TRUE
, &tmp_gr
, &tmp_fr
, &tmp_stacksize
);
355 if (cinfo
->ret
.storage
== ArgOnStack
)
356 /* The caller passes the address where the value is stored */
357 add_general (&gr
, &stack_size
, &cinfo
->ret
);
360 case MONO_TYPE_TYPEDBYREF
:
361 /* Same as a valuetype with size 24 */
362 add_general (&gr
, &stack_size
, &cinfo
->ret
);
366 cinfo
->ret
.storage
= ArgNone
;
369 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
375 add_general (&gr
, &stack_size
, cinfo
->args
+ 0);
377 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== 0)) {
379 fr
= FLOAT_PARAM_REGS
;
381 /* Emit the signature cookie just before the implicit arguments */
382 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
);
385 for (i
= 0; i
< sig
->param_count
; ++i
) {
386 ArgInfo
*ainfo
= &cinfo
->args
[sig
->hasthis
+ i
];
389 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
390 /* We allways pass the sig cookie on the stack for simplicity */
392 * Prevent implicit arguments + the sig cookie from being passed
396 fr
= FLOAT_PARAM_REGS
;
398 /* Emit the signature cookie just before the implicit arguments */
399 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
);
402 if (sig
->params
[i
]->byref
) {
403 add_general (&gr
, &stack_size
, ainfo
);
406 ptype
= mini_type_get_underlying_type (gsctx
, sig
->params
[i
]);
407 switch (ptype
->type
) {
408 case MONO_TYPE_BOOLEAN
:
411 add_general (&gr
, &stack_size
, ainfo
);
416 add_general (&gr
, &stack_size
, ainfo
);
420 add_general (&gr
, &stack_size
, ainfo
);
425 case MONO_TYPE_FNPTR
:
426 case MONO_TYPE_CLASS
:
427 case MONO_TYPE_OBJECT
:
428 case MONO_TYPE_STRING
:
429 case MONO_TYPE_SZARRAY
:
430 case MONO_TYPE_ARRAY
:
431 add_general (&gr
, &stack_size
, ainfo
);
433 case MONO_TYPE_GENERICINST
:
434 if (!mono_type_generic_inst_is_valuetype (ptype
)) {
435 add_general (&gr
, &stack_size
, ainfo
);
439 case MONO_TYPE_VALUETYPE
:
440 add_valuetype (gsctx
, sig
, ainfo
, sig
->params
[i
], FALSE
, &gr
, &fr
, &stack_size
);
442 case MONO_TYPE_TYPEDBYREF
:
443 stack_size
+= sizeof (MonoTypedRef
);
444 ainfo
->storage
= ArgOnStack
;
448 add_general_pair (&gr
, &stack_size
, ainfo
);
451 add_float (&fr
, &stack_size
, ainfo
, FALSE
);
454 add_float (&fr
, &stack_size
, ainfo
, TRUE
);
457 g_error ("unexpected type 0x%x", ptype
->type
);
458 g_assert_not_reached ();
462 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
> 0) && (sig
->sentinelpos
== sig
->param_count
)) {
464 fr
= FLOAT_PARAM_REGS
;
466 /* Emit the signature cookie just before the implicit arguments */
467 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
);
470 if (mono_do_x86_stack_align
&& (stack_size
% MONO_ARCH_FRAME_ALIGNMENT
) != 0) {
471 cinfo
->need_stack_align
= TRUE
;
472 cinfo
->stack_align_amount
= MONO_ARCH_FRAME_ALIGNMENT
- (stack_size
% MONO_ARCH_FRAME_ALIGNMENT
);
473 stack_size
+= cinfo
->stack_align_amount
;
476 cinfo
->stack_usage
= stack_size
;
477 cinfo
->reg_usage
= gr
;
478 cinfo
->freg_usage
= fr
;
483 get_call_info (MonoGenericSharingContext
*gsctx
, MonoMemPool
*mp
, MonoMethodSignature
*sig
, gboolean is_pinvoke
)
485 int n
= sig
->hasthis
+ sig
->param_count
;
489 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
491 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
493 return get_call_info_internal (gsctx
, cinfo
, sig
, is_pinvoke
);
497 * mono_arch_get_argument_info:
498 * @csig: a method signature
499 * @param_count: the number of parameters to consider
500 * @arg_info: an array to store the result infos
502 * Gathers information on parameters such as size, alignment and
503 * padding. arg_info should be large enought to hold param_count + 1 entries.
505 * Returns the size of the argument area on the stack.
506 * This should be signal safe, since it is called from
507 * mono_arch_find_jit_info_ext ().
508 * FIXME: The metadata calls might not be signal safe.
511 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
513 int k
, args_size
= 0;
519 /* Avoid g_malloc as it is not signal safe */
520 cinfo
= (CallInfo
*)g_newa (guint8
*, sizeof (CallInfo
) + (sizeof (ArgInfo
) * (csig
->param_count
+ 1)));
522 cinfo
= get_call_info_internal (NULL
, cinfo
, csig
, FALSE
);
524 if (MONO_TYPE_ISSTRUCT (csig
->ret
) && (cinfo
->ret
.storage
== ArgOnStack
)) {
525 args_size
+= sizeof (gpointer
);
529 arg_info
[0].offset
= offset
;
532 args_size
+= sizeof (gpointer
);
536 arg_info
[0].size
= args_size
;
538 for (k
= 0; k
< param_count
; k
++) {
539 size
= mini_type_stack_size_full (NULL
, csig
->params
[k
], &align
, csig
->pinvoke
);
541 /* ignore alignment for now */
544 args_size
+= pad
= (align
- (args_size
& (align
- 1))) & (align
- 1);
545 arg_info
[k
].pad
= pad
;
547 arg_info
[k
+ 1].pad
= 0;
548 arg_info
[k
+ 1].size
= size
;
550 arg_info
[k
+ 1].offset
= offset
;
554 if (mono_do_x86_stack_align
&& !CALLCONV_IS_STDCALL (csig
))
555 align
= MONO_ARCH_FRAME_ALIGNMENT
;
558 args_size
+= pad
= (align
- (args_size
& (align
- 1))) & (align
- 1);
559 arg_info
[k
].pad
= pad
;
564 static const guchar cpuid_impl
[] = {
565 0x55, /* push %ebp */
566 0x89, 0xe5, /* mov %esp,%ebp */
567 0x53, /* push %ebx */
568 0x8b, 0x45, 0x08, /* mov 0x8(%ebp),%eax */
569 0x0f, 0xa2, /* cpuid */
570 0x50, /* push %eax */
571 0x8b, 0x45, 0x10, /* mov 0x10(%ebp),%eax */
572 0x89, 0x18, /* mov %ebx,(%eax) */
573 0x8b, 0x45, 0x14, /* mov 0x14(%ebp),%eax */
574 0x89, 0x08, /* mov %ecx,(%eax) */
575 0x8b, 0x45, 0x18, /* mov 0x18(%ebp),%eax */
576 0x89, 0x10, /* mov %edx,(%eax) */
578 0x8b, 0x55, 0x0c, /* mov 0xc(%ebp),%edx */
579 0x89, 0x02, /* mov %eax,(%edx) */
585 typedef void (*CpuidFunc
) (int id
, int* p_eax
, int* p_ebx
, int* p_ecx
, int* p_edx
);
588 cpuid (int id
, int* p_eax
, int* p_ebx
, int* p_ecx
, int* p_edx
)
592 __asm__
__volatile__ (
595 "movl %%eax, %%edx\n"
596 "xorl $0x200000, %%eax\n"
601 "xorl %%edx, %%eax\n"
602 "andl $0x200000, %%eax\n"
624 /* Have to use the code manager to get around WinXP DEP */
625 static CpuidFunc func
= NULL
;
628 ptr
= mono_global_codeman_reserve (sizeof (cpuid_impl
));
629 memcpy (ptr
, cpuid_impl
, sizeof (cpuid_impl
));
630 func
= (CpuidFunc
)ptr
;
632 func (id
, p_eax
, p_ebx
, p_ecx
, p_edx
);
635 * We use this approach because of issues with gcc and pic code, see:
636 * http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
637 __asm__ __volatile__ ("cpuid"
638 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
647 * Initialize the cpu to execute managed code.
650 mono_arch_cpu_init (void)
652 /* spec compliance requires running with double precision */
656 __asm__
__volatile__ ("fnstcw %0\n": "=m" (fpcw
));
657 fpcw
&= ~X86_FPCW_PRECC_MASK
;
658 fpcw
|= X86_FPCW_PREC_DOUBLE
;
659 __asm__
__volatile__ ("fldcw %0\n": : "m" (fpcw
));
660 __asm__
__volatile__ ("fnstcw %0\n": "=m" (fpcw
));
662 _control87 (_PC_53
, MCW_PC
);
667 * Initialize architecture specific code.
670 mono_arch_init (void)
672 InitializeCriticalSection (&mini_arch_mutex
);
674 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
);
675 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
676 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
680 * Cleanup architecture specific code.
683 mono_arch_cleanup (void)
685 DeleteCriticalSection (&mini_arch_mutex
);
689 * This function returns the optimizations supported on this cpu.
692 mono_arch_cpu_optimizazions (guint32
*exclude_mask
)
694 int eax
, ebx
, ecx
, edx
;
698 /* Feature Flags function, flags returned in EDX. */
699 if (cpuid (1, &eax
, &ebx
, &ecx
, &edx
)) {
700 if (edx
& (1 << 15)) {
701 opts
|= MONO_OPT_CMOV
;
703 opts
|= MONO_OPT_FCMOV
;
705 *exclude_mask
|= MONO_OPT_FCMOV
;
707 *exclude_mask
|= MONO_OPT_CMOV
;
709 opts
|= MONO_OPT_SSE2
;
711 *exclude_mask
|= MONO_OPT_SSE2
;
713 #ifdef MONO_ARCH_SIMD_INTRINSICS
714 /*SIMD intrinsics require at least SSE2.*/
715 if (!(opts
& MONO_OPT_SSE2
))
716 *exclude_mask
|= MONO_OPT_SIMD
;
723 * This function test for all SSE functions supported.
725 * Returns a bitmask corresponding to all supported versions.
729 mono_arch_cpu_enumerate_simd_versions (void)
731 int eax
, ebx
, ecx
, edx
;
732 guint32 sse_opts
= 0;
734 if (cpuid (1, &eax
, &ebx
, &ecx
, &edx
)) {
736 sse_opts
|= SIMD_VERSION_SSE1
;
738 sse_opts
|= SIMD_VERSION_SSE2
;
740 sse_opts
|= SIMD_VERSION_SSE3
;
742 sse_opts
|= SIMD_VERSION_SSSE3
;
744 sse_opts
|= SIMD_VERSION_SSE41
;
746 sse_opts
|= SIMD_VERSION_SSE42
;
749 /* Yes, all this needs to be done to check for sse4a.
750 See: "Amd: CPUID Specification"
752 if (cpuid (0x80000000, &eax
, &ebx
, &ecx
, &edx
)) {
753 /* eax greater or equal than 0x80000001, ebx = 'htuA', ecx = DMAc', edx = 'itne'*/
754 if ((((unsigned int) eax
) >= 0x80000001) && (ebx
== 0x68747541) && (ecx
== 0x444D4163) && (edx
== 0x69746E65)) {
755 cpuid (0x80000001, &eax
, &ebx
, &ecx
, &edx
);
757 sse_opts
|= SIMD_VERSION_SSE4a
;
766 * Determine whenever the trap whose info is in SIGINFO is caused by
770 mono_arch_is_int_overflow (void *sigctx
, void *info
)
775 mono_arch_sigctx_to_monoctx (sigctx
, &ctx
);
777 ip
= (guint8
*)ctx
.eip
;
779 if ((ip
[0] == 0xf7) && (x86_modrm_mod (ip
[1]) == 0x3) && (x86_modrm_reg (ip
[1]) == 0x7)) {
783 switch (x86_modrm_rm (ip
[1])) {
803 g_assert_not_reached ();
815 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
820 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
821 MonoInst
*ins
= cfg
->varinfo
[i
];
822 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
825 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
828 if ((ins
->flags
& (MONO_INST_IS_DEAD
|MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) ||
829 (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
832 /* we dont allocate I1 to registers because there is no simply way to sign extend
833 * 8bit quantities in caller saved registers on x86 */
834 if (mono_is_regsize_var (ins
->inst_vtype
) && (ins
->inst_vtype
->type
!= MONO_TYPE_I1
)) {
835 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
836 g_assert (i
== vmv
->idx
);
837 vars
= g_list_prepend (vars
, vmv
);
841 vars
= mono_varlist_sort (cfg
, vars
, 0);
847 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
851 /* we can use 3 registers for global allocation */
852 regs
= g_list_prepend (regs
, (gpointer
)X86_EBX
);
853 regs
= g_list_prepend (regs
, (gpointer
)X86_ESI
);
854 regs
= g_list_prepend (regs
, (gpointer
)X86_EDI
);
860 * mono_arch_regalloc_cost:
862 * Return the cost, in number of memory references, of the action of
863 * allocating the variable VMV into a register during global register
867 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
869 MonoInst
*ins
= cfg
->varinfo
[vmv
->idx
];
871 if (cfg
->method
->save_lmf
)
872 /* The register is already saved */
873 return (ins
->opcode
== OP_ARG
) ? 1 : 0;
875 /* push+pop+possible load if it is an argument */
876 return (ins
->opcode
== OP_ARG
) ? 3 : 2;
880 set_needs_stack_frame (MonoCompile
*cfg
, gboolean flag
)
882 static int inited
= FALSE
;
883 static int count
= 0;
885 if (cfg
->arch
.need_stack_frame_inited
) {
886 g_assert (cfg
->arch
.need_stack_frame
== flag
);
890 cfg
->arch
.need_stack_frame
= flag
;
891 cfg
->arch
.need_stack_frame_inited
= TRUE
;
897 mono_counters_register ("Could eliminate stack frame", MONO_COUNTER_INT
|MONO_COUNTER_JIT
, &count
);
902 //g_print ("will eliminate %s.%s.%s\n", cfg->method->klass->name_space, cfg->method->klass->name, cfg->method->name);
906 needs_stack_frame (MonoCompile
*cfg
)
908 MonoMethodSignature
*sig
;
909 MonoMethodHeader
*header
;
910 gboolean result
= FALSE
;
912 #if defined(__APPLE__)
913 /*OSX requires stack frame code to have the correct alignment. */
917 if (cfg
->arch
.need_stack_frame_inited
)
918 return cfg
->arch
.need_stack_frame
;
920 header
= cfg
->header
;
921 sig
= mono_method_signature (cfg
->method
);
923 if (cfg
->disable_omit_fp
)
925 else if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
927 else if (cfg
->method
->save_lmf
)
929 else if (cfg
->stack_offset
)
931 else if (cfg
->param_area
)
933 else if (cfg
->flags
& (MONO_CFG_HAS_CALLS
| MONO_CFG_HAS_ALLOCA
| MONO_CFG_HAS_TAIL
))
935 else if (header
->num_clauses
)
937 else if (sig
->param_count
+ sig
->hasthis
)
939 else if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
941 else if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)) ||
942 (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
))
945 set_needs_stack_frame (cfg
, result
);
947 return cfg
->arch
.need_stack_frame
;
951 * Set var information according to the calling convention. X86 version.
952 * The locals var stuff should most likely be split in another method.
955 mono_arch_allocate_vars (MonoCompile
*cfg
)
957 MonoMethodSignature
*sig
;
958 MonoMethodHeader
*header
;
960 guint32 locals_stack_size
, locals_stack_align
;
965 header
= cfg
->header
;
966 sig
= mono_method_signature (cfg
->method
);
968 cinfo
= get_call_info (cfg
->generic_sharing_context
, cfg
->mempool
, sig
, FALSE
);
970 cfg
->frame_reg
= X86_EBP
;
973 /* Reserve space to save LMF and caller saved registers */
975 if (cfg
->method
->save_lmf
) {
976 offset
+= sizeof (MonoLMF
);
978 if (cfg
->used_int_regs
& (1 << X86_EBX
)) {
982 if (cfg
->used_int_regs
& (1 << X86_EDI
)) {
986 if (cfg
->used_int_regs
& (1 << X86_ESI
)) {
991 switch (cinfo
->ret
.storage
) {
992 case ArgValuetypeInReg
:
993 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
995 cfg
->ret
->opcode
= OP_REGOFFSET
;
996 cfg
->ret
->inst_basereg
= X86_EBP
;
997 cfg
->ret
->inst_offset
= - offset
;
1003 /* Allocate locals */
1004 offsets
= mono_allocate_stack_slots (cfg
, &locals_stack_size
, &locals_stack_align
);
1005 if (locals_stack_size
> MONO_ARCH_MAX_FRAME_SIZE
) {
1006 char *mname
= mono_method_full_name (cfg
->method
, TRUE
);
1007 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
1008 cfg
->exception_message
= g_strdup_printf ("Method %s stack is too big.", mname
);
1012 if (locals_stack_align
) {
1013 offset
+= (locals_stack_align
- 1);
1014 offset
&= ~(locals_stack_align
- 1);
1017 * EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
1018 * have locals larger than 8 bytes we need to make sure that
1019 * they have the appropriate offset.
1021 if (MONO_ARCH_FRAME_ALIGNMENT
> 8 && locals_stack_align
> 8)
1022 offset
+= MONO_ARCH_FRAME_ALIGNMENT
- sizeof (gpointer
) * 2;
1023 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1024 if (offsets
[i
] != -1) {
1025 MonoInst
*inst
= cfg
->varinfo
[i
];
1026 inst
->opcode
= OP_REGOFFSET
;
1027 inst
->inst_basereg
= X86_EBP
;
1028 inst
->inst_offset
= - (offset
+ offsets
[i
]);
1029 //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
1032 offset
+= locals_stack_size
;
1036 * Allocate arguments+return value
1039 switch (cinfo
->ret
.storage
) {
1041 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1043 * In the new IR, the cfg->vret_addr variable represents the
1044 * vtype return value.
1046 cfg
->vret_addr
->opcode
= OP_REGOFFSET
;
1047 cfg
->vret_addr
->inst_basereg
= cfg
->frame_reg
;
1048 cfg
->vret_addr
->inst_offset
= cinfo
->ret
.offset
+ ARGS_OFFSET
;
1049 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1050 printf ("vret_addr =");
1051 mono_print_ins (cfg
->vret_addr
);
1054 cfg
->ret
->opcode
= OP_REGOFFSET
;
1055 cfg
->ret
->inst_basereg
= X86_EBP
;
1056 cfg
->ret
->inst_offset
= cinfo
->ret
.offset
+ ARGS_OFFSET
;
1059 case ArgValuetypeInReg
:
1062 cfg
->ret
->opcode
= OP_REGVAR
;
1063 cfg
->ret
->inst_c0
= cinfo
->ret
.reg
;
1064 cfg
->ret
->dreg
= cinfo
->ret
.reg
;
1067 case ArgOnFloatFpStack
:
1068 case ArgOnDoubleFpStack
:
1071 g_assert_not_reached ();
1074 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1075 g_assert (cinfo
->sig_cookie
.storage
== ArgOnStack
);
1076 cfg
->sig_cookie
= cinfo
->sig_cookie
.offset
+ ARGS_OFFSET
;
1079 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1080 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1081 inst
= cfg
->args
[i
];
1082 if (inst
->opcode
!= OP_REGVAR
) {
1083 inst
->opcode
= OP_REGOFFSET
;
1084 inst
->inst_basereg
= X86_EBP
;
1086 inst
->inst_offset
= ainfo
->offset
+ ARGS_OFFSET
;
1089 cfg
->stack_offset
= offset
;
1093 mono_arch_create_vars (MonoCompile
*cfg
)
1095 MonoMethodSignature
*sig
;
1098 sig
= mono_method_signature (cfg
->method
);
1100 cinfo
= get_call_info (cfg
->generic_sharing_context
, cfg
->mempool
, sig
, FALSE
);
1102 if (cinfo
->ret
.storage
== ArgValuetypeInReg
)
1103 cfg
->ret_var_is_local
= TRUE
;
1104 if ((cinfo
->ret
.storage
!= ArgValuetypeInReg
) && MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1105 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1110 * It is expensive to adjust esp for each individual fp argument pushed on the stack
1111 * so we try to do it just once when we have multiple fp arguments in a row.
1112 * We don't use this mechanism generally because for int arguments the generated code
1113 * is slightly bigger and new generation cpus optimize away the dependency chains
1114 * created by push instructions on the esp value.
1115 * fp_arg_setup is the first argument in the execution sequence where the esp register
1118 static G_GNUC_UNUSED
int
1119 collect_fp_stack_space (MonoMethodSignature
*sig
, int start_arg
, int *fp_arg_setup
)
1124 for (; start_arg
< sig
->param_count
; ++start_arg
) {
1125 t
= mini_type_get_underlying_type (NULL
, sig
->params
[start_arg
]);
1126 if (!t
->byref
&& t
->type
== MONO_TYPE_R8
) {
1127 fp_space
+= sizeof (double);
1128 *fp_arg_setup
= start_arg
;
1137 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1139 MonoMethodSignature
*tmp_sig
;
1141 /* FIXME: Add support for signature tokens to AOT */
1142 cfg
->disable_aot
= TRUE
;
1145 * mono_ArgIterator_Setup assumes the signature cookie is
1146 * passed first and all the arguments which were before it are
1147 * passed on the stack after the signature. So compensate by
1148 * passing a different signature.
1150 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
1151 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
1152 tmp_sig
->sentinelpos
= 0;
1153 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
1155 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_X86_PUSH_IMM
, -1, -1, tmp_sig
);
1160 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
1165 LLVMCallInfo
*linfo
;
1168 n
= sig
->param_count
+ sig
->hasthis
;
1170 cinfo
= get_call_info (cfg
->generic_sharing_context
, cfg
->mempool
, sig
, sig
->pinvoke
);
1172 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
1175 * LLVM always uses the native ABI while we use our own ABI, the
1176 * only difference is the handling of vtypes:
1177 * - we only pass/receive them in registers in some cases, and only
1178 * in 1 or 2 integer registers.
1180 if (cinfo
->ret
.storage
== ArgValuetypeInReg
) {
1182 cfg
->exception_message
= g_strdup ("pinvoke + vtypes");
1183 cfg
->disable_llvm
= TRUE
;
1187 cfg
->exception_message
= g_strdup ("vtype ret in call");
1188 cfg
->disable_llvm
= TRUE
;
1190 linfo->ret.storage = LLVMArgVtypeInReg;
1191 for (j = 0; j < 2; ++j)
1192 linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, cinfo->ret.pair_storage [j]);
1196 if (MONO_TYPE_ISSTRUCT (sig
->ret
) && cinfo
->ret
.storage
== ArgInIReg
) {
1197 /* Vtype returned using a hidden argument */
1198 linfo
->ret
.storage
= LLVMArgVtypeRetAddr
;
1201 if (MONO_TYPE_ISSTRUCT (sig
->ret
) && cinfo
->ret
.storage
!= ArgInIReg
) {
1203 cfg
->exception_message
= g_strdup ("vtype ret in call");
1204 cfg
->disable_llvm
= TRUE
;
1207 for (i
= 0; i
< n
; ++i
) {
1208 ainfo
= cinfo
->args
+ i
;
1210 if (i
>= sig
->hasthis
)
1211 t
= sig
->params
[i
- sig
->hasthis
];
1213 t
= &mono_defaults
.int_class
->byval_arg
;
1215 linfo
->args
[i
].storage
= LLVMArgNone
;
1217 switch (ainfo
->storage
) {
1219 linfo
->args
[i
].storage
= LLVMArgInIReg
;
1221 case ArgInDoubleSSEReg
:
1222 case ArgInFloatSSEReg
:
1223 linfo
->args
[i
].storage
= LLVMArgInFPReg
;
1226 if (MONO_TYPE_ISSTRUCT (t
)) {
1227 if (mono_class_value_size (mono_class_from_mono_type (t
), NULL
) == 0)
1228 /* LLVM seems to allocate argument space for empty structures too */
1229 linfo
->args
[i
].storage
= LLVMArgNone
;
1231 linfo
->args
[i
].storage
= LLVMArgVtypeByVal
;
1233 linfo
->args
[i
].storage
= LLVMArgInIReg
;
1235 if (t
->type
== MONO_TYPE_R4
)
1236 linfo
->args
[i
].storage
= LLVMArgInFPReg
;
1237 else if (t
->type
== MONO_TYPE_R8
)
1238 linfo
->args
[i
].storage
= LLVMArgInFPReg
;
1242 case ArgValuetypeInReg
:
1244 cfg
->exception_message
= g_strdup ("pinvoke + vtypes");
1245 cfg
->disable_llvm
= TRUE
;
1249 cfg
->exception_message
= g_strdup ("vtype arg");
1250 cfg
->disable_llvm
= TRUE
;
1252 linfo->args [i].storage = LLVMArgVtypeInReg;
1253 for (j = 0; j < 2; ++j)
1254 linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
1258 cfg
->exception_message
= g_strdup ("ainfo->storage");
1259 cfg
->disable_llvm
= TRUE
;
1269 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1272 MonoMethodSignature
*sig
;
1275 int sentinelpos
= 0;
1277 sig
= call
->signature
;
1278 n
= sig
->param_count
+ sig
->hasthis
;
1280 cinfo
= get_call_info (cfg
->generic_sharing_context
, cfg
->mempool
, sig
, FALSE
);
1282 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1283 sentinelpos
= sig
->sentinelpos
+ (sig
->hasthis
? 1 : 0);
1285 if (cinfo
->need_stack_align
) {
1286 MONO_INST_NEW (cfg
, arg
, OP_SUB_IMM
);
1287 arg
->dreg
= X86_ESP
;
1288 arg
->sreg1
= X86_ESP
;
1289 arg
->inst_imm
= cinfo
->stack_align_amount
;
1290 MONO_ADD_INS (cfg
->cbb
, arg
);
1293 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1294 if (cinfo
->ret
.storage
== ArgValuetypeInReg
) {
1296 * Tell the JIT to use a more efficient calling convention: call using
1297 * OP_CALL, compute the result location after the call, and save the
1300 call
->vret_in_reg
= TRUE
;
1302 NULLIFY_INS (call
->vret_var
);
1306 /* Handle the case where there are no implicit arguments */
1307 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sentinelpos
)) {
1308 emit_sig_cookie (cfg
, call
, cinfo
);
1311 /* Arguments are pushed in the reverse order */
1312 for (i
= n
- 1; i
>= 0; i
--) {
1313 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1316 if (i
>= sig
->hasthis
)
1317 t
= sig
->params
[i
- sig
->hasthis
];
1319 t
= &mono_defaults
.int_class
->byval_arg
;
1320 t
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, t
);
1322 MONO_INST_NEW (cfg
, arg
, OP_X86_PUSH
);
1324 in
= call
->args
[i
];
1325 arg
->cil_code
= in
->cil_code
;
1326 arg
->sreg1
= in
->dreg
;
1327 arg
->type
= in
->type
;
1329 g_assert (in
->dreg
!= -1);
1331 if ((i
>= sig
->hasthis
) && (MONO_TYPE_ISSTRUCT(t
))) {
1335 g_assert (in
->klass
);
1337 if (t
->type
== MONO_TYPE_TYPEDBYREF
) {
1338 size
= sizeof (MonoTypedRef
);
1339 align
= sizeof (gpointer
);
1342 size
= mini_type_stack_size_full (cfg
->generic_sharing_context
, &in
->klass
->byval_arg
, &align
, sig
->pinvoke
);
1346 arg
->opcode
= OP_OUTARG_VT
;
1347 arg
->sreg1
= in
->dreg
;
1348 arg
->klass
= in
->klass
;
1349 arg
->backend
.size
= size
;
1351 MONO_ADD_INS (cfg
->cbb
, arg
);
1355 switch (ainfo
->storage
) {
1357 arg
->opcode
= OP_X86_PUSH
;
1359 if (t
->type
== MONO_TYPE_R4
) {
1360 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SUB_IMM
, X86_ESP
, X86_ESP
, 4);
1361 arg
->opcode
= OP_STORER4_MEMBASE_REG
;
1362 arg
->inst_destbasereg
= X86_ESP
;
1363 arg
->inst_offset
= 0;
1364 } else if (t
->type
== MONO_TYPE_R8
) {
1365 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SUB_IMM
, X86_ESP
, X86_ESP
, 8);
1366 arg
->opcode
= OP_STORER8_MEMBASE_REG
;
1367 arg
->inst_destbasereg
= X86_ESP
;
1368 arg
->inst_offset
= 0;
1369 } else if (t
->type
== MONO_TYPE_I8
|| t
->type
== MONO_TYPE_U8
) {
1371 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_PUSH
, -1, in
->dreg
+ 2);
1376 g_assert_not_reached ();
1379 MONO_ADD_INS (cfg
->cbb
, arg
);
1382 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sentinelpos
)) {
1383 /* Emit the signature cookie just before the implicit arguments */
1384 emit_sig_cookie (cfg
, call
, cinfo
);
1388 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1391 if (cinfo
->ret
.storage
== ArgValuetypeInReg
) {
1394 else if (cinfo
->ret
.storage
== ArgInIReg
) {
1396 /* The return address is passed in a register */
1397 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1398 vtarg
->sreg1
= call
->inst
.dreg
;
1399 vtarg
->dreg
= mono_alloc_ireg (cfg
);
1400 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1402 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
1405 MONO_INST_NEW (cfg
, vtarg
, OP_X86_PUSH
);
1406 vtarg
->type
= STACK_MP
;
1407 vtarg
->sreg1
= call
->vret_var
->dreg
;
1408 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1411 /* if the function returns a struct on stack, the called method already does a ret $0x4 */
1412 if (cinfo
->ret
.storage
!= ArgValuetypeInReg
)
1413 cinfo
->stack_usage
-= 4;
1416 call
->stack_usage
= cinfo
->stack_usage
;
1420 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1423 int size
= ins
->backend
.size
;
1426 MONO_INST_NEW (cfg
, arg
, OP_X86_PUSH_MEMBASE
);
1427 arg
->sreg1
= src
->dreg
;
1429 MONO_ADD_INS (cfg
->cbb
, arg
);
1430 } else if (size
<= 20) {
1431 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SUB_IMM
, X86_ESP
, X86_ESP
, ALIGN_TO (size
, 4));
1432 mini_emit_memcpy (cfg
, X86_ESP
, 0, src
->dreg
, 0, size
, 4);
1434 MONO_INST_NEW (cfg
, arg
, OP_X86_PUSH_OBJ
);
1435 arg
->inst_basereg
= src
->dreg
;
1436 arg
->inst_offset
= 0;
1437 arg
->inst_imm
= size
;
1439 MONO_ADD_INS (cfg
->cbb
, arg
);
1444 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1446 MonoType
*ret
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
);
1449 if (ret
->type
== MONO_TYPE_R4
) {
1450 if (COMPILE_LLVM (cfg
))
1451 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1454 } else if (ret
->type
== MONO_TYPE_R8
) {
1455 if (COMPILE_LLVM (cfg
))
1456 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1459 } else if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1460 if (COMPILE_LLVM (cfg
))
1461 MONO_EMIT_NEW_UNALU (cfg
, OP_LMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1463 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, X86_EAX
, val
->dreg
+ 1);
1464 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, X86_EDX
, val
->dreg
+ 2);
1470 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1474 * Allow tracing to work with this interface (with an optional argument)
1477 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
1481 g_assert (MONO_ARCH_FRAME_ALIGNMENT
>= 8);
1482 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- 8);
1484 /* if some args are passed in registers, we need to save them here */
1485 x86_push_reg (code
, X86_EBP
);
1487 if (cfg
->compile_aot
) {
1488 x86_push_imm (code
, cfg
->method
);
1489 x86_mov_reg_imm (code
, X86_EAX
, func
);
1490 x86_call_reg (code
, X86_EAX
);
1492 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_METHODCONST
, cfg
->method
);
1493 x86_push_imm (code
, cfg
->method
);
1494 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
1495 x86_call_code (code
, 0);
1497 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
);
1511 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
1514 int arg_size
= 0, stack_usage
= 0, save_mode
= SAVE_NONE
;
1515 MonoMethod
*method
= cfg
->method
;
1516 MonoType
*ret_type
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
);
1518 switch (ret_type
->type
) {
1519 case MONO_TYPE_VOID
:
1520 /* special case string .ctor icall */
1521 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
) {
1522 save_mode
= SAVE_EAX
;
1523 stack_usage
= enable_arguments
? 8 : 4;
1525 save_mode
= SAVE_NONE
;
1529 save_mode
= SAVE_EAX_EDX
;
1530 stack_usage
= enable_arguments
? 16 : 8;
1534 save_mode
= SAVE_FP
;
1535 stack_usage
= enable_arguments
? 16 : 8;
1537 case MONO_TYPE_GENERICINST
:
1538 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
1539 save_mode
= SAVE_EAX
;
1540 stack_usage
= enable_arguments
? 8 : 4;
1544 case MONO_TYPE_VALUETYPE
:
1545 // FIXME: Handle SMALL_STRUCT_IN_REG here for proper alignment on darwin-x86
1546 save_mode
= SAVE_STRUCT
;
1547 stack_usage
= enable_arguments
? 4 : 0;
1550 save_mode
= SAVE_EAX
;
1551 stack_usage
= enable_arguments
? 8 : 4;
1555 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- stack_usage
- 4);
1557 switch (save_mode
) {
1559 x86_push_reg (code
, X86_EDX
);
1560 x86_push_reg (code
, X86_EAX
);
1561 if (enable_arguments
) {
1562 x86_push_reg (code
, X86_EDX
);
1563 x86_push_reg (code
, X86_EAX
);
1568 x86_push_reg (code
, X86_EAX
);
1569 if (enable_arguments
) {
1570 x86_push_reg (code
, X86_EAX
);
1575 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
1576 x86_fst_membase (code
, X86_ESP
, 0, TRUE
, TRUE
);
1577 if (enable_arguments
) {
1578 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
1579 x86_fst_membase (code
, X86_ESP
, 0, TRUE
, TRUE
);
1584 if (enable_arguments
) {
1585 x86_push_membase (code
, X86_EBP
, 8);
1594 if (cfg
->compile_aot
) {
1595 x86_push_imm (code
, method
);
1596 x86_mov_reg_imm (code
, X86_EAX
, func
);
1597 x86_call_reg (code
, X86_EAX
);
1599 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_METHODCONST
, method
);
1600 x86_push_imm (code
, method
);
1601 mono_add_patch_info (cfg
, code
-cfg
->native_code
, MONO_PATCH_INFO_ABS
, func
);
1602 x86_call_code (code
, 0);
1605 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, arg_size
+ 4);
1607 switch (save_mode
) {
1609 x86_pop_reg (code
, X86_EAX
);
1610 x86_pop_reg (code
, X86_EDX
);
1613 x86_pop_reg (code
, X86_EAX
);
1616 x86_fld_membase (code
, X86_ESP
, 0, TRUE
);
1617 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 8);
1624 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, MONO_ARCH_FRAME_ALIGNMENT
- stack_usage
);
1629 #define EMIT_COND_BRANCH(ins,cond,sign) \
1630 if (ins->inst_true_bb->native_offset) { \
1631 x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
1633 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1634 if ((cfg->opt & MONO_OPT_BRANCH) && \
1635 x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
1636 x86_branch8 (code, cond, 0, sign); \
1638 x86_branch32 (code, cond, 0, sign); \
1642 * Emit an exception if condition is fail and
1643 * if possible do a directly branch to target
1645 #define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
1647 MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
1648 if (tins == NULL) { \
1649 mono_add_patch_info (cfg, code - cfg->native_code, \
1650 MONO_PATCH_INFO_EXC, exc_name); \
1651 x86_branch32 (code, cond, 0, signed); \
1653 EMIT_COND_BRANCH (tins, cond, signed); \
1657 #define EMIT_FPCOMPARE(code) do { \
1658 x86_fcompp (code); \
1659 x86_fnstsw (code); \
1664 emit_call (MonoCompile
*cfg
, guint8
*code
, guint32 patch_type
, gconstpointer data
)
1666 mono_add_patch_info (cfg
, code
- cfg
->native_code
, patch_type
, data
);
1667 x86_call_code (code
, 0);
1672 #define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM)))
1675 * mono_peephole_pass_1:
1677 * Perform peephole opts which should/can be performed before local regalloc
1680 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1684 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1685 MonoInst
*last_ins
= ins
->prev
;
1687 switch (ins
->opcode
) {
1690 if ((ins
->sreg1
< MONO_MAX_IREGS
) && (ins
->dreg
>= MONO_MAX_IREGS
)) {
1692 * X86_LEA is like ADD, but doesn't have the
1693 * sreg1==dreg restriction.
1695 ins
->opcode
= OP_X86_LEA_MEMBASE
;
1696 ins
->inst_basereg
= ins
->sreg1
;
1697 } else if ((ins
->inst_imm
== 1) && (ins
->dreg
== ins
->sreg1
))
1698 ins
->opcode
= OP_X86_INC_REG
;
1702 if ((ins
->sreg1
< MONO_MAX_IREGS
) && (ins
->dreg
>= MONO_MAX_IREGS
)) {
1703 ins
->opcode
= OP_X86_LEA_MEMBASE
;
1704 ins
->inst_basereg
= ins
->sreg1
;
1705 ins
->inst_imm
= -ins
->inst_imm
;
1706 } else if ((ins
->inst_imm
== 1) && (ins
->dreg
== ins
->sreg1
))
1707 ins
->opcode
= OP_X86_DEC_REG
;
1709 case OP_COMPARE_IMM
:
1710 case OP_ICOMPARE_IMM
:
1711 /* OP_COMPARE_IMM (reg, 0)
1713 * OP_X86_TEST_NULL (reg)
1716 ins
->opcode
= OP_X86_TEST_NULL
;
1718 case OP_X86_COMPARE_MEMBASE_IMM
:
1720 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1721 * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
1723 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1724 * OP_COMPARE_IMM reg, imm
1726 * Note: if imm = 0 then OP_COMPARE_IMM replaced with OP_X86_TEST_NULL
1728 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
) &&
1729 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1730 ins
->inst_offset
== last_ins
->inst_offset
) {
1731 ins
->opcode
= OP_COMPARE_IMM
;
1732 ins
->sreg1
= last_ins
->sreg1
;
1734 /* check if we can remove cmp reg,0 with test null */
1736 ins
->opcode
= OP_X86_TEST_NULL
;
1740 case OP_X86_PUSH_MEMBASE
:
1741 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
||
1742 last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
1743 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1744 ins
->inst_offset
== last_ins
->inst_offset
) {
1745 ins
->opcode
= OP_X86_PUSH
;
1746 ins
->sreg1
= last_ins
->sreg1
;
1751 mono_peephole_ins (bb
, ins
);
1756 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1760 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1761 switch (ins
->opcode
) {
1763 /* reg = 0 -> XOR (reg, reg) */
1764 /* XOR sets cflags on x86, so we cant do it always */
1765 if (ins
->inst_c0
== 0 && (!ins
->next
|| (ins
->next
&& INST_IGNORES_CFLAGS (ins
->next
->opcode
)))) {
1768 ins
->opcode
= OP_IXOR
;
1769 ins
->sreg1
= ins
->dreg
;
1770 ins
->sreg2
= ins
->dreg
;
1773 * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG
1774 * since it takes 3 bytes instead of 7.
1776 for (ins2
= ins
->next
; ins2
; ins2
= ins2
->next
) {
1777 if ((ins2
->opcode
== OP_STORE_MEMBASE_IMM
) && (ins2
->inst_imm
== 0)) {
1778 ins2
->opcode
= OP_STORE_MEMBASE_REG
;
1779 ins2
->sreg1
= ins
->dreg
;
1781 else if ((ins2
->opcode
== OP_STOREI4_MEMBASE_IMM
) && (ins2
->inst_imm
== 0)) {
1782 ins2
->opcode
= OP_STOREI4_MEMBASE_REG
;
1783 ins2
->sreg1
= ins
->dreg
;
1785 else if ((ins2
->opcode
== OP_STOREI1_MEMBASE_IMM
) || (ins2
->opcode
== OP_STOREI2_MEMBASE_IMM
)) {
1786 /* Continue iteration */
1795 if ((ins
->inst_imm
== 1) && (ins
->dreg
== ins
->sreg1
))
1796 ins
->opcode
= OP_X86_INC_REG
;
1800 if ((ins
->inst_imm
== 1) && (ins
->dreg
== ins
->sreg1
))
1801 ins
->opcode
= OP_X86_DEC_REG
;
1805 mono_peephole_ins (bb
, ins
);
1810 * mono_arch_lowering_pass:
1812 * Converts complex opcodes into simpler ones so that each IR instruction
1813 * corresponds to one machine instruction.
1816 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1818 MonoInst
*ins
, *next
;
1821 * FIXME: Need to add more instructions, but the current machine
1822 * description can't model some parts of the composite instructions like
1825 MONO_BB_FOR_EACH_INS_SAFE (bb
, next
, ins
) {
1826 switch (ins
->opcode
) {
1829 case OP_IDIV_UN_IMM
:
1830 case OP_IREM_UN_IMM
:
1832 * Keep the cases where we could generated optimized code, otherwise convert
1833 * to the non-imm variant.
1835 if ((ins
->opcode
== OP_IREM_IMM
) && mono_is_power_of_two (ins
->inst_imm
) >= 0)
1837 mono_decompose_op_imm (cfg
, bb
, ins
);
1844 bb
->max_vreg
= cfg
->next_vreg
;
1848 branch_cc_table
[] = {
1849 X86_CC_EQ
, X86_CC_GE
, X86_CC_GT
, X86_CC_LE
, X86_CC_LT
,
1850 X86_CC_NE
, X86_CC_GE
, X86_CC_GT
, X86_CC_LE
, X86_CC_LT
,
1851 X86_CC_O
, X86_CC_NO
, X86_CC_C
, X86_CC_NC
1854 /* Maps CMP_... constants to X86_CC_... constants */
1857 X86_CC_EQ
, X86_CC_NE
, X86_CC_LE
, X86_CC_GE
, X86_CC_LT
, X86_CC_GT
,
1858 X86_CC_LE
, X86_CC_GE
, X86_CC_LT
, X86_CC_GT
1862 cc_signed_table
[] = {
1863 TRUE
, TRUE
, TRUE
, TRUE
, TRUE
, TRUE
,
1864 FALSE
, FALSE
, FALSE
, FALSE
1867 static unsigned char*
1868 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int size
, gboolean is_signed
)
1870 #define XMM_TEMP_REG 0
1871 /*This SSE2 optimization must not be done which OPT_SIMD in place as it clobbers xmm0.*/
1872 /*The xmm pass decomposes OP_FCONV_ ops anyway anyway.*/
1873 if (cfg
->opt
& MONO_OPT_SSE2
&& size
< 8 && !(cfg
->opt
& MONO_OPT_SIMD
)) {
1874 /* optimize by assigning a local var for this use so we avoid
1875 * the stack manipulations */
1876 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
1877 x86_fst_membase (code
, X86_ESP
, 0, TRUE
, TRUE
);
1878 x86_movsd_reg_membase (code
, XMM_TEMP_REG
, X86_ESP
, 0);
1879 x86_cvttsd2si (code
, dreg
, XMM_TEMP_REG
);
1880 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 8);
1882 x86_widen_reg (code
, dreg
, dreg
, is_signed
, FALSE
);
1884 x86_widen_reg (code
, dreg
, dreg
, is_signed
, TRUE
);
1887 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 4);
1888 x86_fnstcw_membase(code
, X86_ESP
, 0);
1889 x86_mov_reg_membase (code
, dreg
, X86_ESP
, 0, 2);
1890 x86_alu_reg_imm (code
, X86_OR
, dreg
, 0xc00);
1891 x86_mov_membase_reg (code
, X86_ESP
, 2, dreg
, 2);
1892 x86_fldcw_membase (code
, X86_ESP
, 2);
1894 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 8);
1895 x86_fist_pop_membase (code
, X86_ESP
, 0, TRUE
);
1896 x86_pop_reg (code
, dreg
);
1897 /* FIXME: need the high register
1898 * x86_pop_reg (code, dreg_high);
1901 x86_push_reg (code
, X86_EAX
); // SP = SP - 4
1902 x86_fist_pop_membase (code
, X86_ESP
, 0, FALSE
);
1903 x86_pop_reg (code
, dreg
);
1905 x86_fldcw_membase (code
, X86_ESP
, 0);
1906 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 4);
1909 x86_widen_reg (code
, dreg
, dreg
, is_signed
, FALSE
);
1911 x86_widen_reg (code
, dreg
, dreg
, is_signed
, TRUE
);
1915 static unsigned char*
1916 mono_emit_stack_alloc (guchar
*code
, MonoInst
* tree
)
1918 int sreg
= tree
->sreg1
;
1919 int need_touch
= FALSE
;
1921 #if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
1930 * If requested stack size is larger than one page,
1931 * perform stack-touch operation
1934 * Generate stack probe code.
1935 * Under Windows, it is necessary to allocate one page at a time,
1936 * "touching" stack after each successful sub-allocation. This is
1937 * because of the way stack growth is implemented - there is a
1938 * guard page before the lowest stack page that is currently commited.
1939 * Stack normally grows sequentially so OS traps access to the
1940 * guard page and commits more pages when needed.
1942 x86_test_reg_imm (code
, sreg
, ~0xFFF);
1943 br
[0] = code
; x86_branch8 (code
, X86_CC_Z
, 0, FALSE
);
1945 br
[2] = code
; /* loop */
1946 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 0x1000);
1947 x86_test_membase_reg (code
, X86_ESP
, 0, X86_ESP
);
1950 * By the end of the loop, sreg2 is smaller than 0x1000, so the init routine
1951 * that follows only initializes the last part of the area.
1953 /* Same as the init code below with size==0x1000 */
1954 if (tree
->flags
& MONO_INST_INIT
) {
1955 x86_push_reg (code
, X86_EAX
);
1956 x86_push_reg (code
, X86_ECX
);
1957 x86_push_reg (code
, X86_EDI
);
1958 x86_mov_reg_imm (code
, X86_ECX
, (0x1000 >> 2));
1959 x86_alu_reg_reg (code
, X86_XOR
, X86_EAX
, X86_EAX
);
1960 x86_lea_membase (code
, X86_EDI
, X86_ESP
, 12);
1962 x86_prefix (code
, X86_REP_PREFIX
);
1964 x86_pop_reg (code
, X86_EDI
);
1965 x86_pop_reg (code
, X86_ECX
);
1966 x86_pop_reg (code
, X86_EAX
);
1969 x86_alu_reg_imm (code
, X86_SUB
, sreg
, 0x1000);
1970 x86_alu_reg_imm (code
, X86_CMP
, sreg
, 0x1000);
1971 br
[3] = code
; x86_branch8 (code
, X86_CC_AE
, 0, FALSE
);
1972 x86_patch (br
[3], br
[2]);
1973 x86_test_reg_reg (code
, sreg
, sreg
);
1974 br
[4] = code
; x86_branch8 (code
, X86_CC_Z
, 0, FALSE
);
1975 x86_alu_reg_reg (code
, X86_SUB
, X86_ESP
, sreg
);
1977 br
[1] = code
; x86_jump8 (code
, 0);
1979 x86_patch (br
[0], code
);
1980 x86_alu_reg_reg (code
, X86_SUB
, X86_ESP
, sreg
);
1981 x86_patch (br
[1], code
);
1982 x86_patch (br
[4], code
);
1985 x86_alu_reg_reg (code
, X86_SUB
, X86_ESP
, tree
->sreg1
);
1987 if (tree
->flags
& MONO_INST_INIT
) {
1989 if (tree
->dreg
!= X86_EAX
&& sreg
!= X86_EAX
) {
1990 x86_push_reg (code
, X86_EAX
);
1993 if (tree
->dreg
!= X86_ECX
&& sreg
!= X86_ECX
) {
1994 x86_push_reg (code
, X86_ECX
);
1997 if (tree
->dreg
!= X86_EDI
&& sreg
!= X86_EDI
) {
1998 x86_push_reg (code
, X86_EDI
);
2002 x86_shift_reg_imm (code
, X86_SHR
, sreg
, 2);
2003 if (sreg
!= X86_ECX
)
2004 x86_mov_reg_reg (code
, X86_ECX
, sreg
, 4);
2005 x86_alu_reg_reg (code
, X86_XOR
, X86_EAX
, X86_EAX
);
2007 x86_lea_membase (code
, X86_EDI
, X86_ESP
, offset
);
2009 x86_prefix (code
, X86_REP_PREFIX
);
2012 if (tree
->dreg
!= X86_EDI
&& sreg
!= X86_EDI
)
2013 x86_pop_reg (code
, X86_EDI
);
2014 if (tree
->dreg
!= X86_ECX
&& sreg
!= X86_ECX
)
2015 x86_pop_reg (code
, X86_ECX
);
2016 if (tree
->dreg
!= X86_EAX
&& sreg
!= X86_EAX
)
2017 x86_pop_reg (code
, X86_EAX
);
2024 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
2026 /* Move return value to the target register */
2027 switch (ins
->opcode
) {
2030 case OP_CALL_MEMBASE
:
2031 if (ins
->dreg
!= X86_EAX
)
2032 x86_mov_reg_reg (code
, ins
->dreg
, X86_EAX
, 4);
2042 mono_x86_have_tls_get (void)
2045 guint32
*ins
= (guint32
*)pthread_getspecific
;
2047 * We're looking for these two instructions:
2049 * mov 0x4(%esp),%eax
2050 * mov %gs:0x48(,%eax,4),%eax
2052 return ins
[0] == 0x0424448b && ins
[1] == 0x85048b65 && ins
[2] == 0x00000048;
2059 * mono_x86_emit_tls_get:
2060 * @code: buffer to store code to
2061 * @dreg: hard register where to place the result
2062 * @tls_offset: offset info
2064 * mono_x86_emit_tls_get emits in @code the native code that puts in
2065 * the dreg register the item in the thread local storage identified
2068 * Returns: a pointer to the end of the stored code
2071 mono_x86_emit_tls_get (guint8
* code
, int dreg
, int tls_offset
)
2073 #if defined(__APPLE__)
2074 x86_prefix (code
, X86_GS_PREFIX
);
2075 x86_mov_reg_mem (code
, dreg
, 0x48 + tls_offset
* 4, 4);
2076 #elif defined(TARGET_WIN32)
2078 * See the Under the Hood article in the May 1996 issue of Microsoft Systems
2079 * Journal and/or a disassembly of the TlsGet () function.
2081 g_assert (tls_offset
< 64);
2082 x86_prefix (code
, X86_FS_PREFIX
);
2083 x86_mov_reg_mem (code
, dreg
, 0x18, 4);
2084 /* Dunno what this does but TlsGetValue () contains it */
2085 x86_alu_membase_imm (code
, X86_AND
, dreg
, 0x34, 0);
2086 x86_mov_reg_membase (code
, dreg
, dreg
, 3600 + (tls_offset
* 4), 4);
2088 if (optimize_for_xen
) {
2089 x86_prefix (code
, X86_GS_PREFIX
);
2090 x86_mov_reg_mem (code
, dreg
, 0, 4);
2091 x86_mov_reg_membase (code
, dreg
, dreg
, tls_offset
, 4);
2093 x86_prefix (code
, X86_GS_PREFIX
);
2094 x86_mov_reg_mem (code
, dreg
, tls_offset
, 4);
2101 * emit_load_volatile_arguments:
2103 * Load volatile arguments from the stack to the original input registers.
2104 * Required before a tail call.
2107 emit_load_volatile_arguments (MonoCompile
*cfg
, guint8
*code
)
2109 MonoMethod
*method
= cfg
->method
;
2110 MonoMethodSignature
*sig
;
2115 /* FIXME: Generate intermediate code instead */
2117 sig
= mono_method_signature (method
);
2119 cinfo
= get_call_info (cfg
->generic_sharing_context
, cfg
->mempool
, sig
, FALSE
);
2121 /* This is the opposite of the code in emit_prolog */
2123 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2124 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2126 inst
= cfg
->args
[i
];
2128 if (sig
->hasthis
&& (i
== 0))
2129 arg_type
= &mono_defaults
.object_class
->byval_arg
;
2131 arg_type
= sig
->params
[i
- sig
->hasthis
];
2134 * On x86, the arguments are either in their original stack locations, or in
2137 if (inst
->opcode
== OP_REGVAR
) {
2138 g_assert (ainfo
->storage
== ArgOnStack
);
2140 x86_mov_membase_reg (code
, X86_EBP
, inst
->inst_offset
, inst
->dreg
, 4);
2147 #define REAL_PRINT_REG(text,reg) \
2148 mono_assert (reg >= 0); \
2149 x86_push_reg (code, X86_EAX); \
2150 x86_push_reg (code, X86_EDX); \
2151 x86_push_reg (code, X86_ECX); \
2152 x86_push_reg (code, reg); \
2153 x86_push_imm (code, reg); \
2154 x86_push_imm (code, text " %d %p\n"); \
2155 x86_mov_reg_imm (code, X86_EAX, printf); \
2156 x86_call_reg (code, X86_EAX); \
2157 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 3*4); \
2158 x86_pop_reg (code, X86_ECX); \
2159 x86_pop_reg (code, X86_EDX); \
2160 x86_pop_reg (code, X86_EAX);
2162 /* benchmark and set based on cpu */
2163 #define LOOP_ALIGNMENT 8
2164 #define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
2169 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2174 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
2177 if (cfg
->opt
& MONO_OPT_LOOP
) {
2178 int pad
, align
= LOOP_ALIGNMENT
;
2179 /* set alignment depending on cpu */
2180 if (bb_is_loop_start (bb
) && (pad
= (cfg
->code_len
& (align
- 1)))) {
2182 /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
2183 x86_padding (code
, pad
);
2184 cfg
->code_len
+= pad
;
2185 bb
->native_offset
= cfg
->code_len
;
2189 if (cfg
->verbose_level
> 2)
2190 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
2192 cpos
= bb
->max_offset
;
2194 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
2195 MonoProfileCoverageInfo
*cov
= cfg
->coverage_info
;
2196 g_assert (!cfg
->compile_aot
);
2199 cov
->data
[bb
->dfn
].cil_code
= bb
->cil_code
;
2200 /* this is not thread save, but good enough */
2201 x86_inc_mem (code
, &cov
->data
[bb
->dfn
].count
);
2204 offset
= code
- cfg
->native_code
;
2206 mono_debug_open_block (cfg
, bb
, offset
);
2208 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2209 offset
= code
- cfg
->native_code
;
2211 max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
2213 if (G_UNLIKELY (offset
> (cfg
->code_size
- max_len
- 16))) {
2214 cfg
->code_size
*= 2;
2215 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
2216 code
= cfg
->native_code
+ offset
;
2217 mono_jit_stats
.code_reallocs
++;
2220 if (cfg
->debug_info
)
2221 mono_debug_record_line_number (cfg
, ins
, offset
);
2223 switch (ins
->opcode
) {
2225 x86_mul_reg (code
, ins
->sreg2
, TRUE
);
2228 x86_mul_reg (code
, ins
->sreg2
, FALSE
);
2230 case OP_X86_SETEQ_MEMBASE
:
2231 case OP_X86_SETNE_MEMBASE
:
2232 x86_set_membase (code
, ins
->opcode
== OP_X86_SETEQ_MEMBASE
? X86_CC_EQ
: X86_CC_NE
,
2233 ins
->inst_basereg
, ins
->inst_offset
, TRUE
);
2235 case OP_STOREI1_MEMBASE_IMM
:
2236 x86_mov_membase_imm (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->inst_imm
, 1);
2238 case OP_STOREI2_MEMBASE_IMM
:
2239 x86_mov_membase_imm (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->inst_imm
, 2);
2241 case OP_STORE_MEMBASE_IMM
:
2242 case OP_STOREI4_MEMBASE_IMM
:
2243 x86_mov_membase_imm (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->inst_imm
, 4);
2245 case OP_STOREI1_MEMBASE_REG
:
2246 x86_mov_membase_reg (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->sreg1
, 1);
2248 case OP_STOREI2_MEMBASE_REG
:
2249 x86_mov_membase_reg (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->sreg1
, 2);
2251 case OP_STORE_MEMBASE_REG
:
2252 case OP_STOREI4_MEMBASE_REG
:
2253 x86_mov_membase_reg (code
, ins
->inst_destbasereg
, ins
->inst_offset
, ins
->sreg1
, 4);
2255 case OP_STORE_MEM_IMM
:
2256 x86_mov_mem_imm (code
, ins
->inst_p0
, ins
->inst_c0
, 4);
2259 x86_mov_reg_mem (code
, ins
->dreg
, ins
->inst_imm
, 4);
2263 /* These are created by the cprop pass so they use inst_imm as the source */
2264 x86_mov_reg_mem (code
, ins
->dreg
, ins
->inst_imm
, 4);
2267 x86_widen_mem (code
, ins
->dreg
, ins
->inst_imm
, FALSE
, FALSE
);
2270 x86_widen_mem (code
, ins
->dreg
, ins
->inst_imm
, FALSE
, TRUE
);
2272 case OP_LOAD_MEMBASE
:
2273 case OP_LOADI4_MEMBASE
:
2274 case OP_LOADU4_MEMBASE
:
2275 x86_mov_reg_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, 4);
2277 case OP_LOADU1_MEMBASE
:
2278 x86_widen_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, FALSE
, FALSE
);
2280 case OP_LOADI1_MEMBASE
:
2281 x86_widen_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, TRUE
, FALSE
);
2283 case OP_LOADU2_MEMBASE
:
2284 x86_widen_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, FALSE
, TRUE
);
2286 case OP_LOADI2_MEMBASE
:
2287 x86_widen_membase (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
, TRUE
, TRUE
);
2289 case OP_ICONV_TO_I1
:
2291 x86_widen_reg (code
, ins
->dreg
, ins
->sreg1
, TRUE
, FALSE
);
2293 case OP_ICONV_TO_I2
:
2295 x86_widen_reg (code
, ins
->dreg
, ins
->sreg1
, TRUE
, TRUE
);
2297 case OP_ICONV_TO_U1
:
2298 x86_widen_reg (code
, ins
->dreg
, ins
->sreg1
, FALSE
, FALSE
);
2300 case OP_ICONV_TO_U2
:
2301 x86_widen_reg (code
, ins
->dreg
, ins
->sreg1
, FALSE
, TRUE
);
		case OP_COMPARE:
		case OP_ICOMPARE:
			x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			break;
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
			x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
			break;
		case OP_X86_COMPARE_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_COMPARE_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_COMPARE_MEMBASE8_IMM:
			x86_alu_membase8_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_COMPARE_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_COMPARE_MEM_IMM:
			x86_alu_mem_imm (code, X86_CMP, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_TEST_NULL:
			x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
			break;
		case OP_X86_ADD_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_ADD_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_SUB_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_SUB_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_AND_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_OR_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_XOR_MEMBASE_IMM:
			x86_alu_membase_imm (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
			break;
		case OP_X86_ADD_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_SUB_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_AND_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_AND, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_OR_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_OR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_XOR_MEMBASE_REG:
			x86_alu_membase_reg (code, X86_XOR, ins->inst_basereg, ins->inst_offset, ins->sreg2);
			break;
		case OP_X86_INC_MEMBASE:
			x86_inc_membase (code, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_X86_INC_REG:
			x86_inc_reg (code, ins->dreg);
			break;
		case OP_X86_DEC_MEMBASE:
			x86_dec_membase (code, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_X86_DEC_REG:
			x86_dec_reg (code, ins->dreg);
			break;
		case OP_X86_MUL_REG_MEMBASE:
			x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_AND_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_AND, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_OR_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_OR, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_X86_XOR_REG_MEMBASE:
			x86_alu_reg_membase (code, X86_XOR, ins->sreg1, ins->sreg2, ins->inst_offset);
			break;
		case OP_BREAK:
			x86_breakpoint (code);
			break;
		case OP_RELAXED_NOP:
			x86_prefix (code, X86_REP_PREFIX);
			x86_nop (code);
			break;
		case OP_NOP:
		case OP_DUMMY_USE:
		case OP_DUMMY_STORE:
		case OP_NOT_REACHED:
		case OP_NOT_NULL:
			break;
		case OP_SEQ_POINT: {
			int i;

			if (cfg->compile_aot)
				NOT_IMPLEMENTED;

			/*
			 * Read from the single stepping trigger page. This will cause a
			 * SIGSEGV when single stepping is enabled.
			 * We do this _before_ the breakpoint, so single stepping after
			 * a breakpoint is hit will step to the next IL offset.
			 */
			if (ins->flags & MONO_INST_SINGLE_STEP_LOC)
				x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)ss_trigger_page);

			mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);

			/*
			 * A placeholder for a possible breakpoint inserted by
			 * mono_arch_set_breakpoint ().
			 */
			for (i = 0; i < 6; ++i)
				x86_nop (code);
			break;
		}
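			/*
			 * A note on the sizing, which is an assumption rather than something
			 * the surrounding code states: six bytes appears to match the patch
			 * mono_arch_set_breakpoint () writes here (a cmp of EAX against the
			 * 32-bit bp_trigger_page address is itself six bytes), so the nops
			 * reserve exactly the room that patch needs.
			 */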
		case OP_ADDCC:
		case OP_IADD:
			x86_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
			break;
		case OP_ADC:
		case OP_IADC:
			x86_alu_reg_reg (code, X86_ADC, ins->sreg1, ins->sreg2);
			break;
		case OP_ADDCC_IMM:
		case OP_ADD_IMM:
		case OP_IADD_IMM:
			x86_alu_reg_imm (code, X86_ADD, ins->dreg, ins->inst_imm);
			break;
		case OP_ADC_IMM:
		case OP_IADC_IMM:
			x86_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
			break;
		case OP_SUBCC:
		case OP_ISUB:
			x86_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
			break;
		case OP_SBB:
		case OP_ISBB:
			x86_alu_reg_reg (code, X86_SBB, ins->sreg1, ins->sreg2);
			break;
		case OP_SUBCC_IMM:
		case OP_SUB_IMM:
		case OP_ISUB_IMM:
			x86_alu_reg_imm (code, X86_SUB, ins->dreg, ins->inst_imm);
			break;
		case OP_SBB_IMM:
		case OP_ISBB_IMM:
			x86_alu_reg_imm (code, X86_SBB, ins->dreg, ins->inst_imm);
			break;
		case OP_IAND:
			x86_alu_reg_reg (code, X86_AND, ins->sreg1, ins->sreg2);
			break;
		case OP_AND_IMM:
		case OP_IAND_IMM:
			x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
			break;
		case OP_IDIV:
		case OP_IREM:
			/*
			 * The code is the same for div/rem, the allocator will allocate dreg
			 * to RAX/RDX as appropriate.
			 */
			if (ins->sreg2 == X86_EDX) {
				/* cdq clobbers this */
				x86_push_reg (code, ins->sreg2);
				x86_cdq (code);
				x86_div_membase (code, X86_ESP, 0, TRUE);
				x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			} else {
				x86_cdq (code);
				x86_div_reg (code, ins->sreg2, TRUE);
			}
			break;
		case OP_IDIV_UN:
		case OP_IREM_UN:
			if (ins->sreg2 == X86_EDX) {
				x86_push_reg (code, ins->sreg2);
				x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
				x86_div_membase (code, X86_ESP, 0, FALSE);
				x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			} else {
				x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
				x86_div_reg (code, ins->sreg2, FALSE);
			}
			break;
		case OP_DIV_IMM:
			x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
			x86_cdq (code);
			x86_div_reg (code, ins->sreg2, TRUE);
			break;
		case OP_IREM_IMM: {
			int power = mono_is_power_of_two (ins->inst_imm);

			g_assert (ins->sreg1 == X86_EAX);
			g_assert (ins->dreg == X86_EAX);
			g_assert (power >= 0);

			if (power == 1) {
				/* Based on http://compilers.iecc.com/comparch/article/93-04-079 */
				x86_cdq (code);
				x86_alu_reg_imm (code, X86_AND, X86_EAX, 1);
				/*
				 * If the dividend is >= 0, this does nothing. If it is negative,
				 * it transforms %eax=0 into %eax=0 and %eax=1 into %eax=-1.
				 */
				x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
				x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
			} else if (power == 0) {
				x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
			} else {
				/* Based on gcc code */

				/* Add compensation for negative dividends */
				x86_cdq (code);
				x86_shift_reg_imm (code, X86_SHR, X86_EDX, 32 - power);
				x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EDX);
				/* Compute remainder */
				x86_alu_reg_imm (code, X86_AND, X86_EAX, (1 << power) - 1);
				/* Remove compensation */
				x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
			}
			break;
		}
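		/*
		 * Worked example for the general power-of-two branch above, with
		 * inst_imm == 8 (power == 3) and %eax == -13:
		 *   cdq              ; %edx = 0xffffffff
		 *   shr $29, %edx    ; %edx = 7 (the compensation, divisor - 1)
		 *   add %edx, %eax   ; %eax = -6
		 *   and $7, %eax     ; %eax = 2
		 *   sub %edx, %eax   ; %eax = -5
		 * giving -13 % 8 == -5, the truncated-division remainder the CLI requires.
		 */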
		case OP_IOR:
			x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
			break;
		case OP_OR_IMM:
		case OP_IOR_IMM:
			x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
			break;
		case OP_IXOR:
			x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
			break;
		case OP_XOR_IMM:
		case OP_IXOR_IMM:
			x86_alu_reg_imm (code, X86_XOR, ins->sreg1, ins->inst_imm);
			break;
		case OP_ISHL:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SHL, ins->dreg);
			break;
		case OP_ISHR:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SAR, ins->dreg);
			break;
		case OP_SHR_IMM:
		case OP_ISHR_IMM:
			x86_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
			break;
		case OP_SHR_UN_IMM:
		case OP_ISHR_UN_IMM:
			x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_imm);
			break;
		case OP_ISHR_UN:
			g_assert (ins->sreg2 == X86_ECX);
			x86_shift_reg (code, X86_SHR, ins->dreg);
			break;
		case OP_SHL_IMM:
		case OP_ISHL_IMM:
			x86_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
			break;
		case OP_LSHL: {
			guint8 *jump_to_end;

			/* handle shifts below 32 bits */
			x86_shld_reg (code, ins->backend.reg3, ins->sreg1);
			x86_shift_reg (code, X86_SHL, ins->sreg1);

			x86_test_reg_imm (code, X86_ECX, 32);
			jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);

			/* handle shift over 32 bit */
			x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
			x86_clear_reg (code, ins->sreg1);

			x86_patch (jump_to_end, code);
			break;
		}
		case OP_LSHR: {
			guint8 *jump_to_end;

			/* handle shifts below 32 bits */
			x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
			x86_shift_reg (code, X86_SAR, ins->backend.reg3);

			x86_test_reg_imm (code, X86_ECX, 32);
			jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

			/* handle shifts over 31 bits */
			x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
			x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 31);

			x86_patch (jump_to_end, code);
			break;
		}
		case OP_LSHR_UN: {
			guint8 *jump_to_end;

			/* handle shifts below 32 bits */
			x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
			x86_shift_reg (code, X86_SHR, ins->backend.reg3);

			x86_test_reg_imm (code, X86_ECX, 32);
			jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

			/* handle shifts over 31 bits */
			x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
			x86_clear_reg (code, ins->backend.reg3);

			x86_patch (jump_to_end, code);
			break;
		}
		case OP_LSHL_IMM:
			if (ins->inst_imm >= 32) {
				x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
				x86_clear_reg (code, ins->sreg1);
				x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32);
			} else {
				x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm);
				x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm);
			}
			break;
		case OP_LSHR_IMM:
			if (ins->inst_imm >= 32) {
				x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
				x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f);
				x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32);
			} else {
				x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
				x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, ins->inst_imm);
			}
			break;
		case OP_LSHR_UN_IMM:
			if (ins->inst_imm >= 32) {
				x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
				x86_clear_reg (code, ins->backend.reg3);
				x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32);
			} else {
				x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
				x86_shift_reg_imm (code, X86_SHR, ins->backend.reg3, ins->inst_imm);
			}
			break;
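		/*
		 * Background for the OP_LSHL/OP_LSHR* cases above: shld/shrd move bits
		 * across the low/high register pair, but the hardware masks variable
		 * shift counts to five bits, so counts of 32 or more would silently
		 * wrap. The "test %ecx, 32" detects that case after the optimistic
		 * small-count shift and fixes the pair up by moving one word across
		 * and zero- or sign-filling the other.
		 */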
		case OP_INOT:
			x86_not_reg (code, ins->sreg1);
			break;
		case OP_INEG:
			x86_neg_reg (code, ins->sreg1);
			break;
		case OP_IMUL:
			x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
			break;
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			switch (ins->inst_imm) {
			case 2:
				/* MOV r1, r2 */
				/* ADD r1, r1 */
				if (ins->dreg != ins->sreg1)
					x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
				x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
				break;
			case 3:
				/* LEA r1, [r2 + r2*2] */
				x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
				break;
			case 5:
				/* LEA r1, [r2 + r2*4] */
				x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
				break;
			case 6:
				/* LEA r1, [r2 + r2*2] */
				/* ADD r1, r1          */
				x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
				x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
				break;
			case 9:
				/* LEA r1, [r2 + r2*8] */
				x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
				break;
			case 10:
				/* LEA r1, [r2 + r2*4] */
				/* ADD r1, r1          */
				x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
				x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
				break;
			case 12:
				/* LEA r1, [r2 + r2*2] */
				/* SHL r1, 2           */
				x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
				x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
				break;
			case 25:
				/* LEA r1, [r2 + r2*4] */
				/* LEA r1, [r1 + r1*4] */
				x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
				x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
				break;
			case 100:
				/* LEA r1, [r2 + r2*4] */
				/* SHL r1, 2           */
				/* LEA r1, [r1 + r1*4] */
				x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
				x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
				x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
				break;
			default:
				x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
				break;
			}
			break;
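		/*
		 * The constants special-cased above (2, 3, 5, 6, 9, 10, 12, 25, 100)
		 * are exactly those reachable in one or two cheap lea/add/shl steps;
		 * anything else falls through to the generic imul with an immediate.
		 */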
		case OP_IMUL_OVF:
			x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
			break;
		case OP_IMUL_OVF_UN: {
			/* the mul operation and the exception check should most likely be split */
			int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
			/*g_assert (ins->sreg2 == X86_EAX);
			g_assert (ins->dreg == X86_EAX);*/
			if (ins->sreg2 == X86_EAX) {
				non_eax_reg = ins->sreg1;
			} else if (ins->sreg1 == X86_EAX) {
				non_eax_reg = ins->sreg2;
			} else {
				/* no need to save since we're going to store to it anyway */
				if (ins->dreg != X86_EAX) {
					saved_eax = TRUE;
					x86_push_reg (code, X86_EAX);
				}
				x86_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
				non_eax_reg = ins->sreg2;
			}
			if (ins->dreg == X86_EDX) {
				if (!saved_eax) {
					saved_eax = TRUE;
					x86_push_reg (code, X86_EAX);
				}
			} else if (ins->dreg != X86_EAX) {
				saved_edx = TRUE;
				x86_push_reg (code, X86_EDX);
			}
			x86_mul_reg (code, non_eax_reg, FALSE);
			/* save before the check since pop and mov don't change the flags */
			if (ins->dreg != X86_EAX)
				x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
			if (saved_edx)
				x86_pop_reg (code, X86_EDX);
			if (saved_eax)
				x86_pop_reg (code, X86_EAX);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
			break;
		}
		case OP_ICONST:
			x86_mov_reg_imm (code, ins->dreg, ins->inst_c0);
			break;
		case OP_AOTCONST:
			g_assert_not_reached ();
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
			x86_mov_reg_imm (code, ins->dreg, 0);
			break;
		case OP_JUMP_TABLE:
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
			x86_mov_reg_imm (code, ins->dreg, 0);
			break;
		case OP_LOAD_GOTADDR:
			x86_call_imm (code, 0);
			/*
			 * The patch needs to point to the pop, since the GOT offset needs
			 * to be added to that address.
			 */
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
			x86_pop_reg (code, ins->dreg);
			x86_alu_reg_imm (code, X86_ADD, ins->dreg, 0xf0f0f0f0);
			break;
		case OP_GOT_ENTRY:
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
			x86_mov_reg_membase (code, ins->dreg, ins->inst_basereg, 0xf0f0f0f0, 4);
			break;
		case OP_X86_PUSH_GOT_ENTRY:
			mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
			x86_push_membase (code, ins->inst_basereg, 0xf0f0f0f0);
			break;
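		/*
		 * The 0xf0f0f0f0 displacements above are placeholder bytes, not real
		 * offsets: the patch info recorded just before each instruction tells
		 * the patching code where to substitute the actual GOT offset later.
		 */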
		case OP_MOVE:
			x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
			break;
		case OP_JMP: {
			/*
			 * Note: this 'frame destruction' logic is useful for tail calls, too.
			 * Keep in sync with the code in emit_epilog.
			 */
			int pos = 0;

			/* FIXME: no tracing support... */
			if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
				code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
			/* reset offset to make max_len work */
			offset = code - cfg->native_code;

			g_assert (!cfg->method->save_lmf);

			code = emit_load_volatile_arguments (cfg, code);

			if (cfg->used_int_regs & (1 << X86_EBX))
				pos -= 4;
			if (cfg->used_int_regs & (1 << X86_EDI))
				pos -= 4;
			if (cfg->used_int_regs & (1 << X86_ESI))
				pos -= 4;

			if (pos)
				x86_lea_membase (code, X86_ESP, X86_EBP, pos);

			if (cfg->used_int_regs & (1 << X86_ESI))
				x86_pop_reg (code, X86_ESI);
			if (cfg->used_int_regs & (1 << X86_EDI))
				x86_pop_reg (code, X86_EDI);
			if (cfg->used_int_regs & (1 << X86_EBX))
				x86_pop_reg (code, X86_EBX);

			/* restore ESP/EBP */
			x86_leave (code);
			offset = code - cfg->native_code;
			mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
			x86_jump32 (code, 0);

			cfg->disable_aot = TRUE;
			break;
		}
		case OP_CHECK_THIS:
			/* ensure ins->sreg1 is not NULL;
			 * note that cmp DWORD PTR [eax], eax is one byte shorter than
			 * cmp DWORD PTR [eax], 0
			 */
			x86_alu_membase_reg (code, X86_CMP, ins->sreg1, 0, ins->sreg1);
			break;
		case OP_ARGLIST: {
			int hreg = ins->sreg1 == X86_EAX ? X86_ECX : X86_EAX;
			x86_push_reg (code, hreg);
			x86_lea_membase (code, hreg, X86_EBP, cfg->sig_cookie);
			x86_mov_membase_reg (code, ins->sreg1, 0, hreg, 4);
			x86_pop_reg (code, hreg);
			break;
		}
		case OP_FCALL:
		case OP_LCALL:
		case OP_VCALL:
		case OP_VCALL2:
		case OP_VOIDCALL:
		case OP_CALL:
			call = (MonoCallInst*)ins;
			if (ins->flags & MONO_INST_HAS_METHOD)
				code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
			else
				code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
			if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
				/* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
				 * bytes to pop, we want to use pops. GCC does this (note it won't happen
				 * for P4 or i686 because gcc will avoid using pop push at all. But we aren't
				 * smart enough to do that optimization yet).
				 *
				 * It turns out that on my P4, doing two pops for 8 bytes on the stack makes
				 * the mcs bootstrap slow down. However, doing 1 pop for 4 bytes creates a
				 * small win (most likely from locality benefits). People with other
				 * processors should check on theirs to see what happens.
				 */
				if (call->stack_usage == 4) {
					/* we want to use registers that won't get used soon, so use
					 * ecx, as eax will get allocated first. edx is used by long calls,
					 * so we can't use that.
					 */
					x86_pop_reg (code, X86_ECX);
				} else {
					x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
				}
			}
			code = emit_move_return_value (cfg, ins, code);
			break;
		case OP_FCALL_REG:
		case OP_LCALL_REG:
		case OP_VCALL_REG:
		case OP_VCALL2_REG:
		case OP_VOIDCALL_REG:
		case OP_CALL_REG:
			call = (MonoCallInst*)ins;
			x86_call_reg (code, ins->sreg1);
			if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
				if (call->stack_usage == 4)
					x86_pop_reg (code, X86_ECX);
				else
					x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
			}
			code = emit_move_return_value (cfg, ins, code);
			break;
		case OP_FCALL_MEMBASE:
		case OP_LCALL_MEMBASE:
		case OP_VCALL_MEMBASE:
		case OP_VCALL2_MEMBASE:
		case OP_VOIDCALL_MEMBASE:
		case OP_CALL_MEMBASE:
			call = (MonoCallInst*)ins;

			/*
			 * Emit a few nops to simplify get_vcall_slot ().
			 */
			x86_nop (code);
			x86_nop (code);
			x86_nop (code);

			x86_call_membase (code, ins->sreg1, ins->inst_offset);
			if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
				if (call->stack_usage == 4)
					x86_pop_reg (code, X86_ECX);
				else
					x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
			}
			code = emit_move_return_value (cfg, ins, code);
			break;
		case OP_X86_PUSH:
			x86_push_reg (code, ins->sreg1);
			break;
		case OP_X86_PUSH_IMM:
			x86_push_imm (code, ins->inst_imm);
			break;
		case OP_X86_PUSH_MEMBASE:
			x86_push_membase (code, ins->inst_basereg, ins->inst_offset);
			break;
		case OP_X86_PUSH_OBJ:
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, ins->inst_imm);
			x86_push_reg (code, X86_EDI);
			x86_push_reg (code, X86_ESI);
			x86_push_reg (code, X86_ECX);
			if (ins->inst_offset)
				x86_lea_membase (code, X86_ESI, ins->inst_basereg, ins->inst_offset);
			else
				x86_mov_reg_reg (code, X86_ESI, ins->inst_basereg, 4);
			x86_lea_membase (code, X86_EDI, X86_ESP, 12);
			x86_mov_reg_imm (code, X86_ECX, (ins->inst_imm >> 2));
			x86_cld (code);
			x86_prefix (code, X86_REP_PREFIX);
			x86_movsd (code);
			x86_pop_reg (code, X86_ECX);
			x86_pop_reg (code, X86_ESI);
			x86_pop_reg (code, X86_EDI);
			break;
		case OP_X86_LEA:
			x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
			break;
		case OP_X86_LEA_MEMBASE:
			x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
			break;
		case OP_X86_XCHG:
			x86_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
			break;
		case OP_LOCALLOC:
			/* keep alignment */
			x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1);
			x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
			code = mono_emit_stack_alloc (code, ins);
			x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
			break;
		case OP_LOCALLOC_IMM: {
			guint32 size = ins->inst_imm;
			size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~(MONO_ARCH_FRAME_ALIGNMENT - 1);

			if (ins->flags & MONO_INST_INIT) {
				/* FIXME: Optimize this */
				x86_mov_reg_imm (code, ins->dreg, size);
				ins->sreg1 = ins->dreg;

				code = mono_emit_stack_alloc (code, ins);
				x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
			} else {
				x86_alu_reg_imm (code, X86_SUB, X86_ESP, size);
				x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
			}
			break;
		}
		case OP_THROW: {
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
			x86_push_reg (code, ins->sreg1);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
					  (gpointer)"mono_arch_throw_exception");
			break;
		}
		case OP_RETHROW: {
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
			x86_push_reg (code, ins->sreg1);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
					  (gpointer)"mono_arch_rethrow_exception");
			break;
		}
		case OP_CALL_HANDLER:
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
			x86_call_imm (code, 0);
			mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
			break;
		case OP_START_HANDLER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			x86_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, X86_ESP, 4);
			break;
		}
		case OP_ENDFINALLY: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4);
			x86_ret (code);
			break;
		}
		case OP_ENDFILTER: {
			MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
			x86_mov_reg_membase (code, X86_ESP, spvar->inst_basereg, spvar->inst_offset, 4);
			/* The local allocator will put the result into EAX */
			x86_ret (code);
			break;
		}
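		/*
		 * Handler blocks are entered via call, so OP_START_HANDLER stashes the
		 * ESP it was entered with in spvar; OP_ENDFINALLY/OP_ENDFILTER restore
		 * ESP from that slot before the ret, so control returns to the point
		 * that invoked the handler rather than to stale stack contents.
		 */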
		case OP_LABEL:
			ins->inst_c0 = code - cfg->native_code;
			break;
		case OP_BR:
			if (ins->inst_target_bb->native_offset) {
				x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
			} else {
				mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
				if ((cfg->opt & MONO_OPT_BRANCH) &&
				    x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
					x86_jump8 (code, 0);
				else
					x86_jump32 (code, 0);
			}
			break;
		case OP_BR_REG:
			x86_jump_reg (code, ins->sreg1);
			break;
		case OP_CEQ:
		case OP_CLT:
		case OP_CLT_UN:
		case OP_CGT:
		case OP_CGT_UN:
		case OP_CNE:
		case OP_ICEQ:
		case OP_ICLT:
		case OP_ICLT_UN:
		case OP_ICGT:
		case OP_ICGT_UN:
			x86_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
			x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
			break;
		case OP_COND_EXC_EQ:
		case OP_COND_EXC_NE_UN:
		case OP_COND_EXC_LT:
		case OP_COND_EXC_LT_UN:
		case OP_COND_EXC_GT:
		case OP_COND_EXC_GT_UN:
		case OP_COND_EXC_GE:
		case OP_COND_EXC_GE_UN:
		case OP_COND_EXC_LE:
		case OP_COND_EXC_LE_UN:
		case OP_COND_EXC_IEQ:
		case OP_COND_EXC_INE_UN:
		case OP_COND_EXC_ILT:
		case OP_COND_EXC_ILT_UN:
		case OP_COND_EXC_IGT:
		case OP_COND_EXC_IGT_UN:
		case OP_COND_EXC_IGE:
		case OP_COND_EXC_IGE_UN:
		case OP_COND_EXC_ILE:
		case OP_COND_EXC_ILE_UN:
			EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->inst_p1);
			break;
		case OP_COND_EXC_OV:
		case OP_COND_EXC_NO:
		case OP_COND_EXC_C:
		case OP_COND_EXC_NC:
			EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
			break;
		case OP_COND_EXC_IOV:
		case OP_COND_EXC_INO:
		case OP_COND_EXC_IC:
		case OP_COND_EXC_INC:
			EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_IEQ], (ins->opcode < OP_COND_EXC_INE_UN), ins->inst_p1);
			break;
		case OP_IBEQ:
		case OP_IBNE_UN:
		case OP_IBLT:
		case OP_IBLT_UN:
		case OP_IBGT:
		case OP_IBGT_UN:
		case OP_IBGE:
		case OP_IBGE_UN:
		case OP_IBLE:
		case OP_IBLE_UN:
			EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
			break;
		case OP_CMOV_IEQ:
		case OP_CMOV_IGE:
		case OP_CMOV_IGT:
		case OP_CMOV_ILE:
		case OP_CMOV_ILT:
		case OP_CMOV_INE_UN:
		case OP_CMOV_IGE_UN:
		case OP_CMOV_IGT_UN:
		case OP_CMOV_ILE_UN:
		case OP_CMOV_ILT_UN:
			g_assert (ins->dreg == ins->sreg1);
			x86_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
			break;
		/* floating point opcodes */
		case OP_R8CONST: {
			double d = *(double *)ins->inst_p0;

			if ((d == 0.0) && (mono_signbit (d) == 0)) {
				x86_fldz (code);
			} else if (d == 1.0) {
				x86_fld1 (code);
			} else {
				if (cfg->compile_aot) {
					guint32 *val = (guint32*)&d;
					x86_push_imm (code, val [1]);
					x86_push_imm (code, val [0]);
					x86_fld_membase (code, X86_ESP, 0, TRUE);
					x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
				} else {
					mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R8, ins->inst_p0);
					x86_fld (code, NULL, TRUE);
				}
			}
			break;
		}
		case OP_R4CONST: {
			float f = *(float *)ins->inst_p0;

			if ((f == 0.0) && (mono_signbit (f) == 0)) {
				x86_fldz (code);
			} else if (f == 1.0) {
				x86_fld1 (code);
			} else {
				if (cfg->compile_aot) {
					guint32 val = *(guint32*)&f;
					x86_push_imm (code, val);
					x86_fld_membase (code, X86_ESP, 0, FALSE);
					x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
				} else {
					mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_R4, ins->inst_p0);
					x86_fld (code, NULL, FALSE);
				}
			}
			break;
		}
		case OP_STORER8_MEMBASE_REG:
			x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
			break;
		case OP_LOADR8_MEMBASE:
			x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
			break;
		case OP_STORER4_MEMBASE_REG:
			x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, FALSE, TRUE);
			break;
		case OP_LOADR4_MEMBASE:
			x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
			break;
		case OP_ICONV_TO_R4:
			x86_push_reg (code, ins->sreg1);
			x86_fild_membase (code, X86_ESP, 0, FALSE);
			/* Change precision */
			x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
			x86_fld_membase (code, X86_ESP, 0, FALSE);
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			break;
		case OP_ICONV_TO_R8:
			x86_push_reg (code, ins->sreg1);
			x86_fild_membase (code, X86_ESP, 0, FALSE);
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			break;
		case OP_ICONV_TO_R_UN:
			x86_push_imm (code, 0);
			x86_push_reg (code, ins->sreg1);
			x86_fild_membase (code, X86_ESP, 0, TRUE);
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
			break;
		case OP_X86_FP_LOAD_I8:
			x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
			break;
		case OP_X86_FP_LOAD_I4:
			x86_fild_membase (code, ins->inst_basereg, ins->inst_offset, FALSE);
			break;
		case OP_FCONV_TO_R4:
			/* Change precision */
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
			x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
			x86_fld_membase (code, X86_ESP, 0, FALSE);
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			break;
		case OP_FCONV_TO_I1:
			code = emit_float_to_int (cfg, code, ins->dreg, 1, TRUE);
			break;
		case OP_FCONV_TO_U1:
			code = emit_float_to_int (cfg, code, ins->dreg, 1, FALSE);
			break;
		case OP_FCONV_TO_I2:
			code = emit_float_to_int (cfg, code, ins->dreg, 2, TRUE);
			break;
		case OP_FCONV_TO_U2:
			code = emit_float_to_int (cfg, code, ins->dreg, 2, FALSE);
			break;
		case OP_FCONV_TO_I4:
		case OP_FCONV_TO_I:
			code = emit_float_to_int (cfg, code, ins->dreg, 4, TRUE);
			break;
		case OP_FCONV_TO_I8:
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
			x86_fnstcw_membase (code, X86_ESP, 0);
			x86_mov_reg_membase (code, ins->dreg, X86_ESP, 0, 2);
			x86_alu_reg_imm (code, X86_OR, ins->dreg, 0xc00);
			x86_mov_membase_reg (code, X86_ESP, 2, ins->dreg, 2);
			x86_fldcw_membase (code, X86_ESP, 2);
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
			x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
			x86_pop_reg (code, ins->dreg);
			x86_pop_reg (code, ins->backend.reg3);
			x86_fldcw_membase (code, X86_ESP, 0);
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			break;
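		/*
		 * OR-ing 0xc00 into the x87 control word sets the rounding-control
		 * bits (10-11) to round-toward-zero, the truncation CIL conv.i8
		 * requires; the original control word stays on the stack and the
		 * final fldcw restores it.
		 */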
		case OP_LCONV_TO_R8_2:
			x86_push_reg (code, ins->sreg2);
			x86_push_reg (code, ins->sreg1);
			x86_fild_membase (code, X86_ESP, 0, TRUE);
			/* Change precision */
			x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
			x86_fld_membase (code, X86_ESP, 0, TRUE);
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
			break;
		case OP_LCONV_TO_R4_2:
			x86_push_reg (code, ins->sreg2);
			x86_push_reg (code, ins->sreg1);
			x86_fild_membase (code, X86_ESP, 0, TRUE);
			/* Change precision */
			x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
			x86_fld_membase (code, X86_ESP, 0, FALSE);
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
			break;
		case OP_LCONV_TO_R_UN_2: {
			static guint8 mn [] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
			guint8 *br;

			/* load 64bit integer to FP stack */
			x86_push_reg (code, ins->sreg2);
			x86_push_reg (code, ins->sreg1);
			x86_fild_membase (code, X86_ESP, 0, TRUE);

			/* test if lreg is negative */
			x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
			br = code; x86_branch8 (code, X86_CC_GEZ, 0, TRUE);

			/* add correction constant mn */
			x86_fld80_mem (code, mn);
			x86_fp_op_reg (code, X86_FADD, 1, TRUE);

			x86_patch (br, code);

			/* Change precision */
			x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
			x86_fld_membase (code, X86_ESP, 0, TRUE);

			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
			break;
		}
		case OP_LCONV_TO_OVF_I:
		case OP_LCONV_TO_OVF_I4_2: {
			guint8 *br [3], *label [1];
			MonoInst *tins;

			/*
			 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
			 */
			x86_test_reg_reg (code, ins->sreg1, ins->sreg1);

			/* If the low word top bit is set, see if we are negative */
			br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
			/* We are not negative (no top bit set), check for our top word to be zero */
			x86_test_reg_reg (code, ins->sreg2, ins->sreg2);
			br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
			label [0] = code;

			/* throw exception */
			tins = mono_branch_optimize_exception_target (cfg, bb, "OverflowException");
			if (tins) {
				mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, tins->inst_true_bb);
				if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (tins->inst_true_bb->max_offset - cpos))
					x86_jump8 (code, 0);
				else
					x86_jump32 (code, 0);
			} else {
				mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
				x86_jump32 (code, 0);
			}

			x86_patch (br [0], code);
			/* our top bit is set, check that top word is 0xffffffff */
			x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);

			x86_patch (br [1], code);
			/* nope, emit exception */
			br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
			x86_patch (br [2], label [0]);

			if (ins->dreg != ins->sreg1)
				x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
			break;
		}
		case OP_FMOVE:
			/* Not needed on the fp stack */
			break;
		case OP_FADD:
			x86_fp_op_reg (code, X86_FADD, 1, TRUE);
			break;
		case OP_FSUB:
			x86_fp_op_reg (code, X86_FSUB, 1, TRUE);
			break;
		case OP_FMUL:
			x86_fp_op_reg (code, X86_FMUL, 1, TRUE);
			break;
		case OP_FDIV:
			x86_fp_op_reg (code, X86_FDIV, 1, TRUE);
			break;
		case OP_FNEG:
			x86_fchs (code);
			break;
		case OP_SIN:
			x86_fsin (code);
			x86_fldz (code);
			x86_fp_op_reg (code, X86_FADD, 1, TRUE);
			break;
		case OP_COS:
			x86_fcos (code);
			x86_fldz (code);
			x86_fp_op_reg (code, X86_FADD, 1, TRUE);
			break;
		case OP_TAN: {
			/*
			 * it really doesn't make sense to inline all this code,
			 * it's here just to show that things may not be as simple
			 * as they appear.
			 */
			guchar *check_pos, *end_tan, *pop_jump;
			x86_push_reg (code, X86_EAX);
			x86_fptan (code);
			x86_fnstsw (code);
			x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
			check_pos = code;
			x86_branch8 (code, X86_CC_NE, 0, FALSE);
			x86_fstp (code, 0); /* pop the 1.0 */
			end_tan = code;
			x86_jump8 (code, 0);
			x86_fldpi (code);
			x86_fp_op (code, X86_FADD, 0);
			x86_fxch (code, 1);
			x86_fprem1 (code);
			x86_fstsw (code);
			x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
			pop_jump = code;
			x86_branch8 (code, X86_CC_NE, 0, FALSE);
			x86_fstp (code, 1);
			x86_fptan (code);
			x86_patch (pop_jump, code);
			x86_fstp (code, 0); /* pop the 1.0 */
			x86_patch (check_pos, code);
			x86_patch (end_tan, code);
			x86_fldz (code);
			x86_fp_op_reg (code, X86_FADD, 1, TRUE);
			x86_pop_reg (code, X86_EAX);
			break;
		}
		case OP_ATAN:
			x86_fld1 (code);
			x86_fpatan (code);
			x86_fldz (code);
			x86_fp_op_reg (code, X86_FADD, 1, TRUE);
			break;
		case OP_SQRT:
			x86_fsqrt (code);
			break;
		case OP_IMIN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			x86_cmov_reg (code, X86_CC_GT, TRUE, ins->dreg, ins->sreg2);
			break;
		case OP_IMIN_UN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			x86_cmov_reg (code, X86_CC_GT, FALSE, ins->dreg, ins->sreg2);
			break;
		case OP_IMAX:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			x86_cmov_reg (code, X86_CC_LT, TRUE, ins->dreg, ins->sreg2);
			break;
		case OP_IMAX_UN:
			g_assert (cfg->opt & MONO_OPT_CMOV);
			g_assert (ins->dreg == ins->sreg1);
			x86_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
			x86_cmov_reg (code, X86_CC_LT, FALSE, ins->dreg, ins->sreg2);
			break;
		case OP_X86_FPOP:
			x86_fstp (code, 0);
			break;
		case OP_X86_FXCH:
			x86_fxch (code, ins->inst_imm);
			break;
		case OP_FREM: {
			guint8 *l1, *l2;

			x86_push_reg (code, X86_EAX);
			/* we need to exchange ST(0) with ST(1) */
			x86_fxch (code, 1);

			/* this requires a loop, because fprem sometimes
			 * returns a partial remainder */
			l1 = code;
			/* looks like MS is using fprem instead of the IEEE compatible fprem1 */
			/* x86_fprem1 (code); */
			x86_fprem (code);
			x86_fnstsw (code);
			x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_C2);
			l2 = code + 2;
			x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);

			/* pop result */
			x86_fstp (code, 1);

			x86_pop_reg (code, X86_EAX);
			break;
		}
		case OP_FCOMPARE:
			if (cfg->opt & MONO_OPT_FCMOV) {
				x86_fcomip (code, 1);
				x86_fstp (code, 0);
				break;
			}
			/* this overwrites EAX */
			EMIT_FPCOMPARE(code);
			x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
			break;
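		/*
		 * Two FP comparison strategies coexist below. With MONO_OPT_FCMOV,
		 * fcomip puts the result directly into EFLAGS, where PF set means the
		 * operands were unordered (a NaN was involved). Without it, the x87
		 * status word is fetched into EAX and the C0/C2/C3 condition bits are
		 * tested by hand, which is why EAX must be saved around those paths.
		 */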
		case OP_FCEQ:
			if (cfg->opt & MONO_OPT_FCMOV) {
				/* zeroing the register at the start results in
				 * shorter and faster code (we can also remove the widening op)
				 */
				guchar *unordered_check;
				x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
				x86_fcomip (code, 1);
				x86_fstp (code, 0);
				unordered_check = code;
				x86_branch8 (code, X86_CC_P, 0, FALSE);
				x86_set_reg (code, X86_CC_EQ, ins->dreg, FALSE);
				x86_patch (unordered_check, code);
				break;
			}
			if (ins->dreg != X86_EAX)
				x86_push_reg (code, X86_EAX);

			EMIT_FPCOMPARE(code);
			x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
			x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
			x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);

			if (ins->dreg != X86_EAX)
				x86_pop_reg (code, X86_EAX);
			break;
		case OP_FCLT:
		case OP_FCLT_UN:
			if (cfg->opt & MONO_OPT_FCMOV) {
				/* zeroing the register at the start results in
				 * shorter and faster code (we can also remove the widening op)
				 */
				x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
				x86_fcomip (code, 1);
				x86_fstp (code, 0);
				if (ins->opcode == OP_FCLT_UN) {
					guchar *unordered_check = code;
					guchar *jump_to_end;
					x86_branch8 (code, X86_CC_P, 0, FALSE);
					x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
					jump_to_end = code;
					x86_jump8 (code, 0);
					x86_patch (unordered_check, code);
					x86_inc_reg (code, ins->dreg);
					x86_patch (jump_to_end, code);
				} else {
					x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
				}
				break;
			}
			if (ins->dreg != X86_EAX)
				x86_push_reg (code, X86_EAX);

			EMIT_FPCOMPARE(code);
			x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
			if (ins->opcode == OP_FCLT_UN) {
				guchar *is_not_zero_check, *end_jump;
				is_not_zero_check = code;
				x86_branch8 (code, X86_CC_NZ, 0, TRUE);
				end_jump = code;
				x86_jump8 (code, 0);
				x86_patch (is_not_zero_check, code);
				x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);

				x86_patch (end_jump, code);
			}
			x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
			x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);

			if (ins->dreg != X86_EAX)
				x86_pop_reg (code, X86_EAX);
			break;
		case OP_FCGT:
		case OP_FCGT_UN:
			if (cfg->opt & MONO_OPT_FCMOV) {
				/* zeroing the register at the start results in
				 * shorter and faster code (we can also remove the widening op)
				 */
				guchar *unordered_check;
				x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
				x86_fcomip (code, 1);
				x86_fstp (code, 0);
				if (ins->opcode == OP_FCGT) {
					unordered_check = code;
					x86_branch8 (code, X86_CC_P, 0, FALSE);
					x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
					x86_patch (unordered_check, code);
				} else {
					x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
				}
				break;
			}
			if (ins->dreg != X86_EAX)
				x86_push_reg (code, X86_EAX);

			EMIT_FPCOMPARE(code);
			x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
			if (ins->opcode == OP_FCGT_UN) {
				guchar *is_not_zero_check, *end_jump;
				is_not_zero_check = code;
				x86_branch8 (code, X86_CC_NZ, 0, TRUE);
				end_jump = code;
				x86_jump8 (code, 0);
				x86_patch (is_not_zero_check, code);
				x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);

				x86_patch (end_jump, code);
			}
			x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
			x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);

			if (ins->dreg != X86_EAX)
				x86_pop_reg (code, X86_EAX);
			break;
		case OP_FBEQ:
			if (cfg->opt & MONO_OPT_FCMOV) {
				guchar *jump = code;
				x86_branch8 (code, X86_CC_P, 0, TRUE);
				EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
				x86_patch (jump, code);
				break;
			}
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
			EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
			break;
		case OP_FBNE_UN:
			/* Branch if C013 != 100 */
			if (cfg->opt & MONO_OPT_FCMOV) {
				/* branch if !ZF or (PF|CF) */
				EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
				EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
				EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
				break;
			}
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
			EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
			break;
		case OP_FBLT:
			if (cfg->opt & MONO_OPT_FCMOV) {
				EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
				break;
			}
			EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
			break;
		case OP_FBLT_UN:
			if (cfg->opt & MONO_OPT_FCMOV) {
				EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
				EMIT_COND_BRANCH (ins, X86_CC_GT, FALSE);
				break;
			}
			if (ins->opcode == OP_FBLT_UN) {
				guchar *is_not_zero_check, *end_jump;
				is_not_zero_check = code;
				x86_branch8 (code, X86_CC_NZ, 0, TRUE);
				end_jump = code;
				x86_jump8 (code, 0);
				x86_patch (is_not_zero_check, code);
				x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);

				x86_patch (end_jump, code);
			}
			EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
			break;
		case OP_FBGT:
		case OP_FBGT_UN:
			if (cfg->opt & MONO_OPT_FCMOV) {
				if (ins->opcode == OP_FBGT) {
					guchar *br1;

					/* skip branch if C1=1 */
					br1 = code;
					x86_branch8 (code, X86_CC_P, 0, FALSE);
					/* branch if (C0 | C3) = 1 */
					EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
					x86_patch (br1, code);
				} else {
					EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
				}
				break;
			}
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
			if (ins->opcode == OP_FBGT_UN) {
				guchar *is_not_zero_check, *end_jump;
				is_not_zero_check = code;
				x86_branch8 (code, X86_CC_NZ, 0, TRUE);
				end_jump = code;
				x86_jump8 (code, 0);
				x86_patch (is_not_zero_check, code);
				x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);

				x86_patch (end_jump, code);
			}
			EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
			break;
		case OP_FBGE:
			/* Branch if C013 == 100 or 001 */
			if (cfg->opt & MONO_OPT_FCMOV) {
				guchar *br1;

				/* skip branch if C1=1 */
				br1 = code;
				x86_branch8 (code, X86_CC_P, 0, FALSE);
				/* branch if (C0 | C3) = 1 */
				EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
				x86_patch (br1, code);
				break;
			}
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
			EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
			EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
			break;
		case OP_FBGE_UN:
			/* Branch if C013 == 000 */
			if (cfg->opt & MONO_OPT_FCMOV) {
				EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
				break;
			}
			EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
			break;
		case OP_FBLE:
			/* Branch if C013=000 or 100 */
			if (cfg->opt & MONO_OPT_FCMOV) {
				guchar *br1;

				/* skip branch if C1=1 */
				br1 = code;
				x86_branch8 (code, X86_CC_P, 0, FALSE);
				/* branch if C0=0 */
				EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
				x86_patch (br1, code);
				break;
			}
			x86_alu_reg_imm (code, X86_AND, X86_EAX, (X86_FP_C0|X86_FP_C1));
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
			EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
			break;
		case OP_FBLE_UN:
			/* Branch if C013 != 001 */
			if (cfg->opt & MONO_OPT_FCMOV) {
				EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
				EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
				break;
			}
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
			EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
			break;
		case OP_CKFINITE: {
			guchar *br1;

			x86_push_reg (code, X86_EAX);
			x86_fxam (code);
			x86_fnstsw (code);
			x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4100);
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
			x86_pop_reg (code, X86_EAX);

			/* Have to clean up the fp stack before throwing the exception */
			br1 = code;
			x86_branch8 (code, X86_CC_NE, 0, FALSE);

			x86_fstp (code, 0);
			EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");

			x86_patch (br1, code);
			break;
		}
		case OP_TLS_GET:
			code = mono_x86_emit_tls_get (code, ins->dreg, ins->inst_offset);
			break;
		case OP_MEMORY_BARRIER: {
			/* Not needed on x86 */
			break;
		}
		case OP_ATOMIC_ADD_I4: {
			int dreg = ins->dreg;

			if (dreg == ins->inst_basereg) {
				x86_push_reg (code, ins->sreg2);
				dreg = ins->sreg2;
			}

			if (dreg != ins->sreg2)
				x86_mov_reg_reg (code, ins->dreg, ins->sreg2, 4);

			x86_prefix (code, X86_LOCK_PREFIX);
			x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);

			if (dreg != ins->dreg) {
				x86_mov_reg_reg (code, ins->dreg, dreg, 4);
				x86_pop_reg (code, dreg);
			}
			break;
		}
		case OP_ATOMIC_ADD_NEW_I4: {
			int dreg = ins->dreg;

			/* hack: limit in regalloc, dreg != sreg1 && dreg != sreg2 */
			if (ins->sreg2 == dreg) {
				if (dreg == X86_EBX) {
					dreg = X86_EDI;
					if (ins->inst_basereg == X86_EDI)
						dreg = X86_ESI;
				} else {
					dreg = X86_EBX;
					if (ins->inst_basereg == X86_EBX)
						dreg = X86_EDI;
				}
			} else if (ins->inst_basereg == dreg) {
				if (dreg == X86_EBX) {
					dreg = X86_EDI;
					if (ins->sreg2 == X86_EDI)
						dreg = X86_ESI;
				} else {
					dreg = X86_EBX;
					if (ins->sreg2 == X86_EBX)
						dreg = X86_EDI;
				}
			}

			if (dreg != ins->dreg) {
				x86_push_reg (code, dreg);
			}

			x86_mov_reg_reg (code, dreg, ins->sreg2, 4);
			x86_prefix (code, X86_LOCK_PREFIX);
			x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
			/* dreg contains the old value, add with sreg2 value */
			x86_alu_reg_reg (code, X86_ADD, dreg, ins->sreg2);

			if (ins->dreg != dreg) {
				x86_mov_reg_reg (code, ins->dreg, dreg, 4);
				x86_pop_reg (code, dreg);
			}
			break;
		}
		case OP_ATOMIC_EXCHANGE_I4: {
			guchar *br [2];
			int sreg2 = ins->sreg2;
			int breg = ins->inst_basereg;

			/* cmpxchg uses eax as comparand, need to make sure we can use it:
			 * hack to overcome limits in x86 reg allocator
			 * (req: dreg == eax and sreg2 != eax and breg != eax)
			 */
			g_assert (ins->dreg == X86_EAX);

			/* We need the EAX reg for the cmpxchg */
			if (ins->sreg2 == X86_EAX) {
				sreg2 = (breg == X86_EDX) ? X86_EBX : X86_EDX;
				x86_push_reg (code, sreg2);
				x86_mov_reg_reg (code, sreg2, X86_EAX, 4);
			}

			if (breg == X86_EAX) {
				breg = (sreg2 == X86_ESI) ? X86_EDI : X86_ESI;
				x86_push_reg (code, breg);
				x86_mov_reg_reg (code, breg, X86_EAX, 4);
			}

			x86_mov_reg_membase (code, X86_EAX, breg, ins->inst_offset, 4);

			br [0] = code; x86_prefix (code, X86_LOCK_PREFIX);
			x86_cmpxchg_membase_reg (code, breg, ins->inst_offset, sreg2);
			br [1] = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
			x86_patch (br [1], br [0]);

			if (breg != ins->inst_basereg)
				x86_pop_reg (code, breg);

			if (ins->sreg2 != sreg2)
				x86_pop_reg (code, sreg2);
			break;
		}
		case OP_ATOMIC_CAS_I4: {
			g_assert (ins->sreg3 == X86_EAX);
			g_assert (ins->sreg1 != X86_EAX);
			g_assert (ins->sreg1 != ins->sreg2);

			x86_prefix (code, X86_LOCK_PREFIX);
			x86_cmpxchg_membase_reg (code, ins->sreg1, ins->inst_offset, ins->sreg2);

			if (ins->dreg != X86_EAX)
				x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
			break;
		}
#ifdef MONO_ARCH_SIMD_INTRINSICS
		case OP_ADDPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
			break;
		case OP_DIVPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2);
			break;
		case OP_MULPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2);
			break;
		case OP_SUBPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2);
			break;
		case OP_MAXPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2);
			break;
		case OP_MINPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2);
			break;
		case OP_COMPPS:
			g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
			x86_sse_alu_ps_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0);
			break;
		case OP_ANDPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2);
			break;
		case OP_ANDNPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2);
			break;
		case OP_ORPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2);
			break;
		case OP_XORPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
			break;
		case OP_SQRTPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1);
			break;
		case OP_RSQRTPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_RSQRT, ins->dreg, ins->sreg1);
			break;
		case OP_RCPPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_RCP, ins->dreg, ins->sreg1);
			break;
		case OP_ADDSUBPS:
			x86_sse_alu_sd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
			break;
		case OP_HADDPS:
			x86_sse_alu_sd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2);
			break;
		case OP_HSUBPS:
			x86_sse_alu_sd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2);
			break;
		case OP_DUPPS_HIGH:
			x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSHDUP, ins->dreg, ins->sreg1);
			break;
		case OP_DUPPS_LOW:
			x86_sse_alu_ss_reg_reg (code, X86_SSE_MOVSLDUP, ins->dreg, ins->sreg1);
			break;
		case OP_PSHUFLEW_HIGH:
			g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
			x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 1);
			break;
		case OP_PSHUFLEW_LOW:
			g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
			x86_pshufw_reg_reg (code, ins->dreg, ins->sreg1, ins->inst_c0, 0);
			break;
		case OP_PSHUFLED:
			g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->sreg1, ins->inst_c0);
			break;
		case OP_ADDPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
			break;
		case OP_DIVPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_DIV, ins->sreg1, ins->sreg2);
			break;
		case OP_MULPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_MUL, ins->sreg1, ins->sreg2);
			break;
		case OP_SUBPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_SUB, ins->sreg1, ins->sreg2);
			break;
		case OP_MAXPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_MAX, ins->sreg1, ins->sreg2);
			break;
		case OP_MINPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_MIN, ins->sreg1, ins->sreg2);
			break;
		case OP_COMPPD:
			g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_COMP, ins->sreg1, ins->sreg2, ins->inst_c0);
			break;
		case OP_ANDPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_AND, ins->sreg1, ins->sreg2);
			break;
		case OP_ANDNPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_ANDN, ins->sreg1, ins->sreg2);
			break;
		case OP_ORPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_OR, ins->sreg1, ins->sreg2);
			break;
		case OP_XORPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
			break;
		case OP_SQRTPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1);
			break;
		case OP_ADDSUBPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
			break;
		case OP_HADDPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_HADD, ins->sreg1, ins->sreg2);
			break;
		case OP_HSUBPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_HSUB, ins->sreg1, ins->sreg2);
			break;
		case OP_DUPPD:
			x86_sse_alu_sd_reg_reg (code, X86_SSE_MOVDDUP, ins->dreg, ins->sreg1);
			break;
		case OP_EXTRACT_MASK:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMOVMSKB, ins->dreg, ins->sreg1);
			break;
		case OP_PAND:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PAND, ins->sreg1, ins->sreg2);
			break;
		case OP_POR:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_POR, ins->sreg1, ins->sreg2);
			break;
		case OP_PXOR:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDB:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDB, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDW, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDD, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBB:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBB, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBW, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBD, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBQ, ins->sreg1, ins->sreg2);
			break;
		case OP_PMAXB_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXUB, ins->sreg1, ins->sreg2);
			break;
		case OP_PMAXW_UN:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUW, ins->sreg1, ins->sreg2);
			break;
		case OP_PMAXD_UN:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXUD, ins->sreg1, ins->sreg2);
			break;
		case OP_PMAXB:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSB, ins->sreg1, ins->sreg2);
			break;
		case OP_PMAXW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMAXSW, ins->sreg1, ins->sreg2);
			break;
		case OP_PMAXD:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMAXSD, ins->sreg1, ins->sreg2);
			break;
		case OP_PAVGB_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGB, ins->sreg1, ins->sreg2);
			break;
		case OP_PAVGW_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PAVGW, ins->sreg1, ins->sreg2);
			break;
		case OP_PMINB_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINUB, ins->sreg1, ins->sreg2);
			break;
		case OP_PMINW_UN:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUW, ins->sreg1, ins->sreg2);
			break;
		case OP_PMIND_UN:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINUD, ins->sreg1, ins->sreg2);
			break;
		case OP_PMINB:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSB, ins->sreg1, ins->sreg2);
			break;
		case OP_PMINW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMINSW, ins->sreg1, ins->sreg2);
			break;
		case OP_PMIND:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMINSD, ins->sreg1, ins->sreg2);
			break;
		case OP_PCMPEQB:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQB, ins->sreg1, ins->sreg2);
			break;
		case OP_PCMPEQW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQW, ins->sreg1, ins->sreg2);
			break;
		case OP_PCMPEQD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPEQD, ins->sreg1, ins->sreg2);
			break;
		case OP_PCMPEQQ:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPEQQ, ins->sreg1, ins->sreg2);
			break;
		case OP_PCMPGTB:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTB, ins->sreg1, ins->sreg2);
			break;
		case OP_PCMPGTW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTW, ins->sreg1, ins->sreg2);
			break;
		case OP_PCMPGTD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PCMPGTD, ins->sreg1, ins->sreg2);
			break;
		case OP_PCMPGTQ:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PCMPGTQ, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUM_ABS_DIFF:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSADBW, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWB:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLBW, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLWD, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKLQDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_LOWPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKL, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHB:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHBW, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHWD, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PUNPCKHQDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHPS:
			x86_sse_alu_ps_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
			break;
		case OP_UNPACK_HIGHPD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_UNPCKH, ins->sreg1, ins->sreg2);
			break;
		case OP_PACKW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSWB, ins->sreg1, ins->sreg2);
			break;
		case OP_PACKD:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKSSDW, ins->sreg1, ins->sreg2);
			break;
		case OP_PACKW_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PACKUSWB, ins->sreg1, ins->sreg2);
			break;
		case OP_PACKD_UN:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PACKUSDW, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDB_SAT_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSB, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBB_SAT_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSB, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDW_SAT_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDUSW, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBW_SAT_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBUSW, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDB_SAT:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSB, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBB_SAT:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSB, ins->sreg1, ins->sreg2);
			break;
		case OP_PADDW_SAT:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PADDSW, ins->sreg1, ins->sreg2);
			break;
		case OP_PSUBW_SAT:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PSUBSW, ins->sreg1, ins->sreg2);
			break;
		case OP_PMULW:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULLW, ins->sreg1, ins->sreg2);
			break;
		case OP_PMULD:
			x86_sse_alu_sse41_reg_reg (code, X86_SSE_PMULLD, ins->sreg1, ins->sreg2);
			break;
		case OP_PMULQ:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULUDQ, ins->sreg1, ins->sreg2);
			break;
		case OP_PMULW_HIGH_UN:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHUW, ins->sreg1, ins->sreg2);
			break;
		case OP_PMULW_HIGH:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PMULHW, ins->sreg1, ins->sreg2);
			break;
		case OP_PSHRW:
			x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHR, ins->dreg, ins->inst_imm);
			break;
		case OP_PSHRW_REG:
			x86_sse_shift_reg_reg (code, X86_SSE_PSRLW_REG, ins->dreg, ins->sreg2);
			break;
		case OP_PSARW:
			x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SAR, ins->dreg, ins->inst_imm);
			break;
		case OP_PSARW_REG:
			x86_sse_shift_reg_reg (code, X86_SSE_PSRAW_REG, ins->dreg, ins->sreg2);
			break;
		case OP_PSHLW:
			x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTW, X86_SSE_SHL, ins->dreg, ins->inst_imm);
			break;
		case OP_PSHLW_REG:
			x86_sse_shift_reg_reg (code, X86_SSE_PSLLW_REG, ins->dreg, ins->sreg2);
			break;
		case OP_PSHRD:
			x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHR, ins->dreg, ins->inst_imm);
			break;
		case OP_PSHRD_REG:
			x86_sse_shift_reg_reg (code, X86_SSE_PSRLD_REG, ins->dreg, ins->sreg2);
			break;
		case OP_PSARD:
			x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SAR, ins->dreg, ins->inst_imm);
			break;
		case OP_PSARD_REG:
			x86_sse_shift_reg_reg (code, X86_SSE_PSRAD_REG, ins->dreg, ins->sreg2);
			break;
		case OP_PSHLD:
			x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHL, ins->dreg, ins->inst_imm);
			break;
		case OP_PSHLD_REG:
			x86_sse_shift_reg_reg (code, X86_SSE_PSLLD_REG, ins->dreg, ins->sreg2);
			break;
		case OP_PSHRQ:
			x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHR, ins->dreg, ins->inst_imm);
			break;
		case OP_PSHRQ_REG:
			x86_sse_shift_reg_reg (code, X86_SSE_PSRLQ_REG, ins->dreg, ins->sreg2);
			break;
		case OP_PSHLQ:
			x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTQ, X86_SSE_SHL, ins->dreg, ins->inst_imm);
			break;
		case OP_PSHLQ_REG:
			x86_sse_shift_reg_reg (code, X86_SSE_PSLLQ_REG, ins->dreg, ins->sreg2);
			break;
		case OP_ICONV_TO_X:
			x86_movd_xreg_reg (code, ins->dreg, ins->sreg1);
			break;
		case OP_EXTRACT_I4:
			x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
			break;
		case OP_EXTRACT_I1:
		case OP_EXTRACT_U1:
			x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
			if (ins->inst_c0)
				x86_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8);
			x86_widen_reg (code, ins->dreg, ins->dreg, ins->opcode == OP_EXTRACT_I1, FALSE);
			break;
		case OP_EXTRACT_I2:
		case OP_EXTRACT_U2:
			x86_movd_reg_xreg (code, ins->dreg, ins->sreg1);
			if (ins->inst_c0)
				x86_shift_reg_imm (code, X86_SHR, ins->dreg, 16);
			x86_widen_reg (code, ins->dreg, ins->dreg, ins->opcode == OP_EXTRACT_I2, TRUE);
			break;
		case OP_EXTRACT_R8:
			if (ins->inst_c0)
				x86_sse_alu_pd_membase_reg (code, X86_SSE_MOVHPD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1);
			else
				x86_sse_alu_sd_membase_reg (code, X86_SSE_MOVSD_MEMBASE_REG, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1);
			x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE);
			break;
		case OP_INSERT_I2:
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->sreg1, ins->sreg2, ins->inst_c0);
			break;
		case OP_EXTRACTX_U2:
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PEXTRW, ins->dreg, ins->sreg1, ins->inst_c0);
			break;
		case OP_INSERTX_U1_SLOW:
			/* sreg1 is the extracted ireg (scratch),
			 * sreg2 is the ireg to be inserted (scratch),
			 * dreg is the xreg that receives the value */

			/* clear the bits from the extracted word */
			x86_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
			/* shift the value to insert if needed */
			if (ins->inst_c0 & 1)
				x86_shift_reg_imm (code, X86_SHL, ins->sreg2, 8);
			/* join them together */
			x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
			break;
		case OP_INSERTX_I4_SLOW:
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2);
			x86_shift_reg_imm (code, X86_SHR, ins->sreg2, 16);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
			break;
		case OP_INSERTX_R4_SLOW:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
			/* TODO: if inst_c0 == 0 use movss */
			x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 0, ins->inst_c0 * 2);
			x86_sse_alu_pd_reg_membase_imm (code, X86_SSE_PINSRW, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset + 2, ins->inst_c0 * 2 + 1);
			break;
		case OP_INSERTX_R8_SLOW:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
			if (ins->inst_c0)
				x86_sse_alu_pd_reg_membase (code, X86_SSE_MOVHPD_REG_MEMBASE, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			else
				x86_sse_alu_pd_reg_membase (code, X86_SSE_MOVSD_REG_MEMBASE, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			break;
		case OP_STOREX_MEMBASE_REG:
		case OP_STOREX_MEMBASE:
			x86_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
			break;
		case OP_LOADX_MEMBASE:
			x86_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
			break;
		case OP_LOADX_ALIGNED_MEMBASE:
			x86_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
			break;
		case OP_STOREX_ALIGNED_MEMBASE_REG:
			x86_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
			break;
		case OP_STOREX_NTA_MEMBASE_REG:
			x86_sse_alu_reg_membase (code, X86_SSE_MOVNTPS, ins->dreg, ins->sreg1, ins->inst_offset);
			break;
		case OP_PREFETCH_MEMBASE:
			x86_sse_alu_reg_membase (code, X86_SSE_PREFETCH, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
			break;
		case OP_XMOVE:
			/*FIXME the peephole pass should have killed this*/
			if (ins->dreg != ins->sreg1)
				x86_movaps_reg_reg (code, ins->dreg, ins->sreg1);
			break;
		case OP_XZERO:
			x86_sse_alu_pd_reg_reg (code, X86_SSE_PXOR, ins->dreg, ins->dreg);
			break;
		case OP_ICONV_TO_R8_RAW:
			x86_mov_membase_reg (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, ins->sreg1, 4);
			x86_fld_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE);
			break;
		case OP_FCONV_TO_R8_X:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
			x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			break;
		case OP_XCONV_R8_TO_I4:
			x86_cvttsd2si (code, ins->dreg, ins->sreg1);
			switch (ins->backend.source_opcode) {
			case OP_FCONV_TO_I1:
				x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
				break;
			case OP_FCONV_TO_U1:
				x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
				break;
			case OP_FCONV_TO_I2:
				x86_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
				break;
			case OP_FCONV_TO_U2:
				x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
				break;
			}
			break;
		case OP_EXPAND_I1:
			/*FIXME this causes a partial register stall, maybe it would not be that bad to use shift + mask + or*/
			/*The +4 is to get a mov ?h, ?l over the same reg.*/
			x86_mov_reg_reg (code, ins->dreg + 4, ins->dreg, 1);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 0);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 1);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
			break;
		case OP_EXPAND_I2:
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 0);
			x86_sse_alu_pd_reg_reg_imm (code, X86_SSE_PINSRW, ins->dreg, ins->sreg1, 1);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
			break;
		case OP_EXPAND_I4:
			x86_movd_xreg_reg (code, ins->dreg, ins->sreg1);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
			break;
		case OP_EXPAND_R4:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, FALSE, TRUE);
			x86_movd_xreg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0);
			break;
		case OP_EXPAND_R8:
			x86_fst_membase (code, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset, TRUE, TRUE);
			x86_movsd_reg_membase (code, ins->dreg, ins->backend.spill_var->inst_basereg, ins->backend.spill_var->inst_offset);
			x86_sse_shift_reg_imm (code, X86_SSE_PSHUFD, ins->dreg, ins->dreg, 0x44);
			break;
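			/*
			 * Note on the 0x44 shuffle immediate above (illustrative):
			 * PSHUFD selects dwords with four 2-bit fields, and
			 * 0x44 = 01 00 01 00 picks dwords (0,1,0,1), i.e. it
			 * duplicates the low 8-byte double into both halves of the
			 * xmm register.
			 */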
#endif
		case OP_LIVERANGE_START: {
			if (cfg->verbose_level > 1)
				printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_start = code - cfg->native_code;
			break;
		}
		case OP_LIVERANGE_END: {
			if (cfg->verbose_level > 1)
				printf ("R%d END=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
			MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
			break;
		}
		default:
			g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
			g_assert_not_reached ();
		}

		if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
			g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
				   mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
			g_assert_not_reached ();
		}

		cpos += max_len;
	}

	cfg->code_len = code - cfg->native_code;
}

#endif /* DISABLE_JIT */
void
mono_arch_register_lowlevel_calls (void)
{
}
void
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
{
	MonoJumpInfo *patch_info;
	gboolean compile_aot = !run_cctors;

	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
		unsigned char *ip = patch_info->ip.i + code;
		const unsigned char *target;

		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);

		if (compile_aot) {
			switch (patch_info->type) {
			case MONO_PATCH_INFO_BB:
			case MONO_PATCH_INFO_LABEL:
				break;
			default:
				/* No need to patch these */
				continue;
			}
		}

		switch (patch_info->type) {
		case MONO_PATCH_INFO_IP:
			*((gconstpointer *)(ip)) = target;
			break;
		case MONO_PATCH_INFO_CLASS_INIT: {
			guint8 *code = ip;
			/* Might already been changed to a nop */
			x86_call_code (code, 0);
			x86_patch (ip, target);
			break;
		}
		case MONO_PATCH_INFO_ABS:
		case MONO_PATCH_INFO_METHOD:
		case MONO_PATCH_INFO_METHOD_JUMP:
		case MONO_PATCH_INFO_INTERNAL_METHOD:
		case MONO_PATCH_INFO_BB:
		case MONO_PATCH_INFO_LABEL:
		case MONO_PATCH_INFO_RGCTX_FETCH:
		case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
		case MONO_PATCH_INFO_MONITOR_ENTER:
		case MONO_PATCH_INFO_MONITOR_EXIT:
			x86_patch (ip, target);
			break;
		case MONO_PATCH_INFO_NONE:
			break;
		default: {
			guint32 offset = mono_arch_get_patch_offset (ip);
			*((gconstpointer *)(ip + offset)) = target;
			break;
		}
		}
	}
}
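/*
 * Illustration of the patching above (an assumption about the helper, not
 * new behaviour): for a 5-byte "call rel32" at ip, x86_patch () rewrites the
 * four displacement bytes so that displacement = target - (ip + 5), since
 * rel32 is relative to the end of the instruction.
 */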
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoBasicBlock *bb;
	MonoMethodSignature *sig;
	MonoInst *inst;
	int alloc_size, pos, max_offset, i, cfa_offset;
	guint8 *code;
	gboolean need_stack_frame;

	cfg->code_size = MAX (cfg->header->code_size * 4, 10240);

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		cfg->code_size += 512;

	code = cfg->native_code = g_malloc (cfg->code_size);

	/* Offset between ESP and the CFA */
	cfa_offset = 0;

	// CFA = sp + 4
	cfa_offset = sizeof (gpointer);
	mono_emit_unwind_op_def_cfa (cfg, code, X86_ESP, sizeof (gpointer));
	// IP saved at CFA - 4
	/* There is no IP reg on x86 */
	mono_emit_unwind_op_offset (cfg, code, X86_NREG, -cfa_offset);

	need_stack_frame = needs_stack_frame (cfg);

	if (need_stack_frame) {
		x86_push_reg (code, X86_EBP);
		cfa_offset += sizeof (gpointer);
		mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
		mono_emit_unwind_op_offset (cfg, code, X86_EBP, - cfa_offset);
		x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
		mono_emit_unwind_op_def_cfa_reg (cfg, code, X86_EBP);
	}
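	/*
	 * Unwind bookkeeping above, illustrated (nothing extra is emitted): on
	 * entry CFA = ESP + 4 because the caller pushed the return address.
	 * After "push %ebp" the CFA is ESP + 8 with EBP saved at CFA - 8, and
	 * after "mov %esp, %ebp" the CFA can be expressed as EBP + 8, which
	 * stays valid while ESP keeps moving during the rest of the prolog.
	 */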
	alloc_size = cfg->stack_offset;
	pos = 0;

	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
		/* Might need to attach the thread to the JIT or change the domain for the callback */
		if (appdomain_tls_offset != -1 && lmf_tls_offset != -1) {
			guint8 *buf, *no_domain_branch;

			code = mono_x86_emit_tls_get (code, X86_EAX, appdomain_tls_offset);
			x86_alu_reg_imm (code, X86_CMP, X86_EAX, GPOINTER_TO_UINT (cfg->domain));
			no_domain_branch = code;
			x86_branch8 (code, X86_CC_NE, 0, 0);
			code = mono_x86_emit_tls_get (code, X86_EAX, lmf_tls_offset);
			x86_test_reg_reg (code, X86_EAX, X86_EAX);
			buf = code;
			x86_branch8 (code, X86_CC_NE, 0, 0);
			x86_patch (no_domain_branch, code);
			x86_push_imm (code, cfg->domain);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
			x86_patch (buf, code);
#ifdef TARGET_WIN32
			/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
			/* FIXME: Add a separate key for LMF to avoid this */
			x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
#endif
		} else {
			g_assert (!cfg->compile_aot);
			x86_push_imm (code, cfg->domain);
			code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
			x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
		}
	}

	if (method->save_lmf) {
		pos += sizeof (MonoLMF);

		if (cfg->compile_aot)
			cfg->disable_aot = TRUE;

		/* save the current IP */
		mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
		x86_push_imm_template (code);
		cfa_offset += sizeof (gpointer);

		/* save all caller saved regs */
		x86_push_reg (code, X86_EBP);
		cfa_offset += sizeof (gpointer);
		x86_push_reg (code, X86_ESI);
		cfa_offset += sizeof (gpointer);
		mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset);
		x86_push_reg (code, X86_EDI);
		cfa_offset += sizeof (gpointer);
		mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset);
		x86_push_reg (code, X86_EBX);
		cfa_offset += sizeof (gpointer);
		mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset);

		if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
			/*
			 * Optimized version which uses the mono_lmf TLS variable instead of indirection
			 * through the mono_lmf_addr TLS variable.
			 */
			/* %eax = previous_lmf */
			x86_prefix (code, X86_GS_PREFIX);
			x86_mov_reg_mem (code, X86_EAX, lmf_tls_offset, 4);
			/* skip esp + method_info + lmf */
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
			/* push previous_lmf */
			x86_push_reg (code, X86_EAX);
			/* new lmf = ESP */
			x86_prefix (code, X86_GS_PREFIX);
			x86_mov_mem_reg (code, lmf_tls_offset, X86_ESP, 4);
		} else {
			/* get the address of lmf for the current thread */
			/*
			 * This is performance critical so we try to use some tricks to make
			 * it fast.
			 */
			if (lmf_addr_tls_offset != -1) {
				/* Load lmf quicky using the GS register */
				code = mono_x86_emit_tls_get (code, X86_EAX, lmf_addr_tls_offset);
#ifdef TARGET_WIN32
				/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
				/* FIXME: Add a separate key for LMF to avoid this */
				x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
#endif
			} else {
				code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
			}

			/* Skip esp + method info */
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);

			/* push lmf */
			x86_push_reg (code, X86_EAX);
			/* push *lfm (previous_lmf) */
			x86_push_membase (code, X86_EAX, 0);
			/* *(lmf) = ESP */
			x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
		}
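		/*
		 * Shape of the LMF list maintained above (illustrative): each frame
		 * that needs one pushes a MonoLMF on its own stack and links it in,
		 *
		 *   *lmf_addr -> [this frame's MonoLMF] -> previous_lmf -> ...
		 *
		 * and the epilog unlinks it by storing previous_lmf back, so the
		 * runtime can always walk from the TLS slot to the newest managed
		 * frame.
		 */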
	} else {
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			x86_push_reg (code, X86_EBX);
			pos += 4;
			cfa_offset += sizeof (gpointer);
			mono_emit_unwind_op_offset (cfg, code, X86_EBX, - cfa_offset);
		}

		if (cfg->used_int_regs & (1 << X86_EDI)) {
			x86_push_reg (code, X86_EDI);
			pos += 4;
			cfa_offset += sizeof (gpointer);
			mono_emit_unwind_op_offset (cfg, code, X86_EDI, - cfa_offset);
		}

		if (cfg->used_int_regs & (1 << X86_ESI)) {
			x86_push_reg (code, X86_ESI);
			pos += 4;
			cfa_offset += sizeof (gpointer);
			mono_emit_unwind_op_offset (cfg, code, X86_ESI, - cfa_offset);
		}
	}

	alloc_size -= pos;

	/* the original alloc_size is already aligned: there is %ebp and retip pushed, so realign */
	if (mono_do_x86_stack_align && need_stack_frame) {
		int tot = alloc_size + pos + 4; /* ret ip */
		if (need_stack_frame)
			tot += 4; /* ebp */
		tot &= MONO_ARCH_FRAME_ALIGNMENT - 1;
		if (tot)
			alloc_size += MONO_ARCH_FRAME_ALIGNMENT - tot;
	}
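	/*
	 * Worked example (illustrative): with MONO_ARCH_FRAME_ALIGNMENT == 16,
	 * alloc_size = 20, pos = 8 and a stack frame, tot = 20 + 8 + 4 + 4 = 36
	 * and 36 & 15 = 4, so alloc_size grows by 12; the allocated frame plus
	 * saved regs, return ip and ebp then totals 48, a multiple of 16.
	 */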
	if (alloc_size) {
		/* See mono_emit_stack_alloc */
#if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
		guint32 remaining_size = alloc_size;
		/*FIXME handle unbounded code expansion, we should use a loop in case of more than X iterations*/
		guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 8; /*8 is the max size of x86_alu_reg_imm + x86_test_membase_reg*/
		guint32 offset = code - cfg->native_code;
		if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
			while (required_code_size >= (cfg->code_size - offset))
				cfg->code_size *= 2;
			cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
			code = cfg->native_code + offset;
			mono_jit_stats.code_reallocs++;
		}
		while (remaining_size >= 0x1000) {
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
			x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
			remaining_size -= 0x1000;
		}
		if (remaining_size)
			x86_alu_reg_imm (code, X86_SUB, X86_ESP, remaining_size);
#else
		x86_alu_reg_imm (code, X86_SUB, X86_ESP, alloc_size);
#endif

		g_assert (need_stack_frame);
	}
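	/*
	 * Why the page-sized steps above (illustrative): on targets where the
	 * stack grows via a guard page, a frame larger than 4 KB must touch
	 * every page in order ("sub $0x1000, %esp; test (%esp)"), otherwise the
	 * first access could land beyond the guard page and fault instead of
	 * growing the stack.
	 */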
	if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED ||
			cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE) {
		x86_alu_reg_imm (code, X86_AND, X86_ESP, -MONO_ARCH_FRAME_ALIGNMENT);
	}

#if DEBUG_STACK_ALIGNMENT
	/* check the stack is aligned */
	if (need_stack_frame && method->wrapper_type == MONO_WRAPPER_NONE) {
		x86_mov_reg_reg (code, X86_ECX, X86_ESP, 4);
		x86_alu_reg_imm (code, X86_AND, X86_ECX, MONO_ARCH_FRAME_ALIGNMENT - 1);
		x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
		x86_branch_disp (code, X86_CC_EQ, 3, FALSE);
		x86_breakpoint (code);
	}
#endif
	/* compute max_offset in order to use short forward jumps */
	max_offset = 0;
	if (cfg->opt & MONO_OPT_BRANCH) {
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			MonoInst *ins;
			bb->max_offset = max_offset;

			if (cfg->prof_options & MONO_PROFILE_COVERAGE)
				max_offset += 6;
			/* max alignment for loops */
			if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
				max_offset += LOOP_ALIGNMENT;

			MONO_BB_FOR_EACH_INS (bb, ins) {
				if (ins->opcode == OP_LABEL)
					ins->inst_c1 = max_offset;

				max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
			}
		}
	}
	/* store runtime generic context */
	if (cfg->rgctx_var) {
		g_assert (cfg->rgctx_var->opcode == OP_REGOFFSET && cfg->rgctx_var->inst_basereg == X86_EBP);

		x86_mov_membase_reg (code, X86_EBP, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 4);
	}

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);

	/* load arguments allocated to register from the stack */
	sig = mono_method_signature (method);
	pos = 0;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		inst = cfg->args [pos];
		if (inst->opcode == OP_REGVAR) {
			g_assert (need_stack_frame);
			x86_mov_reg_membase (code, inst->dreg, X86_EBP, inst->inst_offset, 4);
			if (cfg->verbose_level > 2)
				g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
		}
		pos++;
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);

	return code;
}
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig = mono_method_signature (method);
	int quad, pos;
	guint32 stack_to_pop;
	guint8 *code;
	int max_epilog_size = 16;
	CallInfo *cinfo;
	gboolean need_stack_frame = needs_stack_frame (cfg);

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	code = cfg->native_code + cfg->code_len;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);

	/* the code restoring the registers must be kept in sync with OP_JMP */
	pos = 0;

	if (method->save_lmf) {
		gint32 prev_lmf_reg;
		gint32 lmf_offset = -sizeof (MonoLMF);

		/* check if we need to restore protection of the stack after a stack overflow */
		if (mono_get_jit_tls_offset () != -1) {
			guint8 *patch;

			code = mono_x86_emit_tls_get (code, X86_ECX, mono_get_jit_tls_offset ());
			/* we load the value in a separate instruction: this mechanism may be
			 * used later as a safer way to do thread interruption
			 */
			x86_mov_reg_membase (code, X86_ECX, X86_ECX, G_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 4);
			x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
			patch = code;
			x86_branch8 (code, X86_CC_Z, 0, FALSE);
			/* note that the call trampoline will preserve eax/edx */
			x86_call_reg (code, X86_ECX);
			x86_patch (patch, code);
		}

		/* FIXME: maybe save the jit tls in the prolog */
		if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
			/*
			 * Optimized version which uses the mono_lmf TLS variable instead of indirection
			 * through the mono_lmf_addr TLS variable.
			 */
			/* reg = previous_lmf */
			x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);

			/* lmf = previous_lmf */
			x86_prefix (code, X86_GS_PREFIX);
			x86_mov_mem_reg (code, lmf_tls_offset, X86_ECX, 4);
		} else {
			/* Find a spare register */
			switch (mini_type_get_underlying_type (cfg->generic_sharing_context, sig->ret)->type) {
			case MONO_TYPE_I8:
			case MONO_TYPE_U8:
				prev_lmf_reg = X86_EDI;
				cfg->used_int_regs |= (1 << X86_EDI);
				break;
			default:
				prev_lmf_reg = X86_EDX;
				break;
			}

			/* reg = previous_lmf */
			x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);

			/* ecx = lmf */
			x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 4);

			/* *(lmf) = previous_lmf */
			x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
		}
		/* restore caller saved regs */
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			x86_mov_reg_membase (code, X86_EBX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), 4);
		}

		if (cfg->used_int_regs & (1 << X86_EDI)) {
			x86_mov_reg_membase (code, X86_EDI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), 4);
		}
		if (cfg->used_int_regs & (1 << X86_ESI)) {
			x86_mov_reg_membase (code, X86_ESI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), 4);
		}

		/* EBP is restored by LEAVE */
	} else {
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			pos -= 4;
		}
		if (cfg->used_int_regs & (1 << X86_EDI)) {
			pos -= 4;
		}
		if (cfg->used_int_regs & (1 << X86_ESI)) {
			pos -= 4;
		}

		if (pos) {
			g_assert (need_stack_frame);
			x86_lea_membase (code, X86_ESP, X86_EBP, pos);
		}

		if (cfg->used_int_regs & (1 << X86_ESI)) {
			x86_pop_reg (code, X86_ESI);
		}
		if (cfg->used_int_regs & (1 << X86_EDI)) {
			x86_pop_reg (code, X86_EDI);
		}
		if (cfg->used_int_regs & (1 << X86_EBX)) {
			x86_pop_reg (code, X86_EBX);
		}
	}
	/* Load returned vtypes into registers if needed */
	cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
	if (cinfo->ret.storage == ArgValuetypeInReg) {
		for (quad = 0; quad < 2; quad++) {
			switch (cinfo->ret.pair_storage [quad]) {
			case ArgInIReg:
				x86_mov_reg_membase (code, cinfo->ret.pair_regs [quad], cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), 4);
				break;
			case ArgOnFloatFpStack:
				x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), FALSE);
				break;
			case ArgOnDoubleFpStack:
				x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), TRUE);
				break;
			case ArgNone:
				break;
			default:
				g_assert_not_reached ();
			}
		}
	}
	if (need_stack_frame)
		x86_leave (code);

	if (CALLCONV_IS_STDCALL (sig)) {
		MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));

		stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
	} else if (MONO_TYPE_ISSTRUCT (mono_method_signature (cfg->method)->ret) && (cinfo->ret.storage == ArgOnStack))
		stack_to_pop = 4;
	else
		stack_to_pop = 0;

	if (stack_to_pop) {
		g_assert (need_stack_frame);
		x86_ret_imm (code, stack_to_pop);
	} else {
		x86_ret (code);
	}
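	/*
	 * Example (illustrative): for a stdcall method taking two ints the
	 * callee removes its own arguments, so stack_to_pop is 8 and the code
	 * above emits "ret $8" instead of a plain "ret".
	 */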
	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int nthrows, i;
	guint8 *code;
	MonoClass *exc_classes [16];
	guint8 *exc_throw_start [16], *exc_throw_end [16];
	guint32 code_size;
	int exc_count = 0;

	/* Compute needed space */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC)
			exc_count++;
	}

	/*
	 * make sure we have enough space for exceptions
	 * 16 is the size of two push_imm instructions and a call
	 */
	if (cfg->compile_aot)
		code_size = exc_count * 32;
	else
		code_size = exc_count * 16;

	while (cfg->code_len + code_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	code = cfg->native_code + cfg->code_len;
	nthrows = 0;
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			guint8 *buf, *buf2;
			guint32 throw_ip;

			x86_patch (patch_info->ip.i + cfg->native_code, code);

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);
			throw_ip = patch_info->ip.i;

			/* Find a throw sequence for the same exception class */
			for (i = 0; i < nthrows; ++i)
				if (exc_classes [i] == exc_class)
					break;
			if (i < nthrows) {
				x86_push_imm (code, (exc_throw_end [i] - cfg->native_code) - throw_ip);
				x86_jump_code (code, exc_throw_start [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
			} else {
				guint32 size;

				/* Compute size of code following the push <OFFSET> */
				size = 5 + 5;

				/*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/

				if ((code - cfg->native_code) - throw_ip < 126 - size) {
					/* Use the shorter form */
					buf = buf2 = code;
					x86_push_imm (code, 0);
				} else {
					buf = code;
					x86_push_imm (code, 0xf0f0f0f0);
					buf2 = code;
				}

				if (nthrows < 16) {
					exc_classes [nthrows] = exc_class;
					exc_throw_start [nthrows] = code;
				}

				x86_push_imm (code, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
				patch_info->data.name = "mono_arch_throw_corlib_exception";
				patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
				patch_info->ip.i = code - cfg->native_code;
				x86_call_code (code, 0);
				x86_push_imm (buf, (code - cfg->native_code) - throw_ip);
				while (buf < buf2)
					x86_padding (buf, 1);

				if (nthrows < 16) {
					exc_throw_end [nthrows] = code;
					nthrows++;
				}
			}
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}
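	/*
	 * Illustration of the sharing above (nothing extra is emitted): a throw
	 * sequence is "push <offset of throw site>; push <type token>; call
	 * mono_arch_throw_corlib_exception". Later throw sites of the same
	 * exception class only push their own offset and jump to the shared
	 * sequence, saving code space.
	 */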
	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
void
mono_arch_flush_icache (guint8 *code, gint size)
{
	/* not needed */
}

void
mono_arch_flush_register_windows (void)
{
}

gboolean
mono_arch_is_inst_imm (gint64 imm)
{
	return TRUE;
}

/*
 * Support for fast access to the thread-local lmf structure using the GS
 * segment register on NPTL + kernel 2.6.x.
 */

static gboolean tls_offset_inited = FALSE;
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
	if (!tls_offset_inited) {
		if (!getenv ("MONO_NO_TLS")) {
#ifdef TARGET_WIN32
			/*
			 * We need to init this multiple times, since when we are first called, the key might not
			 * be initialized yet.
			 */
			appdomain_tls_offset = mono_domain_get_tls_key ();
			lmf_tls_offset = mono_get_jit_tls_key ();

			/* Only 64 tls entries can be accessed using inline code */
			if (appdomain_tls_offset >= 64)
				appdomain_tls_offset = -1;
			if (lmf_tls_offset >= 64)
				lmf_tls_offset = -1;
#else
#if MONO_XEN_OPT
			optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
			tls_offset_inited = TRUE;
			appdomain_tls_offset = mono_domain_get_tls_offset ();
			lmf_tls_offset = mono_get_lmf_tls_offset ();
			lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
#endif
		}
	}
}

void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}
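/*
 * Sketch of the inline TLS access set up above (illustrative): on Windows
 * only the first 64 TlsAlloc () slots live directly in the TEB, so offsets
 * >= 64 are reset to -1 and take the slow path; on NPTL the offsets name
 * GS-relative slots that mono_x86_emit_tls_get () can read with a single
 * GS-prefixed move.
 */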
#ifdef MONO_ARCH_HAVE_IMT

// Linear handler, the bsearch head compare is shorter
//[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
//[1 + 1] x86_branch8(inst,cond,imm,is_signed)
//        x86_patch(ins,target)
//[1 + 5] x86_jump_mem(inst,mem)

#define CMP_SIZE 6
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 5
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
{
	int i, distance = 0;
	for (i = start; i < target; ++i)
		distance += imt_entries [i]->chunk_size;
	return distance;
}
/*
 * LOCKING: called with the domain lock held
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int i;
	int size = 0;
	guint8 *code, *start;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
			} else {
				if (fail_tramp) {
					item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + JUMP_IMM_SIZE * 2;
				} else {
					item->chunk_size += JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
					item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
#endif
				}
			}
		} else {
			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}
	if (fail_tramp)
		code = mono_method_alloc_generic_virtual_thunk (domain, size);
	else
		code = mono_domain_code_reserve (domain, size);
	start = code;
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->code_target = code;
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done)
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
				item->jmp_code = code;
				x86_branch8 (code, X86_CC_NE, 0, FALSE);
				if (item->has_target_code)
					x86_jump_code (code, item->value.target_code);
				else
					x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
			} else {
				if (fail_tramp) {
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
					item->jmp_code = code;
					x86_branch8 (code, X86_CC_NE, 0, FALSE);
					if (item->has_target_code)
						x86_jump_code (code, item->value.target_code);
					else
						x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
					x86_patch (item->jmp_code, code);
					x86_jump_code (code, fail_tramp);
					item->jmp_code = NULL;
				} else {
					/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
					x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
					item->jmp_code = code;
					x86_branch8 (code, X86_CC_NE, 0, FALSE);
#endif
					if (item->has_target_code)
						x86_jump_code (code, item->value.target_code);
					else
						x86_jump_mem (code, & (vtable->vtable [item->value.vtable_slot]));
#if ENABLE_WRONG_METHOD_CHECK
					x86_patch (item->jmp_code, code);
					x86_breakpoint (code);
					item->jmp_code = NULL;
#endif
				}
			}
		} else {
			x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->key);
			item->jmp_code = code;
			if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
				x86_branch8 (code, X86_CC_GE, 0, FALSE);
			else
				x86_branch32 (code, X86_CC_GE, 0, FALSE);
		}
	}
	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				x86_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	if (!fail_tramp)
		mono_stats.imt_thunks_size += code - start;
	g_assert (code - start <= size);

#if DEBUG_IMT
	{
		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
		mono_disassemble_code (NULL, (guint8*)start, code - start, buff);
		g_free (buff);
	}
#endif

	return start;
}
MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
	return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}
#endif

MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;
	int opcode = 0;

	if (cmethod->klass == mono_defaults.math_class) {
		if (strcmp (cmethod->name, "Sin") == 0) {
			opcode = OP_SIN;
		} else if (strcmp (cmethod->name, "Cos") == 0) {
			opcode = OP_COS;
		} else if (strcmp (cmethod->name, "Tan") == 0) {
			opcode = OP_TAN;
		} else if (strcmp (cmethod->name, "Atan") == 0) {
			opcode = OP_ATAN;
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
			opcode = OP_SQRT;
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
			opcode = OP_ABS;
		} else if (strcmp (cmethod->name, "Round") == 0 && fsig->param_count == 1 && fsig->params [0]->type == MONO_TYPE_R8) {
			opcode = OP_ROUND;
		}

		if (opcode) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_freg (cfg);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		if (cfg->opt & MONO_OPT_CMOV) {
			int opcode = 0;

			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMIN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
					opcode = OP_IMAX;
			}

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->type = STACK_I4;
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}

#if 0
		/* OP_FREM is not IEEE compatible */
		else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
			MONO_INST_NEW (cfg, ins, OP_FREM);
			ins->inst_i0 = args [0];
			ins->inst_i1 = args [1];
		}
#endif
	}

	return ins;
}
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}

MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	MonoInst* ins;

	if (appdomain_tls_offset == -1)
		return NULL;

	MONO_INST_NEW (cfg, ins, OP_TLS_GET);
	ins->inst_offset = appdomain_tls_offset;
	return ins;
}
guint32
mono_arch_get_patch_offset (guint8 *code)
{
	if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 0x2))
		return 2;
	else if ((code [0] == 0xba))
		return 1;
	else if ((code [0] == 0x68))
		/* push IMM */
		return 1;
	else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x6))
		/* push <OFFSET>(<REG>) */
		return 2;
	else if ((code [0] == 0xff) && (x86_modrm_reg (code [1]) == 0x2))
		/* call *<OFFSET>(<REG>) */
		return 2;
	else if ((code [0] == 0xdd) || (code [0] == 0xd9))
		/* fldl <ADDR> */
		return 2;
	else if ((code [0] == 0x58) && (code [1] == 0x05))
		/* pop %eax; add <OFFSET>, %eax */
		return 2;
	else if ((code [0] >= 0x58) && (code [0] <= 0x58 + X86_NREG) && (code [1] == 0x81))
		/* pop <REG>; add <OFFSET>, <REG> */
		return 3;
	else if ((code [0] >= 0xb8) && (code [0] < 0xb8 + 8))
		/* mov <REG>, imm */
		return 1;
	else {
		g_assert_not_reached ();
		return -1;
	}
}
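/*
 * Example (illustrative): "b8 44 33 22 11" is "mov $0x11223344, %eax", and
 * the function above returns 1 for it, i.e. the patchable immediate starts
 * one byte after the opcode; mono_arch_patch_code () then stores the new
 * target at that offset.
 */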
/**
 * mono_breakpoint_clean_code:
 *
 * Copy @size bytes from @code - @offset to the buffer @buf. If the debugger inserted software
 * breakpoints in the original code, they are removed in the copy.
 *
 * Returns TRUE if no sw breakpoint was present.
 */
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
	int i;
	gboolean can_write = TRUE;
	/*
	 * If method_start is non-NULL we need to perform bound checks, since we access memory
	 * at code - offset we could go before the start of the method and end up in a different
	 * page of memory that is not mapped or read incorrect data anyway. We zero-fill the bytes
	 * instead.
	 */
	if (!method_start || code - offset >= method_start) {
		memcpy (buf, code - offset, size);
	} else {
		int diff = code - method_start;
		memset (buf, 0, size);
		memcpy (buf + offset - diff, method_start, diff + size - offset);
	}
	code -= offset;
	for (i = 0; i < MONO_BREAKPOINT_ARRAY_SIZE; ++i) {
		int idx = mono_breakpoint_info_index [i];
		guint8 *ptr;
		if (idx < 1)
			continue;
		ptr = mono_breakpoint_info [idx].address;
		if (ptr >= code && ptr < code + size) {
			guint8 saved_byte = mono_breakpoint_info [idx].saved_byte;
			can_write = FALSE;
			/*g_print ("patching %p with 0x%02x (was: 0x%02x)\n", ptr, saved_byte, buf [ptr - code]);*/
			buf [ptr - code] = saved_byte;
		}
	}
	return can_write;
}
gpointer
mono_arch_get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
{
	guint8 buf [8];
	gint32 disp;
	guint8 reg = 0;

	mono_breakpoint_clean_code (NULL, code, 8, buf, sizeof (buf));
	code = buf + 8;

	*displacement = 0;

	code -= 6;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * come first.
	 * There are two types of calls:
	 * - direct calls: 0xff address_byte 8/32 bits displacement
	 * - indirect calls: nop nop nop <call>
	 * The nops make sure we don't confuse the instruction preceding an indirect
	 * call with a direct call.
	 */
	if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
		reg = code [4] & 0x07;
		disp = (signed char)code [5];
	} else if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
		reg = code [1] & 0x07;
		disp = *((gint32*)(code + 2));
	} else if ((code [1] == 0xe8)) {
		return NULL;
	} else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
		/*
		 * This is an interface call
		 * 8b 40 30   mov    0x30(%eax),%eax
		 * ff 10      call   *(%eax)
		 */
		disp = 0;
		reg = code [5] & 0x07;
	} else {
		g_assert_not_reached ();
		return NULL;
	}

	*displacement = disp;
	return (gpointer)regs [reg];
}
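/*
 * Worked example (illustrative): the bytes "ff 90 0c 00 00 00" encode
 * "call *0xc(%eax)" and match the second case above: mod == 2 (32-bit
 * displacement), reg = code [1] & 0x07 = EAX and disp = 0xc, so the vcall
 * slot is at regs [X86_EAX] + 0xc.
 */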
/*
 * mono_x86_get_this_arg_offset:
 *
 *   Return the offset of the stack location where this is passed during a virtual
 * call.
 */
guint32
mono_x86_get_this_arg_offset (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig)
{
	CallInfo *cinfo = NULL;
	int offset;

	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		cinfo = get_call_info (gsctx, NULL, sig, FALSE);

		offset = cinfo->args [0].offset;
	} else {
		offset = 0;
	}

	return offset;
}
gpointer
mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig,
		mgreg_t *regs, guint8 *code)
{
	guint32 esp = regs [X86_ESP];
	CallInfo *cinfo = NULL;
	gpointer res;
	int offset;

	/*
	 * Avoid expensive calls to get_generic_context_from_code () + get_call_info
	 * if possible.
	 */
	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		if (!gsctx && code)
			gsctx = mono_get_generic_context_from_code (code);
		cinfo = get_call_info (gsctx, NULL, sig, FALSE);

		offset = cinfo->args [0].offset;
	} else {
		offset = 0;
	}

	/*
	 * The stack looks like:
	 * <other args>
	 * <this=delegate>
	 * <possible vtype return address>
	 * <return addr>
	 * <4 pointers pushed by mono_arch_create_trampoline_code ()>
	 */
	res = (((MonoObject**)esp) [5 + (offset / 4)]);

	return res;
}
#define MAX_ARCH_DELEGATE_PARAMS 10

gpointer
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
{
	guint8 *code, *start;

	if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
		return NULL;

	/* FIXME: Support more cases */
	if (MONO_TYPE_ISSTRUCT (sig->ret))
		return NULL;

	/*
	 * The stack contains:
	 * <delegate>
	 * <return addr>
	 */

	if (has_target) {
		static guint8* cached = NULL;

		if (cached)
			return cached;

		start = code = mono_global_codeman_reserve (64);

		/* Replace the this argument with the target */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
		x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, target), 4);
		x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
		x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));

		g_assert ((code - start) < 64);

		mono_debug_add_delegate_trampoline (start, code - start);

		mono_memory_barrier ();

		cached = start;
	} else {
		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
		int i = 0;
		/* 8 for mov_reg and jump, plus 8 for each parameter */
		int code_reserve = 8 + (sig->param_count * 8);

		for (i = 0; i < sig->param_count; ++i)
			if (!mono_is_regsize_var (sig->params [i]))
				return NULL;

		code = cache [sig->param_count];
		if (code)
			return code;

		/*
		 * The stack contains:
		 * <args in reverse order>
		 * <delegate>
		 * <return addr>
		 *
		 * and we need:
		 * <args in reverse order>
		 * <return addr>
		 *
		 * without unbalancing the stack.
		 * So move each arg up a spot in the stack (overwriting un-needed 'this' arg)
		 * and leaving original spot of first arg as placeholder in stack so
		 * when callee pops stack everything works.
		 */

		start = code = mono_global_codeman_reserve (code_reserve);

		/* store delegate for access to method_ptr */
		x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);

		/* move args up */
		for (i = 0; i < sig->param_count; ++i) {
			x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
			x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
		}

		x86_jump_membase (code, X86_ECX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));

		g_assert ((code - start) < code_reserve);

		mono_debug_add_delegate_trampoline (start, code - start);

		mono_memory_barrier ();

		cache [sig->param_count] = start;
	}

	return start;
}
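/*
 * Worked example for the argument shuffle above (illustrative): for a
 * two-argument delegate the stack on entry is [ret][delegate][a0][a1].
 * The loop copies a0 into the delegate slot and a1 into a0's old slot,
 * giving [ret][a0][a1][a1], and the jump enters method_ptr with the stale
 * last slot acting as the placeholder the callee's own "ret" cleanup
 * expects.
 */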
gpointer
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	switch (reg) {
	case X86_EAX: return (gpointer)ctx->eax;
	case X86_EBX: return (gpointer)ctx->ebx;
	case X86_ECX: return (gpointer)ctx->ecx;
	case X86_EDX: return (gpointer)ctx->edx;
	case X86_ESP: return (gpointer)ctx->esp;
	case X86_EBP: return (gpointer)ctx->ebp;
	case X86_ESI: return (gpointer)ctx->esi;
	case X86_EDI: return (gpointer)ctx->edi;
	default: g_assert_not_reached ();
	}
	return NULL;
}
#ifdef MONO_ARCH_SIMD_INTRINSICS

static MonoInst*
get_float_to_x_spill_area (MonoCompile *cfg)
{
	if (!cfg->fconv_to_r8_x_var) {
		cfg->fconv_to_r8_x_var = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
		cfg->fconv_to_r8_x_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
	}
	return cfg->fconv_to_r8_x_var;
}
/*
 * Convert all fconv opts that MONO_OPT_SSE2 would get wrong.
 */
void
mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
{
	MonoInst *fconv;
	int dreg, src_opcode;

	if (!(cfg->opt & MONO_OPT_SSE2) || !(cfg->opt & MONO_OPT_SIMD) || COMPILE_LLVM (cfg))
		return;

	switch (src_opcode = ins->opcode) {
	case OP_FCONV_TO_I1:
	case OP_FCONV_TO_U1:
	case OP_FCONV_TO_I2:
	case OP_FCONV_TO_U2:
	case OP_FCONV_TO_I4:
		break;
	default:
		return;
	}

	/* dreg is the IREG and sreg1 is the FREG */
	MONO_INST_NEW (cfg, fconv, OP_FCONV_TO_R8_X);
	fconv->klass = NULL; /*FIXME, what can I use here as the Mono.Simd lib might not be loaded yet*/
	fconv->sreg1 = ins->sreg1;
	fconv->dreg = mono_alloc_ireg (cfg);
	fconv->type = STACK_VTYPE;
	fconv->backend.spill_var = get_float_to_x_spill_area (cfg);

	mono_bblock_insert_before_ins (cfg->cbb, ins, fconv);

	dreg = ins->dreg;
	NULLIFY_INS (ins);
	ins->opcode = OP_XCONV_R8_TO_I4;

	ins->klass = mono_defaults.int32_class;
	ins->sreg1 = fconv->dreg;
	ins->dreg = dreg;
	ins->type = STACK_I4;
	ins->backend.source_opcode = src_opcode;
}

#endif /* #ifdef MONO_ARCH_SIMD_INTRINSICS */
void
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
{
	MonoInst *ins;
	int vreg;

	if (long_ins->opcode == OP_LNEG) {
		ins = long_ins;
		MONO_EMIT_NEW_UNALU (cfg, OP_INEG, ins->dreg + 1, ins->sreg1 + 1);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
		MONO_EMIT_NEW_UNALU (cfg, OP_INEG, ins->dreg + 2, ins->dreg + 2);
		NULLIFY_INS (ins);
		return;
	}
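	/*
	 * Worked example for the OP_LNEG expansion above (illustrative):
	 * negating 1 (low = 1, high = 0): INEG of the low word gives
	 * 0xffffffff and sets the carry, ADC adds the carry into the high
	 * word (now 1), and the final INEG turns that into 0xffffffff,
	 * yielding the 64-bit pair for -1.
	 */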
#ifdef MONO_ARCH_SIMD_INTRINSICS

	if (!(cfg->opt & MONO_OPT_SIMD))
		return;

	/*TODO move this to simd-intrinsic.c once we support sse 4.1 dword extractors since we need the runtime caps info */
	switch (long_ins->opcode) {
	case OP_EXTRACT_I8:
		vreg = long_ins->sreg1;

		if (long_ins->inst_c0) {
			MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
			ins->klass = long_ins->klass;
			ins->sreg1 = long_ins->sreg1;
			ins->inst_c0 = 2;
			ins->type = STACK_VTYPE;
			ins->dreg = vreg = alloc_ireg (cfg);
			MONO_ADD_INS (cfg->cbb, ins);
		}

		MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
		ins->klass = mono_defaults.int32_class;
		ins->sreg1 = vreg;
		ins->type = STACK_I4;
		ins->dreg = long_ins->dreg + 1;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
		ins->klass = long_ins->klass;
		ins->sreg1 = long_ins->sreg1;
		ins->inst_c0 = long_ins->inst_c0 ? 3 : 1;
		ins->type = STACK_VTYPE;
		ins->dreg = vreg = alloc_ireg (cfg);
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_EXTRACT_I4);
		ins->klass = mono_defaults.int32_class;
		ins->sreg1 = vreg;
		ins->type = STACK_I4;
		ins->dreg = long_ins->dreg + 2;
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	case OP_INSERTX_I8_SLOW:
		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = long_ins->sreg2 + 1;
		ins->inst_c0 = long_ins->inst_c0 * 2;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = long_ins->sreg2 + 2;
		ins->inst_c0 = long_ins->inst_c0 * 2 + 1;
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	case OP_EXPAND_I8:
		MONO_INST_NEW (cfg, ins, OP_ICONV_TO_X);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->sreg1 + 1;
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_INSERTX_I4_SLOW);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->sreg2 = long_ins->sreg1 + 2;
		ins->inst_c0 = 1;
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_INST_NEW (cfg, ins, OP_PSHUFLED);
		ins->dreg = long_ins->dreg;
		ins->sreg1 = long_ins->dreg;
		ins->inst_c0 = 0x44; /*Magic number for swizzling (X,Y,X,Y)*/
		ins->klass = long_ins->klass;
		ins->type = STACK_VTYPE;
		MONO_ADD_INS (cfg->cbb, ins);

		long_ins->opcode = OP_NOP;
		break;
	}
#endif /* MONO_ARCH_SIMD_INTRINSICS */
}
/*MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD*/
gpointer
mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
{
	int offset;
	gpointer *sp, old_value;
	char *bp;
	const unsigned char *handler;

	/*Decode the first instruction to figure out where did we store the spvar*/
	/*
	 * Our jit MUST generate the following:
	 *   mov %esp, -?(%ebp)
	 * Which is encoded as: 0x89 mod_rm.
	 * mod_rm (esp, ebp, imm) which can be: (imm will never be zero)
	 *   mod (reg + imm8):  01 reg(esp): 100 rm(ebp): 101 -> 01100101 (0x65)
	 *   mod (reg + imm32): 10 reg(esp): 100 rm(ebp): 101 -> 10100101 (0xA5)
	 */
	handler = clause->handler_start;

	if (*handler != 0x89)
		return NULL;

	++handler;

	if (*handler == 0x65)
		offset = *(signed char*)(handler + 1);
	else if (*handler == 0xA5)
		offset = *(int*)(handler + 1);
	else
		return NULL;

	/*Load the spvar*/
	bp = MONO_CONTEXT_GET_BP (ctx);
	sp = *(gpointer*)(bp + offset);

	old_value = *sp;
	if (old_value < ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
		return old_value;

	*sp = new_value;

	return old_value;
}
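/*
 * Example of the decode above (illustrative): if the handler starts with
 * "89 65 f0", the mod_rm byte 0x65 is mod=01 reg=100(esp) rm=101(ebp), so
 * the instruction is "mov %esp, -16(%ebp)" and offset = (signed char)0xf0
 * = -16.
 */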
#ifdef __APPLE__
#define DBG_SIGNAL SIGBUS
#else
#define DBG_SIGNAL SIGSEGV
#endif
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED

/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;

	/*
	 * In production, we will use int3 (has to fix the size in the md
	 * file). But that could confuse gdb, so during development, we emit a SIGSEGV
	 * instead.
	 */
	g_assert (code [0] == 0x90);
	x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)bp_trigger_page);
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	for (i = 0; i < 6; ++i)
		x86_nop (code);
}
/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
}

/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
}
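/*
 * How the trigger page works (illustrative): code emitted for sequence
 * points performs a harmless read from ss_trigger_page. Revoking access
 * above makes every sequence point fault, and the signal handler reports
 * the fault as a single-step event; restoring MONO_MMAP_READ turns the
 * reads back into no-ops.
 */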
/*
 * mono_arch_is_single_step_event:
 *
 *   Return whenever the machine state in SIGCTX corresponds to a single
 * step event.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
#ifdef TARGET_WIN32
	EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;	/* Sometimes the address is off by 4 */

	if ((einfo->ExceptionInformation [1] >= ss_trigger_page && (guint8*)einfo->ExceptionInformation [1] <= (guint8*)ss_trigger_page + 128))
		return TRUE;
	else
		return FALSE;
#else
	siginfo_t* sinfo = (siginfo_t*) info;
	/* Sometimes the address is off by 4 */
	if (sinfo->si_signo == DBG_SIGNAL && (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128))
		return TRUE;
	else
		return FALSE;
#endif
}

gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
#ifdef TARGET_WIN32
	EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;	/* Sometimes the address is off by 4 */

	if ((einfo->ExceptionInformation [1] >= bp_trigger_page && (guint8*)einfo->ExceptionInformation [1] <= (guint8*)bp_trigger_page + 128))
		return TRUE;
	else
		return FALSE;
#else
	siginfo_t* sinfo = (siginfo_t*)info;
	/* Sometimes the address is off by 4 */
	if (sinfo->si_signo == DBG_SIGNAL && (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128))
		return TRUE;
	else
		return FALSE;
#endif
}
5973 * See mini-amd64.c for docs.
5976 mono_arch_get_ip_for_breakpoint (MonoJitInfo
*ji
, MonoContext
*ctx
)
5978 guint8
*ip
= MONO_CONTEXT_GET_IP (ctx
);
5983 #define BREAKPOINT_SIZE 6
5986 * mono_arch_get_ip_for_single_step:
5988 * See mini-amd64.c for docs.
5991 mono_arch_get_ip_for_single_step (MonoJitInfo
*ji
, MonoContext
*ctx
)
5993 guint8
*ip
= MONO_CONTEXT_GET_IP (ctx
);
5995 /* Size of x86_alu_reg_imm */
6002 * mono_arch_skip_breakpoint:
6004 * See mini-amd64.c for docs.
6007 mono_arch_skip_breakpoint (MonoContext
*ctx
)
6009 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + BREAKPOINT_SIZE
);
6013 * mono_arch_skip_single_step:
6015 * See mini-amd64.c for docs.
6018 mono_arch_skip_single_step (MonoContext
*ctx
)
6020 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 6);
6024 * mono_arch_get_seq_point_info:
6026 * See mini-amd64.c for docs.
6029 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)