6 #include <mono/utils/mono-compiler.h>
12 #include "mini-runtime.h"
13 #include "llvmonly-runtime.h"
14 #include "mini-llvm.h"
15 #include "jit-icalls.h"
16 #include "aot-compiler.h"
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/class-abi-details.h>
19 #include <mono/utils/mono-utils-debug.h>
20 #include "mono/metadata/icall-signatures.h"
22 static const gboolean debug_tailcall_break_compile
= FALSE
; // break in method_to_ir
23 static const gboolean debug_tailcall_break_run
= FALSE
; // insert breakpoint in generated code
26 mono_call_to_patch (MonoCallInst
*call
)
28 MonoJumpInfoTarget patch
;
29 MonoJitICallId jit_icall_id
;
31 // This is similar to amd64 emit_call.
33 if (call
->inst
.flags
& MONO_INST_HAS_METHOD
) {
34 patch
.type
= MONO_PATCH_INFO_METHOD
;
35 patch
.target
= call
->method
;
36 } else if ((jit_icall_id
= call
->jit_icall_id
)) {
37 patch
.type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
38 patch
.target
= GUINT_TO_POINTER (jit_icall_id
);
40 patch
.type
= MONO_PATCH_INFO_ABS
;
41 patch
.target
= call
->fptr
;
47 mono_call_add_patch_info (MonoCompile
*cfg
, MonoCallInst
*call
, int ip
)
49 const MonoJumpInfoTarget patch
= mono_call_to_patch (call
);
50 mono_add_patch_info (cfg
, ip
, patch
.type
, patch
.target
);
54 mini_test_tailcall (MonoCompile
*cfg
, gboolean tailcall
)
56 // A lot of tests say "tailcall" throughout their verbose output.
57 // "tailcalllog" is more searchable.
59 // Do not change "tailcalllog" here without changing other places, e.g. tests that search for it.
61 g_assertf (tailcall
|| !mini_debug_options
.test_tailcall_require
, "tailcalllog fail from %s", cfg
->method
->name
);
62 mono_tailcall_print ("tailcalllog %s from %s\n", tailcall
? "success" : "fail", cfg
->method
->name
);
66 mini_emit_tailcall_parameters (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
68 // OP_TAILCALL_PARAMETER helps compute the size of code, in order
69 // to size branches around OP_TAILCALL_[REG,MEMBASE].
71 // The actual bytes are output from OP_TAILCALL_[REG,MEMBASE].
72 // OP_TAILCALL_PARAMETER is an overestimate because typically
73 // many parameters are in registers.
75 const int n
= sig
->param_count
+ (sig
->hasthis
? 1 : 0);
76 for (int i
= 0; i
< n
; ++i
) {
78 MONO_INST_NEW (cfg
, ins
, OP_TAILCALL_PARAMETER
);
79 MONO_ADD_INS (cfg
->cbb
, ins
);
85 ret_type_to_call_opcode (MonoCompile
*cfg
, MonoType
*type
, int calli
, int virt
)
88 type
= mini_get_underlying_type (type
);
91 return calli
? OP_VOIDCALL_REG
: virt
? OP_VOIDCALL_MEMBASE
: OP_VOIDCALL
;
98 return calli
? OP_CALL_REG
: virt
? OP_CALL_MEMBASE
: OP_CALL
;
102 case MONO_TYPE_FNPTR
:
103 return calli
? OP_CALL_REG
: virt
? OP_CALL_MEMBASE
: OP_CALL
;
104 case MONO_TYPE_CLASS
:
105 case MONO_TYPE_STRING
:
106 case MONO_TYPE_OBJECT
:
107 case MONO_TYPE_SZARRAY
:
108 case MONO_TYPE_ARRAY
:
109 return calli
? OP_CALL_REG
: virt
? OP_CALL_MEMBASE
: OP_CALL
;
112 return calli
? OP_LCALL_REG
: virt
? OP_LCALL_MEMBASE
: OP_LCALL
;
115 return calli
? OP_RCALL_REG
: virt
? OP_RCALL_MEMBASE
: OP_RCALL
;
117 return calli
? OP_FCALL_REG
: virt
? OP_FCALL_MEMBASE
: OP_FCALL
;
119 return calli
? OP_FCALL_REG
: virt
? OP_FCALL_MEMBASE
: OP_FCALL
;
120 case MONO_TYPE_VALUETYPE
:
121 if (m_class_is_enumtype (type
->data
.klass
)) {
122 type
= mono_class_enum_basetype_internal (type
->data
.klass
);
125 return calli
? OP_VCALL_REG
: virt
? OP_VCALL_MEMBASE
: OP_VCALL
;
126 case MONO_TYPE_TYPEDBYREF
:
127 return calli
? OP_VCALL_REG
: virt
? OP_VCALL_MEMBASE
: OP_VCALL
;
128 case MONO_TYPE_GENERICINST
:
129 type
= m_class_get_byval_arg (type
->data
.generic_class
->container_class
);
134 return calli
? OP_VCALL_REG
: virt
? OP_VCALL_MEMBASE
: OP_VCALL
;
136 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type
->type
);
142 mini_emit_call_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
,
143 MonoInst
**args
, gboolean calli
, gboolean virtual_
, gboolean tailcall
,
144 gboolean rgctx
, gboolean unbox_trampoline
, MonoMethod
*target
)
149 cfg
->has_calls
= TRUE
;
151 if (tailcall
&& cfg
->llvm_only
) {
152 // FIXME tailcall should not be changed this late.
153 // FIXME It really should not be changed due to llvm_only.
154 // Accuracy is presently available MONO_IS_TAILCALL_OPCODE (call).
156 mono_tailcall_print ("losing tailcall in %s due to llvm_only\n", cfg
->method
->name
);
157 mini_test_tailcall (cfg
, FALSE
);
160 if (tailcall
&& (debug_tailcall_break_compile
|| debug_tailcall_break_run
)
161 && mono_is_usermode_native_debugger_present ()) {
163 if (debug_tailcall_break_compile
)
166 if (tailcall
&& debug_tailcall_break_run
) { // Can change tailcall in debugger.
168 MONO_INST_NEW (cfg
, brk
, OP_BREAK
);
169 MONO_ADD_INS (cfg
->cbb
, brk
);
174 mini_profiler_emit_tail_call (cfg
, target
);
175 mini_emit_tailcall_parameters (cfg
, sig
);
176 MONO_INST_NEW_CALL (cfg
, call
, calli
? OP_TAILCALL_REG
: virtual_
? OP_TAILCALL_MEMBASE
: OP_TAILCALL
);
178 MONO_INST_NEW_CALL (cfg
, call
, ret_type_to_call_opcode (cfg
, sig
->ret
, calli
, virtual_
));
181 call
->signature
= sig
;
182 call
->rgctx_reg
= rgctx
;
183 sig_ret
= mini_get_underlying_type (sig
->ret
);
185 mini_type_to_eval_stack_type ((cfg
), sig_ret
, &call
->inst
);
188 if (mini_type_is_vtype (sig_ret
)) {
189 call
->vret_var
= cfg
->vret_addr
;
190 //g_assert_not_reached ();
192 } else if (mini_type_is_vtype (sig_ret
)) {
193 MonoInst
*temp
= mono_compile_create_var (cfg
, sig_ret
, OP_LOCAL
);
196 temp
->backend
.is_pinvoke
= sig
->pinvoke
;
199 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
200 * address of return value to increase optimization opportunities.
201 * Before vtype decomposition, the dreg of the call ins itself represents the
202 * fact the call modifies the return value. After decomposition, the call will
203 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
204 * will be transformed into an LDADDR.
206 MONO_INST_NEW (cfg
, loada
, OP_OUTARG_VTRETADDR
);
207 loada
->dreg
= alloc_preg (cfg
);
208 loada
->inst_p0
= temp
;
209 /* We reference the call too since call->dreg could change during optimization */
210 loada
->inst_p1
= call
;
211 MONO_ADD_INS (cfg
->cbb
, loada
);
213 call
->inst
.dreg
= temp
->dreg
;
215 call
->vret_var
= loada
;
216 } else if (!MONO_TYPE_IS_VOID (sig_ret
))
217 call
->inst
.dreg
= alloc_dreg (cfg
, (MonoStackType
)call
->inst
.type
);
219 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
220 if (COMPILE_SOFT_FLOAT (cfg
)) {
222 * If the call has a float argument, we would need to do an r8->r4 conversion using
223 * an icall, but that cannot be done during the call sequence since it would clobber
224 * the call registers + the stack. So we do it before emitting the call.
226 for (int i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
228 MonoInst
*in
= call
->args
[i
];
230 if (i
>= sig
->hasthis
)
231 t
= sig
->params
[i
- sig
->hasthis
];
233 t
= mono_get_int_type ();
234 t
= mono_type_get_underlying_type (t
);
236 if (!t
->byref
&& t
->type
== MONO_TYPE_R4
) {
241 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
243 /* The result will be in an int vreg */
244 call
->args
[i
] = conv
;
250 call
->need_unbox_trampoline
= unbox_trampoline
;
253 if (COMPILE_LLVM (cfg
))
254 mono_llvm_emit_call (cfg
, call
);
256 mono_arch_emit_call (cfg
, call
);
258 mono_arch_emit_call (cfg
, call
);
261 cfg
->param_area
= MAX (cfg
->param_area
, call
->stack_usage
);
262 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
268 mini_should_check_stack_pointer (MonoCompile
*cfg
)
270 // This logic is shared by mini_emit_calli_full and is_supported_tailcall,
271 // in order to compute tailcall_supported earlier. Alternatively it could be passed
272 // out from mini_emit_calli_full -- if it has not been copied around
273 // or decisions made based on it.
277 return cfg
->check_pinvoke_callconv
&&
278 cfg
->method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
&&
279 ((info
= mono_marshal_get_wrapper_info (cfg
->method
))) &&
280 info
->subtype
== WRAPPER_SUBTYPE_PINVOKE
;
284 set_rgctx_arg (MonoCompile
*cfg
, MonoCallInst
*call
, int rgctx_reg
, MonoInst
*rgctx_arg
)
286 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
287 cfg
->uses_rgctx_reg
= TRUE
;
288 call
->rgctx_reg
= TRUE
;
290 call
->rgctx_arg_reg
= rgctx_reg
;
294 /* Either METHOD or IMT_ARG needs to be set */
296 emit_imt_argument (MonoCompile
*cfg
, MonoCallInst
*call
, MonoMethod
*method
, MonoInst
*imt_arg
)
300 g_assert (method
|| imt_arg
);
302 if (COMPILE_LLVM (cfg
)) {
304 method_reg
= alloc_preg (cfg
);
305 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, method_reg
, imt_arg
->dreg
);
307 MonoInst
*ins
= mini_emit_runtime_constant (cfg
, MONO_PATCH_INFO_METHODCONST
, method
);
308 method_reg
= ins
->dreg
;
312 call
->imt_arg_reg
= method_reg
;
314 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, MONO_ARCH_IMT_REG
, FALSE
);
319 method_reg
= alloc_preg (cfg
);
320 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, method_reg
, imt_arg
->dreg
);
322 MonoInst
*ins
= mini_emit_runtime_constant (cfg
, MONO_PATCH_INFO_METHODCONST
, method
);
323 method_reg
= ins
->dreg
;
326 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, MONO_ARCH_IMT_REG
, FALSE
);
330 mini_emit_calli_full (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
,
331 MonoInst
*imt_arg
, MonoInst
*rgctx_arg
, gboolean tailcall
)
337 g_assert (!rgctx_arg
|| !imt_arg
);
340 rgctx_reg
= mono_alloc_preg (cfg
);
341 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
344 const gboolean check_sp
= mini_should_check_stack_pointer (cfg
);
346 // Checking stack pointer requires running code after a function call, prevents tailcall.
347 // Caller needs to have decided that earlier.
348 g_assert (!check_sp
|| !tailcall
);
351 if (!cfg
->stack_inbalance_var
)
352 cfg
->stack_inbalance_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
354 MONO_INST_NEW (cfg
, ins
, OP_GET_SP
);
355 ins
->dreg
= cfg
->stack_inbalance_var
->dreg
;
356 MONO_ADD_INS (cfg
->cbb
, ins
);
359 call
= mini_emit_call_args (cfg
, sig
, args
, TRUE
, FALSE
, tailcall
, rgctx_arg
? TRUE
: FALSE
, FALSE
, NULL
);
361 call
->inst
.sreg1
= addr
->dreg
;
364 emit_imt_argument (cfg
, call
, NULL
, imt_arg
);
366 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
371 sp_reg
= mono_alloc_preg (cfg
);
373 MONO_INST_NEW (cfg
, ins
, OP_GET_SP
);
375 MONO_ADD_INS (cfg
->cbb
, ins
);
377 /* Restore the stack so we don't crash when throwing the exception */
378 MONO_INST_NEW (cfg
, ins
, OP_SET_SP
);
379 ins
->sreg1
= cfg
->stack_inbalance_var
->dreg
;
380 MONO_ADD_INS (cfg
->cbb
, ins
);
382 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, cfg
->stack_inbalance_var
->dreg
, sp_reg
);
383 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ExecutionEngineException");
387 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
389 return (MonoInst
*)call
;
393 mini_emit_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
, MonoInst
*imt_arg
, MonoInst
*rgctx_arg
)
394 // Historical version without gboolean tailcall parameter.
396 return mini_emit_calli_full (cfg
, sig
, args
, addr
, imt_arg
, rgctx_arg
, FALSE
);
400 callvirt_to_call (int opcode
)
403 case OP_TAILCALL_MEMBASE
:
405 case OP_CALL_MEMBASE
:
407 case OP_VOIDCALL_MEMBASE
:
409 case OP_FCALL_MEMBASE
:
411 case OP_RCALL_MEMBASE
:
413 case OP_VCALL_MEMBASE
:
415 case OP_LCALL_MEMBASE
:
418 g_assert_not_reached ();
425 can_enter_interp (MonoCompile
*cfg
, MonoMethod
*method
, gboolean virtual_
)
427 if (method
->wrapper_type
)
430 if (m_class_get_image (method
->klass
) == m_class_get_image (cfg
->method
->klass
)) {
431 /* When using AOT profiling, the method might not be AOTed */
432 if (cfg
->compile_aot
&& mono_aot_can_enter_interp (method
))
434 /* Virtual calls from corlib can go outside corlib */
439 /* See needs_extra_arg () in mini-llvm.c */
440 if (method
->string_ctor
)
442 if (method
->klass
== mono_get_string_class () && (strstr (method
->name
, "memcpy") || strstr (method
->name
, "bzero")))
445 /* Assume all calls outside the assembly can enter the interpreter */
450 mini_emit_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
, gboolean tailcall
,
451 MonoInst
**args
, MonoInst
*this_ins
, MonoInst
*imt_arg
, MonoInst
*rgctx_arg
)
453 #ifndef DISABLE_REMOTING
454 gboolean might_be_remote
= FALSE
;
456 gboolean virtual_
= this_ins
!= NULL
;
460 gboolean need_unbox_trampoline
;
463 sig
= mono_method_signature_internal (method
);
466 rgctx_reg
= mono_alloc_preg (cfg
);
467 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
470 if (method
->string_ctor
) {
471 /* Create the real signature */
472 /* FIXME: Cache these */
473 MonoMethodSignature
*ctor_sig
= mono_metadata_signature_dup_mempool (cfg
->mempool
, sig
);
474 ctor_sig
->ret
= m_class_get_byval_arg (mono_defaults
.string_class
);
479 context_used
= mini_method_check_context_used (cfg
, method
);
481 #ifndef DISABLE_REMOTING
482 might_be_remote
= this_ins
&& sig
->hasthis
&&
483 (mono_class_is_marshalbyref (method
->klass
) || method
->klass
== mono_defaults
.object_class
) &&
484 !(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && (!MONO_CHECK_THIS (this_ins
) || context_used
);
486 if (might_be_remote
&& context_used
) {
489 g_assert (cfg
->gshared
);
491 addr
= mini_emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK
);
493 return mini_emit_calli (cfg
, sig
, args
, addr
, NULL
, NULL
);
497 if (cfg
->llvm_only
&& virtual_
&& (method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))
498 return mini_emit_llvmonly_virtual_call (cfg
, method
, sig
, 0, args
);
500 if (cfg
->llvm_only
&& cfg
->interp
&& !virtual_
&& !tailcall
&& can_enter_interp (cfg
, method
, FALSE
)) {
501 MonoInst
*ftndesc
= mini_emit_get_rgctx_method (cfg
, -1, method
, MONO_RGCTX_INFO_METHOD_FTNDESC
);
503 /* Need wrappers for this signature to be able to enter interpreter */
504 cfg
->interp_in_signatures
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->interp_in_signatures
, sig
);
506 /* This call might need to enter the interpreter so make it indirect */
507 return mini_emit_llvmonly_calli (cfg
, sig
, args
, ftndesc
);
510 need_unbox_trampoline
= method
->klass
== mono_defaults
.object_class
|| mono_class_is_interface (method
->klass
);
512 call
= mini_emit_call_args (cfg
, sig
, args
, FALSE
, virtual_
, tailcall
, rgctx_arg
? TRUE
: FALSE
, need_unbox_trampoline
, method
);
514 #ifndef DISABLE_REMOTING
515 if (might_be_remote
) {
517 call
->method
= mono_marshal_get_remoting_invoke_with_check (method
, error
);
518 mono_error_assert_ok (error
);
521 call
->method
= method
;
522 call
->inst
.flags
|= MONO_INST_HAS_METHOD
;
523 call
->inst
.inst_left
= this_ins
;
525 // FIXME This has already been read in amd64 parameter construction.
526 // Fixing it generates incorrect code. CEE_JMP needs attention.
527 call
->tailcall
= tailcall
;
530 int vtable_reg
, slot_reg
, this_reg
;
533 this_reg
= this_ins
->dreg
;
535 if (!cfg
->llvm_only
&& (m_class_get_parent (method
->klass
) == mono_defaults
.multicastdelegate_class
) && !strcmp (method
->name
, "Invoke")) {
538 MONO_EMIT_NULL_CHECK (cfg
, this_reg
, FALSE
);
540 /* Make a call to delegate->invoke_impl */
541 call
->inst
.inst_basereg
= this_reg
;
542 call
->inst
.inst_offset
= MONO_STRUCT_OFFSET (MonoDelegate
, invoke_impl
);
543 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
545 /* We must emit a dummy use here because the delegate trampoline will
546 replace the 'this' argument with the delegate target making this activation
547 no longer a root for the delegate.
548 This is an issue for delegates that target collectible code such as dynamic
549 methods of GC'able assemblies.
551 For a test case look into #667921.
553 FIXME: a dummy use is not the best way to do it as the local register allocator
554 will put it on a caller save register and spill it around the call.
555 Ideally, we would either put it on a callee save register or only do the store part.
557 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, args
[0]);
559 return (MonoInst
*)call
;
562 if ((!(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
563 (MONO_METHOD_IS_FINAL (method
) &&
564 method
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
)) &&
565 !(mono_class_is_marshalbyref (method
->klass
) && context_used
)) {
567 * the method is not virtual, we just need to ensure this is not null
568 * and then we can call the method directly.
570 #ifndef DISABLE_REMOTING
571 if (mono_class_is_marshalbyref (method
->klass
) || method
->klass
== mono_defaults
.object_class
) {
574 * The check above ensures method is not gshared, this is needed since
575 * gshared methods can't have wrappers.
577 method
= call
->method
= mono_marshal_get_remoting_invoke_with_check (method
, error
);
578 mono_error_assert_ok (error
);
583 } else if ((method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && MONO_METHOD_IS_FINAL (method
)) {
585 * the method is virtual, but we can statically dispatch since either
586 * it's class or the method itself are sealed.
587 * But first we need to ensure it's not a null reference.
593 if (!method
->string_ctor
)
594 MONO_EMIT_NEW_CHECK_THIS (cfg
, this_reg
);
597 if (!virtual_
&& cfg
->llvm_only
&& cfg
->interp
&& !tailcall
&& can_enter_interp (cfg
, method
, FALSE
)) {
598 MonoInst
*ftndesc
= mini_emit_get_rgctx_method (cfg
, -1, method
, MONO_RGCTX_INFO_METHOD_FTNDESC
);
600 /* Need wrappers for this signature to be able to enter interpreter */
601 cfg
->interp_in_signatures
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->interp_in_signatures
, sig
);
603 /* This call might need to enter the interpreter so make it indirect */
604 return mini_emit_llvmonly_calli (cfg
, sig
, args
, ftndesc
);
605 } else if (!virtual_
) {
606 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
608 vtable_reg
= alloc_preg (cfg
);
609 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, this_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
610 if (mono_class_is_interface (method
->klass
)) {
611 guint32 imt_slot
= mono_method_get_imt_slot (method
);
612 emit_imt_argument (cfg
, call
, call
->method
, imt_arg
);
613 slot_reg
= vtable_reg
;
614 offset
= ((gint32
)imt_slot
- MONO_IMT_SIZE
) * TARGET_SIZEOF_VOID_P
;
616 slot_reg
= vtable_reg
;
617 offset
= MONO_STRUCT_OFFSET (MonoVTable
, vtable
) +
618 ((mono_method_get_vtable_index (method
)) * (TARGET_SIZEOF_VOID_P
));
620 g_assert (mono_method_signature_internal (method
)->generic_param_count
);
621 emit_imt_argument (cfg
, call
, call
->method
, imt_arg
);
625 call
->inst
.sreg1
= slot_reg
;
626 call
->inst
.inst_offset
= offset
;
627 call
->is_virtual
= TRUE
;
631 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
634 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
636 return (MonoInst
*)call
;
640 mono_emit_method_call (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
**args
, MonoInst
*this_ins
)
642 return mini_emit_method_call_full (cfg
, method
, mono_method_signature_internal (method
), FALSE
, args
, this_ins
, NULL
, NULL
);
647 mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
,
654 call
= mini_emit_call_args (cfg
, sig
, args
, FALSE
, FALSE
, FALSE
, FALSE
, FALSE
, NULL
);
657 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
659 return (MonoInst
*)call
;
663 mono_emit_jit_icall_id (MonoCompile
*cfg
, MonoJitICallId jit_icall_id
, MonoInst
**args
)
665 MonoJitICallInfo
*info
= mono_find_jit_icall_info (jit_icall_id
);
667 MonoCallInst
*call
= (MonoCallInst
*)mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, args
);
669 call
->jit_icall_id
= jit_icall_id
;
671 return (MonoInst
*)call
;
675 * mini_emit_abs_call:
677 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
680 mini_emit_abs_call (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gconstpointer data
,
681 MonoMethodSignature
*sig
, MonoInst
**args
)
683 MonoJumpInfo
*ji
= mono_patch_info_new (cfg
->mempool
, 0, patch_type
, data
);
687 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
689 * FIXME: Is the abs_patches hashtable avoidable?
690 * Such as by putting the patch info in the call instruction?
692 if (cfg
->abs_patches
== NULL
)
693 cfg
->abs_patches
= g_hash_table_new (NULL
, NULL
);
694 g_hash_table_insert (cfg
->abs_patches
, ji
, ji
);
695 ins
= mono_emit_native_call (cfg
, ji
, sig
, args
);
696 ((MonoCallInst
*)ins
)->fptr_is_patch
= TRUE
;
701 mini_emit_llvmonly_virtual_call (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, int context_used
, MonoInst
**sp
)
703 static MonoMethodSignature
*helper_sig_llvmonly_imt_trampoline
= NULL
;
704 MonoInst
*icall_args
[16];
705 MonoInst
*call_target
, *ins
, *vtable_ins
;
706 int arg_reg
, this_reg
, vtable_reg
;
707 gboolean is_iface
= mono_class_is_interface (cmethod
->klass
);
708 gboolean is_gsharedvt
= cfg
->gsharedvt
&& mini_is_gsharedvt_variable_signature (fsig
);
709 gboolean variant_iface
= FALSE
;
712 gboolean special_array_interface
= m_class_is_array_special_interface (cmethod
->klass
);
714 if (cfg
->interp
&& can_enter_interp (cfg
, cmethod
, TRUE
))
715 /* Need wrappers for this signature to be able to enter interpreter */
716 cfg
->interp_in_signatures
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->interp_in_signatures
, fsig
);
719 * In llvm-only mode, vtables contain function descriptors instead of
720 * method addresses/trampolines.
722 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
, FALSE
);
725 slot
= mono_method_get_imt_slot (cmethod
);
727 slot
= mono_method_get_vtable_index (cmethod
);
729 this_reg
= sp
[0]->dreg
;
731 if (is_iface
&& mono_class_has_variant_generic_params (cmethod
->klass
))
732 variant_iface
= TRUE
;
734 if (!helper_sig_llvmonly_imt_trampoline
) {
735 MonoMethodSignature
*tmp
= mono_icall_sig_ptr_ptr_ptr
;
736 mono_memory_barrier ();
737 helper_sig_llvmonly_imt_trampoline
= tmp
;
740 if (!fsig
->generic_param_count
&& !is_iface
&& !is_gsharedvt
) {
742 * The simplest case, a normal virtual call.
744 int slot_reg
= alloc_preg (cfg
);
745 int addr_reg
= alloc_preg (cfg
);
746 int arg_reg
= alloc_preg (cfg
);
747 MonoBasicBlock
*non_null_bb
;
749 vtable_reg
= alloc_preg (cfg
);
750 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_ins
, OP_LOAD_MEMBASE
, vtable_reg
, this_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
751 offset
= MONO_STRUCT_OFFSET (MonoVTable
, vtable
) + (slot
* TARGET_SIZEOF_VOID_P
);
753 /* Load the vtable slot, which contains a function descriptor. */
754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, slot_reg
, vtable_reg
, offset
);
756 NEW_BBLOCK (cfg
, non_null_bb
);
758 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, slot_reg
, 0);
759 cfg
->cbb
->last_ins
->flags
|= MONO_INST_LIKELY
;
760 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, non_null_bb
);
763 // FIXME: Make the wrapper use the preserveall cconv
764 // FIXME: Use one icall per slot for small slot numbers ?
765 icall_args
[0] = vtable_ins
;
766 EMIT_NEW_ICONST (cfg
, icall_args
[1], slot
);
767 /* Make the icall return the vtable slot value to save some code space */
768 ins
= mono_emit_jit_icall (cfg
, mini_llvmonly_init_vtable_slot
, icall_args
);
769 ins
->dreg
= slot_reg
;
770 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, non_null_bb
);
773 MONO_START_BB (cfg
, non_null_bb
);
774 /* Load the address + arg from the vtable slot */
775 EMIT_NEW_LOAD_MEMBASE (cfg
, call_target
, OP_LOAD_MEMBASE
, addr_reg
, slot_reg
, 0);
776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, arg_reg
, slot_reg
, TARGET_SIZEOF_VOID_P
);
778 return mini_emit_extra_arg_calli (cfg
, fsig
, sp
, arg_reg
, call_target
);
781 if (!fsig
->generic_param_count
&& is_iface
&& !variant_iface
&& !is_gsharedvt
&& !special_array_interface
) {
783 * A simple interface call
785 * We make a call through an imt slot to obtain the function descriptor we need to call.
786 * The imt slot contains a function descriptor for a runtime function + arg.
788 int slot_reg
= alloc_preg (cfg
);
789 int addr_reg
= alloc_preg (cfg
);
790 int arg_reg
= alloc_preg (cfg
);
791 MonoInst
*thunk_addr_ins
, *thunk_arg_ins
, *ftndesc_ins
;
793 vtable_reg
= alloc_preg (cfg
);
794 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_ins
, OP_LOAD_MEMBASE
, vtable_reg
, this_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
795 offset
= ((gint32
)slot
- MONO_IMT_SIZE
) * TARGET_SIZEOF_VOID_P
;
798 * The slot is already initialized when the vtable is created so there is no need
802 /* Load the imt slot, which contains a function descriptor. */
803 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, slot_reg
, vtable_reg
, offset
);
805 /* Load the address + arg of the imt thunk from the imt slot */
806 EMIT_NEW_LOAD_MEMBASE (cfg
, thunk_addr_ins
, OP_LOAD_MEMBASE
, addr_reg
, slot_reg
, 0);
807 EMIT_NEW_LOAD_MEMBASE (cfg
, thunk_arg_ins
, OP_LOAD_MEMBASE
, arg_reg
, slot_reg
, TARGET_SIZEOF_VOID_P
);
809 * IMT thunks in llvm-only mode are C functions which take an info argument
810 * plus the imt method and return the ftndesc to call.
812 icall_args
[0] = thunk_arg_ins
;
813 icall_args
[1] = mini_emit_get_rgctx_method (cfg
, context_used
,
814 cmethod
, MONO_RGCTX_INFO_METHOD
);
815 ftndesc_ins
= mini_emit_calli (cfg
, helper_sig_llvmonly_imt_trampoline
, icall_args
, thunk_addr_ins
, NULL
, NULL
);
817 return mini_emit_llvmonly_calli (cfg
, fsig
, sp
, ftndesc_ins
);
820 if ((fsig
->generic_param_count
|| variant_iface
|| special_array_interface
) && !is_gsharedvt
) {
822 * This is similar to the interface case, the vtable slot points to an imt thunk which is
823 * dynamically extended as more instantiations are discovered.
824 * This handles generic virtual methods both on classes and interfaces.
826 int slot_reg
= alloc_preg (cfg
);
827 int addr_reg
= alloc_preg (cfg
);
828 int arg_reg
= alloc_preg (cfg
);
829 int ftndesc_reg
= alloc_preg (cfg
);
830 MonoInst
*thunk_addr_ins
, *thunk_arg_ins
, *ftndesc_ins
;
831 MonoBasicBlock
*slowpath_bb
, *end_bb
;
833 NEW_BBLOCK (cfg
, slowpath_bb
);
834 NEW_BBLOCK (cfg
, end_bb
);
836 vtable_reg
= alloc_preg (cfg
);
837 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_ins
, OP_LOAD_MEMBASE
, vtable_reg
, this_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
839 offset
= ((gint32
)slot
- MONO_IMT_SIZE
) * TARGET_SIZEOF_VOID_P
;
841 offset
= MONO_STRUCT_OFFSET (MonoVTable
, vtable
) + (slot
* TARGET_SIZEOF_VOID_P
);
843 /* Load the slot, which contains a function descriptor. */
844 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, slot_reg
, vtable_reg
, offset
);
846 /* These slots are not initialized, so fall back to the slow path until they are initialized */
847 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
848 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, slot_reg
, 0);
849 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, slowpath_bb
);
852 /* Same as with iface calls */
853 EMIT_NEW_LOAD_MEMBASE (cfg
, thunk_addr_ins
, OP_LOAD_MEMBASE
, addr_reg
, slot_reg
, 0);
854 EMIT_NEW_LOAD_MEMBASE (cfg
, thunk_arg_ins
, OP_LOAD_MEMBASE
, arg_reg
, slot_reg
, TARGET_SIZEOF_VOID_P
);
855 icall_args
[0] = thunk_arg_ins
;
856 icall_args
[1] = mini_emit_get_rgctx_method (cfg
, context_used
,
857 cmethod
, MONO_RGCTX_INFO_METHOD
);
858 ftndesc_ins
= mini_emit_calli (cfg
, helper_sig_llvmonly_imt_trampoline
, icall_args
, thunk_addr_ins
, NULL
, NULL
);
859 ftndesc_ins
->dreg
= ftndesc_reg
;
861 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
862 * they don't know about yet. Fall back to the slowpath in that case.
864 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, ftndesc_reg
, 0);
865 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, slowpath_bb
);
867 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
870 MONO_START_BB (cfg
, slowpath_bb
);
871 icall_args
[0] = vtable_ins
;
872 EMIT_NEW_ICONST (cfg
, icall_args
[1], slot
);
873 icall_args
[2] = mini_emit_get_rgctx_method (cfg
, context_used
,
874 cmethod
, MONO_RGCTX_INFO_METHOD
);
876 ftndesc_ins
= mono_emit_jit_icall (cfg
, mini_llvmonly_resolve_generic_virtual_iface_call
, icall_args
);
878 ftndesc_ins
= mono_emit_jit_icall (cfg
, mini_llvmonly_resolve_generic_virtual_call
, icall_args
);
879 ftndesc_ins
->dreg
= ftndesc_reg
;
880 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
883 MONO_START_BB (cfg
, end_bb
);
884 return mini_emit_llvmonly_calli (cfg
, fsig
, sp
, ftndesc_ins
);
888 * Non-optimized cases
890 icall_args
[0] = sp
[0];
891 EMIT_NEW_ICONST (cfg
, icall_args
[1], slot
);
893 icall_args
[2] = mini_emit_get_rgctx_method (cfg
, context_used
,
894 cmethod
, MONO_RGCTX_INFO_METHOD
);
896 arg_reg
= alloc_preg (cfg
);
897 MONO_EMIT_NEW_PCONST (cfg
, arg_reg
, NULL
);
898 EMIT_NEW_VARLOADA_VREG (cfg
, icall_args
[3], arg_reg
, mono_get_int_type ());
900 g_assert (is_gsharedvt
);
902 call_target
= mono_emit_jit_icall (cfg
, mini_llvmonly_resolve_iface_call_gsharedvt
, icall_args
);
904 call_target
= mono_emit_jit_icall (cfg
, mini_llvmonly_resolve_vcall_gsharedvt
, icall_args
);
907 * Pass the extra argument even if the callee doesn't receive it, most
908 * calling conventions allow this.
910 return mini_emit_extra_arg_calli (cfg
, fsig
, sp
, arg_reg
, call_target
);
913 static MonoMethodSignature
*
914 sig_to_rgctx_sig (MonoMethodSignature
*sig
)
916 // FIXME: memory allocation
917 MonoMethodSignature
*res
;
920 res
= (MonoMethodSignature
*)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE
+ (sig
->param_count
+ 1) * sizeof (MonoType
*));
921 memcpy (res
, sig
, MONO_SIZEOF_METHOD_SIGNATURE
);
922 res
->param_count
= sig
->param_count
+ 1;
923 for (i
= 0; i
< sig
->param_count
; ++i
)
924 res
->params
[i
] = sig
->params
[i
];
925 res
->params
[sig
->param_count
] = m_class_get_this_arg (mono_defaults
.int_class
);
929 /* Make an indirect call to FSIG passing an additional argument */
931 mini_emit_extra_arg_calli (MonoCompile
*cfg
, MonoMethodSignature
*fsig
, MonoInst
**orig_args
, int arg_reg
, MonoInst
*call_target
)
933 MonoMethodSignature
*csig
;
934 MonoInst
*args_buf
[16];
936 int i
, pindex
, tmp_reg
;
938 /* Make a call with an rgctx/extra arg */
939 if (fsig
->param_count
+ 2 < 16)
942 args
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (fsig
->param_count
+ 2));
945 args
[pindex
++] = orig_args
[0];
946 for (i
= 0; i
< fsig
->param_count
; ++i
)
947 args
[pindex
++] = orig_args
[fsig
->hasthis
+ i
];
948 tmp_reg
= alloc_preg (cfg
);
949 EMIT_NEW_UNALU (cfg
, args
[pindex
], OP_MOVE
, tmp_reg
, arg_reg
);
950 csig
= sig_to_rgctx_sig (fsig
);
951 return mini_emit_calli (cfg
, csig
, args
, call_target
, NULL
, NULL
);
954 /* Emit an indirect call to the function descriptor ADDR */
956 mini_emit_llvmonly_calli (MonoCompile
*cfg
, MonoMethodSignature
*fsig
, MonoInst
**args
, MonoInst
*addr
)
957 // FIXME no tailcall support
959 int addr_reg
, arg_reg
;
960 MonoInst
*call_target
;
962 g_assert (cfg
->llvm_only
);
965 * addr points to a <addr, arg> pair, load both of them, and
966 * make a call to addr, passing arg as an extra arg.
968 addr_reg
= alloc_preg (cfg
);
969 EMIT_NEW_LOAD_MEMBASE (cfg
, call_target
, OP_LOAD_MEMBASE
, addr_reg
, addr
->dreg
, 0);
970 arg_reg
= alloc_preg (cfg
);
971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, arg_reg
, addr
->dreg
, TARGET_SIZEOF_VOID_P
);
973 return mini_emit_extra_arg_calli (cfg
, fsig
, args
, arg_reg
, call_target
);
/* Keeps this translation unit non-empty when the JIT is compiled out
 * (presumably under the #else branch of DISABLE_JIT — confirm against the
 * full file). */
MONO_EMPTY_SOURCE_FILE (calls);