6 #include <mono/utils/mono-compiler.h>
12 #include "mini-runtime.h"
13 #include "llvmonly-runtime.h"
14 #include "mini-llvm.h"
15 #include "jit-icalls.h"
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/class-abi-details.h>
18 #include <mono/utils/mono-utils-debug.h>
19 #include "mono/metadata/icall-signatures.h"
21 static const gboolean debug_tailcall_break_compile
= FALSE
; // break in method_to_ir
22 static const gboolean debug_tailcall_break_run
= FALSE
; // insert breakpoint in generated code
25 mono_call_to_patch (MonoCallInst
*call
)
27 MonoJumpInfoTarget patch
;
28 MonoJitICallId jit_icall_id
;
30 // This is similar to amd64 emit_call.
32 if (call
->inst
.flags
& MONO_INST_HAS_METHOD
) {
33 patch
.type
= MONO_PATCH_INFO_METHOD
;
34 patch
.target
= call
->method
;
35 } else if ((jit_icall_id
= call
->jit_icall_id
)) {
36 patch
.type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
37 patch
.target
= GUINT_TO_POINTER (jit_icall_id
);
39 patch
.type
= MONO_PATCH_INFO_ABS
;
40 patch
.target
= call
->fptr
;
46 mono_call_add_patch_info (MonoCompile
*cfg
, MonoCallInst
*call
, int ip
)
48 const MonoJumpInfoTarget patch
= mono_call_to_patch (call
);
49 mono_add_patch_info (cfg
, ip
, patch
.type
, patch
.target
);
53 mini_test_tailcall (MonoCompile
*cfg
, gboolean tailcall
)
55 // A lot of tests say "tailcall" throughout their verbose output.
56 // "tailcalllog" is more searchable.
58 // Do not change "tailcalllog" here without changing other places, e.g. tests that search for it.
60 g_assertf (tailcall
|| !mini_get_debug_options ()->test_tailcall_require
, "tailcalllog fail from %s", cfg
->method
->name
);
61 mono_tailcall_print ("tailcalllog %s from %s\n", tailcall
? "success" : "fail", cfg
->method
->name
);
65 mini_emit_tailcall_parameters (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
67 // OP_TAILCALL_PARAMETER helps compute the size of code, in order
68 // to size branches around OP_TAILCALL_[REG,MEMBASE].
70 // The actual bytes are output from OP_TAILCALL_[REG,MEMBASE].
71 // OP_TAILCALL_PARAMETER is an overestimate because typically
72 // many parameters are in registers.
74 const int n
= sig
->param_count
+ (sig
->hasthis
? 1 : 0);
75 for (int i
= 0; i
< n
; ++i
) {
77 MONO_INST_NEW (cfg
, ins
, OP_TAILCALL_PARAMETER
);
78 MONO_ADD_INS (cfg
->cbb
, ins
);
84 ret_type_to_call_opcode (MonoCompile
*cfg
, MonoType
*type
, int calli
, int virt
)
87 type
= mini_get_underlying_type (type
);
90 return calli
? OP_VOIDCALL_REG
: virt
? OP_VOIDCALL_MEMBASE
: OP_VOIDCALL
;
97 return calli
? OP_CALL_REG
: virt
? OP_CALL_MEMBASE
: OP_CALL
;
101 case MONO_TYPE_FNPTR
:
102 return calli
? OP_CALL_REG
: virt
? OP_CALL_MEMBASE
: OP_CALL
;
103 case MONO_TYPE_CLASS
:
104 case MONO_TYPE_STRING
:
105 case MONO_TYPE_OBJECT
:
106 case MONO_TYPE_SZARRAY
:
107 case MONO_TYPE_ARRAY
:
108 return calli
? OP_CALL_REG
: virt
? OP_CALL_MEMBASE
: OP_CALL
;
111 return calli
? OP_LCALL_REG
: virt
? OP_LCALL_MEMBASE
: OP_LCALL
;
114 return calli
? OP_RCALL_REG
: virt
? OP_RCALL_MEMBASE
: OP_RCALL
;
116 return calli
? OP_FCALL_REG
: virt
? OP_FCALL_MEMBASE
: OP_FCALL
;
118 return calli
? OP_FCALL_REG
: virt
? OP_FCALL_MEMBASE
: OP_FCALL
;
119 case MONO_TYPE_VALUETYPE
:
120 if (m_class_is_enumtype (type
->data
.klass
)) {
121 type
= mono_class_enum_basetype_internal (type
->data
.klass
);
124 return calli
? OP_VCALL_REG
: virt
? OP_VCALL_MEMBASE
: OP_VCALL
;
125 case MONO_TYPE_TYPEDBYREF
:
126 return calli
? OP_VCALL_REG
: virt
? OP_VCALL_MEMBASE
: OP_VCALL
;
127 case MONO_TYPE_GENERICINST
:
128 type
= m_class_get_byval_arg (type
->data
.generic_class
->container_class
);
133 return calli
? OP_VCALL_REG
: virt
? OP_VCALL_MEMBASE
: OP_VCALL
;
135 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type
->type
);
141 mini_emit_call_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
,
142 MonoInst
**args
, gboolean calli
, gboolean virtual_
, gboolean tailcall
,
143 gboolean rgctx
, gboolean unbox_trampoline
, MonoMethod
*target
)
148 cfg
->has_calls
= TRUE
;
150 if (tailcall
&& cfg
->llvm_only
) {
151 // FIXME tailcall should not be changed this late.
152 // FIXME It really should not be changed due to llvm_only.
153 // Accuracy is presently available MONO_IS_TAILCALL_OPCODE (call).
155 mono_tailcall_print ("losing tailcall in %s due to llvm_only\n", cfg
->method
->name
);
156 mini_test_tailcall (cfg
, FALSE
);
159 if (tailcall
&& (debug_tailcall_break_compile
|| debug_tailcall_break_run
)
160 && mono_is_usermode_native_debugger_present ()) {
162 if (debug_tailcall_break_compile
)
165 if (tailcall
&& debug_tailcall_break_run
) { // Can change tailcall in debugger.
167 MONO_INST_NEW (cfg
, brk
, OP_BREAK
);
168 MONO_ADD_INS (cfg
->cbb
, brk
);
173 mini_profiler_emit_tail_call (cfg
, target
);
174 mini_emit_tailcall_parameters (cfg
, sig
);
175 MONO_INST_NEW_CALL (cfg
, call
, calli
? OP_TAILCALL_REG
: virtual_
? OP_TAILCALL_MEMBASE
: OP_TAILCALL
);
177 MONO_INST_NEW_CALL (cfg
, call
, ret_type_to_call_opcode (cfg
, sig
->ret
, calli
, virtual_
));
180 call
->signature
= sig
;
181 call
->rgctx_reg
= rgctx
;
182 sig_ret
= mini_get_underlying_type (sig
->ret
);
184 mini_type_to_eval_stack_type ((cfg
), sig_ret
, &call
->inst
);
187 if (mini_type_is_vtype (sig_ret
)) {
188 call
->vret_var
= cfg
->vret_addr
;
189 //g_assert_not_reached ();
191 } else if (mini_type_is_vtype (sig_ret
)) {
192 MonoInst
*temp
= mono_compile_create_var (cfg
, sig_ret
, OP_LOCAL
);
195 temp
->backend
.is_pinvoke
= sig
->pinvoke
;
198 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
199 * address of return value to increase optimization opportunities.
200 * Before vtype decomposition, the dreg of the call ins itself represents the
201 * fact the call modifies the return value. After decomposition, the call will
202 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
203 * will be transformed into an LDADDR.
205 MONO_INST_NEW (cfg
, loada
, OP_OUTARG_VTRETADDR
);
206 loada
->dreg
= alloc_preg (cfg
);
207 loada
->inst_p0
= temp
;
208 /* We reference the call too since call->dreg could change during optimization */
209 loada
->inst_p1
= call
;
210 MONO_ADD_INS (cfg
->cbb
, loada
);
212 call
->inst
.dreg
= temp
->dreg
;
214 call
->vret_var
= loada
;
215 } else if (!MONO_TYPE_IS_VOID (sig_ret
))
216 call
->inst
.dreg
= alloc_dreg (cfg
, (MonoStackType
)call
->inst
.type
);
218 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
219 if (COMPILE_SOFT_FLOAT (cfg
)) {
221 * If the call has a float argument, we would need to do an r8->r4 conversion using
222 * an icall, but that cannot be done during the call sequence since it would clobber
223 * the call registers + the stack. So we do it before emitting the call.
225 for (int i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
227 MonoInst
*in
= call
->args
[i
];
229 if (i
>= sig
->hasthis
)
230 t
= sig
->params
[i
- sig
->hasthis
];
232 t
= mono_get_int_type ();
233 t
= mono_type_get_underlying_type (t
);
235 if (!t
->byref
&& t
->type
== MONO_TYPE_R4
) {
240 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
242 /* The result will be in an int vreg */
243 call
->args
[i
] = conv
;
249 call
->need_unbox_trampoline
= unbox_trampoline
;
252 if (COMPILE_LLVM (cfg
))
253 mono_llvm_emit_call (cfg
, call
);
255 mono_arch_emit_call (cfg
, call
);
257 mono_arch_emit_call (cfg
, call
);
260 cfg
->param_area
= MAX (cfg
->param_area
, call
->stack_usage
);
261 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
267 mini_should_check_stack_pointer (MonoCompile
*cfg
)
269 // This logic is shared by mini_emit_calli_full and is_supported_tailcall,
270 // in order to compute tailcall_supported earlier. Alternatively it could be passed
271 // out from mini_emit_calli_full -- if it has not been copied around
272 // or decisions made based on it.
276 return cfg
->check_pinvoke_callconv
&&
277 cfg
->method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
&&
278 ((info
= mono_marshal_get_wrapper_info (cfg
->method
))) &&
279 info
->subtype
== WRAPPER_SUBTYPE_PINVOKE
;
283 set_rgctx_arg (MonoCompile
*cfg
, MonoCallInst
*call
, int rgctx_reg
, MonoInst
*rgctx_arg
)
285 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
286 cfg
->uses_rgctx_reg
= TRUE
;
287 call
->rgctx_reg
= TRUE
;
289 call
->rgctx_arg_reg
= rgctx_reg
;
293 /* Either METHOD or IMT_ARG needs to be set */
295 emit_imt_argument (MonoCompile
*cfg
, MonoCallInst
*call
, MonoMethod
*method
, MonoInst
*imt_arg
)
299 g_assert (method
|| imt_arg
);
301 if (COMPILE_LLVM (cfg
)) {
303 method_reg
= alloc_preg (cfg
);
304 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, method_reg
, imt_arg
->dreg
);
306 MonoInst
*ins
= mini_emit_runtime_constant (cfg
, MONO_PATCH_INFO_METHODCONST
, method
);
307 method_reg
= ins
->dreg
;
311 call
->imt_arg_reg
= method_reg
;
313 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, MONO_ARCH_IMT_REG
, FALSE
);
318 method_reg
= alloc_preg (cfg
);
319 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, method_reg
, imt_arg
->dreg
);
321 MonoInst
*ins
= mini_emit_runtime_constant (cfg
, MONO_PATCH_INFO_METHODCONST
, method
);
322 method_reg
= ins
->dreg
;
325 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, MONO_ARCH_IMT_REG
, FALSE
);
329 mini_emit_calli_full (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
,
330 MonoInst
*imt_arg
, MonoInst
*rgctx_arg
, gboolean tailcall
)
336 g_assert (!rgctx_arg
|| !imt_arg
);
339 rgctx_reg
= mono_alloc_preg (cfg
);
340 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
343 const gboolean check_sp
= mini_should_check_stack_pointer (cfg
);
345 // Checking stack pointer requires running code after a function call, prevents tailcall.
346 // Caller needs to have decided that earlier.
347 g_assert (!check_sp
|| !tailcall
);
350 if (!cfg
->stack_inbalance_var
)
351 cfg
->stack_inbalance_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
353 MONO_INST_NEW (cfg
, ins
, OP_GET_SP
);
354 ins
->dreg
= cfg
->stack_inbalance_var
->dreg
;
355 MONO_ADD_INS (cfg
->cbb
, ins
);
358 call
= mini_emit_call_args (cfg
, sig
, args
, TRUE
, FALSE
, tailcall
, rgctx_arg
? TRUE
: FALSE
, FALSE
, NULL
);
360 call
->inst
.sreg1
= addr
->dreg
;
363 emit_imt_argument (cfg
, call
, NULL
, imt_arg
);
365 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
370 sp_reg
= mono_alloc_preg (cfg
);
372 MONO_INST_NEW (cfg
, ins
, OP_GET_SP
);
374 MONO_ADD_INS (cfg
->cbb
, ins
);
376 /* Restore the stack so we don't crash when throwing the exception */
377 MONO_INST_NEW (cfg
, ins
, OP_SET_SP
);
378 ins
->sreg1
= cfg
->stack_inbalance_var
->dreg
;
379 MONO_ADD_INS (cfg
->cbb
, ins
);
381 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, cfg
->stack_inbalance_var
->dreg
, sp_reg
);
382 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ExecutionEngineException");
386 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
388 return (MonoInst
*)call
;
392 mini_emit_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
, MonoInst
*imt_arg
, MonoInst
*rgctx_arg
)
393 // Historical version without gboolean tailcall parameter.
395 return mini_emit_calli_full (cfg
, sig
, args
, addr
, imt_arg
, rgctx_arg
, FALSE
);
399 callvirt_to_call (int opcode
)
402 case OP_TAILCALL_MEMBASE
:
404 case OP_CALL_MEMBASE
:
406 case OP_VOIDCALL_MEMBASE
:
408 case OP_FCALL_MEMBASE
:
410 case OP_RCALL_MEMBASE
:
412 case OP_VCALL_MEMBASE
:
414 case OP_LCALL_MEMBASE
:
417 g_assert_not_reached ();
424 can_enter_interp (MonoCompile
*cfg
, MonoMethod
*method
, gboolean virtual_
)
426 if (method
->wrapper_type
)
428 /* Virtual calls from corlib can go outside corlib */
429 if ((m_class_get_image (method
->klass
) == m_class_get_image (cfg
->method
->klass
)) && !virtual_
)
432 /* See needs_extra_arg () in mini-llvm.c */
433 if (method
->string_ctor
)
435 if (method
->klass
== mono_get_string_class () && (strstr (method
->name
, "memcpy") || strstr (method
->name
, "bzero")))
438 /* Assume all calls outside the assembly can enter the interpreter */
443 mini_emit_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
, gboolean tailcall
,
444 MonoInst
**args
, MonoInst
*this_ins
, MonoInst
*imt_arg
, MonoInst
*rgctx_arg
)
446 #ifndef DISABLE_REMOTING
447 gboolean might_be_remote
= FALSE
;
449 gboolean virtual_
= this_ins
!= NULL
;
453 gboolean need_unbox_trampoline
;
456 sig
= mono_method_signature_internal (method
);
458 if (cfg
->llvm_only
&& mono_class_is_interface (method
->klass
))
459 g_assert_not_reached ();
462 rgctx_reg
= mono_alloc_preg (cfg
);
463 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
466 if (method
->string_ctor
) {
467 /* Create the real signature */
468 /* FIXME: Cache these */
469 MonoMethodSignature
*ctor_sig
= mono_metadata_signature_dup_mempool (cfg
->mempool
, sig
);
470 ctor_sig
->ret
= m_class_get_byval_arg (mono_defaults
.string_class
);
475 context_used
= mini_method_check_context_used (cfg
, method
);
477 #ifndef DISABLE_REMOTING
478 might_be_remote
= this_ins
&& sig
->hasthis
&&
479 (mono_class_is_marshalbyref (method
->klass
) || method
->klass
== mono_defaults
.object_class
) &&
480 !(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && (!MONO_CHECK_THIS (this_ins
) || context_used
);
482 if (might_be_remote
&& context_used
) {
485 g_assert (cfg
->gshared
);
487 addr
= mini_emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK
);
489 return mini_emit_calli (cfg
, sig
, args
, addr
, NULL
, NULL
);
493 if (cfg
->llvm_only
&& virtual_
&& (method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))
494 return mini_emit_llvmonly_virtual_call (cfg
, method
, sig
, 0, args
);
496 if (cfg
->llvm_only
&& cfg
->interp
&& !virtual_
&& !tailcall
&& can_enter_interp (cfg
, method
, FALSE
)) {
497 MonoInst
*ftndesc
= mini_emit_get_rgctx_method (cfg
, -1, method
, MONO_RGCTX_INFO_METHOD_FTNDESC
);
499 /* Need wrappers for this signature to be able to enter interpreter */
500 cfg
->interp_in_signatures
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->interp_in_signatures
, sig
);
502 /* This call might need to enter the interpreter so make it indirect */
503 return mini_emit_llvmonly_calli (cfg
, sig
, args
, ftndesc
);
506 need_unbox_trampoline
= method
->klass
== mono_defaults
.object_class
|| mono_class_is_interface (method
->klass
);
508 call
= mini_emit_call_args (cfg
, sig
, args
, FALSE
, virtual_
, tailcall
, rgctx_arg
? TRUE
: FALSE
, need_unbox_trampoline
, method
);
510 #ifndef DISABLE_REMOTING
511 if (might_be_remote
) {
513 call
->method
= mono_marshal_get_remoting_invoke_with_check (method
, error
);
514 mono_error_assert_ok (error
);
517 call
->method
= method
;
518 call
->inst
.flags
|= MONO_INST_HAS_METHOD
;
519 call
->inst
.inst_left
= this_ins
;
521 // FIXME This has already been read in amd64 parameter construction.
522 // Fixing it generates incorrect code. CEE_JMP needs attention.
523 call
->tailcall
= tailcall
;
526 int vtable_reg
, slot_reg
, this_reg
;
529 this_reg
= this_ins
->dreg
;
531 if (!cfg
->llvm_only
&& (m_class_get_parent (method
->klass
) == mono_defaults
.multicastdelegate_class
) && !strcmp (method
->name
, "Invoke")) {
534 MONO_EMIT_NULL_CHECK (cfg
, this_reg
, FALSE
);
536 /* Make a call to delegate->invoke_impl */
537 call
->inst
.inst_basereg
= this_reg
;
538 call
->inst
.inst_offset
= MONO_STRUCT_OFFSET (MonoDelegate
, invoke_impl
);
539 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
541 /* We must emit a dummy use here because the delegate trampoline will
542 replace the 'this' argument with the delegate target making this activation
543 no longer a root for the delegate.
544 This is an issue for delegates that target collectible code such as dynamic
545 methods of GC'able assemblies.
547 For a test case look into #667921.
549 FIXME: a dummy use is not the best way to do it as the local register allocator
550 will put it on a caller save register and spill it around the call.
551 Ideally, we would either put it on a callee save register or only do the store part.
553 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, args
[0]);
555 return (MonoInst
*)call
;
558 if ((!(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
559 (MONO_METHOD_IS_FINAL (method
) &&
560 method
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
)) &&
561 !(mono_class_is_marshalbyref (method
->klass
) && context_used
)) {
563 * the method is not virtual, we just need to ensure this is not null
564 * and then we can call the method directly.
566 #ifndef DISABLE_REMOTING
567 if (mono_class_is_marshalbyref (method
->klass
) || method
->klass
== mono_defaults
.object_class
) {
570 * The check above ensures method is not gshared, this is needed since
571 * gshared methods can't have wrappers.
573 method
= call
->method
= mono_marshal_get_remoting_invoke_with_check (method
, error
);
574 mono_error_assert_ok (error
);
579 } else if ((method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && MONO_METHOD_IS_FINAL (method
)) {
581 * the method is virtual, but we can statically dispatch since either
582 * it's class or the method itself are sealed.
583 * But first we need to ensure it's not a null reference.
589 if (!method
->string_ctor
)
590 MONO_EMIT_NEW_CHECK_THIS (cfg
, this_reg
);
593 if (!virtual_
&& cfg
->llvm_only
&& cfg
->interp
&& !tailcall
&& can_enter_interp (cfg
, method
, FALSE
)) {
594 MonoInst
*ftndesc
= mini_emit_get_rgctx_method (cfg
, -1, method
, MONO_RGCTX_INFO_METHOD_FTNDESC
);
596 /* This call might need to enter the interpreter so make it indirect */
597 return mini_emit_llvmonly_calli (cfg
, sig
, args
, ftndesc
);
598 } else if (!virtual_
) {
599 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
601 vtable_reg
= alloc_preg (cfg
);
602 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, this_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
603 if (mono_class_is_interface (method
->klass
)) {
604 guint32 imt_slot
= mono_method_get_imt_slot (method
);
605 emit_imt_argument (cfg
, call
, call
->method
, imt_arg
);
606 slot_reg
= vtable_reg
;
607 offset
= ((gint32
)imt_slot
- MONO_IMT_SIZE
) * TARGET_SIZEOF_VOID_P
;
609 slot_reg
= vtable_reg
;
610 offset
= MONO_STRUCT_OFFSET (MonoVTable
, vtable
) +
611 ((mono_method_get_vtable_index (method
)) * (TARGET_SIZEOF_VOID_P
));
613 g_assert (mono_method_signature_internal (method
)->generic_param_count
);
614 emit_imt_argument (cfg
, call
, call
->method
, imt_arg
);
618 call
->inst
.sreg1
= slot_reg
;
619 call
->inst
.inst_offset
= offset
;
620 call
->is_virtual
= TRUE
;
624 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
627 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
629 return (MonoInst
*)call
;
633 mono_emit_method_call (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
**args
, MonoInst
*this_ins
)
635 return mini_emit_method_call_full (cfg
, method
, mono_method_signature_internal (method
), FALSE
, args
, this_ins
, NULL
, NULL
);
640 mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
,
647 call
= mini_emit_call_args (cfg
, sig
, args
, FALSE
, FALSE
, FALSE
, FALSE
, FALSE
, NULL
);
650 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
652 return (MonoInst
*)call
;
656 mono_emit_jit_icall_id (MonoCompile
*cfg
, MonoJitICallId jit_icall_id
, MonoInst
**args
)
658 MonoJitICallInfo
*info
= mono_find_jit_icall_info (jit_icall_id
);
660 MonoCallInst
*call
= (MonoCallInst
*)mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, args
);
662 call
->jit_icall_id
= jit_icall_id
;
664 return (MonoInst
*)call
;
668 * mini_emit_abs_call:
670 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
673 mini_emit_abs_call (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gconstpointer data
,
674 MonoMethodSignature
*sig
, MonoInst
**args
)
676 MonoJumpInfo
*ji
= mono_patch_info_new (cfg
->mempool
, 0, patch_type
, data
);
680 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
682 * FIXME: Is the abs_patches hashtable avoidable?
683 * Such as by putting the patch info in the call instruction?
685 if (cfg
->abs_patches
== NULL
)
686 cfg
->abs_patches
= g_hash_table_new (NULL
, NULL
);
687 g_hash_table_insert (cfg
->abs_patches
, ji
, ji
);
688 ins
= mono_emit_native_call (cfg
, ji
, sig
, args
);
689 ((MonoCallInst
*)ins
)->fptr_is_patch
= TRUE
;
694 mini_emit_llvmonly_virtual_call (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, int context_used
, MonoInst
**sp
)
696 static MonoMethodSignature
*helper_sig_llvmonly_imt_trampoline
= NULL
;
697 MonoInst
*icall_args
[16];
698 MonoInst
*call_target
, *ins
, *vtable_ins
;
699 int arg_reg
, this_reg
, vtable_reg
;
700 gboolean is_iface
= mono_class_is_interface (cmethod
->klass
);
701 gboolean is_gsharedvt
= cfg
->gsharedvt
&& mini_is_gsharedvt_variable_signature (fsig
);
702 gboolean variant_iface
= FALSE
;
705 gboolean special_array_interface
= m_class_is_array_special_interface (cmethod
->klass
);
707 if (cfg
->interp
&& can_enter_interp (cfg
, cmethod
, TRUE
))
708 /* Need wrappers for this signature to be able to enter interpreter */
709 cfg
->interp_in_signatures
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->interp_in_signatures
, fsig
);
712 * In llvm-only mode, vtables contain function descriptors instead of
713 * method addresses/trampolines.
715 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
, FALSE
);
718 slot
= mono_method_get_imt_slot (cmethod
);
720 slot
= mono_method_get_vtable_index (cmethod
);
722 this_reg
= sp
[0]->dreg
;
724 if (is_iface
&& mono_class_has_variant_generic_params (cmethod
->klass
))
725 variant_iface
= TRUE
;
727 if (!helper_sig_llvmonly_imt_trampoline
) {
728 MonoMethodSignature
*tmp
= mono_icall_sig_ptr_ptr_ptr
;
729 mono_memory_barrier ();
730 helper_sig_llvmonly_imt_trampoline
= tmp
;
733 if (!fsig
->generic_param_count
&& !is_iface
&& !is_gsharedvt
) {
735 * The simplest case, a normal virtual call.
737 int slot_reg
= alloc_preg (cfg
);
738 int addr_reg
= alloc_preg (cfg
);
739 int arg_reg
= alloc_preg (cfg
);
740 MonoBasicBlock
*non_null_bb
;
742 vtable_reg
= alloc_preg (cfg
);
743 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_ins
, OP_LOAD_MEMBASE
, vtable_reg
, this_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
744 offset
= MONO_STRUCT_OFFSET (MonoVTable
, vtable
) + (slot
* TARGET_SIZEOF_VOID_P
);
746 /* Load the vtable slot, which contains a function descriptor. */
747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, slot_reg
, vtable_reg
, offset
);
749 NEW_BBLOCK (cfg
, non_null_bb
);
751 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, slot_reg
, 0);
752 cfg
->cbb
->last_ins
->flags
|= MONO_INST_LIKELY
;
753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, non_null_bb
);
756 // FIXME: Make the wrapper use the preserveall cconv
757 // FIXME: Use one icall per slot for small slot numbers ?
758 icall_args
[0] = vtable_ins
;
759 EMIT_NEW_ICONST (cfg
, icall_args
[1], slot
);
760 /* Make the icall return the vtable slot value to save some code space */
761 ins
= mono_emit_jit_icall (cfg
, mini_llvmonly_init_vtable_slot
, icall_args
);
762 ins
->dreg
= slot_reg
;
763 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, non_null_bb
);
766 MONO_START_BB (cfg
, non_null_bb
);
767 /* Load the address + arg from the vtable slot */
768 EMIT_NEW_LOAD_MEMBASE (cfg
, call_target
, OP_LOAD_MEMBASE
, addr_reg
, slot_reg
, 0);
769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, arg_reg
, slot_reg
, TARGET_SIZEOF_VOID_P
);
771 return mini_emit_extra_arg_calli (cfg
, fsig
, sp
, arg_reg
, call_target
);
774 if (!fsig
->generic_param_count
&& is_iface
&& !variant_iface
&& !is_gsharedvt
&& !special_array_interface
) {
776 * A simple interface call
778 * We make a call through an imt slot to obtain the function descriptor we need to call.
779 * The imt slot contains a function descriptor for a runtime function + arg.
781 int slot_reg
= alloc_preg (cfg
);
782 int addr_reg
= alloc_preg (cfg
);
783 int arg_reg
= alloc_preg (cfg
);
784 MonoInst
*thunk_addr_ins
, *thunk_arg_ins
, *ftndesc_ins
;
786 vtable_reg
= alloc_preg (cfg
);
787 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_ins
, OP_LOAD_MEMBASE
, vtable_reg
, this_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
788 offset
= ((gint32
)slot
- MONO_IMT_SIZE
) * TARGET_SIZEOF_VOID_P
;
791 * The slot is already initialized when the vtable is created so there is no need
795 /* Load the imt slot, which contains a function descriptor. */
796 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, slot_reg
, vtable_reg
, offset
);
798 /* Load the address + arg of the imt thunk from the imt slot */
799 EMIT_NEW_LOAD_MEMBASE (cfg
, thunk_addr_ins
, OP_LOAD_MEMBASE
, addr_reg
, slot_reg
, 0);
800 EMIT_NEW_LOAD_MEMBASE (cfg
, thunk_arg_ins
, OP_LOAD_MEMBASE
, arg_reg
, slot_reg
, TARGET_SIZEOF_VOID_P
);
802 * IMT thunks in llvm-only mode are C functions which take an info argument
803 * plus the imt method and return the ftndesc to call.
805 icall_args
[0] = thunk_arg_ins
;
806 icall_args
[1] = mini_emit_get_rgctx_method (cfg
, context_used
,
807 cmethod
, MONO_RGCTX_INFO_METHOD
);
808 ftndesc_ins
= mini_emit_calli (cfg
, helper_sig_llvmonly_imt_trampoline
, icall_args
, thunk_addr_ins
, NULL
, NULL
);
810 return mini_emit_llvmonly_calli (cfg
, fsig
, sp
, ftndesc_ins
);
813 if ((fsig
->generic_param_count
|| variant_iface
|| special_array_interface
) && !is_gsharedvt
) {
815 * This is similar to the interface case, the vtable slot points to an imt thunk which is
816 * dynamically extended as more instantiations are discovered.
817 * This handles generic virtual methods both on classes and interfaces.
819 int slot_reg
= alloc_preg (cfg
);
820 int addr_reg
= alloc_preg (cfg
);
821 int arg_reg
= alloc_preg (cfg
);
822 int ftndesc_reg
= alloc_preg (cfg
);
823 MonoInst
*thunk_addr_ins
, *thunk_arg_ins
, *ftndesc_ins
;
824 MonoBasicBlock
*slowpath_bb
, *end_bb
;
826 NEW_BBLOCK (cfg
, slowpath_bb
);
827 NEW_BBLOCK (cfg
, end_bb
);
829 vtable_reg
= alloc_preg (cfg
);
830 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_ins
, OP_LOAD_MEMBASE
, vtable_reg
, this_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
832 offset
= ((gint32
)slot
- MONO_IMT_SIZE
) * TARGET_SIZEOF_VOID_P
;
834 offset
= MONO_STRUCT_OFFSET (MonoVTable
, vtable
) + (slot
* TARGET_SIZEOF_VOID_P
);
836 /* Load the slot, which contains a function descriptor. */
837 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, slot_reg
, vtable_reg
, offset
);
839 /* These slots are not initialized, so fall back to the slow path until they are initialized */
840 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
841 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, slot_reg
, 0);
842 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, slowpath_bb
);
845 /* Same as with iface calls */
846 EMIT_NEW_LOAD_MEMBASE (cfg
, thunk_addr_ins
, OP_LOAD_MEMBASE
, addr_reg
, slot_reg
, 0);
847 EMIT_NEW_LOAD_MEMBASE (cfg
, thunk_arg_ins
, OP_LOAD_MEMBASE
, arg_reg
, slot_reg
, TARGET_SIZEOF_VOID_P
);
848 icall_args
[0] = thunk_arg_ins
;
849 icall_args
[1] = mini_emit_get_rgctx_method (cfg
, context_used
,
850 cmethod
, MONO_RGCTX_INFO_METHOD
);
851 ftndesc_ins
= mini_emit_calli (cfg
, helper_sig_llvmonly_imt_trampoline
, icall_args
, thunk_addr_ins
, NULL
, NULL
);
852 ftndesc_ins
->dreg
= ftndesc_reg
;
854 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
855 * they don't know about yet. Fall back to the slowpath in that case.
857 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, ftndesc_reg
, 0);
858 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, slowpath_bb
);
860 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
863 MONO_START_BB (cfg
, slowpath_bb
);
864 icall_args
[0] = vtable_ins
;
865 EMIT_NEW_ICONST (cfg
, icall_args
[1], slot
);
866 icall_args
[2] = mini_emit_get_rgctx_method (cfg
, context_used
,
867 cmethod
, MONO_RGCTX_INFO_METHOD
);
869 ftndesc_ins
= mono_emit_jit_icall (cfg
, mini_llvmonly_resolve_generic_virtual_iface_call
, icall_args
);
871 ftndesc_ins
= mono_emit_jit_icall (cfg
, mini_llvmonly_resolve_generic_virtual_call
, icall_args
);
872 ftndesc_ins
->dreg
= ftndesc_reg
;
873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
876 MONO_START_BB (cfg
, end_bb
);
877 return mini_emit_llvmonly_calli (cfg
, fsig
, sp
, ftndesc_ins
);
881 * Non-optimized cases
883 icall_args
[0] = sp
[0];
884 EMIT_NEW_ICONST (cfg
, icall_args
[1], slot
);
886 icall_args
[2] = mini_emit_get_rgctx_method (cfg
, context_used
,
887 cmethod
, MONO_RGCTX_INFO_METHOD
);
889 arg_reg
= alloc_preg (cfg
);
890 MONO_EMIT_NEW_PCONST (cfg
, arg_reg
, NULL
);
891 EMIT_NEW_VARLOADA_VREG (cfg
, icall_args
[3], arg_reg
, mono_get_int_type ());
893 g_assert (is_gsharedvt
);
895 call_target
= mono_emit_jit_icall (cfg
, mini_llvmonly_resolve_iface_call_gsharedvt
, icall_args
);
897 call_target
= mono_emit_jit_icall (cfg
, mini_llvmonly_resolve_vcall_gsharedvt
, icall_args
);
900 * Pass the extra argument even if the callee doesn't receive it, most
901 * calling conventions allow this.
903 return mini_emit_extra_arg_calli (cfg
, fsig
, sp
, arg_reg
, call_target
);
906 static MonoMethodSignature
*
907 sig_to_rgctx_sig (MonoMethodSignature
*sig
)
909 // FIXME: memory allocation
910 MonoMethodSignature
*res
;
913 res
= (MonoMethodSignature
*)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE
+ (sig
->param_count
+ 1) * sizeof (MonoType
*));
914 memcpy (res
, sig
, MONO_SIZEOF_METHOD_SIGNATURE
);
915 res
->param_count
= sig
->param_count
+ 1;
916 for (i
= 0; i
< sig
->param_count
; ++i
)
917 res
->params
[i
] = sig
->params
[i
];
918 res
->params
[sig
->param_count
] = m_class_get_this_arg (mono_defaults
.int_class
);
922 /* Make an indirect call to FSIG passing an additional argument */
924 mini_emit_extra_arg_calli (MonoCompile
*cfg
, MonoMethodSignature
*fsig
, MonoInst
**orig_args
, int arg_reg
, MonoInst
*call_target
)
926 MonoMethodSignature
*csig
;
927 MonoInst
*args_buf
[16];
929 int i
, pindex
, tmp_reg
;
931 /* Make a call with an rgctx/extra arg */
932 if (fsig
->param_count
+ 2 < 16)
935 args
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (fsig
->param_count
+ 2));
938 args
[pindex
++] = orig_args
[0];
939 for (i
= 0; i
< fsig
->param_count
; ++i
)
940 args
[pindex
++] = orig_args
[fsig
->hasthis
+ i
];
941 tmp_reg
= alloc_preg (cfg
);
942 EMIT_NEW_UNALU (cfg
, args
[pindex
], OP_MOVE
, tmp_reg
, arg_reg
);
943 csig
= sig_to_rgctx_sig (fsig
);
944 return mini_emit_calli (cfg
, csig
, args
, call_target
, NULL
, NULL
);
947 /* Emit an indirect call to the function descriptor ADDR */
949 mini_emit_llvmonly_calli (MonoCompile
*cfg
, MonoMethodSignature
*fsig
, MonoInst
**args
, MonoInst
*addr
)
950 // FIXME no tailcall support
952 int addr_reg
, arg_reg
;
953 MonoInst
*call_target
;
955 g_assert (cfg
->llvm_only
);
958 * addr points to a <addr, arg> pair, load both of them, and
959 * make a call to addr, passing arg as an extra arg.
961 addr_reg
= alloc_preg (cfg
);
962 EMIT_NEW_LOAD_MEMBASE (cfg
, call_target
, OP_LOAD_MEMBASE
, addr_reg
, addr
->dreg
, 0);
963 arg_reg
= alloc_preg (cfg
);
964 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, arg_reg
, addr
->dreg
, TARGET_SIZEOF_VOID_P
);
966 return mini_emit_extra_arg_calli (cfg
, fsig
, args
, arg_reg
, call_target
);
/* NOTE(review): emits a dummy symbol so the translation unit is not empty
 * when the surrounding code is compiled out — presumably paired with a
 * DISABLE_JIT guard elsewhere in the file; confirm against the full source. */
MONO_EMPTY_SOURCE_FILE (calls
);