[wasm] Add a --runtime-arg= argument to runtime-tests.js to allow setting runtime...
[mono-project.git] / mono / mini / calls.c
blob0b56d38a2fad61a52cbd2831dd55e8076d4f6e46
1 /**
2 * \file
3 */
5 #include <config.h>
6 #include <mono/utils/mono-compiler.h>
8 #ifndef DISABLE_JIT
10 #include "mini.h"
11 #include "ir-emit.h"
12 #include "mini-runtime.h"
13 #include "llvmonly-runtime.h"
14 #include "mini-llvm.h"
15 #include "jit-icalls.h"
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/class-abi-details.h>
18 #include <mono/utils/mono-utils-debug.h>
19 #include "mono/metadata/icall-signatures.h"
/* Development knobs for debugging tailcall generation; both normally FALSE. */
21 static const gboolean debug_tailcall_break_compile = FALSE; // break in method_to_ir
22 static const gboolean debug_tailcall_break_run = FALSE; // insert breakpoint in generated code
24 void
25 mono_call_add_patch_info (MonoCompile *cfg, MonoCallInst *call, int ip)
27 if (call->inst.flags & MONO_INST_HAS_METHOD)
28 mono_add_patch_info (cfg, ip, MONO_PATCH_INFO_METHOD, call->method);
29 else
30 mono_add_patch_info (cfg, ip, MONO_PATCH_INFO_ABS, call->fptr);
33 void
34 mini_test_tailcall (MonoCompile *cfg, gboolean tailcall)
36 // A lot of tests say "tailcall" throughout their verbose output.
37 // "tailcalllog" is more searchable.
39 // Do not change "tailcalllog" here without changing other places, e.g. tests that search for it.
41 g_assertf (tailcall || !mini_get_debug_options ()->test_tailcall_require, "tailcalllog fail from %s", cfg->method->name);
42 mono_tailcall_print ("tailcalllog %s from %s\n", tailcall ? "success" : "fail", cfg->method->name);
45 void
46 mini_emit_tailcall_parameters (MonoCompile *cfg, MonoMethodSignature *sig)
48 // OP_TAILCALL_PARAMETER helps compute the size of code, in order
49 // to size branches around OP_TAILCALL_[REG,MEMBASE].
51 // The actual bytes are output from OP_TAILCALL_[REG,MEMBASE].
52 // OP_TAILCALL_PARAMETER is an overestimate because typically
53 // many parameters are in registers.
55 const int n = sig->param_count + (sig->hasthis ? 1 : 0);
56 for (int i = 0; i < n; ++i) {
57 MonoInst *ins;
58 MONO_INST_NEW (cfg, ins, OP_TAILCALL_PARAMETER);
59 MONO_ADD_INS (cfg->cbb, ins);
64 static int
65 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
67 handle_enum:
68 type = mini_get_underlying_type (type);
69 switch (type->type) {
70 case MONO_TYPE_VOID:
71 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
72 case MONO_TYPE_I1:
73 case MONO_TYPE_U1:
74 case MONO_TYPE_I2:
75 case MONO_TYPE_U2:
76 case MONO_TYPE_I4:
77 case MONO_TYPE_U4:
78 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
79 case MONO_TYPE_I:
80 case MONO_TYPE_U:
81 case MONO_TYPE_PTR:
82 case MONO_TYPE_FNPTR:
83 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
84 case MONO_TYPE_CLASS:
85 case MONO_TYPE_STRING:
86 case MONO_TYPE_OBJECT:
87 case MONO_TYPE_SZARRAY:
88 case MONO_TYPE_ARRAY:
89 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
90 case MONO_TYPE_I8:
91 case MONO_TYPE_U8:
92 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
93 case MONO_TYPE_R4:
94 if (cfg->r4fp)
95 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
96 else
97 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
98 case MONO_TYPE_R8:
99 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
100 case MONO_TYPE_VALUETYPE:
101 if (m_class_is_enumtype (type->data.klass)) {
102 type = mono_class_enum_basetype_internal (type->data.klass);
103 goto handle_enum;
104 } else
105 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
106 case MONO_TYPE_TYPEDBYREF:
107 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
108 case MONO_TYPE_GENERICINST:
109 type = m_class_get_byval_arg (type->data.generic_class->container_class);
110 goto handle_enum;
111 case MONO_TYPE_VAR:
112 case MONO_TYPE_MVAR:
113 /* gsharedvt */
114 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
115 default:
116 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
118 return -1;
121 MonoCallInst *
122 mini_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
123 MonoInst **args, gboolean calli, gboolean virtual_, gboolean tailcall,
124 gboolean rgctx, gboolean unbox_trampoline, MonoMethod *target)
126 MonoType *sig_ret;
127 MonoCallInst *call;
129 cfg->has_calls = TRUE;
131 if (tailcall && cfg->llvm_only) {
132 // FIXME tailcall should not be changed this late.
133 // FIXME It really should not be changed due to llvm_only.
134 // Accuracy is presently available MONO_IS_TAILCALL_OPCODE (call).
135 tailcall = FALSE;
136 mono_tailcall_print ("losing tailcall in %s due to llvm_only\n", cfg->method->name);
137 mini_test_tailcall (cfg, FALSE);
140 if (tailcall && (debug_tailcall_break_compile || debug_tailcall_break_run)
141 && mono_is_usermode_native_debugger_present ()) {
143 if (debug_tailcall_break_compile)
144 G_BREAKPOINT ();
146 if (tailcall && debug_tailcall_break_run) { // Can change tailcall in debugger.
147 MonoInst *brk;
148 MONO_INST_NEW (cfg, brk, OP_BREAK);
149 MONO_ADD_INS (cfg->cbb, brk);
153 if (tailcall) {
154 mini_profiler_emit_tail_call (cfg, target);
155 mini_emit_tailcall_parameters (cfg, sig);
156 MONO_INST_NEW_CALL (cfg, call, calli ? OP_TAILCALL_REG : virtual_ ? OP_TAILCALL_MEMBASE : OP_TAILCALL);
157 } else
158 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
160 call->args = args;
161 call->signature = sig;
162 call->rgctx_reg = rgctx;
163 sig_ret = mini_get_underlying_type (sig->ret);
165 mini_type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
167 if (tailcall) {
168 if (mini_type_is_vtype (sig_ret)) {
169 call->vret_var = cfg->vret_addr;
170 //g_assert_not_reached ();
172 } else if (mini_type_is_vtype (sig_ret)) {
173 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
174 MonoInst *loada;
176 temp->backend.is_pinvoke = sig->pinvoke;
179 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
180 * address of return value to increase optimization opportunities.
181 * Before vtype decomposition, the dreg of the call ins itself represents the
182 * fact the call modifies the return value. After decomposition, the call will
183 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
184 * will be transformed into an LDADDR.
186 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
187 loada->dreg = alloc_preg (cfg);
188 loada->inst_p0 = temp;
189 /* We reference the call too since call->dreg could change during optimization */
190 loada->inst_p1 = call;
191 MONO_ADD_INS (cfg->cbb, loada);
193 call->inst.dreg = temp->dreg;
195 call->vret_var = loada;
196 } else if (!MONO_TYPE_IS_VOID (sig_ret))
197 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
199 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
200 if (COMPILE_SOFT_FLOAT (cfg)) {
202 * If the call has a float argument, we would need to do an r8->r4 conversion using
203 * an icall, but that cannot be done during the call sequence since it would clobber
204 * the call registers + the stack. So we do it before emitting the call.
206 for (int i = 0; i < sig->param_count + sig->hasthis; ++i) {
207 MonoType *t;
208 MonoInst *in = call->args [i];
210 if (i >= sig->hasthis)
211 t = sig->params [i - sig->hasthis];
212 else
213 t = mono_get_int_type ();
214 t = mono_type_get_underlying_type (t);
216 if (!t->byref && t->type == MONO_TYPE_R4) {
217 MonoInst *iargs [1];
218 MonoInst *conv;
220 iargs [0] = in;
221 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
223 /* The result will be in an int vreg */
224 call->args [i] = conv;
228 #endif
230 call->need_unbox_trampoline = unbox_trampoline;
232 #ifdef ENABLE_LLVM
233 if (COMPILE_LLVM (cfg))
234 mono_llvm_emit_call (cfg, call);
235 else
236 mono_arch_emit_call (cfg, call);
237 #else
238 mono_arch_emit_call (cfg, call);
239 #endif
241 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
242 cfg->flags |= MONO_CFG_HAS_CALLS;
244 return call;
247 gboolean
248 mini_should_check_stack_pointer (MonoCompile *cfg)
250 // This logic is shared by mini_emit_calli_full and is_supported_tailcall,
251 // in order to compute tailcall_supported earlier. Alternatively it could be passed
252 // out from mini_emit_calli_full -- if it has not been copied around
253 // or decisions made based on it.
255 WrapperInfo *info;
257 return cfg->check_pinvoke_callconv &&
258 cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE &&
259 ((info = mono_marshal_get_wrapper_info (cfg->method))) &&
260 info->subtype == WRAPPER_SUBTYPE_PINVOKE;
263 static void
264 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
266 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
267 cfg->uses_rgctx_reg = TRUE;
268 call->rgctx_reg = TRUE;
269 #ifdef ENABLE_LLVM
270 call->rgctx_arg_reg = rgctx_reg;
271 #endif
274 /* Either METHOD or IMT_ARG needs to be set */
275 static void
276 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
278 int method_reg;
280 g_assert (method || imt_arg);
282 if (COMPILE_LLVM (cfg)) {
283 if (imt_arg) {
284 method_reg = alloc_preg (cfg);
285 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
286 } else {
287 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
288 method_reg = ins->dreg;
291 #ifdef ENABLE_LLVM
292 call->imt_arg_reg = method_reg;
293 #endif
294 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
295 return;
298 if (imt_arg) {
299 method_reg = alloc_preg (cfg);
300 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
301 } else {
302 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
303 method_reg = ins->dreg;
306 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
309 MonoInst*
310 mini_emit_calli_full (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr,
311 MonoInst *imt_arg, MonoInst *rgctx_arg, gboolean tailcall)
313 MonoCallInst *call;
314 MonoInst *ins;
315 int rgctx_reg = -1;
317 g_assert (!rgctx_arg || !imt_arg);
319 if (rgctx_arg) {
320 rgctx_reg = mono_alloc_preg (cfg);
321 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
324 const gboolean check_sp = mini_should_check_stack_pointer (cfg);
326 // Checking stack pointer requires running code after a function call, prevents tailcall.
327 // Caller needs to have decided that earlier.
328 g_assert (!check_sp || !tailcall);
330 if (check_sp) {
331 if (!cfg->stack_inbalance_var)
332 cfg->stack_inbalance_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
334 MONO_INST_NEW (cfg, ins, OP_GET_SP);
335 ins->dreg = cfg->stack_inbalance_var->dreg;
336 MONO_ADD_INS (cfg->cbb, ins);
339 call = mini_emit_call_args (cfg, sig, args, TRUE, FALSE, tailcall, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
341 call->inst.sreg1 = addr->dreg;
343 if (imt_arg)
344 emit_imt_argument (cfg, call, NULL, imt_arg);
346 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
348 if (check_sp) {
349 int sp_reg;
351 sp_reg = mono_alloc_preg (cfg);
353 MONO_INST_NEW (cfg, ins, OP_GET_SP);
354 ins->dreg = sp_reg;
355 MONO_ADD_INS (cfg->cbb, ins);
357 /* Restore the stack so we don't crash when throwing the exception */
358 MONO_INST_NEW (cfg, ins, OP_SET_SP);
359 ins->sreg1 = cfg->stack_inbalance_var->dreg;
360 MONO_ADD_INS (cfg->cbb, ins);
362 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
363 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
366 if (rgctx_arg)
367 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
369 return (MonoInst*)call;
372 MonoInst*
373 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
374 // Historical version without gboolean tailcall parameter.
376 return mini_emit_calli_full (cfg, sig, args, addr, imt_arg, rgctx_arg, FALSE);
379 static int
380 callvirt_to_call (int opcode)
382 switch (opcode) {
383 case OP_TAILCALL_MEMBASE:
384 return OP_TAILCALL;
385 case OP_CALL_MEMBASE:
386 return OP_CALL;
387 case OP_VOIDCALL_MEMBASE:
388 return OP_VOIDCALL;
389 case OP_FCALL_MEMBASE:
390 return OP_FCALL;
391 case OP_RCALL_MEMBASE:
392 return OP_RCALL;
393 case OP_VCALL_MEMBASE:
394 return OP_VCALL;
395 case OP_LCALL_MEMBASE:
396 return OP_LCALL;
397 default:
398 g_assert_not_reached ();
401 return -1;
404 static gboolean
405 can_enter_interp (MonoCompile *cfg, MonoMethod *method, gboolean virtual_)
407 if (method->wrapper_type)
408 return FALSE;
409 /* Virtual calls from corlib can go outside corlib */
410 if ((m_class_get_image (method->klass) == m_class_get_image (cfg->method->klass)) && !virtual_)
411 return FALSE;
413 /* See needs_extra_arg () in mini-llvm.c */
414 if (method->string_ctor)
415 return FALSE;
416 if (method->klass == mono_get_string_class () && (strstr (method->name, "memcpy") || strstr (method->name, "bzero")))
417 return FALSE;
419 /* Assume all calls outside the assembly can enter the interpreter */
420 return TRUE;
/*
 * mini_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS. A
 * non-NULL THIS_INS marks a virtual (callvirt) call site; IMT_ARG and
 * RGCTX_ARG are optional hidden arguments. The call instruction is
 * appended to the current basic block and returned. Handles remoting
 * wrappers, llvm-only/interp indirection, delegate Invoke fast paths,
 * devirtualization of final methods, and IMT/vtable dispatch.
 */
423 MonoInst*
424 mini_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tailcall,
425 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
427 #ifndef DISABLE_REMOTING
428 gboolean might_be_remote = FALSE;
429 #endif
430 gboolean virtual_ = this_ins != NULL;
431 int context_used;
432 MonoCallInst *call;
433 int rgctx_reg = 0;
434 gboolean need_unbox_trampoline;
/* Default to the method's declared signature. */
436 if (!sig)
437 sig = mono_method_signature_internal (method);
/* llvm-only interface calls must go through mini_emit_llvmonly_virtual_call. */
439 if (cfg->llvm_only && mono_class_is_interface (method->klass))
440 g_assert_not_reached ();
/* Copy the rgctx argument into a fresh vreg so it survives until the call. */
442 if (rgctx_arg) {
443 rgctx_reg = mono_alloc_preg (cfg);
444 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
447 if (method->string_ctor) {
448 /* Create the real signature */
449 /* FIXME: Cache these */
450 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
451 ctor_sig->ret = m_class_get_byval_arg (mono_defaults.string_class);
453 sig = ctor_sig;
456 context_used = mini_method_check_context_used (cfg, method);
458 #ifndef DISABLE_REMOTING
/* Candidates for transparent-proxy dispatch need the remoting
 * invoke-with-check wrapper. */
459 might_be_remote = this_ins && sig->hasthis &&
460 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
461 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
463 if (might_be_remote && context_used) {
464 MonoInst *addr;
466 g_assert (cfg->gshared);
468 addr = mini_emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
470 return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
472 #endif
474 if (cfg->llvm_only && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
475 return mini_emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
477 if (cfg->llvm_only && cfg->interp && !virtual_ && !tailcall && can_enter_interp (cfg, method, FALSE)) {
478 MonoInst *ftndesc = mini_emit_get_rgctx_method (cfg, -1, method, MONO_RGCTX_INFO_METHOD_FTNDESC);
480 /* Need wrappers for this signature to be able to enter interpreter */
481 cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
483 /* This call might need to enter the interpreter so make it indirect */
484 return mini_emit_llvmonly_calli (cfg, sig, args, ftndesc);
/* Object/interface methods may be invoked on boxed valuetypes, which
 * requires an unbox trampoline. */
487 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
489 call = mini_emit_call_args (cfg, sig, args, FALSE, virtual_, tailcall, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
491 #ifndef DISABLE_REMOTING
492 if (might_be_remote) {
493 ERROR_DECL (error);
494 call->method = mono_marshal_get_remoting_invoke_with_check (method, error);
495 mono_error_assert_ok (error);
496 } else
497 #endif
498 call->method = method;
499 call->inst.flags |= MONO_INST_HAS_METHOD;
500 call->inst.inst_left = this_ins;
502 // FIXME This has already been read in amd64 parameter construction.
503 // Fixing it generates incorrect code. CEE_JMP needs attention.
504 call->tailcall = tailcall;
506 if (virtual_) {
507 int vtable_reg, slot_reg, this_reg;
508 int offset;
510 this_reg = this_ins->dreg;
/* Delegate Invoke: call through delegate->invoke_impl instead of a vtable. */
512 if (!cfg->llvm_only && (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
513 MonoInst *dummy_use;
515 MONO_EMIT_NULL_CHECK (cfg, this_reg, FALSE);
517 /* Make a call to delegate->invoke_impl */
518 call->inst.inst_basereg = this_reg;
519 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
520 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
522 /* We must emit a dummy use here because the delegate trampoline will
523 replace the 'this' argument with the delegate target making this activation
524 no longer a root for the delegate.
525 This is an issue for delegates that target collectible code such as dynamic
526 methods of GC'able assemblies.
528 For a test case look into #667921.
530 FIXME: a dummy use is not the best way to do it as the local register allocator
531 will put it on a caller save register and spill it around the call.
532 Ideally, we would either put it on a callee save register or only do the store part.
*/
534 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
536 return (MonoInst*)call;
/* Devirtualization: non-virtual or final methods can be dispatched directly. */
539 if ((!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
540 (MONO_METHOD_IS_FINAL (method) &&
541 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
542 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
/*
544 * the method is not virtual, we just need to ensure this is not null
545 * and then we can call the method directly.
*/
547 #ifndef DISABLE_REMOTING
548 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
549 ERROR_DECL (error);
/*
551 * The check above ensures method is not gshared, this is needed since
552 * gshared methods can't have wrappers.
*/
554 method = call->method = mono_marshal_get_remoting_invoke_with_check (method, error);
555 mono_error_assert_ok (error);
557 #endif
559 virtual_ = FALSE;
560 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
/*
562 * the method is virtual, but we can statically dispatch since either
563 * it's class or the method itself are sealed.
564 * But first we need to ensure it's not a null reference.
*/
566 virtual_ = FALSE;
/* Direct dispatch still requires a null check on 'this'. */
569 if (!virtual_) {
570 if (!method->string_ctor)
571 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
574 if (!virtual_ && cfg->llvm_only && cfg->interp && !tailcall && can_enter_interp (cfg, method, FALSE)) {
575 MonoInst *ftndesc = mini_emit_get_rgctx_method (cfg, -1, method, MONO_RGCTX_INFO_METHOD_FTNDESC);
577 /* This call might need to enter the interpreter so make it indirect */
578 return mini_emit_llvmonly_calli (cfg, sig, args, ftndesc);
579 } else if (!virtual_) {
580 call->inst.opcode = callvirt_to_call (call->inst.opcode);
581 } else {
/* True virtual dispatch: load the vtable, then address the IMT or vtable slot. */
582 vtable_reg = alloc_preg (cfg);
583 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
584 if (mono_class_is_interface (method->klass)) {
585 guint32 imt_slot = mono_method_get_imt_slot (method);
586 emit_imt_argument (cfg, call, call->method, imt_arg);
587 slot_reg = vtable_reg;
/* IMT entries live at negative offsets from the vtable pointer. */
588 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
589 } else {
590 slot_reg = vtable_reg;
591 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
592 ((mono_method_get_vtable_index (method)) * (TARGET_SIZEOF_VOID_P));
/* Generic virtual methods on classes also dispatch through the IMT argument. */
593 if (imt_arg) {
594 g_assert (mono_method_signature_internal (method)->generic_param_count);
595 emit_imt_argument (cfg, call, call->method, imt_arg);
599 call->inst.sreg1 = slot_reg;
600 call->inst.inst_offset = offset;
601 call->is_virtual = TRUE;
605 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
607 if (rgctx_arg)
608 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
610 return (MonoInst*)call;
613 MonoInst*
614 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
616 return mini_emit_method_call_full (cfg, method, mono_method_signature_internal (method), FALSE, args, this_ins, NULL, NULL);
619 MonoInst*
620 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
621 MonoInst **args)
623 MonoCallInst *call;
625 g_assert (sig);
627 call = mini_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
628 call->fptr = func;
630 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
632 return (MonoInst*)call;
635 MonoInst*
636 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
638 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
640 g_assert (info);
642 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
646 * mini_emit_abs_call:
648 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
650 MonoInst*
651 mini_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
652 MonoMethodSignature *sig, MonoInst **args)
654 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
655 MonoInst *ins;
658 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
659 * handle it.
661 if (cfg->abs_patches == NULL)
662 cfg->abs_patches = g_hash_table_new (NULL, NULL);
663 g_hash_table_insert (cfg->abs_patches, ji, ji);
664 ins = mono_emit_native_call (cfg, ji, sig, args);
665 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
666 return ins;
/*
 * mini_emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call for llvm-only mode, where vtable and IMT
 * slots hold function descriptors (<addr, arg> pairs) instead of code
 * addresses/trampolines. Chooses among: a plain virtual call, a simple
 * interface call through an IMT thunk, a generic-virtual/variant-interface
 * call through a lazily-built IMT thunk, and a gsharedvt fallback resolved
 * entirely by an icall.
 */
669 MonoInst*
670 mini_emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
672 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline = NULL;
673 MonoInst *icall_args [16];
674 MonoInst *call_target, *ins, *vtable_ins;
675 int arg_reg, this_reg, vtable_reg;
676 gboolean is_iface = mono_class_is_interface (cmethod->klass);
677 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
678 gboolean variant_iface = FALSE;
679 guint32 slot;
680 int offset;
681 gboolean special_array_interface = m_class_is_array_special_interface (cmethod->klass);
683 if (cfg->interp && can_enter_interp (cfg, cmethod, TRUE))
684 /* Need wrappers for this signature to be able to enter interpreter */
685 cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, fsig);
/*
688 * In llvm-only mode, vtables contain function descriptors instead of
689 * method addresses/trampolines.
*/
691 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, FALSE);
693 if (is_iface)
694 slot = mono_method_get_imt_slot (cmethod);
695 else
696 slot = mono_method_get_vtable_index (cmethod);
698 this_reg = sp [0]->dreg;
700 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
701 variant_iface = TRUE;
/* Lazily cache the signature used to call IMT thunks (barrier orders the
 * publish of the fully initialized value). */
703 if (!helper_sig_llvmonly_imt_trampoline) {
704 MonoMethodSignature *tmp = mono_icall_sig_ptr_ptr_ptr;
705 mono_memory_barrier ();
706 helper_sig_llvmonly_imt_trampoline = tmp;
709 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
/*
711 * The simplest case, a normal virtual call.
*/
713 int slot_reg = alloc_preg (cfg);
714 int addr_reg = alloc_preg (cfg);
715 int arg_reg = alloc_preg (cfg);
716 MonoBasicBlock *non_null_bb;
718 vtable_reg = alloc_preg (cfg);
719 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
720 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * TARGET_SIZEOF_VOID_P);
722 /* Load the vtable slot, which contains a function descriptor. */
723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
725 NEW_BBLOCK (cfg, non_null_bb);
/* A NULL slot takes the slow path, which initializes it via an icall. */
727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
728 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
731 /* Slow path */
732 // FIXME: Make the wrapper use the preserveall cconv
733 // FIXME: Use one icall per slot for small slot numbers ?
734 icall_args [0] = vtable_ins;
735 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
736 /* Make the icall return the vtable slot value to save some code space */
737 ins = mono_emit_jit_icall (cfg, mini_llvmonly_init_vtable_slot, icall_args);
738 ins->dreg = slot_reg;
739 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
741 /* Fastpath */
742 MONO_START_BB (cfg, non_null_bb);
743 /* Load the address + arg from the vtable slot */
744 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
747 return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
750 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
/*
752 * A simple interface call
754 * We make a call through an imt slot to obtain the function descriptor we need to call.
755 * The imt slot contains a function descriptor for a runtime function + arg.
*/
757 int slot_reg = alloc_preg (cfg);
758 int addr_reg = alloc_preg (cfg);
759 int arg_reg = alloc_preg (cfg);
760 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
762 vtable_reg = alloc_preg (cfg);
763 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT entries live at negative offsets from the vtable pointer. */
764 offset = ((gint32)slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
/*
767 * The slot is already initialized when the vtable is created so there is no need
768 * to check it here.
*/
771 /* Load the imt slot, which contains a function descriptor. */
772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
774 /* Load the address + arg of the imt thunk from the imt slot */
775 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
776 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
/*
778 * IMT thunks in llvm-only mode are C functions which take an info argument
779 * plus the imt method and return the ftndesc to call.
*/
781 icall_args [0] = thunk_arg_ins;
782 icall_args [1] = mini_emit_get_rgctx_method (cfg, context_used,
783 cmethod, MONO_RGCTX_INFO_METHOD);
784 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
786 return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
789 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
/*
791 * This is similar to the interface case, the vtable slot points to an imt thunk which is
792 * dynamically extended as more instantiations are discovered.
793 * This handles generic virtual methods both on classes and interfaces.
*/
795 int slot_reg = alloc_preg (cfg);
796 int addr_reg = alloc_preg (cfg);
797 int arg_reg = alloc_preg (cfg);
798 int ftndesc_reg = alloc_preg (cfg);
799 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
800 MonoBasicBlock *slowpath_bb, *end_bb;
802 NEW_BBLOCK (cfg, slowpath_bb);
803 NEW_BBLOCK (cfg, end_bb);
805 vtable_reg = alloc_preg (cfg);
806 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
807 if (is_iface)
808 offset = ((gint32)slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
809 else
810 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * TARGET_SIZEOF_VOID_P);
812 /* Load the slot, which contains a function descriptor. */
813 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
815 /* These slots are not initialized, so fall back to the slow path until they are initialized */
816 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
820 /* Fastpath */
821 /* Same as with iface calls */
822 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
823 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
824 icall_args [0] = thunk_arg_ins;
825 icall_args [1] = mini_emit_get_rgctx_method (cfg, context_used,
826 cmethod, MONO_RGCTX_INFO_METHOD);
827 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
828 ftndesc_ins->dreg = ftndesc_reg;
/*
830 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
831 * they don't know about yet. Fall back to the slowpath in that case.
*/
833 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
834 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
838 /* Slowpath */
839 MONO_START_BB (cfg, slowpath_bb);
840 icall_args [0] = vtable_ins;
841 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
842 icall_args [2] = mini_emit_get_rgctx_method (cfg, context_used,
843 cmethod, MONO_RGCTX_INFO_METHOD);
844 if (is_iface)
845 ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_generic_virtual_iface_call, icall_args);
846 else
847 ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_generic_virtual_call, icall_args);
848 ftndesc_ins->dreg = ftndesc_reg;
849 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
851 /* Common case */
852 MONO_START_BB (cfg, end_bb);
853 return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/*
857 * Non-optimized cases
*/
859 icall_args [0] = sp [0];
860 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
862 icall_args [2] = mini_emit_get_rgctx_method (cfg, context_used,
863 cmethod, MONO_RGCTX_INFO_METHOD);
/* NOTE(review): arg_reg's address is passed to the resolver icall --
 * presumably an out parameter for the extra argument; verify against the
 * mini_llvmonly_resolve_*_gsharedvt implementations. */
865 arg_reg = alloc_preg (cfg);
866 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
867 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, mono_get_int_type ());
869 g_assert (is_gsharedvt);
870 if (is_iface)
871 call_target = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_iface_call_gsharedvt, icall_args);
872 else
873 call_target = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_vcall_gsharedvt, icall_args);
/*
876 * Pass the extra argument even if the callee doesn't receive it, most
877 * calling conventions allow this.
*/
879 return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
882 static MonoMethodSignature*
883 sig_to_rgctx_sig (MonoMethodSignature *sig)
885 // FIXME: memory allocation
886 MonoMethodSignature *res;
887 int i;
889 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
890 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
891 res->param_count = sig->param_count + 1;
892 for (i = 0; i < sig->param_count; ++i)
893 res->params [i] = sig->params [i];
894 res->params [sig->param_count] = m_class_get_this_arg (mono_defaults.int_class);
895 return res;
898 /* Make an indirect call to FSIG passing an additional argument */
899 MonoInst*
900 mini_emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
902 MonoMethodSignature *csig;
903 MonoInst *args_buf [16];
904 MonoInst **args;
905 int i, pindex, tmp_reg;
907 /* Make a call with an rgctx/extra arg */
908 if (fsig->param_count + 2 < 16)
909 args = args_buf;
910 else
911 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
912 pindex = 0;
913 if (fsig->hasthis)
914 args [pindex ++] = orig_args [0];
915 for (i = 0; i < fsig->param_count; ++i)
916 args [pindex ++] = orig_args [fsig->hasthis + i];
917 tmp_reg = alloc_preg (cfg);
918 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
919 csig = sig_to_rgctx_sig (fsig);
920 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
923 /* Emit an indirect call to the function descriptor ADDR */
924 MonoInst*
925 mini_emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
926 // FIXME no tailcall support
928 int addr_reg, arg_reg;
929 MonoInst *call_target;
931 g_assert (cfg->llvm_only);
934 * addr points to a <addr, arg> pair, load both of them, and
935 * make a call to addr, passing arg as an extra arg.
937 addr_reg = alloc_preg (cfg);
938 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
939 arg_reg = alloc_preg (cfg);
940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, TARGET_SIZEOF_VOID_P);
942 return mini_emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
944 #else
945 MONO_EMPTY_SOURCE_FILE (calls);
946 #endif