Propagate error in mono_unicode_to_external (#14879)
[mono-project.git] / mono / mini / calls.c
blob6e9836ba7bb5dcf82e468cec6306841b0df85691
1 /**
2 * \file
3 */
5 #include <config.h>
6 #include <mono/utils/mono-compiler.h>
8 #ifndef DISABLE_JIT
10 #include "mini.h"
11 #include "ir-emit.h"
12 #include "mini-runtime.h"
13 #include "llvmonly-runtime.h"
14 #include "mini-llvm.h"
15 #include "jit-icalls.h"
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/class-abi-details.h>
18 #include <mono/utils/mono-utils-debug.h>
19 #include "mono/metadata/icall-signatures.h"
// Debug aids for tailcall development, only honored when a native debugger
// is attached (see mini_emit_call_args). Compile-time FALSE by default.
static const gboolean debug_tailcall_break_compile = FALSE; // break in method_to_ir
static const gboolean debug_tailcall_break_run = FALSE; // insert breakpoint in generated code
24 MonoJumpInfoTarget
25 mono_call_to_patch (MonoCallInst *call)
27 MonoJumpInfoTarget patch;
28 MonoJitICallId jit_icall_id;
30 // This is similar to amd64 emit_call.
32 if (call->inst.flags & MONO_INST_HAS_METHOD) {
33 patch.type = MONO_PATCH_INFO_METHOD;
34 patch.target = call->method;
35 } else if ((jit_icall_id = call->jit_icall_id)) {
36 patch.type = MONO_PATCH_INFO_JIT_ICALL_ID;
37 patch.target = GUINT_TO_POINTER (jit_icall_id);
38 } else {
39 patch.type = MONO_PATCH_INFO_ABS;
40 patch.target = call->fptr;
42 return patch;
45 void
46 mono_call_add_patch_info (MonoCompile *cfg, MonoCallInst *call, int ip)
48 const MonoJumpInfoTarget patch = mono_call_to_patch (call);
49 mono_add_patch_info (cfg, ip, patch.type, patch.target);
52 void
53 mini_test_tailcall (MonoCompile *cfg, gboolean tailcall)
55 // A lot of tests say "tailcall" throughout their verbose output.
56 // "tailcalllog" is more searchable.
58 // Do not change "tailcalllog" here without changing other places, e.g. tests that search for it.
60 g_assertf (tailcall || !mini_get_debug_options ()->test_tailcall_require, "tailcalllog fail from %s", cfg->method->name);
61 mono_tailcall_print ("tailcalllog %s from %s\n", tailcall ? "success" : "fail", cfg->method->name);
64 void
65 mini_emit_tailcall_parameters (MonoCompile *cfg, MonoMethodSignature *sig)
67 // OP_TAILCALL_PARAMETER helps compute the size of code, in order
68 // to size branches around OP_TAILCALL_[REG,MEMBASE].
70 // The actual bytes are output from OP_TAILCALL_[REG,MEMBASE].
71 // OP_TAILCALL_PARAMETER is an overestimate because typically
72 // many parameters are in registers.
74 const int n = sig->param_count + (sig->hasthis ? 1 : 0);
75 for (int i = 0; i < n; ++i) {
76 MonoInst *ins;
77 MONO_INST_NEW (cfg, ins, OP_TAILCALL_PARAMETER);
78 MONO_ADD_INS (cfg->cbb, ins);
83 static int
84 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
86 handle_enum:
87 type = mini_get_underlying_type (type);
88 switch (type->type) {
89 case MONO_TYPE_VOID:
90 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
91 case MONO_TYPE_I1:
92 case MONO_TYPE_U1:
93 case MONO_TYPE_I2:
94 case MONO_TYPE_U2:
95 case MONO_TYPE_I4:
96 case MONO_TYPE_U4:
97 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
98 case MONO_TYPE_I:
99 case MONO_TYPE_U:
100 case MONO_TYPE_PTR:
101 case MONO_TYPE_FNPTR:
102 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
103 case MONO_TYPE_CLASS:
104 case MONO_TYPE_STRING:
105 case MONO_TYPE_OBJECT:
106 case MONO_TYPE_SZARRAY:
107 case MONO_TYPE_ARRAY:
108 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
109 case MONO_TYPE_I8:
110 case MONO_TYPE_U8:
111 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
112 case MONO_TYPE_R4:
113 if (cfg->r4fp)
114 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
115 else
116 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
117 case MONO_TYPE_R8:
118 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
119 case MONO_TYPE_VALUETYPE:
120 if (m_class_is_enumtype (type->data.klass)) {
121 type = mono_class_enum_basetype_internal (type->data.klass);
122 goto handle_enum;
123 } else
124 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
125 case MONO_TYPE_TYPEDBYREF:
126 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
127 case MONO_TYPE_GENERICINST:
128 type = m_class_get_byval_arg (type->data.generic_class->container_class);
129 goto handle_enum;
130 case MONO_TYPE_VAR:
131 case MONO_TYPE_MVAR:
132 /* gsharedvt */
133 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
134 default:
135 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
137 return -1;
140 MonoCallInst *
141 mini_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
142 MonoInst **args, gboolean calli, gboolean virtual_, gboolean tailcall,
143 gboolean rgctx, gboolean unbox_trampoline, MonoMethod *target)
145 MonoType *sig_ret;
146 MonoCallInst *call;
148 cfg->has_calls = TRUE;
150 if (tailcall && cfg->llvm_only) {
151 // FIXME tailcall should not be changed this late.
152 // FIXME It really should not be changed due to llvm_only.
153 // Accuracy is presently available MONO_IS_TAILCALL_OPCODE (call).
154 tailcall = FALSE;
155 mono_tailcall_print ("losing tailcall in %s due to llvm_only\n", cfg->method->name);
156 mini_test_tailcall (cfg, FALSE);
159 if (tailcall && (debug_tailcall_break_compile || debug_tailcall_break_run)
160 && mono_is_usermode_native_debugger_present ()) {
162 if (debug_tailcall_break_compile)
163 G_BREAKPOINT ();
165 if (tailcall && debug_tailcall_break_run) { // Can change tailcall in debugger.
166 MonoInst *brk;
167 MONO_INST_NEW (cfg, brk, OP_BREAK);
168 MONO_ADD_INS (cfg->cbb, brk);
172 if (tailcall) {
173 mini_profiler_emit_tail_call (cfg, target);
174 mini_emit_tailcall_parameters (cfg, sig);
175 MONO_INST_NEW_CALL (cfg, call, calli ? OP_TAILCALL_REG : virtual_ ? OP_TAILCALL_MEMBASE : OP_TAILCALL);
176 } else
177 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
179 call->args = args;
180 call->signature = sig;
181 call->rgctx_reg = rgctx;
182 sig_ret = mini_get_underlying_type (sig->ret);
184 mini_type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
186 if (tailcall) {
187 if (mini_type_is_vtype (sig_ret)) {
188 call->vret_var = cfg->vret_addr;
189 //g_assert_not_reached ();
191 } else if (mini_type_is_vtype (sig_ret)) {
192 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
193 MonoInst *loada;
195 temp->backend.is_pinvoke = sig->pinvoke;
198 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
199 * address of return value to increase optimization opportunities.
200 * Before vtype decomposition, the dreg of the call ins itself represents the
201 * fact the call modifies the return value. After decomposition, the call will
202 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
203 * will be transformed into an LDADDR.
205 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
206 loada->dreg = alloc_preg (cfg);
207 loada->inst_p0 = temp;
208 /* We reference the call too since call->dreg could change during optimization */
209 loada->inst_p1 = call;
210 MONO_ADD_INS (cfg->cbb, loada);
212 call->inst.dreg = temp->dreg;
214 call->vret_var = loada;
215 } else if (!MONO_TYPE_IS_VOID (sig_ret))
216 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
218 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
219 if (COMPILE_SOFT_FLOAT (cfg)) {
221 * If the call has a float argument, we would need to do an r8->r4 conversion using
222 * an icall, but that cannot be done during the call sequence since it would clobber
223 * the call registers + the stack. So we do it before emitting the call.
225 for (int i = 0; i < sig->param_count + sig->hasthis; ++i) {
226 MonoType *t;
227 MonoInst *in = call->args [i];
229 if (i >= sig->hasthis)
230 t = sig->params [i - sig->hasthis];
231 else
232 t = mono_get_int_type ();
233 t = mono_type_get_underlying_type (t);
235 if (!t->byref && t->type == MONO_TYPE_R4) {
236 MonoInst *iargs [1];
237 MonoInst *conv;
239 iargs [0] = in;
240 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
242 /* The result will be in an int vreg */
243 call->args [i] = conv;
247 #endif
249 call->need_unbox_trampoline = unbox_trampoline;
251 #ifdef ENABLE_LLVM
252 if (COMPILE_LLVM (cfg))
253 mono_llvm_emit_call (cfg, call);
254 else
255 mono_arch_emit_call (cfg, call);
256 #else
257 mono_arch_emit_call (cfg, call);
258 #endif
260 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
261 cfg->flags |= MONO_CFG_HAS_CALLS;
263 return call;
266 gboolean
267 mini_should_check_stack_pointer (MonoCompile *cfg)
269 // This logic is shared by mini_emit_calli_full and is_supported_tailcall,
270 // in order to compute tailcall_supported earlier. Alternatively it could be passed
271 // out from mini_emit_calli_full -- if it has not been copied around
272 // or decisions made based on it.
274 WrapperInfo *info;
276 return cfg->check_pinvoke_callconv &&
277 cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE &&
278 ((info = mono_marshal_get_wrapper_info (cfg->method))) &&
279 info->subtype == WRAPPER_SUBTYPE_PINVOKE;
282 static void
283 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
285 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
286 cfg->uses_rgctx_reg = TRUE;
287 call->rgctx_reg = TRUE;
288 #ifdef ENABLE_LLVM
289 call->rgctx_arg_reg = rgctx_reg;
290 #endif
293 /* Either METHOD or IMT_ARG needs to be set */
294 static void
295 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
297 int method_reg;
299 g_assert (method || imt_arg);
301 if (COMPILE_LLVM (cfg)) {
302 if (imt_arg) {
303 method_reg = alloc_preg (cfg);
304 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
305 } else {
306 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
307 method_reg = ins->dreg;
310 #ifdef ENABLE_LLVM
311 call->imt_arg_reg = method_reg;
312 #endif
313 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
314 return;
317 if (imt_arg) {
318 method_reg = alloc_preg (cfg);
319 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
320 } else {
321 MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
322 method_reg = ins->dreg;
325 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
328 MonoInst*
329 mini_emit_calli_full (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr,
330 MonoInst *imt_arg, MonoInst *rgctx_arg, gboolean tailcall)
332 MonoCallInst *call;
333 MonoInst *ins;
334 int rgctx_reg = -1;
336 g_assert (!rgctx_arg || !imt_arg);
338 if (rgctx_arg) {
339 rgctx_reg = mono_alloc_preg (cfg);
340 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
343 const gboolean check_sp = mini_should_check_stack_pointer (cfg);
345 // Checking stack pointer requires running code after a function call, prevents tailcall.
346 // Caller needs to have decided that earlier.
347 g_assert (!check_sp || !tailcall);
349 if (check_sp) {
350 if (!cfg->stack_inbalance_var)
351 cfg->stack_inbalance_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
353 MONO_INST_NEW (cfg, ins, OP_GET_SP);
354 ins->dreg = cfg->stack_inbalance_var->dreg;
355 MONO_ADD_INS (cfg->cbb, ins);
358 call = mini_emit_call_args (cfg, sig, args, TRUE, FALSE, tailcall, rgctx_arg ? TRUE : FALSE, FALSE, NULL);
360 call->inst.sreg1 = addr->dreg;
362 if (imt_arg)
363 emit_imt_argument (cfg, call, NULL, imt_arg);
365 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
367 if (check_sp) {
368 int sp_reg;
370 sp_reg = mono_alloc_preg (cfg);
372 MONO_INST_NEW (cfg, ins, OP_GET_SP);
373 ins->dreg = sp_reg;
374 MONO_ADD_INS (cfg->cbb, ins);
376 /* Restore the stack so we don't crash when throwing the exception */
377 MONO_INST_NEW (cfg, ins, OP_SET_SP);
378 ins->sreg1 = cfg->stack_inbalance_var->dreg;
379 MONO_ADD_INS (cfg->cbb, ins);
381 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
382 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
385 if (rgctx_arg)
386 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
388 return (MonoInst*)call;
391 MonoInst*
392 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
393 // Historical version without gboolean tailcall parameter.
395 return mini_emit_calli_full (cfg, sig, args, addr, imt_arg, rgctx_arg, FALSE);
398 static int
399 callvirt_to_call (int opcode)
401 switch (opcode) {
402 case OP_TAILCALL_MEMBASE:
403 return OP_TAILCALL;
404 case OP_CALL_MEMBASE:
405 return OP_CALL;
406 case OP_VOIDCALL_MEMBASE:
407 return OP_VOIDCALL;
408 case OP_FCALL_MEMBASE:
409 return OP_FCALL;
410 case OP_RCALL_MEMBASE:
411 return OP_RCALL;
412 case OP_VCALL_MEMBASE:
413 return OP_VCALL;
414 case OP_LCALL_MEMBASE:
415 return OP_LCALL;
416 default:
417 g_assert_not_reached ();
420 return -1;
423 static gboolean
424 can_enter_interp (MonoCompile *cfg, MonoMethod *method, gboolean virtual_)
426 if (method->wrapper_type)
427 return FALSE;
428 /* Virtual calls from corlib can go outside corlib */
429 if ((m_class_get_image (method->klass) == m_class_get_image (cfg->method->klass)) && !virtual_)
430 return FALSE;
432 /* See needs_extra_arg () in mini-llvm.c */
433 if (method->string_ctor)
434 return FALSE;
435 if (method->klass == mono_get_string_class () && (strstr (method->name, "memcpy") || strstr (method->name, "bzero")))
436 return FALSE;
438 /* Assume all calls outside the assembly can enter the interpreter */
439 return TRUE;
442 MonoInst*
443 mini_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tailcall,
444 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
446 #ifndef DISABLE_REMOTING
447 gboolean might_be_remote = FALSE;
448 #endif
449 gboolean virtual_ = this_ins != NULL;
450 int context_used;
451 MonoCallInst *call;
452 int rgctx_reg = 0;
453 gboolean need_unbox_trampoline;
455 if (!sig)
456 sig = mono_method_signature_internal (method);
458 if (cfg->llvm_only && mono_class_is_interface (method->klass))
459 g_assert_not_reached ();
461 if (rgctx_arg) {
462 rgctx_reg = mono_alloc_preg (cfg);
463 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
466 if (method->string_ctor) {
467 /* Create the real signature */
468 /* FIXME: Cache these */
469 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
470 ctor_sig->ret = m_class_get_byval_arg (mono_defaults.string_class);
472 sig = ctor_sig;
475 context_used = mini_method_check_context_used (cfg, method);
477 #ifndef DISABLE_REMOTING
478 might_be_remote = this_ins && sig->hasthis &&
479 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
480 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
482 if (might_be_remote && context_used) {
483 MonoInst *addr;
485 g_assert (cfg->gshared);
487 addr = mini_emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
489 return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
491 #endif
493 if (cfg->llvm_only && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
494 return mini_emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
496 if (cfg->llvm_only && cfg->interp && !virtual_ && !tailcall && can_enter_interp (cfg, method, FALSE)) {
497 MonoInst *ftndesc = mini_emit_get_rgctx_method (cfg, -1, method, MONO_RGCTX_INFO_METHOD_FTNDESC);
499 /* Need wrappers for this signature to be able to enter interpreter */
500 cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, sig);
502 /* This call might need to enter the interpreter so make it indirect */
503 return mini_emit_llvmonly_calli (cfg, sig, args, ftndesc);
506 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
508 call = mini_emit_call_args (cfg, sig, args, FALSE, virtual_, tailcall, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline, method);
510 #ifndef DISABLE_REMOTING
511 if (might_be_remote) {
512 ERROR_DECL (error);
513 call->method = mono_marshal_get_remoting_invoke_with_check (method, error);
514 mono_error_assert_ok (error);
515 } else
516 #endif
517 call->method = method;
518 call->inst.flags |= MONO_INST_HAS_METHOD;
519 call->inst.inst_left = this_ins;
521 // FIXME This has already been read in amd64 parameter construction.
522 // Fixing it generates incorrect code. CEE_JMP needs attention.
523 call->tailcall = tailcall;
525 if (virtual_) {
526 int vtable_reg, slot_reg, this_reg;
527 int offset;
529 this_reg = this_ins->dreg;
531 if (!cfg->llvm_only && (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
532 MonoInst *dummy_use;
534 MONO_EMIT_NULL_CHECK (cfg, this_reg, FALSE);
536 /* Make a call to delegate->invoke_impl */
537 call->inst.inst_basereg = this_reg;
538 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
539 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
541 /* We must emit a dummy use here because the delegate trampoline will
542 replace the 'this' argument with the delegate target making this activation
543 no longer a root for the delegate.
544 This is an issue for delegates that target collectible code such as dynamic
545 methods of GC'able assemblies.
547 For a test case look into #667921.
549 FIXME: a dummy use is not the best way to do it as the local register allocator
550 will put it on a caller save register and spill it around the call.
551 Ideally, we would either put it on a callee save register or only do the store part.
553 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
555 return (MonoInst*)call;
558 if ((!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
559 (MONO_METHOD_IS_FINAL (method) &&
560 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
561 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
563 * the method is not virtual, we just need to ensure this is not null
564 * and then we can call the method directly.
566 #ifndef DISABLE_REMOTING
567 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
568 ERROR_DECL (error);
570 * The check above ensures method is not gshared, this is needed since
571 * gshared methods can't have wrappers.
573 method = call->method = mono_marshal_get_remoting_invoke_with_check (method, error);
574 mono_error_assert_ok (error);
576 #endif
578 virtual_ = FALSE;
579 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
581 * the method is virtual, but we can statically dispatch since either
582 * it's class or the method itself are sealed.
583 * But first we need to ensure it's not a null reference.
585 virtual_ = FALSE;
588 if (!virtual_) {
589 if (!method->string_ctor)
590 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
593 if (!virtual_ && cfg->llvm_only && cfg->interp && !tailcall && can_enter_interp (cfg, method, FALSE)) {
594 MonoInst *ftndesc = mini_emit_get_rgctx_method (cfg, -1, method, MONO_RGCTX_INFO_METHOD_FTNDESC);
596 /* This call might need to enter the interpreter so make it indirect */
597 return mini_emit_llvmonly_calli (cfg, sig, args, ftndesc);
598 } else if (!virtual_) {
599 call->inst.opcode = callvirt_to_call (call->inst.opcode);
600 } else {
601 vtable_reg = alloc_preg (cfg);
602 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
603 if (mono_class_is_interface (method->klass)) {
604 guint32 imt_slot = mono_method_get_imt_slot (method);
605 emit_imt_argument (cfg, call, call->method, imt_arg);
606 slot_reg = vtable_reg;
607 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
608 } else {
609 slot_reg = vtable_reg;
610 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
611 ((mono_method_get_vtable_index (method)) * (TARGET_SIZEOF_VOID_P));
612 if (imt_arg) {
613 g_assert (mono_method_signature_internal (method)->generic_param_count);
614 emit_imt_argument (cfg, call, call->method, imt_arg);
618 call->inst.sreg1 = slot_reg;
619 call->inst.inst_offset = offset;
620 call->is_virtual = TRUE;
624 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
626 if (rgctx_arg)
627 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
629 return (MonoInst*)call;
632 MonoInst*
633 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
635 return mini_emit_method_call_full (cfg, method, mono_method_signature_internal (method), FALSE, args, this_ins, NULL, NULL);
638 static
639 MonoInst*
640 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
641 MonoInst **args)
643 MonoCallInst *call;
645 g_assert (sig);
647 call = mini_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE, NULL);
648 call->fptr = func;
650 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
652 return (MonoInst*)call;
655 MonoInst*
656 mono_emit_jit_icall_id (MonoCompile *cfg, MonoJitICallId jit_icall_id, MonoInst **args)
658 MonoJitICallInfo *info = mono_find_jit_icall_info (jit_icall_id);
660 MonoCallInst *call = (MonoCallInst *)mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
662 call->jit_icall_id = jit_icall_id;
664 return (MonoInst*)call;
668 * mini_emit_abs_call:
670 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
672 MonoInst*
673 mini_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
674 MonoMethodSignature *sig, MonoInst **args)
676 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
677 MonoInst *ins;
680 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
681 * handle it.
682 * FIXME: Is the abs_patches hashtable avoidable?
683 * Such as by putting the patch info in the call instruction?
685 if (cfg->abs_patches == NULL)
686 cfg->abs_patches = g_hash_table_new (NULL, NULL);
687 g_hash_table_insert (cfg->abs_patches, ji, ji);
688 ins = mono_emit_native_call (cfg, ji, sig, args);
689 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
690 return ins;
693 MonoInst*
694 mini_emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
696 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline = NULL;
697 MonoInst *icall_args [16];
698 MonoInst *call_target, *ins, *vtable_ins;
699 int arg_reg, this_reg, vtable_reg;
700 gboolean is_iface = mono_class_is_interface (cmethod->klass);
701 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
702 gboolean variant_iface = FALSE;
703 guint32 slot;
704 int offset;
705 gboolean special_array_interface = m_class_is_array_special_interface (cmethod->klass);
707 if (cfg->interp && can_enter_interp (cfg, cmethod, TRUE))
708 /* Need wrappers for this signature to be able to enter interpreter */
709 cfg->interp_in_signatures = g_slist_prepend_mempool (cfg->mempool, cfg->interp_in_signatures, fsig);
712 * In llvm-only mode, vtables contain function descriptors instead of
713 * method addresses/trampolines.
715 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, FALSE);
717 if (is_iface)
718 slot = mono_method_get_imt_slot (cmethod);
719 else
720 slot = mono_method_get_vtable_index (cmethod);
722 this_reg = sp [0]->dreg;
724 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
725 variant_iface = TRUE;
727 if (!helper_sig_llvmonly_imt_trampoline) {
728 MonoMethodSignature *tmp = mono_icall_sig_ptr_ptr_ptr;
729 mono_memory_barrier ();
730 helper_sig_llvmonly_imt_trampoline = tmp;
733 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
735 * The simplest case, a normal virtual call.
737 int slot_reg = alloc_preg (cfg);
738 int addr_reg = alloc_preg (cfg);
739 int arg_reg = alloc_preg (cfg);
740 MonoBasicBlock *non_null_bb;
742 vtable_reg = alloc_preg (cfg);
743 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
744 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * TARGET_SIZEOF_VOID_P);
746 /* Load the vtable slot, which contains a function descriptor. */
747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
749 NEW_BBLOCK (cfg, non_null_bb);
751 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
752 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
753 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
755 /* Slow path */
756 // FIXME: Make the wrapper use the preserveall cconv
757 // FIXME: Use one icall per slot for small slot numbers ?
758 icall_args [0] = vtable_ins;
759 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
760 /* Make the icall return the vtable slot value to save some code space */
761 ins = mono_emit_jit_icall (cfg, mini_llvmonly_init_vtable_slot, icall_args);
762 ins->dreg = slot_reg;
763 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
765 /* Fastpath */
766 MONO_START_BB (cfg, non_null_bb);
767 /* Load the address + arg from the vtable slot */
768 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
771 return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
774 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
776 * A simple interface call
778 * We make a call through an imt slot to obtain the function descriptor we need to call.
779 * The imt slot contains a function descriptor for a runtime function + arg.
781 int slot_reg = alloc_preg (cfg);
782 int addr_reg = alloc_preg (cfg);
783 int arg_reg = alloc_preg (cfg);
784 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
786 vtable_reg = alloc_preg (cfg);
787 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
788 offset = ((gint32)slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
791 * The slot is already initialized when the vtable is created so there is no need
792 * to check it here.
795 /* Load the imt slot, which contains a function descriptor. */
796 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
798 /* Load the address + arg of the imt thunk from the imt slot */
799 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
800 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
802 * IMT thunks in llvm-only mode are C functions which take an info argument
803 * plus the imt method and return the ftndesc to call.
805 icall_args [0] = thunk_arg_ins;
806 icall_args [1] = mini_emit_get_rgctx_method (cfg, context_used,
807 cmethod, MONO_RGCTX_INFO_METHOD);
808 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
810 return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
813 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
815 * This is similar to the interface case, the vtable slot points to an imt thunk which is
816 * dynamically extended as more instantiations are discovered.
817 * This handles generic virtual methods both on classes and interfaces.
819 int slot_reg = alloc_preg (cfg);
820 int addr_reg = alloc_preg (cfg);
821 int arg_reg = alloc_preg (cfg);
822 int ftndesc_reg = alloc_preg (cfg);
823 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
824 MonoBasicBlock *slowpath_bb, *end_bb;
826 NEW_BBLOCK (cfg, slowpath_bb);
827 NEW_BBLOCK (cfg, end_bb);
829 vtable_reg = alloc_preg (cfg);
830 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
831 if (is_iface)
832 offset = ((gint32)slot - MONO_IMT_SIZE) * TARGET_SIZEOF_VOID_P;
833 else
834 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * TARGET_SIZEOF_VOID_P);
836 /* Load the slot, which contains a function descriptor. */
837 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
839 /* These slots are not initialized, so fall back to the slow path until they are initialized */
840 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
841 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
842 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
844 /* Fastpath */
845 /* Same as with iface calls */
846 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
847 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, TARGET_SIZEOF_VOID_P);
848 icall_args [0] = thunk_arg_ins;
849 icall_args [1] = mini_emit_get_rgctx_method (cfg, context_used,
850 cmethod, MONO_RGCTX_INFO_METHOD);
851 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
852 ftndesc_ins->dreg = ftndesc_reg;
854 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
855 * they don't know about yet. Fall back to the slowpath in that case.
857 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
858 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
860 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
862 /* Slowpath */
863 MONO_START_BB (cfg, slowpath_bb);
864 icall_args [0] = vtable_ins;
865 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
866 icall_args [2] = mini_emit_get_rgctx_method (cfg, context_used,
867 cmethod, MONO_RGCTX_INFO_METHOD);
868 if (is_iface)
869 ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_generic_virtual_iface_call, icall_args);
870 else
871 ftndesc_ins = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_generic_virtual_call, icall_args);
872 ftndesc_ins->dreg = ftndesc_reg;
873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
875 /* Common case */
876 MONO_START_BB (cfg, end_bb);
877 return mini_emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
881 * Non-optimized cases
883 icall_args [0] = sp [0];
884 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
886 icall_args [2] = mini_emit_get_rgctx_method (cfg, context_used,
887 cmethod, MONO_RGCTX_INFO_METHOD);
889 arg_reg = alloc_preg (cfg);
890 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
891 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, mono_get_int_type ());
893 g_assert (is_gsharedvt);
894 if (is_iface)
895 call_target = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_iface_call_gsharedvt, icall_args);
896 else
897 call_target = mono_emit_jit_icall (cfg, mini_llvmonly_resolve_vcall_gsharedvt, icall_args);
900 * Pass the extra argument even if the callee doesn't receive it, most
901 * calling conventions allow this.
903 return mini_emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
906 static MonoMethodSignature*
907 sig_to_rgctx_sig (MonoMethodSignature *sig)
909 // FIXME: memory allocation
910 MonoMethodSignature *res;
911 int i;
913 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
914 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
915 res->param_count = sig->param_count + 1;
916 for (i = 0; i < sig->param_count; ++i)
917 res->params [i] = sig->params [i];
918 res->params [sig->param_count] = m_class_get_this_arg (mono_defaults.int_class);
919 return res;
922 /* Make an indirect call to FSIG passing an additional argument */
923 MonoInst*
924 mini_emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
926 MonoMethodSignature *csig;
927 MonoInst *args_buf [16];
928 MonoInst **args;
929 int i, pindex, tmp_reg;
931 /* Make a call with an rgctx/extra arg */
932 if (fsig->param_count + 2 < 16)
933 args = args_buf;
934 else
935 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
936 pindex = 0;
937 if (fsig->hasthis)
938 args [pindex ++] = orig_args [0];
939 for (i = 0; i < fsig->param_count; ++i)
940 args [pindex ++] = orig_args [fsig->hasthis + i];
941 tmp_reg = alloc_preg (cfg);
942 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
943 csig = sig_to_rgctx_sig (fsig);
944 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
947 /* Emit an indirect call to the function descriptor ADDR */
948 MonoInst*
949 mini_emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
950 // FIXME no tailcall support
952 int addr_reg, arg_reg;
953 MonoInst *call_target;
955 g_assert (cfg->llvm_only);
958 * addr points to a <addr, arg> pair, load both of them, and
959 * make a call to addr, passing arg as an extra arg.
961 addr_reg = alloc_preg (cfg);
962 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
963 arg_reg = alloc_preg (cfg);
964 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, TARGET_SIZEOF_VOID_P);
966 return mini_emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
968 #else
969 MONO_EMPTY_SOURCE_FILE (calls);
970 #endif