/*
 * Imported from the mono-project repository: mono/mini/method-to-ir.c
 * (snapshot at commit "[interp] Remove varargs from InterpFrame and recompute it instead (#16598)",
 *  blob 3094161735581d5bdf3be75d1569d9902f0be2b7)
 */
/**
 * \file
 * Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include "mini.h"

#ifndef DISABLE_JIT

#include <signal.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <math.h>
#include <string.h>
#include <ctype.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif

#include <mono/utils/memcheck.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/exception-internals.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/metadata/mono-basic-block.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/mono-utils-debug.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/icall-decl.h>
#include "mono/metadata/icall-signatures.h"

#include "trace.h"

#include "ir-emit.h"

#include "jit-icalls.h"
#include "jit.h"
#include "debugger-agent.h"
#include "seq-points.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#include "llvmonly-runtime.h"
/* Cost weights used by the inliner's heuristics. */
#define BRANCH_COST 10
#define CALL_COST 10
/* Used for the JIT */
#define INLINE_LENGTH_LIMIT 20
/*
 * The aot and jit inline limits should be different,
 * since aot sees the whole program so we can let opt inline methods for us,
 * while the jit only sees one method, so we have to inline things ourselves.
 */
/* Used by LLVM AOT */
#define LLVM_AOT_INLINE_LENGTH_LIMIT 30
/* Used by the LLVM JIT */
#define LLVM_JIT_INLINE_LENGTH_LIMIT 100
103 static const gboolean debug_tailcall = FALSE; // logging
104 static const gboolean debug_tailcall_try_all = FALSE; // consider any call followed by ret
106 gboolean
107 mono_tailcall_print_enabled (void)
109 return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL);
/*
 * mono_tailcall_print:
 *
 *   printf-style logging helper for tailcall diagnostics. No-op unless
 * mono_tailcall_print_enabled () returns TRUE.
 */
void
mono_tailcall_print (const char *format, ...)
{
	if (!mono_tailcall_print_enabled ())
		return;
	va_list args;
	va_start (args, format);
	g_printv (format, args);
	va_end (args);
}
/*
 * Error-reporting macros used inside mono_method_to_ir () and its helpers.
 * These have 'cfg' as an implicit argument, and most of them jump to the
 * exception_exit/mono_error_exit labels of the enclosing function.
 */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
		inline_failure (cfg, msg); \
		goto exception_exit; \
	} \
	} while (0)
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE) \
			goto exception_exit; \
	} while (0)
#define FIELD_ACCESS_FAILURE(method, field) do { \
		field_access_failure ((cfg), (method), (field)); \
		goto exception_exit; \
	} while (0)
#define GENERIC_SHARING_FAILURE(opcode) do { \
		if (cfg->gshared) { \
			gshared_failure (cfg, opcode, __FILE__, __LINE__); \
			goto exception_exit; \
		} \
	} while (0)
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
	} while (0)
#define OUT_OF_MEMORY_FAILURE do { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
		mono_error_set_out_of_memory (cfg->error, ""); \
		goto exception_exit; \
	} while (0)
#define DISABLE_AOT(cfg) do { \
		if ((cfg)->verbose_level >= 2) \
			printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
		(cfg)->disable_aot = TRUE; \
	} while (0)
#define LOAD_ERROR do { \
		break_on_unverified (); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
		goto exception_exit; \
	} while (0)

#define TYPE_LOAD_ERROR(klass) do { \
		cfg->exception_ptr = klass; \
		LOAD_ERROR; \
	} while (0)

#define CHECK_CFG_ERROR do {\
		if (!is_ok (cfg->error)) { \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
			goto mono_error_exit; \
		} \
	} while (0)
178 static int stind_to_store_membase (int opcode);
180 int mono_op_to_op_imm (int opcode);
181 int mono_op_to_op_imm_noemul (int opcode);
183 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
184 guchar *ip, guint real_offset, gboolean inline_always);
185 static MonoInst*
186 convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins);
188 /* helper methods signatures */
190 /* type loading helpers */
191 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
192 static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1")
193 static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1");
196 * Instruction metadata
198 #ifdef MINI_OP
199 #undef MINI_OP
200 #endif
201 #ifdef MINI_OP3
202 #undef MINI_OP3
203 #endif
204 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
205 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
206 #define NONE ' '
207 #define IREG 'i'
208 #define FREG 'f'
209 #define VREG 'v'
210 #define XREG 'x'
211 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P
212 #define LREG IREG
213 #else
214 #define LREG 'l'
215 #endif
216 /* keep in sync with the enum in mini.h */
217 const char
218 mini_ins_info[] = {
219 #include "mini-ops.h"
221 #undef MINI_OP
222 #undef MINI_OP3
224 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
225 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
227 * This should contain the index of the last sreg + 1. This is not the same
228 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
230 const gint8 mini_ins_sreg_counts[] = {
231 #include "mini-ops.h"
233 #undef MINI_OP
234 #undef MINI_OP3
236 guint32
237 mono_alloc_ireg (MonoCompile *cfg)
239 return alloc_ireg (cfg);
242 guint32
243 mono_alloc_lreg (MonoCompile *cfg)
245 return alloc_lreg (cfg);
248 guint32
249 mono_alloc_freg (MonoCompile *cfg)
251 return alloc_freg (cfg);
254 guint32
255 mono_alloc_preg (MonoCompile *cfg)
257 return alloc_preg (cfg);
260 guint32
261 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
263 return alloc_dreg (cfg, stack_type);
267 * mono_alloc_ireg_ref:
269 * Allocate an IREG, and mark it as holding a GC ref.
271 guint32
272 mono_alloc_ireg_ref (MonoCompile *cfg)
274 return alloc_ireg_ref (cfg);
278 * mono_alloc_ireg_mp:
280 * Allocate an IREG, and mark it as holding a managed pointer.
282 guint32
283 mono_alloc_ireg_mp (MonoCompile *cfg)
285 return alloc_ireg_mp (cfg);
289 * mono_alloc_ireg_copy:
291 * Allocate an IREG with the same GC type as VREG.
293 guint32
294 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
296 if (vreg_is_ref (cfg, vreg))
297 return alloc_ireg_ref (cfg);
298 else if (vreg_is_mp (cfg, vreg))
299 return alloc_ireg_mp (cfg);
300 else
301 return alloc_ireg (cfg);
304 guint
305 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
307 if (type->byref)
308 return OP_MOVE;
310 type = mini_get_underlying_type (type);
311 handle_enum:
312 switch (type->type) {
313 case MONO_TYPE_I1:
314 case MONO_TYPE_U1:
315 return OP_MOVE;
316 case MONO_TYPE_I2:
317 case MONO_TYPE_U2:
318 return OP_MOVE;
319 case MONO_TYPE_I4:
320 case MONO_TYPE_U4:
321 return OP_MOVE;
322 case MONO_TYPE_I:
323 case MONO_TYPE_U:
324 case MONO_TYPE_PTR:
325 case MONO_TYPE_FNPTR:
326 return OP_MOVE;
327 case MONO_TYPE_CLASS:
328 case MONO_TYPE_STRING:
329 case MONO_TYPE_OBJECT:
330 case MONO_TYPE_SZARRAY:
331 case MONO_TYPE_ARRAY:
332 return OP_MOVE;
333 case MONO_TYPE_I8:
334 case MONO_TYPE_U8:
335 #if SIZEOF_REGISTER == 8
336 return OP_MOVE;
337 #else
338 return OP_LMOVE;
339 #endif
340 case MONO_TYPE_R4:
341 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
342 case MONO_TYPE_R8:
343 return OP_FMOVE;
344 case MONO_TYPE_VALUETYPE:
345 if (m_class_is_enumtype (type->data.klass)) {
346 type = mono_class_enum_basetype_internal (type->data.klass);
347 goto handle_enum;
349 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
350 return OP_XMOVE;
351 return OP_VMOVE;
352 case MONO_TYPE_TYPEDBYREF:
353 return OP_VMOVE;
354 case MONO_TYPE_GENERICINST:
355 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
356 return OP_XMOVE;
357 type = m_class_get_byval_arg (type->data.generic_class->container_class);
358 goto handle_enum;
359 case MONO_TYPE_VAR:
360 case MONO_TYPE_MVAR:
361 g_assert (cfg->gshared);
362 if (mini_type_var_is_vt (type))
363 return OP_VMOVE;
364 else
365 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
366 default:
367 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
369 return -1;
372 void
373 mono_print_bb (MonoBasicBlock *bb, const char *msg)
375 int i;
376 MonoInst *tree;
377 GString *str = g_string_new ("");
379 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
380 for (i = 0; i < bb->in_count; ++i)
381 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
382 g_string_append_printf (str, ", OUT: ");
383 for (i = 0; i < bb->out_count; ++i)
384 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
385 g_string_append_printf (str, " ]\n");
387 g_print ("%s", str->str);
388 g_string_free (str, TRUE);
390 for (tree = bb->code; tree; tree = tree->next)
391 mono_print_ins_index (-1, tree);
394 static MONO_NEVER_INLINE gboolean
395 break_on_unverified (void)
397 if (mini_debug_options.break_on_unverified) {
398 G_BREAKPOINT ();
399 return TRUE;
401 return FALSE;
404 static void
405 clear_cfg_error (MonoCompile *cfg)
407 mono_error_cleanup (cfg->error);
408 error_init (cfg->error);
411 static MONO_NEVER_INLINE void
412 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
414 char *method_fname = mono_method_full_name (method, TRUE);
415 char *field_fname = mono_field_full_name (field);
416 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
417 mono_error_set_generic_error (cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
418 g_free (method_fname);
419 g_free (field_fname);
422 static MONO_NEVER_INLINE void
423 inline_failure (MonoCompile *cfg, const char *msg)
425 if (cfg->verbose_level >= 2)
426 printf ("inline failed: %s\n", msg);
427 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
430 static MONO_NEVER_INLINE void
431 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
433 if (cfg->verbose_level > 2)
434 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line);
435 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
438 static MONO_NEVER_INLINE void
439 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
441 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
442 if (cfg->verbose_level >= 2)
443 printf ("%s\n", cfg->exception_message);
444 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
447 void
448 mini_set_inline_failure (MonoCompile *cfg, const char *msg)
450 if (cfg->verbose_level >= 2)
451 printf ("inline failed: %s\n", msg);
452 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 *
 * UNVERIFIED: for gsharedvt methods fall back to a per-instantiation compile;
 * otherwise jump to the 'unverified' label of the enclosing function.
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2)					\
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit;											\
	}																	\
	break_on_unverified ();												\
	goto unverified;													\
} while (0)

/*
 * GET_BBLOCK: look up (or lazily create and register) the basic block
 * starting at IL address IP.
 */
#define GET_BBLOCK(cfg,tblock,ip) do {	\
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) {	\
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock));	\
			(tblock)->cil_code = (ip);	\
			ADD_BBLOCK (cfg, (tblock));	\
		} \
	} while (0)
480 /* Emit conversions so both operands of a binary opcode are of the same type */
481 static void
482 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
484 MonoInst *arg1 = *arg1_ref;
485 MonoInst *arg2 = *arg2_ref;
487 if (cfg->r4fp &&
488 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
489 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
490 MonoInst *conv;
492 /* Mixing r4/r8 is allowed by the spec */
493 if (arg1->type == STACK_R4) {
494 int dreg = alloc_freg (cfg);
496 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
497 conv->type = STACK_R8;
498 ins->sreg1 = dreg;
499 *arg1_ref = conv;
501 if (arg2->type == STACK_R4) {
502 int dreg = alloc_freg (cfg);
504 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
505 conv->type = STACK_R8;
506 ins->sreg2 = dreg;
507 *arg2_ref = conv;
511 #if SIZEOF_REGISTER == 8
512 /* FIXME: Need to add many more cases */
513 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
514 MonoInst *widen;
516 int dr = alloc_preg (cfg);
517 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
518 (ins)->sreg2 = widen->dreg;
520 #endif
/*
 * ADD_BINOP: pop two values from the eval stack, emit the type-specialized
 * form of the binary opcode OP and push the (possibly decomposed) result.
 * Uses 'cfg', 'ins' and 'sp' from the enclosing scope.
 */
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp -= 2;	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (cfg, ins, sp [0], sp [1]);	\
		CHECK_TYPE (ins);	\
		/* Have to insert a widening op */		 \
		add_widen_op (cfg, ins, &sp [0], &sp [1]);		 \
		ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \
	} while (0)
/*
 * ADD_UNOP: pop one value from the eval stack, emit the type-specialized
 * form of the unary opcode OP and push the (possibly decomposed) result.
 */
#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp--;	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (cfg, ins, sp [0], NULL);	\
		CHECK_TYPE (ins);	\
		(ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins);	\
	} while (0)
/*
 * ADD_BINCOND: pop two values, emit a compare + conditional branch pair.
 * The true edge goes to 'target'; the false edge goes to NEXT_BLOCK when
 * given, otherwise to the block starting at 'next_ip'. Flushes the eval
 * stack across the block boundary if needed.
 */
#define ADD_BINCOND(next_block) do {	\
		MonoInst *cmp;	\
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		add_widen_op (cfg, cmp, &sp [0], &sp [1]);						\
		type_from_op (cfg, cmp, sp [0], sp [1]);	\
		CHECK_TYPE (cmp);	\
		type_from_op (cfg, ins, sp [0], sp [1]);	\
		ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);		\
		link_bblock (cfg, cfg->cbb, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			link_bblock (cfg, cfg->cbb, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
		} else {	\
			GET_BBLOCK (cfg, tblock, next_ip);		\
			link_bblock (cfg, cfg->cbb, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		}	\
		if (sp != stack_start) {	\
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		}	\
		MONO_ADD_INS (cfg->cbb, cmp);	\
		MONO_ADD_INS (cfg->cbb, ins);	\
	} while (0)
580 /* *
581 * link_bblock: Links two basic blocks
583 * links two basic blocks in the control flow graph, the 'from'
584 * argument is the starting block and the 'to' argument is the block
585 * the control flow ends to after 'from'.
587 static void
588 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
590 MonoBasicBlock **newa;
591 int i, found;
593 #if 0
594 if (from->cil_code) {
595 if (to->cil_code)
596 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
597 else
598 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
599 } else {
600 if (to->cil_code)
601 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
602 else
603 printf ("edge from entry to exit\n");
605 #endif
607 found = FALSE;
608 for (i = 0; i < from->out_count; ++i) {
609 if (to == from->out_bb [i]) {
610 found = TRUE;
611 break;
614 if (!found) {
615 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
616 for (i = 0; i < from->out_count; ++i) {
617 newa [i] = from->out_bb [i];
619 newa [i] = to;
620 from->out_count++;
621 from->out_bb = newa;
624 found = FALSE;
625 for (i = 0; i < to->in_count; ++i) {
626 if (from == to->in_bb [i]) {
627 found = TRUE;
628 break;
631 if (!found) {
632 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
633 for (i = 0; i < to->in_count; ++i) {
634 newa [i] = to->in_bb [i];
636 newa [i] = from;
637 to->in_count++;
638 to->in_bb = newa;
642 void
643 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
645 link_bblock (cfg, from, to);
648 static void
649 mono_create_spvar_for_region (MonoCompile *cfg, int region);
651 static void
652 mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end)
654 MonoBasicBlock *bb = cfg->cil_offset_to_bb [start];
656 //start must exist in cil_offset_to_bb as those are il offsets used by EH which should have GET_BBLOCK early.
657 g_assert (bb);
659 if (cfg->verbose_level > 1)
660 g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num);
661 for (; bb && bb->real_offset < end; bb = bb->next_bb) {
662 //no one claimed this bb, take it.
663 if (bb->region == -1) {
664 bb->region = region;
665 continue;
668 //current region is an early handler, bail
669 if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) {
670 continue;
673 //current region is a try, only overwrite if new region is a handler
674 if ((region & (0xf << 4)) != MONO_REGION_TRY) {
675 bb->region = region;
679 if (cfg->spvars)
680 mono_create_spvar_for_region (cfg, region);
683 static void
684 compute_bb_regions (MonoCompile *cfg)
686 MonoBasicBlock *bb;
687 MonoMethodHeader *header = cfg->header;
688 int i;
690 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
691 bb->region = -1;
693 for (i = 0; i < header->num_clauses; ++i) {
694 MonoExceptionClause *clause = &header->clauses [i];
696 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
697 mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset);
699 guint handler_region;
700 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
701 handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
702 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
703 handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
704 else
705 handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
707 mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len);
708 mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len);
711 if (cfg->verbose_level > 2) {
712 MonoBasicBlock *bb;
713 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
714 g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
720 static gboolean
721 ip_in_finally_clause (MonoCompile *cfg, int offset)
723 MonoMethodHeader *header = cfg->header;
724 MonoExceptionClause *clause;
725 int i;
727 for (i = 0; i < header->num_clauses; ++i) {
728 clause = &header->clauses [i];
729 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
730 continue;
732 if (MONO_OFFSET_IN_HANDLER (clause, offset))
733 return TRUE;
735 return FALSE;
738 /* Find clauses between ip and target, from inner to outer */
739 static GList*
740 mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target)
742 MonoMethodHeader *header = cfg->header;
743 MonoExceptionClause *clause;
744 int i;
745 GList *res = NULL;
747 for (i = 0; i < header->num_clauses; ++i) {
748 clause = &header->clauses [i];
749 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
750 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
751 MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause));
752 leave->index = i;
753 leave->clause = clause;
755 res = g_list_append_mempool (cfg->mempool, res, leave);
758 return res;
761 static void
762 mono_create_spvar_for_region (MonoCompile *cfg, int region)
764 MonoInst *var;
766 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
767 if (var)
768 return;
770 var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
771 /* prevent it from being register allocated */
772 var->flags |= MONO_INST_VOLATILE;
774 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
777 MonoInst *
778 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
780 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
783 static MonoInst*
784 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
786 MonoInst *var;
788 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
789 if (var)
790 return var;
792 var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL);
793 /* prevent it from being register allocated */
794 var->flags |= MONO_INST_VOLATILE;
796 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
798 return var;
802 * Returns the type used in the eval stack when @type is loaded.
803 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
805 void
806 mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
808 MonoClass *klass;
810 type = mini_get_underlying_type (type);
811 inst->klass = klass = mono_class_from_mono_type_internal (type);
812 if (type->byref) {
813 inst->type = STACK_MP;
814 return;
817 handle_enum:
818 switch (type->type) {
819 case MONO_TYPE_VOID:
820 inst->type = STACK_INV;
821 return;
822 case MONO_TYPE_I1:
823 case MONO_TYPE_U1:
824 case MONO_TYPE_I2:
825 case MONO_TYPE_U2:
826 case MONO_TYPE_I4:
827 case MONO_TYPE_U4:
828 inst->type = STACK_I4;
829 return;
830 case MONO_TYPE_I:
831 case MONO_TYPE_U:
832 case MONO_TYPE_PTR:
833 case MONO_TYPE_FNPTR:
834 inst->type = STACK_PTR;
835 return;
836 case MONO_TYPE_CLASS:
837 case MONO_TYPE_STRING:
838 case MONO_TYPE_OBJECT:
839 case MONO_TYPE_SZARRAY:
840 case MONO_TYPE_ARRAY:
841 inst->type = STACK_OBJ;
842 return;
843 case MONO_TYPE_I8:
844 case MONO_TYPE_U8:
845 inst->type = STACK_I8;
846 return;
847 case MONO_TYPE_R4:
848 inst->type = cfg->r4_stack_type;
849 break;
850 case MONO_TYPE_R8:
851 inst->type = STACK_R8;
852 return;
853 case MONO_TYPE_VALUETYPE:
854 if (m_class_is_enumtype (type->data.klass)) {
855 type = mono_class_enum_basetype_internal (type->data.klass);
856 goto handle_enum;
857 } else {
858 inst->klass = klass;
859 inst->type = STACK_VTYPE;
860 return;
862 case MONO_TYPE_TYPEDBYREF:
863 inst->klass = mono_defaults.typed_reference_class;
864 inst->type = STACK_VTYPE;
865 return;
866 case MONO_TYPE_GENERICINST:
867 type = m_class_get_byval_arg (type->data.generic_class->container_class);
868 goto handle_enum;
869 case MONO_TYPE_VAR:
870 case MONO_TYPE_MVAR:
871 g_assert (cfg->gshared);
872 if (mini_is_gsharedvt_type (type)) {
873 g_assert (cfg->gsharedvt);
874 inst->type = STACK_VTYPE;
875 } else {
876 mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
878 return;
879 default:
880 g_error ("unknown type 0x%02x in eval stack type", type->type);
885 * The following tables are used to quickly validate the IL code in type_from_op ().
887 static const char
888 bin_num_table [STACK_MAX] [STACK_MAX] = {
889 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
890 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
891 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
892 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
893 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
894 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
895 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
896 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
897 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
900 static const char
901 neg_table [] = {
902 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
905 /* reduce the size of this table */
906 static const char
907 bin_int_table [STACK_MAX] [STACK_MAX] = {
908 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
909 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
910 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
911 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
912 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
913 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
914 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
915 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
918 static const char
919 bin_comp_table [STACK_MAX] [STACK_MAX] = {
920 /* Inv i L p F & O vt r4 */
921 {0},
922 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
923 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
924 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
925 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
926 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
927 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
928 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
929 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
932 /* reduce the size of this table */
933 static const char
934 shift_table [STACK_MAX] [STACK_MAX] = {
935 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
936 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
937 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
938 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
939 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
940 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
941 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
942 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
946 * Tables to map from the non-specific opcode to the matching
947 * type-specific opcode.
949 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
950 static const guint16
951 binops_op_map [STACK_MAX] = {
952 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
955 /* handles from CEE_NEG to CEE_CONV_U8 */
956 static const guint16
957 unops_op_map [STACK_MAX] = {
958 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
961 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
962 static const guint16
963 ovfops_op_map [STACK_MAX] = {
964 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
967 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
968 static const guint16
969 ovf2ops_op_map [STACK_MAX] = {
970 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
973 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
974 static const guint16
975 ovf3ops_op_map [STACK_MAX] = {
976 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
979 /* handles from CEE_BEQ to CEE_BLT_UN */
980 static const guint16
981 beqops_op_map [STACK_MAX] = {
982 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
985 /* handles from CEE_CEQ to CEE_CLT_UN */
986 static const guint16
987 ceqops_op_map [STACK_MAX] = {
988 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
992 * Sets ins->type (the type on the eval stack) according to the
993 * type of the opcode and the arguments to it.
994 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
996 * FIXME: this function sets ins->type unconditionally in some cases, but
997 * it should set it to invalid for some types (a conv.x on an object)
999 static void
1000 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
1002 switch (ins->opcode) {
1003 /* binops */
1004 case MONO_CEE_ADD:
1005 case MONO_CEE_SUB:
1006 case MONO_CEE_MUL:
1007 case MONO_CEE_DIV:
1008 case MONO_CEE_REM:
1009 /* FIXME: check unverifiable args for STACK_MP */
1010 ins->type = bin_num_table [src1->type] [src2->type];
1011 ins->opcode += binops_op_map [ins->type];
1012 break;
1013 case MONO_CEE_DIV_UN:
1014 case MONO_CEE_REM_UN:
1015 case MONO_CEE_AND:
1016 case MONO_CEE_OR:
1017 case MONO_CEE_XOR:
1018 ins->type = bin_int_table [src1->type] [src2->type];
1019 ins->opcode += binops_op_map [ins->type];
1020 break;
1021 case MONO_CEE_SHL:
1022 case MONO_CEE_SHR:
1023 case MONO_CEE_SHR_UN:
1024 ins->type = shift_table [src1->type] [src2->type];
1025 ins->opcode += binops_op_map [ins->type];
1026 break;
1027 case OP_COMPARE:
1028 case OP_LCOMPARE:
1029 case OP_ICOMPARE:
1030 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1031 if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
1032 ins->opcode = OP_LCOMPARE;
1033 else if (src1->type == STACK_R4)
1034 ins->opcode = OP_RCOMPARE;
1035 else if (src1->type == STACK_R8)
1036 ins->opcode = OP_FCOMPARE;
1037 else
1038 ins->opcode = OP_ICOMPARE;
1039 break;
1040 case OP_ICOMPARE_IMM:
1041 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
1042 if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
1043 ins->opcode = OP_LCOMPARE_IMM;
1044 break;
1045 case MONO_CEE_BEQ:
1046 case MONO_CEE_BGE:
1047 case MONO_CEE_BGT:
1048 case MONO_CEE_BLE:
1049 case MONO_CEE_BLT:
1050 case MONO_CEE_BNE_UN:
1051 case MONO_CEE_BGE_UN:
1052 case MONO_CEE_BGT_UN:
1053 case MONO_CEE_BLE_UN:
1054 case MONO_CEE_BLT_UN:
1055 ins->opcode += beqops_op_map [src1->type];
1056 break;
1057 case OP_CEQ:
1058 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1059 ins->opcode += ceqops_op_map [src1->type];
1060 break;
1061 case OP_CGT:
1062 case OP_CGT_UN:
1063 case OP_CLT:
1064 case OP_CLT_UN:
1065 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1066 ins->opcode += ceqops_op_map [src1->type];
1067 break;
1068 /* unops */
1069 case MONO_CEE_NEG:
1070 ins->type = neg_table [src1->type];
1071 ins->opcode += unops_op_map [ins->type];
1072 break;
1073 case MONO_CEE_NOT:
1074 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1075 ins->type = src1->type;
1076 else
1077 ins->type = STACK_INV;
1078 ins->opcode += unops_op_map [ins->type];
1079 break;
1080 case MONO_CEE_CONV_I1:
1081 case MONO_CEE_CONV_I2:
1082 case MONO_CEE_CONV_I4:
1083 case MONO_CEE_CONV_U4:
1084 ins->type = STACK_I4;
1085 ins->opcode += unops_op_map [src1->type];
1086 break;
1087 case MONO_CEE_CONV_R_UN:
1088 ins->type = STACK_R8;
1089 switch (src1->type) {
1090 case STACK_I4:
1091 case STACK_PTR:
1092 ins->opcode = OP_ICONV_TO_R_UN;
1093 break;
1094 case STACK_I8:
1095 ins->opcode = OP_LCONV_TO_R_UN;
1096 break;
1097 case STACK_R8:
1098 ins->opcode = OP_FMOVE;
1099 break;
1101 break;
1102 case MONO_CEE_CONV_OVF_I1:
1103 case MONO_CEE_CONV_OVF_U1:
1104 case MONO_CEE_CONV_OVF_I2:
1105 case MONO_CEE_CONV_OVF_U2:
1106 case MONO_CEE_CONV_OVF_I4:
1107 case MONO_CEE_CONV_OVF_U4:
1108 ins->type = STACK_I4;
1109 ins->opcode += ovf3ops_op_map [src1->type];
1110 break;
1111 case MONO_CEE_CONV_OVF_I_UN:
1112 case MONO_CEE_CONV_OVF_U_UN:
1113 ins->type = STACK_PTR;
1114 ins->opcode += ovf2ops_op_map [src1->type];
1115 break;
1116 case MONO_CEE_CONV_OVF_I1_UN:
1117 case MONO_CEE_CONV_OVF_I2_UN:
1118 case MONO_CEE_CONV_OVF_I4_UN:
1119 case MONO_CEE_CONV_OVF_U1_UN:
1120 case MONO_CEE_CONV_OVF_U2_UN:
1121 case MONO_CEE_CONV_OVF_U4_UN:
1122 ins->type = STACK_I4;
1123 ins->opcode += ovf2ops_op_map [src1->type];
1124 break;
1125 case MONO_CEE_CONV_U:
1126 ins->type = STACK_PTR;
1127 switch (src1->type) {
1128 case STACK_I4:
1129 ins->opcode = OP_ICONV_TO_U;
1130 break;
1131 case STACK_PTR:
1132 case STACK_MP:
1133 case STACK_OBJ:
1134 #if TARGET_SIZEOF_VOID_P == 8
1135 ins->opcode = OP_LCONV_TO_U;
1136 #else
1137 ins->opcode = OP_MOVE;
1138 #endif
1139 break;
1140 case STACK_I8:
1141 ins->opcode = OP_LCONV_TO_U;
1142 break;
1143 case STACK_R8:
1144 ins->opcode = OP_FCONV_TO_U;
1145 break;
1146 case STACK_R4:
1147 if (TARGET_SIZEOF_VOID_P == 8)
1148 ins->opcode = OP_RCONV_TO_U8;
1149 else
1150 ins->opcode = OP_RCONV_TO_U4;
1151 break;
1153 break;
1154 case MONO_CEE_CONV_I8:
1155 case MONO_CEE_CONV_U8:
1156 ins->type = STACK_I8;
1157 ins->opcode += unops_op_map [src1->type];
1158 break;
1159 case MONO_CEE_CONV_OVF_I8:
1160 case MONO_CEE_CONV_OVF_U8:
1161 ins->type = STACK_I8;
1162 ins->opcode += ovf3ops_op_map [src1->type];
1163 break;
1164 case MONO_CEE_CONV_OVF_U8_UN:
1165 case MONO_CEE_CONV_OVF_I8_UN:
1166 ins->type = STACK_I8;
1167 ins->opcode += ovf2ops_op_map [src1->type];
1168 break;
1169 case MONO_CEE_CONV_R4:
1170 ins->type = cfg->r4_stack_type;
1171 ins->opcode += unops_op_map [src1->type];
1172 break;
1173 case MONO_CEE_CONV_R8:
1174 ins->type = STACK_R8;
1175 ins->opcode += unops_op_map [src1->type];
1176 break;
1177 case OP_CKFINITE:
1178 ins->type = STACK_R8;
1179 break;
1180 case MONO_CEE_CONV_U2:
1181 case MONO_CEE_CONV_U1:
1182 ins->type = STACK_I4;
1183 ins->opcode += ovfops_op_map [src1->type];
1184 break;
1185 case MONO_CEE_CONV_I:
1186 case MONO_CEE_CONV_OVF_I:
1187 case MONO_CEE_CONV_OVF_U:
1188 ins->type = STACK_PTR;
1189 ins->opcode += ovfops_op_map [src1->type];
1190 break;
1191 case MONO_CEE_ADD_OVF:
1192 case MONO_CEE_ADD_OVF_UN:
1193 case MONO_CEE_MUL_OVF:
1194 case MONO_CEE_MUL_OVF_UN:
1195 case MONO_CEE_SUB_OVF:
1196 case MONO_CEE_SUB_OVF_UN:
1197 ins->type = bin_num_table [src1->type] [src2->type];
1198 ins->opcode += ovfops_op_map [src1->type];
1199 if (ins->type == STACK_R8)
1200 ins->type = STACK_INV;
1201 break;
1202 case OP_LOAD_MEMBASE:
1203 ins->type = STACK_PTR;
1204 break;
1205 case OP_LOADI1_MEMBASE:
1206 case OP_LOADU1_MEMBASE:
1207 case OP_LOADI2_MEMBASE:
1208 case OP_LOADU2_MEMBASE:
1209 case OP_LOADI4_MEMBASE:
1210 case OP_LOADU4_MEMBASE:
1211 ins->type = STACK_PTR;
1212 break;
1213 case OP_LOADI8_MEMBASE:
1214 ins->type = STACK_I8;
1215 break;
1216 case OP_LOADR4_MEMBASE:
1217 ins->type = cfg->r4_stack_type;
1218 break;
1219 case OP_LOADR8_MEMBASE:
1220 ins->type = STACK_R8;
1221 break;
1222 default:
1223 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1224 break;
1227 if (ins->type == STACK_MP) {
1228 if (src1->type == STACK_MP)
1229 ins->klass = src1->klass;
1230 else
1231 ins->klass = mono_defaults.object_class;
1235 void
1236 mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
1238 type_from_op (cfg, ins, src1, src2);
1241 static MonoClass*
1242 ldind_to_type (int op)
1244 switch (op) {
1245 case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class;
1246 case MONO_CEE_LDIND_U1: return mono_defaults.byte_class;
1247 case MONO_CEE_LDIND_I2: return mono_defaults.int16_class;
1248 case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class;
1249 case MONO_CEE_LDIND_I4: return mono_defaults.int32_class;
1250 case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class;
1251 case MONO_CEE_LDIND_I8: return mono_defaults.int64_class;
1252 case MONO_CEE_LDIND_I: return mono_defaults.int_class;
1253 case MONO_CEE_LDIND_R4: return mono_defaults.single_class;
1254 case MONO_CEE_LDIND_R8: return mono_defaults.double_class;
1255 case MONO_CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
1256 default: g_error ("Unknown ldind type %d", op);
#if 0
/* Dead code, kept for reference: a partial checker that the values on the
 * eval stack are compatible with a call signature. */

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

static int
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
{
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1325 * When we need a pointer to the current domain many times in a method, we
1326 * call mono_domain_get() once and we store the result in a local variable.
1327 * This function returns the variable that represents the MonoDomain*.
1329 inline static MonoInst *
1330 mono_get_domainvar (MonoCompile *cfg)
1332 if (!cfg->domainvar) {
1333 /* Make sure we don't generate references after checking whenever to init this */
1334 g_assert (!cfg->domainvar_inited);
1335 cfg->domainvar = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1336 /* Avoid optimizing it away */
1337 cfg->domainvar->flags |= MONO_INST_VOLATILE;
1339 return cfg->domainvar;
1343 * The got_var contains the address of the Global Offset Table when AOT
1344 * compiling.
1346 MonoInst *
1347 mono_get_got_var (MonoCompile *cfg)
1349 if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
1350 return NULL;
1351 if (!cfg->got_var) {
1352 cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1354 return cfg->got_var;
1357 static void
1358 mono_create_rgctx_var (MonoCompile *cfg)
1360 if (!cfg->rgctx_var) {
1361 cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1362 /* force the var to be stack allocated */
1363 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1367 static MonoInst *
1368 mono_get_vtable_var (MonoCompile *cfg)
1370 g_assert (cfg->gshared);
1372 mono_create_rgctx_var (cfg);
1374 return cfg->rgctx_var;
1377 static MonoType*
1378 type_from_stack_type (MonoInst *ins) {
1379 switch (ins->type) {
1380 case STACK_I4: return mono_get_int32_type ();
1381 case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
1382 case STACK_PTR: return mono_get_int_type ();
1383 case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class);
1384 case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
1385 case STACK_MP:
1386 return m_class_get_this_arg (ins->klass);
1387 case STACK_OBJ: return mono_get_object_type ();
1388 case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
1389 default:
1390 g_error ("stack type %d to monotype not handled\n", ins->type);
1392 return NULL;
1395 static G_GNUC_UNUSED int
1396 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1398 t = mono_type_get_underlying_type (t);
1399 switch (t->type) {
1400 case MONO_TYPE_I1:
1401 case MONO_TYPE_U1:
1402 case MONO_TYPE_I2:
1403 case MONO_TYPE_U2:
1404 case MONO_TYPE_I4:
1405 case MONO_TYPE_U4:
1406 return STACK_I4;
1407 case MONO_TYPE_I:
1408 case MONO_TYPE_U:
1409 case MONO_TYPE_PTR:
1410 case MONO_TYPE_FNPTR:
1411 return STACK_PTR;
1412 case MONO_TYPE_CLASS:
1413 case MONO_TYPE_STRING:
1414 case MONO_TYPE_OBJECT:
1415 case MONO_TYPE_SZARRAY:
1416 case MONO_TYPE_ARRAY:
1417 return STACK_OBJ;
1418 case MONO_TYPE_I8:
1419 case MONO_TYPE_U8:
1420 return STACK_I8;
1421 case MONO_TYPE_R4:
1422 return cfg->r4_stack_type;
1423 case MONO_TYPE_R8:
1424 return STACK_R8;
1425 case MONO_TYPE_VALUETYPE:
1426 case MONO_TYPE_TYPEDBYREF:
1427 return STACK_VTYPE;
1428 case MONO_TYPE_GENERICINST:
1429 if (mono_type_generic_inst_is_valuetype (t))
1430 return STACK_VTYPE;
1431 else
1432 return STACK_OBJ;
1433 break;
1434 default:
1435 g_assert_not_reached ();
1438 return -1;
1441 static MonoClass*
1442 array_access_to_klass (int opcode)
1444 switch (opcode) {
1445 case MONO_CEE_LDELEM_U1:
1446 return mono_defaults.byte_class;
1447 case MONO_CEE_LDELEM_U2:
1448 return mono_defaults.uint16_class;
1449 case MONO_CEE_LDELEM_I:
1450 case MONO_CEE_STELEM_I:
1451 return mono_defaults.int_class;
1452 case MONO_CEE_LDELEM_I1:
1453 case MONO_CEE_STELEM_I1:
1454 return mono_defaults.sbyte_class;
1455 case MONO_CEE_LDELEM_I2:
1456 case MONO_CEE_STELEM_I2:
1457 return mono_defaults.int16_class;
1458 case MONO_CEE_LDELEM_I4:
1459 case MONO_CEE_STELEM_I4:
1460 return mono_defaults.int32_class;
1461 case MONO_CEE_LDELEM_U4:
1462 return mono_defaults.uint32_class;
1463 case MONO_CEE_LDELEM_I8:
1464 case MONO_CEE_STELEM_I8:
1465 return mono_defaults.int64_class;
1466 case MONO_CEE_LDELEM_R4:
1467 case MONO_CEE_STELEM_R4:
1468 return mono_defaults.single_class;
1469 case MONO_CEE_LDELEM_R8:
1470 case MONO_CEE_STELEM_R8:
1471 return mono_defaults.double_class;
1472 case MONO_CEE_LDELEM_REF:
1473 case MONO_CEE_STELEM_REF:
1474 return mono_defaults.object_class;
1475 default:
1476 g_assert_not_reached ();
1478 return NULL;
1482 * We try to share variables when possible
1484 static MonoInst *
1485 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1487 MonoInst *res;
1488 int pos, vnum;
1489 MonoType *type;
1491 type = type_from_stack_type (ins);
1493 /* inlining can result in deeper stacks */
1494 if (cfg->inline_depth || slot >= cfg->header->max_stack)
1495 return mono_compile_create_var (cfg, type, OP_LOCAL);
1497 pos = ins->type - 1 + slot * STACK_MAX;
1499 switch (ins->type) {
1500 case STACK_I4:
1501 case STACK_I8:
1502 case STACK_R8:
1503 case STACK_PTR:
1504 case STACK_MP:
1505 case STACK_OBJ:
1506 if ((vnum = cfg->intvars [pos]))
1507 return cfg->varinfo [vnum];
1508 res = mono_compile_create_var (cfg, type, OP_LOCAL);
1509 cfg->intvars [pos] = res->inst_c0;
1510 break;
1511 default:
1512 res = mono_compile_create_var (cfg, type, OP_LOCAL);
1514 return res;
1517 static void
1518 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1521 * Don't use this if a generic_context is set, since that means AOT can't
1522 * look up the method using just the image+token.
1523 * table == 0 means this is a reference made from a wrapper.
1525 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1526 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1527 jump_info_token->image = image;
1528 jump_info_token->token = token;
1529 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1534 * This function is called to handle items that are left on the evaluation stack
1535 * at basic block boundaries. What happens is that we save the values to local variables
1536 * and we reload them later when first entering the target basic block (with the
1537 * handle_loaded_temps () function).
1538 * A single joint point will use the same variables (stored in the array bb->out_stack or
1539 * bb->in_stack, if the basic block is before or after the joint point).
1541 * This function needs to be called _before_ emitting the last instruction of
1542 * the bb (i.e. before emitting a branch).
1543 * If the stack merge fails at a join point, cfg->unverifiable is set.
1545 static void
1546 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1548 int i, bindex;
1549 MonoBasicBlock *bb = cfg->cbb;
1550 MonoBasicBlock *outb;
1551 MonoInst *inst, **locals;
1552 gboolean found;
1554 if (!count)
1555 return;
1556 if (cfg->verbose_level > 3)
1557 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1558 if (!bb->out_scount) {
1559 bb->out_scount = count;
1560 //printf ("bblock %d has out:", bb->block_num);
1561 found = FALSE;
1562 for (i = 0; i < bb->out_count; ++i) {
1563 outb = bb->out_bb [i];
1564 /* exception handlers are linked, but they should not be considered for stack args */
1565 if (outb->flags & BB_EXCEPTION_HANDLER)
1566 continue;
1567 //printf (" %d", outb->block_num);
1568 if (outb->in_stack) {
1569 found = TRUE;
1570 bb->out_stack = outb->in_stack;
1571 break;
1574 //printf ("\n");
1575 if (!found) {
1576 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1577 for (i = 0; i < count; ++i) {
1579 * try to reuse temps already allocated for this purpouse, if they occupy the same
1580 * stack slot and if they are of the same type.
1581 * This won't cause conflicts since if 'local' is used to
1582 * store one of the values in the in_stack of a bblock, then
1583 * the same variable will be used for the same outgoing stack
1584 * slot as well.
1585 * This doesn't work when inlining methods, since the bblocks
1586 * in the inlined methods do not inherit their in_stack from
1587 * the bblock they are inlined to. See bug #58863 for an
1588 * example.
1590 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1595 for (i = 0; i < bb->out_count; ++i) {
1596 outb = bb->out_bb [i];
1597 /* exception handlers are linked, but they should not be considered for stack args */
1598 if (outb->flags & BB_EXCEPTION_HANDLER)
1599 continue;
1600 if (outb->in_scount) {
1601 if (outb->in_scount != bb->out_scount) {
1602 cfg->unverifiable = TRUE;
1603 return;
1605 continue; /* check they are the same locals */
1607 outb->in_scount = count;
1608 outb->in_stack = bb->out_stack;
1611 locals = bb->out_stack;
1612 cfg->cbb = bb;
1613 for (i = 0; i < count; ++i) {
1614 sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]);
1615 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1616 inst->cil_code = sp [i]->cil_code;
1617 sp [i] = locals [i];
1618 if (cfg->verbose_level > 3)
1619 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1623 * It is possible that the out bblocks already have in_stack assigned, and
1624 * the in_stacks differ. In this case, we will store to all the different
1625 * in_stacks.
1628 found = TRUE;
1629 bindex = 0;
1630 while (found) {
1631 /* Find a bblock which has a different in_stack */
1632 found = FALSE;
1633 while (bindex < bb->out_count) {
1634 outb = bb->out_bb [bindex];
1635 /* exception handlers are linked, but they should not be considered for stack args */
1636 if (outb->flags & BB_EXCEPTION_HANDLER) {
1637 bindex++;
1638 continue;
1640 if (outb->in_stack != locals) {
1641 for (i = 0; i < count; ++i) {
1642 sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]);
1643 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1644 inst->cil_code = sp [i]->cil_code;
1645 sp [i] = locals [i];
1646 if (cfg->verbose_level > 3)
1647 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1649 locals = outb->in_stack;
1650 found = TRUE;
1651 break;
1653 bindex ++;
1658 MonoInst*
1659 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1661 MonoInst *ins;
1663 if (cfg->compile_aot) {
1664 MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size
1665 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1666 MONO_RESTORE_WARNING
1667 } else {
1668 MonoJumpInfo ji;
1669 gpointer target;
1670 ERROR_DECL (error);
1672 ji.type = patch_type;
1673 ji.data.target = data;
1674 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, error);
1675 mono_error_assert_ok (error);
1677 EMIT_NEW_PCONST (cfg, ins, target);
1679 return ins;
1682 static MonoInst*
1683 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1685 int tls_offset = mono_tls_get_tls_offset (key);
1687 if (cfg->compile_aot)
1688 return NULL;
1690 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1691 MonoInst *ins;
1692 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1693 ins->dreg = mono_alloc_preg (cfg);
1694 ins->inst_offset = tls_offset;
1695 return ins;
1697 return NULL;
1700 static MonoInst*
1701 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1703 MonoInst *fast_tls = NULL;
1705 if (!mini_debug_options.use_fallback_tls)
1706 fast_tls = mono_create_fast_tls_getter (cfg, key);
1708 if (fast_tls) {
1709 MONO_ADD_INS (cfg->cbb, fast_tls);
1710 return fast_tls;
1713 const MonoJitICallId jit_icall_id = mono_get_tls_key_to_jit_icall_id (key);
1715 if (cfg->compile_aot) {
1716 MonoInst *addr;
1718 * tls getters are critical pieces of code and we don't want to resolve them
1719 * through the standard plt/tramp mechanism since we might expose ourselves
1720 * to crashes and infinite recursions.
1721 * Therefore the NOCALL part of MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, FALSE in is_plt_patch.
1723 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
1724 return mini_emit_calli (cfg, mono_icall_sig_ptr, NULL, addr, NULL, NULL);
1725 } else {
1726 return mono_emit_jit_icall_id (cfg, jit_icall_id, NULL);
1731 * emit_push_lmf:
1733 * Emit IR to push the current LMF onto the LMF stack.
1735 static void
1736 emit_push_lmf (MonoCompile *cfg)
1739 * Emit IR to push the LMF:
1740 * lmf_addr = <lmf_addr from tls>
1741 * lmf->lmf_addr = lmf_addr
1742 * lmf->prev_lmf = *lmf_addr
1743 * *lmf_addr = lmf
1745 MonoInst *ins, *lmf_ins;
1747 if (!cfg->lmf_ir)
1748 return;
1750 int lmf_reg, prev_lmf_reg;
1752 * Store lmf_addr in a variable, so it can be allocated to a global register.
1754 if (!cfg->lmf_addr_var)
1755 cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1757 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1758 g_assert (lmf_ins);
1760 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1762 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1763 lmf_reg = ins->dreg;
1765 prev_lmf_reg = alloc_preg (cfg);
1766 /* Save previous_lmf */
1767 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1768 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
1769 /* Set new lmf */
1770 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1774 * emit_pop_lmf:
1776 * Emit IR to pop the current LMF from the LMF stack.
1778 static void
1779 emit_pop_lmf (MonoCompile *cfg)
1781 int lmf_reg, lmf_addr_reg;
1782 MonoInst *ins;
1784 if (!cfg->lmf_ir)
1785 return;
1787 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1788 lmf_reg = ins->dreg;
1790 int prev_lmf_reg;
1792 * Emit IR to pop the LMF:
1793 * *(lmf->lmf_addr) = lmf->prev_lmf
1795 /* This could be called before emit_push_lmf () */
1796 if (!cfg->lmf_addr_var)
1797 cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1798 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1800 prev_lmf_reg = alloc_preg (cfg);
1801 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1802 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
1806 * target_type_is_incompatible:
1807 * @cfg: MonoCompile context
1809 * Check that the item @arg on the evaluation stack can be stored
1810 * in the target type (can be a local, or field, etc).
1811 * The cfg arg can be used to check if we need verification or just
1812 * validity checks.
1814 * Returns: non-0 value if arg can't be stored on a target.
1816 static int
1817 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1819 MonoType *simple_type;
1820 MonoClass *klass;
1822 if (target->byref) {
1823 /* FIXME: check that the pointed to types match */
1824 if (arg->type == STACK_MP) {
1825 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1826 MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target))));
1827 MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)));
1829 /* if the target is native int& or X* or same type */
1830 if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered)
1831 return 0;
1833 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1834 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) &&
1835 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1836 return 0;
1837 return 1;
1839 if (arg->type == STACK_PTR)
1840 return 0;
1841 return 1;
1844 simple_type = mini_get_underlying_type (target);
1845 switch (simple_type->type) {
1846 case MONO_TYPE_VOID:
1847 return 1;
1848 case MONO_TYPE_I1:
1849 case MONO_TYPE_U1:
1850 case MONO_TYPE_I2:
1851 case MONO_TYPE_U2:
1852 case MONO_TYPE_I4:
1853 case MONO_TYPE_U4:
1854 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1855 return 1;
1856 return 0;
1857 case MONO_TYPE_PTR:
1858 /* STACK_MP is needed when setting pinned locals */
1859 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1860 return 1;
1861 return 0;
1862 case MONO_TYPE_I:
1863 case MONO_TYPE_U:
1864 case MONO_TYPE_FNPTR:
1866 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1867 * in native int. (#688008).
1869 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1870 return 1;
1871 return 0;
1872 case MONO_TYPE_CLASS:
1873 case MONO_TYPE_STRING:
1874 case MONO_TYPE_OBJECT:
1875 case MONO_TYPE_SZARRAY:
1876 case MONO_TYPE_ARRAY:
1877 if (arg->type != STACK_OBJ)
1878 return 1;
1879 /* FIXME: check type compatibility */
1880 return 0;
1881 case MONO_TYPE_I8:
1882 case MONO_TYPE_U8:
1883 if (arg->type != STACK_I8)
1884 return 1;
1885 return 0;
1886 case MONO_TYPE_R4:
1887 if (arg->type != cfg->r4_stack_type)
1888 return 1;
1889 return 0;
1890 case MONO_TYPE_R8:
1891 if (arg->type != STACK_R8)
1892 return 1;
1893 return 0;
1894 case MONO_TYPE_VALUETYPE:
1895 if (arg->type != STACK_VTYPE)
1896 return 1;
1897 klass = mono_class_from_mono_type_internal (simple_type);
1898 if (klass != arg->klass)
1899 return 1;
1900 return 0;
1901 case MONO_TYPE_TYPEDBYREF:
1902 if (arg->type != STACK_VTYPE)
1903 return 1;
1904 klass = mono_class_from_mono_type_internal (simple_type);
1905 if (klass != arg->klass)
1906 return 1;
1907 return 0;
1908 case MONO_TYPE_GENERICINST:
1909 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1910 MonoClass *target_class;
1911 if (arg->type != STACK_VTYPE)
1912 return 1;
1913 klass = mono_class_from_mono_type_internal (simple_type);
1914 target_class = mono_class_from_mono_type_internal (target);
1915 /* The second cases is needed when doing partial sharing */
1916 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))))
1917 return 1;
1918 return 0;
1919 } else {
1920 if (arg->type != STACK_OBJ)
1921 return 1;
1922 /* FIXME: check type compatibility */
1923 return 0;
1925 case MONO_TYPE_VAR:
1926 case MONO_TYPE_MVAR:
1927 g_assert (cfg->gshared);
1928 if (mini_type_var_is_vt (simple_type)) {
1929 if (arg->type != STACK_VTYPE)
1930 return 1;
1931 } else {
1932 if (arg->type != STACK_OBJ)
1933 return 1;
1935 return 0;
1936 default:
1937 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1939 return 1;
1943 * convert_value:
1945 * Emit some implicit conversions which are not part of the .net spec, but are allowed by MS.NET.
1947 static MonoInst*
1948 convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins)
1950 if (!cfg->r4fp)
1951 return ins;
1952 type = mini_get_underlying_type (type);
1953 switch (type->type) {
1954 case MONO_TYPE_R4:
1955 if (ins->type == STACK_R8) {
1956 int dreg = alloc_freg (cfg);
1957 MonoInst *conv;
1958 EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg);
1959 conv->type = STACK_R4;
1960 return conv;
1962 break;
1963 case MONO_TYPE_R8:
1964 if (ins->type == STACK_R4) {
1965 int dreg = alloc_freg (cfg);
1966 MonoInst *conv;
1967 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg);
1968 conv->type = STACK_R8;
1969 return conv;
1971 break;
1972 default:
1973 break;
1975 return ins;
1979 * Prepare arguments for passing to a function call.
1980 * Return a non-zero value if the arguments can't be passed to the given
1981 * signature.
1982 * The type checks are not yet complete and some conversions may need
1983 * casts on 32 or 64 bit architectures.
1985 * FIXME: implement this using target_type_is_incompatible ()
1987 static gboolean
1988 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1990 MonoType *simple_type;
1991 int i;
1993 if (sig->hasthis) {
1994 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1995 return TRUE;
1996 args++;
1998 for (i = 0; i < sig->param_count; ++i) {
1999 if (sig->params [i]->byref) {
2000 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2001 return TRUE;
2002 continue;
2004 simple_type = mini_get_underlying_type (sig->params [i]);
2005 handle_enum:
2006 switch (simple_type->type) {
2007 case MONO_TYPE_VOID:
2008 return TRUE;
2009 case MONO_TYPE_I1:
2010 case MONO_TYPE_U1:
2011 case MONO_TYPE_I2:
2012 case MONO_TYPE_U2:
2013 case MONO_TYPE_I4:
2014 case MONO_TYPE_U4:
2015 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2016 return TRUE;
2017 continue;
2018 case MONO_TYPE_I:
2019 case MONO_TYPE_U:
2020 case MONO_TYPE_PTR:
2021 case MONO_TYPE_FNPTR:
2022 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2023 return TRUE;
2024 continue;
2025 case MONO_TYPE_CLASS:
2026 case MONO_TYPE_STRING:
2027 case MONO_TYPE_OBJECT:
2028 case MONO_TYPE_SZARRAY:
2029 case MONO_TYPE_ARRAY:
2030 if (args [i]->type != STACK_OBJ)
2031 return TRUE;
2032 continue;
2033 case MONO_TYPE_I8:
2034 case MONO_TYPE_U8:
2035 if (args [i]->type != STACK_I8)
2036 return TRUE;
2037 continue;
2038 case MONO_TYPE_R4:
2039 if (args [i]->type != cfg->r4_stack_type)
2040 return TRUE;
2041 continue;
2042 case MONO_TYPE_R8:
2043 if (args [i]->type != STACK_R8)
2044 return TRUE;
2045 continue;
2046 case MONO_TYPE_VALUETYPE:
2047 if (m_class_is_enumtype (simple_type->data.klass)) {
2048 simple_type = mono_class_enum_basetype_internal (simple_type->data.klass);
2049 goto handle_enum;
2051 if (args [i]->type != STACK_VTYPE)
2052 return TRUE;
2053 continue;
2054 case MONO_TYPE_TYPEDBYREF:
2055 if (args [i]->type != STACK_VTYPE)
2056 return TRUE;
2057 continue;
2058 case MONO_TYPE_GENERICINST:
2059 simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class);
2060 goto handle_enum;
2061 case MONO_TYPE_VAR:
2062 case MONO_TYPE_MVAR:
2063 /* gsharedvt */
2064 if (args [i]->type != STACK_VTYPE)
2065 return TRUE;
2066 continue;
2067 default:
2068 g_error ("unknown type 0x%02x in check_call_signature",
2069 simple_type->type);
2072 return FALSE;
2075 MonoJumpInfo *
2076 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2078 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2080 ji->ip.i = ip;
2081 ji->type = type;
2082 ji->data.target = target;
2084 return ji;
2088 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2090 if (cfg->gshared)
2091 return mono_class_check_context_used (klass);
2092 else
2093 return 0;
2097 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2099 if (cfg->gshared)
2100 return mono_method_check_context_used (method);
2101 else
2102 return 0;
/*
 * check_method_sharing:
 *
 *   Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
 * Either output pointer may be NULL if the caller is not interested in it.
 * At most one of the two outputs is set to TRUE.
 */
static void
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
{
	gboolean pass_vtable = FALSE;
	gboolean pass_mrgctx = FALSE;

	/* Static/valuetype methods of generic classes may be compiled as
	 * class-shared code, which receives its context via the vtable. */
	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) &&
		(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
		gboolean sharable = FALSE;

		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
			sharable = TRUE;

		/*
		 * Pass vtable iff target method might
		 * be shared, which means that sharing
		 * is enabled for its class and its
		 * context is sharable (and it's not a
		 * generic method).
		 */
		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
			pass_vtable = TRUE;
	}

	if (mini_method_needs_mrgctx (cmethod)) {
		/* Default interface methods take an mrgctx, overriding the vtable decision above */
		if (mini_method_is_default_method (cmethod))
			pass_vtable = FALSE;
		else
			g_assert (!pass_vtable);

		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
			pass_mrgctx = TRUE;
		} else {
			/* gsharedvt signatures also need the mrgctx to recover type information */
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod)))
				pass_mrgctx = TRUE;
		}
	}

	if (out_pass_vtable)
		*out_pass_vtable = pass_vtable;
	if (out_pass_mrgctx)
		*out_pass_mrgctx = pass_mrgctx;
}
/*
 * direct_icalls_enabled:
 *
 *   Return whether JIT icalls may be called directly (without going through
 * their managed wrapper) for METHOD (which may be NULL).
 */
static gboolean
direct_icalls_enabled (MonoCompile *cfg, MonoMethod *method)
{
	/* Direct calls would bypass the wrapper needed for seq points / debugger stack walks */
	if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
		return FALSE;

	if (method && mono_aot_direct_icalls_enabled_for_method (cfg, method))
		return TRUE;

	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
#ifdef TARGET_AMD64
	if (cfg->compile_llvm && !cfg->llvm_only)
		return FALSE;
#endif

	/* NOTE(review): the fallthrough returns FALSE, which makes the
	 * TARGET_AMD64 check above redundant — confirm whether this final
	 * return was meant to be TRUE. */
	return FALSE;
}
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the JIT icall described by INFO with arguments ARGS.
 * When direct icalls are enabled, the icall wrapper is inlined instead of
 * being called; otherwise an ordinary icall is emitted.
 */
MonoInst*
mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
{
	/*
	 * Call the jit icall without a wrapper if possible.
	 * The wrapper is needed to be able to do stack walks for asynchronously suspended
	 * threads when debugging.
	 */
	if (direct_icalls_enabled (cfg, NULL)) {
		int costs;

		if (!info->wrapper_method) {
			info->wrapper_method = mono_marshal_get_icall_wrapper (info, TRUE);
			/* Publish the fully constructed wrapper before other threads can observe it */
			mono_memory_barrier ();
		}

		/*
		 * Inline the wrapper method, which is basically a call to the C icall, and
		 * an exception check.
		 */
		costs = inline_method (cfg, info->wrapper_method, NULL,
							   args, NULL, il_offset, TRUE);
		g_assert (costs > 0);
		g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));

		/* The inlined call's result is returned through args [0] — presumably
		 * written by inline_method (); confirm against its contract. */
		return args [0];
	}
	return mono_emit_jit_icall_id (cfg, mono_jit_icall_info_id (info), args);
}
2202 static MonoInst*
2203 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2205 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2206 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2207 int widen_op = -1;
2210 * Native code might return non register sized integers
2211 * without initializing the upper bits.
2213 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2214 case OP_LOADI1_MEMBASE:
2215 widen_op = OP_ICONV_TO_I1;
2216 break;
2217 case OP_LOADU1_MEMBASE:
2218 widen_op = OP_ICONV_TO_U1;
2219 break;
2220 case OP_LOADI2_MEMBASE:
2221 widen_op = OP_ICONV_TO_I2;
2222 break;
2223 case OP_LOADU2_MEMBASE:
2224 widen_op = OP_ICONV_TO_U2;
2225 break;
2226 default:
2227 break;
2230 if (widen_op != -1) {
2231 int dreg = alloc_preg (cfg);
2232 MonoInst *widen;
2234 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2235 widen->type = ins->type;
2236 ins = widen;
2241 return ins;
/* Forward declaration: the definition appears further down in this file. */
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
					   MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2248 static void
2249 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2251 MonoInst *args [2];
2252 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2253 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2254 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2257 static void
2258 emit_bad_image_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2260 mono_emit_jit_icall (cfg, mono_throw_bad_image, NULL);
2263 static MonoMethod*
2264 get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
2266 MonoMethod *method;
2267 ERROR_DECL (error);
2268 method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
2269 mono_error_assert_ok (error);
2270 g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
2271 return method;
2274 MonoMethod*
2275 mini_get_memcpy_method (void)
2277 static MonoMethod *memcpy_method = NULL;
2278 if (!memcpy_method) {
2279 memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0);
2280 if (!memcpy_method)
2281 g_error ("Old corlib found. Install a new one");
2283 return memcpy_method;
/*
 * mini_emit_storing_write_barrier:
 *
 *   Emit a store of VALUE into *PTR followed by a GC write barrier.
 * Returns the store instruction.
 */
MonoInst*
mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
	MonoInst *store;

	/*
	 * Add a release memory barrier so the object contents are flushed
	 * to memory before storing the reference into another object.
	 */
	if (mini_debug_options.clr_memory_model)
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);

	EMIT_NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, ptr->dreg, 0, value->dreg);

	mini_emit_write_barrier (cfg, ptr, value);
	return store;
}
/*
 * mini_emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE into *PTR. Depending on
 * the backend and GC configuration this is a dedicated opcode, an inline
 * card-table update, or a call to the generic write barrier method.
 */
void
mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
	int card_table_shift_bits;
	target_mgreg_t card_table_mask;
	guint8 *card_table;
	MonoInst *dummy_use;
	int nursery_shift_bits;
	size_t nursery_size;

	if (!cfg->gen_write_barriers)
		return;

	//method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])

	card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask);

	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);

	if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
		/* The backend can emit the whole barrier as a single opcode */
		MonoInst *wbarrier;

		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
		wbarrier->sreg1 = ptr->dreg;
		wbarrier->sreg2 = value->dreg;
		MONO_ADD_INS (cfg->cbb, wbarrier);
	} else if (card_table) {
		int offset_reg = alloc_preg (cfg);
		int card_reg;
		MonoInst *ins;

		/*
		 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
		 * collector case, so, for the serial collector, it might slightly slow down nursery
		 * collections. We also expect that the host system and the target system have the same card
		 * table configuration, which is the case if they have the same pointer size.
		 */

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
		if (card_table_mask)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);

		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
		 * IMM's larger than 32bits.
		 */
		ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
		card_reg = ins->dreg;

		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
	} else {
		/* Fallback: call the managed write barrier helper */
		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	}

	/* Emit a dummy use of VALUE — presumably to keep it live up to the barrier; confirm */
	EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
2362 MonoMethod*
2363 mini_get_memset_method (void)
2365 static MonoMethod *memset_method = NULL;
2366 if (!memset_method) {
2367 memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0);
2368 if (!memset_method)
2369 g_error ("Old corlib found. Install a new one");
2371 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize the value type KLASS at the address DEST.
 * For gsharedvt classes the size is only known at runtime, so an indirect
 * call to the bzero helper obtained from the rgctx is emitted instead.
 */
void
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
	MonoInst *iargs [3];
	int n;
	guint32 align;
	MonoMethod *memset_method;
	MonoInst *size_ins = NULL;
	MonoInst *bzero_ins = NULL;
	static MonoMethod *bzero_method;

	/* FIXME: Optimize this for the case when dest is an LDADDR */
	mono_class_init_internal (klass);
	if (mini_is_gsharedvt_klass (klass)) {
		/* Size is not known at compile time; fetch size and bzero helper from the rgctx */
		size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
		if (!bzero_method)
			bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0);
		g_assert (bzero_method);
		iargs [0] = dest;
		iargs [1] = size_ins;
		mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL);
		return;
	}

	klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));

	n = mono_class_value_size (klass, &align);

	/* Small types are cleared with inline stores, larger ones via the memset helper */
	if (n <= TARGET_SIZEOF_VOID_P * 8) {
		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
	}
	else {
		memset_method = mini_get_memset_method ();
		iargs [0] = dest;
		EMIT_NEW_ICONST (cfg, iargs [1], 0);
		EMIT_NEW_ICONST (cfg, iargs [2], n);
		mono_emit_method_call (cfg, memset_method, iargs, NULL);
	}
}
2415 static gboolean
2416 context_used_is_mrgctx (MonoCompile *cfg, int context_used)
2418 /* gshared dim methods use an mrgctx */
2419 if (mini_method_is_default_method (cfg->method))
2420 return context_used != 0;
2421 return context_used & MONO_GENERIC_CONTEXT_USED_METHOD;
/*
 * emit_get_rgctx:
 *
 *   Emit IR to return either the this pointer for instance method,
 * or the mrgctx for static methods.
 */
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, int context_used)
{
	MonoInst *this_ins = NULL;
	MonoMethod *method = cfg->method;

	g_assert (cfg->gshared);

	/* Non-static, non-valuetype methods which only use the class context
	 * can recover the rgctx from the vtable of 'this'. */
	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
		!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
		!m_class_is_valuetype (method->klass))
		EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ());

	if (context_used_is_mrgctx (cfg, context_used)) {
		MonoInst *mrgctx_loc, *mrgctx_var;

		if (!mini_method_is_default_method (method)) {
			g_assert (!this_ins);
			g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
		}

		/* The mrgctx arrives as a hidden argument saved into a local */
		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

		return mrgctx_var;
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || m_class_is_valuetype (method->klass)) {
		MonoInst *vtable_loc, *vtable_var;

		g_assert (!this_ins);

		/* The vtable arrives as a hidden argument saved into a local */
		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);

		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* The hidden argument is actually an mrgctx; load its class_vtable field */
			MonoInst *mrgctx_var = vtable_var;
			int vtable_reg;

			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		}

		return vtable_var;
	} else {
		MonoInst *ins;
		int vtable_reg;

		/* Load the vtable from 'this' */
		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		return ins;
	}
}
2483 static MonoJumpInfoRgctxEntry *
2484 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2486 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2487 if (in_mrgctx)
2488 res->d.method = method;
2489 else
2490 res->d.klass = method->klass;
2491 res->in_mrgctx = in_mrgctx;
2492 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2493 res->data->type = patch_type;
2494 res->data->data.target = patch_data;
2495 res->info_type = info_type;
2497 return res;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline fetch of the rgctx entry ENTRY from RGCTX by calling the
 * rgctx lazy-fill icalls directly (used on llvm-only, which has no fetch
 * trampolines).
 */
static MonoInst*
emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
	MonoInst *args [16];
	MonoInst *call;

	// FIXME: No fastpath since the slot is not a compile time constant
	args [0] = rgctx;
	/* The slot index is only resolved at AOT/JIT compile time, so it is emitted as a patch */
	EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
	if (entry->in_mrgctx)
		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
	else
		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
	return call;
2514 #if 0
2516 * FIXME: This can be called during decompose, which is a problem since it creates
2517 * new bblocks.
2518 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
2520 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
2521 gboolean mrgctx;
2522 MonoBasicBlock *is_null_bb, *end_bb;
2523 MonoInst *res, *ins, *call;
2524 MonoInst *args[16];
2526 slot = mini_get_rgctx_entry_slot (entry);
2528 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
2529 index = MONO_RGCTX_SLOT_INDEX (slot);
2530 if (mrgctx)
2531 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P;
2532 for (depth = 0; ; ++depth) {
2533 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
2535 if (index < size - 1)
2536 break;
2537 index -= size - 1;
2540 NEW_BBLOCK (cfg, end_bb);
2541 NEW_BBLOCK (cfg, is_null_bb);
2543 if (mrgctx) {
2544 rgctx_reg = rgctx->dreg;
2545 } else {
2546 rgctx_reg = alloc_preg (cfg);
2548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
2549 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
2550 NEW_BBLOCK (cfg, is_null_bb);
2552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
2553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2556 for (i = 0; i < depth; ++i) {
2557 int array_reg = alloc_preg (cfg);
2559 /* load ptr to next array */
2560 if (mrgctx && i == 0)
2561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
2562 else
2563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
2564 rgctx_reg = array_reg;
2565 /* is the ptr null? */
2566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
2567 /* if yes, jump to actual trampoline */
2568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2571 /* fetch slot */
2572 val_reg = alloc_preg (cfg);
2573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * TARGET_SIZEOF_VOID_P);
2574 /* is the slot null? */
2575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
2576 /* if yes, jump to actual trampoline */
2577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2579 /* Fastpath */
2580 res_reg = alloc_preg (cfg);
2581 MONO_INST_NEW (cfg, ins, OP_MOVE);
2582 ins->dreg = res_reg;
2583 ins->sreg1 = val_reg;
2584 MONO_ADD_INS (cfg->cbb, ins);
2585 res = ins;
2586 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2588 /* Slowpath */
2589 MONO_START_BB (cfg, is_null_bb);
2590 args [0] = rgctx;
2591 EMIT_NEW_ICONST (cfg, args [1], index);
2592 if (mrgctx)
2593 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
2594 else
2595 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
2596 MONO_INST_NEW (cfg, ins, OP_MOVE);
2597 ins->dreg = res_reg;
2598 ins->sreg1 = call->dreg;
2599 MONO_ADD_INS (cfg->cbb, ins);
2600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2602 MONO_START_BB (cfg, end_bb);
2604 return res;
2605 #endif
2609 * emit_rgctx_fetch:
2611 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
2612 * given by RGCTX.
2614 static MonoInst*
2615 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2617 if (cfg->llvm_only)
2618 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
2619 else
2620 return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, mono_icall_sig_ptr_ptr, &rgctx);
2624 * mini_emit_get_rgctx_klass:
2626 * Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit
2627 * normal constants, else emit a load from the rgctx.
2629 MonoInst*
2630 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2631 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2633 if (!context_used) {
2634 MonoInst *ins;
2636 switch (rgctx_type) {
2637 case MONO_RGCTX_INFO_KLASS:
2638 EMIT_NEW_CLASSCONST (cfg, ins, klass);
2639 return ins;
2640 default:
2641 g_assert_not_reached ();
2645 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2646 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2648 return emit_rgctx_fetch (cfg, rgctx, entry);
2651 static MonoInst*
2652 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
2653 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
2655 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
2656 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2658 return emit_rgctx_fetch (cfg, rgctx, entry);
2661 static MonoInst*
2662 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
2663 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2665 MonoJumpInfoGSharedVtCall *call_info;
2666 MonoJumpInfoRgctxEntry *entry;
2667 MonoInst *rgctx;
2669 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
2670 call_info->sig = sig;
2671 call_info->method = cmethod;
2673 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
2674 rgctx = emit_get_rgctx (cfg, context_used);
2676 return emit_rgctx_fetch (cfg, rgctx, entry);
2680 * emit_get_rgctx_virt_method:
2682 * Return data for method VIRT_METHOD for a receiver of type KLASS.
2684 static MonoInst*
2685 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
2686 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
2688 MonoJumpInfoVirtMethod *info;
2689 MonoJumpInfoRgctxEntry *entry;
2690 MonoInst *rgctx;
2692 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
2693 info->klass = klass;
2694 info->method = virt_method;
2696 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
2697 rgctx = emit_get_rgctx (cfg, context_used);
2699 return emit_rgctx_fetch (cfg, rgctx, entry);
2702 static MonoInst*
2703 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
2704 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
2706 MonoJumpInfoRgctxEntry *entry;
2707 MonoInst *rgctx;
2709 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
2710 rgctx = emit_get_rgctx (cfg, context_used);
2712 return emit_rgctx_fetch (cfg, rgctx, entry);
2716 * emit_get_rgctx_method:
2718 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2719 * normal constants, else emit a load from the rgctx.
2721 static MonoInst*
2722 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2723 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2725 if (context_used == -1)
2726 context_used = mono_method_check_context_used (cmethod);
2728 if (!context_used) {
2729 MonoInst *ins;
2731 switch (rgctx_type) {
2732 case MONO_RGCTX_INFO_METHOD:
2733 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2734 return ins;
2735 case MONO_RGCTX_INFO_METHOD_RGCTX:
2736 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2737 return ins;
2738 case MONO_RGCTX_INFO_METHOD_FTNDESC:
2739 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_FTNDESC, cmethod);
2740 return ins;
2741 default:
2742 g_assert_not_reached ();
2744 } else {
2745 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2746 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2748 return emit_rgctx_fetch (cfg, rgctx, entry);
2752 static MonoInst*
2753 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2754 MonoClassField *field, MonoRgctxInfoType rgctx_type)
2756 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type);
2757 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2759 return emit_rgctx_fetch (cfg, rgctx, entry);
2762 MonoInst*
2763 mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2764 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2766 return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type);
2769 static int
2770 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
2772 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
2773 MonoRuntimeGenericContextInfoTemplate *template_;
2774 int i, idx;
2776 g_assert (info);
2778 for (i = 0; i < info->num_entries; ++i) {
2779 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
2781 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
2782 return i;
2785 if (info->num_entries == info->count_entries) {
2786 MonoRuntimeGenericContextInfoTemplate *new_entries;
2787 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
2789 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
2791 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
2792 info->entries = new_entries;
2793 info->count_entries = new_count_entries;
2796 idx = info->num_entries;
2797 template_ = &info->entries [idx];
2798 template_->info_type = rgctx_type;
2799 template_->data = data;
2801 info->num_entries ++;
2803 return idx;
2807 * emit_get_gsharedvt_info:
2809 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
2811 static MonoInst*
2812 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
2814 MonoInst *ins;
2815 int idx, dreg;
2817 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
2818 /* Load info->entries [idx] */
2819 dreg = alloc_preg (cfg);
2820 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
2822 return ins;
2825 MonoInst*
2826 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
2828 return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR to run the class initializer of KLASS if it has not run yet.
 * On return the caller must check @klass for load errors.
 */
static void
emit_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	int context_used;

	context_used = mini_class_check_context_used (cfg, klass);

	if (context_used) {
		/* Shared code: the vtable must be fetched from the rgctx at runtime */
		vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
												klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, cfg->error);
		if (!is_ok (cfg->error)) {
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
			return;
		}

		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
		MonoInst *ins;

		/*
		 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
		 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
		 */
		MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
		ins->sreg1 = vtable_arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	} else {
		int inited_reg;
		MonoBasicBlock *inited_bb;

		inited_reg = alloc_ireg (cfg);

		/* Fast path: skip the icall when vtable->initialized is already set */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));

		NEW_BBLOCK (cfg, inited_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);

		mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg);

		MONO_START_BB (cfg, inited_bb);
	}
}
2884 static void
2885 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
2887 MonoInst *ins;
2889 if (cfg->gen_seq_points && cfg->method == method) {
2890 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
2891 if (nonempty_stack)
2892 ins->flags |= MONO_INST_NONEMPTY_STACK;
2893 MONO_ADD_INS (cfg->cbb, ins);
2894 cfg->last_seq_point = ins;
/*
 * mini_save_cast_details:
 *
 *   When --debug=casts is active, emit IR which records the source and
 * target classes of a cast into TLS so a failing cast can produce a
 * detailed error message. When NULL_CHECK is set, the recording is skipped
 * for a null object.
 */
void
mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
{
	if (mini_debug_options.better_cast_details) {
		int vtable_reg = alloc_preg (cfg);
		int klass_reg = alloc_preg (cfg);
		MonoBasicBlock *is_null_bb = NULL;
		MonoInst *tls_get;

		if (null_check) {
			NEW_BBLOCK (cfg, is_null_bb);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
		}

		tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
		if (!tls_get) {
			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
			exit (1);
		}

		/* Record the dynamic class of the object being cast */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);

		/* Record the class being cast to */
		MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg);

		if (null_check)
			MONO_START_BB (cfg, is_null_bb);
	}
}
2933 void
2934 mini_reset_cast_details (MonoCompile *cfg)
2936 /* Reset the variables holding the cast details */
2937 if (mini_debug_options.better_cast_details) {
2938 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
2939 /* It is enough to reset the from field */
2940 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws an ArrayTypeMismatchException unless the runtime
 * type of OBJ matches ARRAY_CLASS.
 * On return the caller must check @array_class for load errors
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used;

	context_used = mini_class_check_context_used (cfg, array_class);

	mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);

	/* This load also acts as the null check on OBJ */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Shared code cannot embed vtable pointers; compare the class instead */
		int class_reg = alloc_preg (cfg);
		MonoInst *ins;

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
	} else if (context_used) {
		/* Fetch the expected vtable from the rgctx */
		MonoInst *vtable_ins;

		vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			/* AOT: the vtable is referenced through a patchable constant */
			int vt_reg;
			MonoVTable *vtable;

			if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, cfg->error))) {
				mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
				return;
			}
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MonoVTable *vtable;
			if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, cfg->error))) {
				mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
				return;
			}
			/* JIT: the vtable address can be embedded directly as an immediate */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");

	mini_reset_cast_details (cfg);
}
/*
 * handle_unbox_nullable:
 *
 *   Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	MonoMethod* method;

	/* Enum element types get the UnboxExact variant — presumably to require
	 * an exact type match; confirm against Nullable's corlib implementation. */
	if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass)))
		method = get_method_nofail (klass, "UnboxExact", 1, 0);
	else
		method = get_method_nofail (klass, "Unbox", 1, 0);
	g_assert (method);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared?  We might not
		   have to get the address of the method from the
		   RGCTX. */
		if (cfg->llvm_only) {
			addr = emit_get_rgctx_method (cfg, context_used, method,
										  MONO_RGCTX_INFO_METHOD_FTNDESC);
			/* Remember the signature so the AOT compiler can emit the calli wrapper */
			cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method));
			return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
		} else {
			addr = emit_get_rgctx_method (cfg, context_used, method,
										  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
			rgctx = emit_get_rgctx (cfg, context_used);

			return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
		}
	} else {
		gboolean pass_vtable, pass_mrgctx;
		MonoInst *rgctx_arg = NULL;

		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
		g_assert (!pass_mrgctx);

		if (pass_vtable) {
			MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, cfg->error);

			mono_error_assert_ok (cfg->error);
			EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
		}

		return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
	}
}
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (SP [0]) as the value
 * type KLASS. Returns an instruction holding the address of the unboxed
 * payload (obj + sizeof (MonoObject)).
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* This load also acts as the null check */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (m_class_get_rank (klass) == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ());

	if (context_used) {
		/* Shared code: compare against the element class fetched from the rgctx */
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (m_class_get_rank (klass) == 0);

		element_class = mini_emit_get_rgctx_klass (cfg, context_used,
												   klass, MONO_RGCTX_INFO_ELEMENT_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE);
		mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass));
		mini_reset_cast_details (cfg);
	}

	/* The unboxed payload starts right after the object header */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ to the gsharedvt (variable-size generic) type KLASS.
 * Because the instantiation is only known at runtime, a three-way branch is
 * emitted on the RGCTX's box-type info: valuetype (unbox in place), reference
 * type (store the ref to a temporary and use its address), or Nullable<T>
 * (call the Nullable unbox method through an indirect call).  All three arms
 * leave an address in addr_reg, from which the final LDOBJ loads the result.
 */
3098 static MonoInst*
3099 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3101 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3102 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3103 MonoInst *ins;
3104 int dreg, addr_reg;
3106 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3108 /* obj */
3109 args [0] = obj;
3111 /* klass */
3112 args [1] = klass_inst;
3114 /* CASTCLASS */
3115 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3117 NEW_BBLOCK (cfg, is_ref_bb);
3118 NEW_BBLOCK (cfg, is_nullable_bb);
3119 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type stored in the gsharedvt info. */
3120 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3127 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3128 addr_reg = alloc_dreg (cfg, STACK_MP);
3130 /* Non-ref case */
3131 /* UNBOX */
3132 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject));
3133 MONO_ADD_INS (cfg->cbb, addr);
3135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3137 /* Ref case */
3138 MONO_START_BB (cfg, is_ref_bb);
3140 /* Save the ref to a temporary */
3141 dreg = alloc_ireg (cfg);
3142 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass));
3143 addr->dreg = addr_reg;
3144 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3147 /* Nullable case */
3148 MONO_START_BB (cfg, is_nullable_bb);
3151 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3152 MonoInst *unbox_call;
3153 MonoMethodSignature *unbox_sig;
/* The Nullable<T>.Unbox helper cannot be constructed at JIT time for a
 * gsharedvt instantiation, so build its one-argument signature by hand. */
3155 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3156 unbox_sig->ret = m_class_get_byval_arg (klass);
3157 unbox_sig->param_count = 1;
3158 unbox_sig->params [0] = mono_get_object_type ();
3160 if (cfg->llvm_only)
3161 unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3162 else
3163 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3165 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass));
3166 addr->dreg = addr_reg;
3169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3171 /* End */
3172 MONO_START_BB (cfg, end_bb);
3174 /* LDOBJ */
3175 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0);
3177 return ins;
3181 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS, returning the instruction
 * holding the new object reference.  FOR_BOX is TRUE when the allocation
 * is part of a box operation (passed through to the managed-allocator
 * selection).  CONTEXT_USED is nonzero in shared code, where the
 * klass/vtable must come from the RGCTX.  Returns NULL and sets the cfg
 * exception on error (abstract class, vtable creation failure).
 */
3183 static MonoInst*
3184 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3186 MonoInst *iargs [2];
3187 MonoJitICallId alloc_ftn;
3189 if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) {
3190 char* full_name = mono_type_get_full_name (klass);
3191 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3192 mono_error_set_member_access (cfg->error, "Cannot create an abstract class: %s", full_name);
3193 g_free (full_name);
3194 return NULL;
3197 if (context_used) {
3198 MonoInst *data;
3199 MonoRgctxInfoType rgctx_info;
/* NOTE: this inner iargs shadows the outer declaration above. */
3200 MonoInst *iargs [2];
3201 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3203 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3205 if (cfg->opt & MONO_OPT_SHARED)
3206 rgctx_info = MONO_RGCTX_INFO_KLASS;
3207 else
3208 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3209 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3211 if (cfg->opt & MONO_OPT_SHARED) {
3212 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3213 iargs [1] = data;
3214 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new;
3215 } else {
3216 iargs [0] = data;
3217 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
/* Prefer the inlined managed allocator when the GC provides one. */
3220 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3221 if (known_instance_size) {
3222 int size = mono_class_instance_size (klass);
3223 if (size < MONO_ABI_SIZEOF (MonoObject))
3224 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3226 EMIT_NEW_ICONST (cfg, iargs [1], size);
3228 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3231 return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
3234 if (cfg->opt & MONO_OPT_SHARED) {
3235 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3236 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3238 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new;
3239 } else if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3240 /* This happens often in argument checking code, eg. throw new FooException... */
3241 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3242 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass)));
3243 alloc_ftn = MONO_JIT_ICALL_mono_helper_newobj_mscorlib;
3244 } else {
3245 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, cfg->error);
3247 if (!is_ok (cfg->error)) {
3248 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3249 return NULL;
3252 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3254 if (managed_alloc) {
3255 int size = mono_class_instance_size (klass);
3256 if (size < MONO_ABI_SIZEOF (MonoObject))
3257 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3259 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3260 EMIT_NEW_ICONST (cfg, iargs [1], size);
3261 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3263 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
3264 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3267 return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
3271 * Returns NULL and set the cfg exception on error.
/*
 * mini_emit_box:
 *
 *   Emit IR to box the value VAL of type KLASS, returning the instruction
 * holding the resulting object reference.  Handles three special cases:
 * byref-like types (always an error), Nullable<T> (delegates to
 * Nullable<T>.Box, possibly through the RGCTX in shared code), and
 * gsharedvt types (runtime branch on whether the instantiation is a
 * valuetype, reference type or Nullable).  Returns NULL and sets the cfg
 * exception on error.
 */
3273 MonoInst*
3274 mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3276 MonoInst *alloc, *ins;
3278 if (G_UNLIKELY (m_class_is_byreflike (klass))) {
3279 mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass));
3280 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3281 return NULL;
3284 if (mono_class_is_nullable (klass)) {
3285 MonoMethod* method = get_method_nofail (klass, "Box", 1, 0);
3287 if (context_used) {
3288 if (cfg->llvm_only && cfg->gsharedvt) {
3289 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3290 MONO_RGCTX_INFO_METHOD_FTNDESC);
3291 return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
3292 } else {
3293 /* FIXME: What if the class is shared? We might not
3294 have to get the method address from the RGCTX. */
3295 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3296 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3297 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
3299 return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
3301 } else {
3302 gboolean pass_vtable, pass_mrgctx;
3303 MonoInst *rgctx_arg = NULL;
3305 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3306 g_assert (!pass_mrgctx);
3308 if (pass_vtable) {
3309 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, cfg->error);
3311 mono_error_assert_ok (cfg->error);
3312 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3315 return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3319 if (mini_is_gsharedvt_klass (klass)) {
3320 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3321 MonoInst *res, *is_ref, *src_var, *addr;
3322 int dreg;
3324 dreg = alloc_ireg (cfg);
3326 NEW_BBLOCK (cfg, is_ref_bb);
3327 NEW_BBLOCK (cfg, is_nullable_bb);
3328 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type, mirroring handle_unbox_gsharedvt (). */
3329 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3330 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3336 /* Non-ref case */
3337 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3338 if (!alloc)
3339 return NULL;
3340 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
3341 ins->opcode = OP_STOREV_MEMBASE;
3343 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3344 res->type = STACK_OBJ;
3345 res->klass = klass;
3346 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3348 /* Ref case */
3349 MONO_START_BB (cfg, is_ref_bb);
3351 /* val is a vtype, so has to load the value manually */
3352 src_var = get_vreg_to_inst (cfg, val->dreg);
3353 if (!src_var)
3354 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg);
3355 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3359 /* Nullable case */
3360 MONO_START_BB (cfg, is_nullable_bb);
3363 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
3364 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3365 MonoInst *box_call;
3366 MonoMethodSignature *box_sig;
3369 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3370 * construct that method at JIT time, so have to do things by hand.
3372 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3373 box_sig->ret = mono_get_object_type ();
3374 box_sig->param_count = 1;
3375 box_sig->params [0] = m_class_get_byval_arg (klass);
3377 if (cfg->llvm_only)
3378 box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr);
3379 else
3380 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3381 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3382 res->type = STACK_OBJ;
3383 res->klass = klass;
3386 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3388 MONO_START_BB (cfg, end_bb);
3390 return res;
/* Plain valuetype: allocate the box object and store the value after the header. */
3393 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3394 if (!alloc)
3395 return NULL;
3397 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
3398 return alloc;
/*
 * method_needs_stack_walk:
 *
 *   Return TRUE if CMETHOD needs to walk the call stack at runtime, which
 * prevents certain optimizations (e.g. inlining through it).  Currently only
 * System.Type:GetType () is flagged.
 */
3401 static gboolean
3402 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3404 if (cmethod->klass == mono_defaults.systemtype_class) {
3405 if (!strcmp (cmethod->name, "GetType"))
3406 return TRUE;
3408 return FALSE;
/*
 * mini_handle_enum_has_flag:
 *
 *   Emit an intrinsic expansion of Enum.HasFlag () for enum type KLASS:
 * computes (value & flag) == flag and returns the resulting I4 (0/1)
 * instruction.  The enum value comes either from memory via ENUM_THIS
 * (loaded with the type-appropriate opcode) or directly from the vreg
 * ENUM_VAL_REG.  Uses 32-bit or 64-bit ALU ops depending on the enum's
 * underlying type; on 32-bit targets the 64-bit ops are decomposed.
 */
3411 G_GNUC_UNUSED MonoInst*
3412 mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag)
3414 MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass));
3415 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
3416 gboolean is_i4;
3418 switch (enum_type->type) {
3419 case MONO_TYPE_I8:
3420 case MONO_TYPE_U8:
3421 #if SIZEOF_REGISTER == 8
3422 case MONO_TYPE_I:
3423 case MONO_TYPE_U:
3424 #endif
3425 is_i4 = FALSE;
3426 break;
3427 default:
3428 is_i4 = TRUE;
3429 break;
3433 MonoInst *load = NULL, *and_, *cmp, *ceq;
3434 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3435 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3436 int dest_reg = alloc_ireg (cfg);
3438 if (enum_this) {
3439 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
3440 } else {
3441 g_assert (enum_val_reg != -1);
3442 enum_reg = enum_val_reg;
/* result = ((value & flag) == flag) */
3444 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
3445 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
3446 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
3448 ceq->type = STACK_I4;
3450 if (!is_i4) {
/* 64-bit ops on a 32-bit target need decomposition into register pairs. */
3451 load = load ? mono_decompose_opcode (cfg, load) : NULL;
3452 and_ = mono_decompose_opcode (cfg, and_);
3453 cmp = mono_decompose_opcode (cfg, cmp);
3454 ceq = mono_decompose_opcode (cfg, ceq);
3457 return ceq;
/*
 * emit_get_rgctx_dele_tramp:
 *
 *   Emit IR to fetch the delegate trampoline info for the (KLASS,
 * VIRT_METHOD, _VIRTUAL) triple from the runtime generic context, using a
 * lazily-resolved RGCTX entry of type RGCTX_TYPE.  Returns the instruction
 * holding the fetched value.
 */
3461 static MonoInst*
3462 emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used,
3463 MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type)
3465 MonoDelegateClassMethodPair *info;
3466 MonoJumpInfoRgctxEntry *entry;
3467 MonoInst *rgctx;
/* The pair lives in the cfg mempool; it is owned by the compile, not freed here. */
3469 info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
3470 info->klass = klass;
3471 info->method = virt_method;
3472 info->is_virtual = _virtual;
3474 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type);
3475 rgctx = emit_get_rgctx (cfg, context_used);
3477 return emit_rgctx_fetch (cfg, rgctx, entry);
3482 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR to construct a delegate of type KLASS over METHOD with first
 * argument TARGET, inlining the work normally done by mono_delegate_ctor ():
 * allocate the delegate object, store the target (with null check and write
 * barrier), store the method, cache a per-domain code slot, and install the
 * invoke/method_ptr trampolines (from the RGCTX, an AOT constant, or a
 * JIT-created trampoline, depending on sharing/AOT mode).  VIRTUAL_ selects
 * the virtual-dispatch delegate path.  Returns the delegate object
 * instruction, or NULL with the cfg exception set on error.
 */
3484 static G_GNUC_UNUSED MonoInst*
3485 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_)
3487 MonoInst *ptr;
3488 int dreg;
3489 gpointer trampoline;
3490 MonoInst *obj, *tramp_ins;
3491 MonoDomain *domain;
3492 guint8 **code_slot;
3494 if (virtual_ && !cfg->llvm_only) {
3495 MonoMethod *invoke = mono_get_delegate_invoke_internal (klass);
3496 g_assert (invoke);
3498 //FIXME verify & fix any issue with removing invoke_context_used restriction
3499 if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? NULL : method))
3500 return NULL;
3503 obj = handle_alloc (cfg, klass, FALSE, invoke_context_used);
3504 if (!obj)
3505 return NULL;
3507 /* Inline the contents of mono_delegate_ctor */
3509 /* Set target field */
3510 /* Optimize away setting of NULL target */
3511 if (!MONO_INS_IS_PCONST_NULL (target)) {
3512 if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
3513 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
3514 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
3516 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3517 if (cfg->gen_write_barriers) {
3518 dreg = alloc_preg (cfg);
3519 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
3520 mini_emit_write_barrier (cfg, ptr, target);
3524 /* Set method field */
3525 if (!(target_method_context_used || invoke_context_used) || cfg->llvm_only) {
3526 //If compiling with gsharing enabled, it's faster to load method the delegate trampoline info than to use a rgctx slot
3527 MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD);
3528 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3532 * To avoid looking up the compiled code belonging to the target method
3533 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3534 * store it, and we fill it after the method has been compiled.
3536 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3537 MonoInst *code_slot_ins;
3539 if (target_method_context_used) {
3540 code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3541 } else {
3542 domain = mono_domain_get ();
/* The method_code_hash is created and queried under the domain lock. */
3543 mono_domain_lock (domain);
3544 if (!domain_jit_info (domain)->method_code_hash)
3545 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3546 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3547 if (!code_slot) {
3548 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
3549 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3551 mono_domain_unlock (domain);
3553 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
3555 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3558 if (cfg->llvm_only) {
3559 if (virtual_) {
3560 MonoInst *args [ ] = {
3561 obj,
3562 target,
3563 emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD)
3565 mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate_virtual, args);
3566 } else {
3567 mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate, &obj);
/* llvm-only mode: the icalls above finish initialization; nothing else to emit. */
3570 return obj;
3572 if (target_method_context_used || invoke_context_used) {
3573 tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);
3575 //This is emited as a contant store for the non-shared case.
3576 //We copy from the delegate trampoline info as it's faster than a rgctx fetch
3577 dreg = alloc_preg (cfg);
3578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method));
3579 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg);
3580 } else if (cfg->compile_aot) {
3581 MonoDelegateClassMethodPair *del_tramp;
3583 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
3584 del_tramp->klass = klass;
3585 del_tramp->method = method;
3586 del_tramp->is_virtual = virtual_;
3587 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
3588 } else {
3589 if (virtual_)
3590 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, method);
3591 else
3592 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, method);
3593 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3596 /* Set invoke_impl field */
3597 if (virtual_) {
3598 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3599 } else {
3600 dreg = alloc_preg (cfg);
3601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
3602 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
3604 dreg = alloc_preg (cfg);
3605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
3606 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
3609 dreg = alloc_preg (cfg);
3610 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
3611 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
3613 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3615 return obj;
3619 * handle_constrained_gsharedvt_call:
3621 * Handle constrained calls where the receiver is a gsharedvt type.
3622 * Return the instruction representing the call. Set the cfg exception on failure.
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Handle constrained calls where the receiver is a gsharedvt type.  Since
 * the receiver may be instantiated as either a ref type or a vtype at
 * runtime, the arguments are packed into a localloc-ed array and the call is
 * routed through the mono_gsharedvt_constrained_call icall, which resolves
 * the dispatch at runtime.  Only a limited set of signatures is supported;
 * unsupported ones raise GSHAREDVT_FAILURE.  Returns the call-result
 * instruction (unboxed if the return is a gsharedvt/primitive/struct type),
 * sets *REF_EMIT_WIDEN, and sets the cfg exception on failure.
 */
3624 static MonoInst*
3625 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
3626 gboolean *ref_emit_widen)
3628 MonoInst *ins = NULL;
3629 gboolean emit_widen = *ref_emit_widen;
3630 gboolean supported;
3633 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
3634 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
3635 * pack the arguments into an array, and do the rest of the work in in an icall.
3637 supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib));
3638 if (supported)
3639 supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret));
3640 if (supported) {
3641 if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) {
3642 supported = TRUE;
3643 } else {
3644 /* Allow scalar parameters and a gsharedvt first parameter */
3645 supported = MONO_TYPE_IS_PRIMITIVE (fsig->params [0]) || MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]);
3646 if (supported) {
3647 for (int i = 1; i < fsig->param_count; ++i) {
3648 if (!(fsig->params [i]->byref || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])))
3649 supported = FALSE;
3654 if (supported) {
3655 MonoInst *args [16];
3658 * This case handles calls to
3659 * - object:ToString()/Equals()/GetHashCode(),
3660 * - System.IComparable<T>:CompareTo()
3661 * - System.IEquatable<T>:Equals ()
3662 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
3665 args [0] = sp [0];
3666 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
3667 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
3669 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
3670 if (fsig->hasthis && fsig->param_count) {
3671 /* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean deref_arg, gpointer *args) */
3672 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
3673 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
3674 ins->dreg = alloc_preg (cfg);
3675 ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t);
3676 MONO_ADD_INS (cfg->cbb, ins);
3677 args [4] = ins;
3679 /* Only the first argument is allowed to be gsharedvt */
3680 /* args [3] = deref_arg */
3681 if (mini_is_gsharedvt_type (fsig->params [0])) {
3682 int deref_arg_reg;
3683 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3684 deref_arg_reg = alloc_preg (cfg);
3685 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
3686 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
3687 } else {
3688 EMIT_NEW_ICONST (cfg, args [3], 0);
/* Store each argument (or its address, for by-value aggregates) into the array. */
3691 for (int i = 0; i < fsig->param_count; ++i) {
3692 int addr_reg;
3694 if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])) {
3695 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [i + 1]->dreg, fsig->params [i]);
3696 addr_reg = ins->dreg;
3697 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg);
3698 } else {
3699 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
3702 } else {
3703 EMIT_NEW_ICONST (cfg, args [3], 0);
3704 EMIT_NEW_ICONST (cfg, args [4], 0);
3706 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
3707 emit_widen = FALSE;
/* The icall returns a boxed result; unbox according to the static return type. */
3709 if (mini_is_gsharedvt_type (fsig->ret)) {
3710 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins);
3711 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) {
3712 MonoInst *add;
3714 /* Unbox */
3715 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject));
3716 MONO_ADD_INS (cfg->cbb, add);
3717 /* Load value */
3718 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
3719 MONO_ADD_INS (cfg->cbb, ins);
3720 /* ins represents the call result */
3722 } else {
3723 GSHAREDVT_FAILURE (CEE_CALLVIRT);
3726 *ref_emit_widen = emit_widen;
3728 return ins;
3730 exception_exit:
3731 return NULL;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the start of the entry basic block to
 * initialize cfg->got_var, plus a dummy use in the exit block so the
 * variable stays live for the whole method.  No-op if there is no got_var
 * or it was already allocated.
 */
3734 static void
3735 mono_emit_load_got_addr (MonoCompile *cfg)
3737 MonoInst *getaddr, *dummy_use;
3739 if (!cfg->got_var || cfg->got_var_allocated)
3740 return;
3742 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3743 getaddr->cil_code = cfg->header->code;
3744 getaddr->dreg = cfg->got_var->dreg;
3746 /* Add it to the start of the first bblock */
3747 if (cfg->bb_entry->code) {
3748 getaddr->next = cfg->bb_entry->code;
3749 cfg->bb_entry->code = getaddr;
3751 else
3752 MONO_ADD_INS (cfg->bb_entry, getaddr);
3754 cfg->got_var_allocated = TRUE;
3757 * Add a dummy use to keep the got_var alive, since real uses might
3758 * only be generated by the back ends.
3759 * Add it to end_bblock, so the variable's lifetime covers the whole
3760 * method.
3761 * It would be better to make the usage of the got var explicit in all
3762 * cases when the backend needs it (i.e. calls, throw etc.), so this
3763 * wouldn't be needed.
3765 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3766 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * method_does_not_return:
 *
 *   Heuristically return TRUE if METHOD never returns: a non-inflated
 * corlib ThrowHelper method whose name starts with "Throw".  Used to avoid
 * inlining such methods.
 */
3769 static gboolean
3770 method_does_not_return (MonoMethod *method)
3772 // FIXME: Under netcore, these are decorated with the [DoesNotReturn] attribute
3773 return m_class_get_image (method->klass) == mono_defaults.corlib &&
3774 !strcmp (m_class_get_name (method->klass), "ThrowHelper") &&
3775 strstr (method->name, "Throw") == method->name &&
3776 !method->is_inflated;
/* Inline size limits, lazily initialized from the MONO_INLINELIMIT env var
 * (or the compile-time defaults) on first use. */
3779 static int inline_limit, llvm_jit_inline_limit, llvm_aot_inline_limit;
3780 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Return TRUE if METHOD is eligible for inlining into the method being
 * compiled in CFG.  Rejects methods that are too large (unless marked
 * AggressiveInlining), have EH clauses, are synchronized/noinline/
 * marshalbyref, would require running a class constructor inside the
 * inlined code, are on the dont_inline list, are being instrumented by a
 * profiler, or never return.  May eagerly run the class cctor as a side
 * effect when the AggressiveInlining hint is present.
 */
3782 static gboolean
3783 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3785 MonoMethodHeaderSummary header;
3786 MonoVTable *vtable;
3787 int limit;
3788 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3789 MonoMethodSignature *sig = mono_method_signature_internal (method);
3790 int i;
3791 #endif
3793 if (cfg->disable_inline)
3794 return FALSE;
3795 if (cfg->gsharedvt)
3796 return FALSE;
3798 if (cfg->inline_depth > 10)
3799 return FALSE;
3801 if (!mono_method_get_header_summary (method, &header))
3802 return FALSE;
3804 /*runtime, icall and pinvoke are checked by summary call*/
3805 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3806 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3807 (mono_class_is_marshalbyref (method->klass)) ||
3808 header.has_clauses)
3809 return FALSE;
3811 if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ)
3812 /* Used to mark methods containing StackCrawlMark locals */
3813 return FALSE;
3815 /* also consider num_locals? */
3816 /* Do the size check early to avoid creating vtables */
3817 if (!inline_limit_inited) {
3818 char *inlinelimit;
3819 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
3820 inline_limit = atoi (inlinelimit);
3821 llvm_jit_inline_limit = inline_limit;
3822 llvm_aot_inline_limit = inline_limit;
3823 g_free (inlinelimit);
3824 } else {
3825 inline_limit = INLINE_LENGTH_LIMIT;
3826 llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT;
3827 llvm_aot_inline_limit = LLVM_AOT_INLINE_LENGTH_LIMIT;
3829 inline_limit_inited = TRUE;
/* Pick the size limit appropriate for the active backend (LLVM JIT/AOT vs mini). */
3832 #ifdef ENABLE_NETCORE
3833 if (COMPILE_LLVM (cfg)) {
3834 if (cfg->compile_aot)
3835 limit = llvm_aot_inline_limit;
3836 else
3837 limit = llvm_jit_inline_limit;
3838 } else {
3839 limit = inline_limit;
3841 #else
3842 if (COMPILE_LLVM (cfg) && !cfg->compile_aot)
3843 limit = llvm_jit_inline_limit;
3844 else
3845 limit = inline_limit;
3846 #endif
3847 if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
3848 return FALSE;
3851 * if we can initialize the class of the method right away, we do,
3852 * otherwise we don't allow inlining if the class needs initialization,
3853 * since it would mean inserting a call to mono_runtime_class_init()
3854 * inside the inlined code
3856 if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass))
3857 return FALSE;
3859 if (!(cfg->opt & MONO_OPT_SHARED)) {
3860 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
3861 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
3862 if (m_class_has_cctor (method->klass)) {
3863 ERROR_DECL (error);
3864 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3865 if (!is_ok (error)) {
3866 mono_error_cleanup (error);
3867 return FALSE;
3869 if (!cfg->compile_aot) {
3870 if (!mono_runtime_class_init_full (vtable, error)) {
3871 mono_error_cleanup (error);
3872 return FALSE;
3876 } else if (mono_class_is_before_field_init (method->klass)) {
3877 if (cfg->run_cctors && m_class_has_cctor (method->klass)) {
3878 ERROR_DECL (error);
3879 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3880 if (!m_class_get_runtime_info (method->klass))
3881 /* No vtable created yet */
3882 return FALSE;
3883 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3884 if (!is_ok (error)) {
3885 mono_error_cleanup (error);
3886 return FALSE;
3888 /* This makes so that inline cannot trigger */
3889 /* .cctors: too many apps depend on them */
3890 /* running with a specific order... */
3891 if (! vtable->initialized)
3892 return FALSE;
3893 if (!mono_runtime_class_init_full (vtable, error)) {
3894 mono_error_cleanup (error);
3895 return FALSE;
3898 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3899 ERROR_DECL (error);
3900 if (!m_class_get_runtime_info (method->klass))
3901 /* No vtable created yet */
3902 return FALSE;
3903 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3904 if (!is_ok (error)) {
3905 mono_error_cleanup (error);
3906 return FALSE;
3908 if (!vtable->initialized)
3909 return FALSE;
3911 } else {
3913 * If we're compiling for shared code
3914 * the cctor will need to be run at aot method load time, for example,
3915 * or at the end of the compilation of the inlining method.
3917 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
3918 return FALSE;
3921 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3922 if (mono_arch_is_soft_float ()) {
3923 /* FIXME: */
3924 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3925 return FALSE;
3926 for (i = 0; i < sig->param_count; ++i)
3927 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3928 return FALSE;
3930 #endif
3932 if (g_list_find (cfg->dont_inline, method))
3933 return FALSE;
3935 if (mono_profiler_get_call_instrumentation_flags (method))
3936 return FALSE;
3938 if (mono_profiler_coverage_instrumentation_enabled (method))
3939 return FALSE;
3941 if (method_does_not_return (method))
3942 return FALSE;
3944 return TRUE;
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return TRUE if a static-field access to KLASS from METHOD requires the
 * class constructor to be run first (i.e. code to trigger it must be
 * emitted).  FALSE when the vtable is already initialized (JIT mode), when
 * the class is BeforeFieldInit and accessed from its own cctor context, or
 * when initialization is guaranteed before the method runs.
 */
3947 static gboolean
3948 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
3950 if (!cfg->compile_aot) {
3951 g_assert (vtable);
3952 if (vtable->initialized)
3953 return FALSE;
3956 if (mono_class_is_before_field_init (klass)) {
3957 if (cfg->method == method)
3958 return FALSE;
3961 if (!mono_class_needs_cctor_run (klass, method))
3962 return FALSE;
3964 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
3965 /* The initialization is already done before the method is called */
3966 return FALSE;
3968 return TRUE;
3972 mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index)
3974 int index_reg = index->dreg;
3975 int index2_reg;
3977 #if SIZEOF_REGISTER == 8
3978 /* The array reg is 64 bits but the index reg is only 32 */
3979 if (COMPILE_LLVM (cfg)) {
3981 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
3982 * during OP_BOUNDS_CHECK decomposition, and in the implementation
3983 * of OP_X86_LEA for llvm.
3985 index2_reg = index_reg;
3986 } else {
3987 index2_reg = alloc_preg (cfg);
3988 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3990 #else
3991 if (index->type == STACK_I8) {
3992 index2_reg = alloc_preg (cfg);
3993 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3994 } else {
3995 index2_reg = index_reg;
3997 #endif
3999 return index2_reg;
4002 MonoInst*
4003 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4005 MonoInst *ins;
4006 guint32 size;
4007 int mult_reg, add_reg, array_reg, index2_reg;
4008 int context_used;
4010 if (mini_is_gsharedvt_variable_klass (klass)) {
4011 size = -1;
4012 } else {
4013 mono_class_init_internal (klass);
4014 size = mono_class_array_element_size (klass);
4017 mult_reg = alloc_preg (cfg);
4018 array_reg = arr->dreg;
4020 index2_reg = mini_emit_sext_index_reg (cfg, index);
4022 if (bcheck)
4023 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4025 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4026 if (size == 1 || size == 2 || size == 4 || size == 8) {
4027 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4029 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4030 ins->klass = klass;
4031 ins->type = STACK_MP;
4033 return ins;
4035 #endif
4037 add_reg = alloc_ireg_mp (cfg);
4039 if (size == -1) {
4040 MonoInst *rgctx_ins;
4042 /* gsharedvt */
4043 g_assert (cfg->gshared);
4044 context_used = mini_class_check_context_used (cfg, klass);
4045 g_assert (context_used);
4046 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4047 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4048 } else {
4049 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4051 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4052 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4053 ins->klass = klass;
4054 ins->type = STACK_MP;
4055 MONO_ADD_INS (cfg->cbb, ins);
4057 return ins;
4060 static MonoInst*
4061 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4063 int bounds_reg = alloc_preg (cfg);
4064 int add_reg = alloc_ireg_mp (cfg);
4065 int mult_reg = alloc_preg (cfg);
4066 int mult2_reg = alloc_preg (cfg);
4067 int low1_reg = alloc_preg (cfg);
4068 int low2_reg = alloc_preg (cfg);
4069 int high1_reg = alloc_preg (cfg);
4070 int high2_reg = alloc_preg (cfg);
4071 int realidx1_reg = alloc_preg (cfg);
4072 int realidx2_reg = alloc_preg (cfg);
4073 int sum_reg = alloc_preg (cfg);
4074 int index1, index2;
4075 MonoInst *ins;
4076 guint32 size;
4078 mono_class_init_internal (klass);
4079 size = mono_class_array_element_size (klass);
4081 index1 = index_ins1->dreg;
4082 index2 = index_ins2->dreg;
4084 #if SIZEOF_REGISTER == 8
4085 /* The array reg is 64 bits but the index reg is only 32 */
4086 if (COMPILE_LLVM (cfg)) {
4087 /* Not needed */
4088 } else {
4089 int tmpreg = alloc_preg (cfg);
4090 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4091 index1 = tmpreg;
4092 tmpreg = alloc_preg (cfg);
4093 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4094 index2 = tmpreg;
4096 #else
4097 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4098 #endif
4100 /* range checking */
4101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4102 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4104 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4105 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4106 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4107 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4108 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4109 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4110 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4112 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4113 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4114 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4115 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4116 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4117 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4118 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4120 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4121 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4122 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4123 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4124 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4126 ins->type = STACK_MP;
4127 ins->klass = klass;
4128 MONO_ADD_INS (cfg->cbb, ins);
4130 return ins;
4133 static MonoInst*
4134 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, guchar *ip, gboolean is_set)
4136 int rank;
4137 MonoInst *addr;
4138 MonoMethod *addr_method;
4139 int element_size;
4140 MonoClass *eclass = m_class_get_element_class (cmethod->klass);
4142 rank = mono_method_signature_internal (cmethod)->param_count - (is_set? 1: 0);
4144 if (rank == 1)
4145 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4147 /* emit_ldelema_2 depends on OP_LMUL */
4148 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4149 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4152 if (mini_is_gsharedvt_variable_klass (eclass))
4153 element_size = 0;
4154 else
4155 element_size = mono_class_array_element_size (eclass);
4156 addr_method = mono_marshal_get_array_address (rank, element_size);
4157 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4159 return addr;
4162 static gboolean
4163 mini_class_is_reference (MonoClass *klass)
4165 return mini_type_is_reference (m_class_get_byval_arg (klass));
4168 MonoInst*
4169 mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4171 if (safety_checks && mini_class_is_reference (klass) &&
4172 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4173 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4174 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4175 MonoInst *iargs [3];
4177 if (!helper->slot)
4178 mono_class_setup_vtable (obj_array);
4179 g_assert (helper->slot);
4181 if (sp [0]->type != STACK_OBJ)
4182 return NULL;
4183 if (sp [2]->type != STACK_OBJ)
4184 return NULL;
4186 iargs [2] = sp [2];
4187 iargs [1] = sp [1];
4188 iargs [0] = sp [0];
4190 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4191 } else {
4192 MonoInst *ins;
4194 if (mini_is_gsharedvt_variable_klass (klass)) {
4195 MonoInst *addr;
4197 // FIXME-VT: OP_ICONST optimization
4198 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4199 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
4200 ins->opcode = OP_STOREV_MEMBASE;
4201 } else if (sp [1]->opcode == OP_ICONST) {
4202 int array_reg = sp [0]->dreg;
4203 int index_reg = sp [1]->dreg;
4204 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4206 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg) && sp [1]->inst_c0 < 0)
4207 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4209 if (safety_checks)
4210 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4211 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset, sp [2]->dreg);
4212 } else {
4213 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4214 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
4215 if (mini_class_is_reference (klass))
4216 mini_emit_write_barrier (cfg, addr, sp [2]);
4218 return ins;
4222 MonoInst*
4223 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4225 MonoInst *ins = NULL;
4226 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4227 MONO_ADD_INS (cfg->cbb, ins);
4228 ins->backend.memory_barrier_kind = kind;
4230 return ins;
4234 * This entry point could be used later for arbitrary method
4235 * redirection.
4237 inline static MonoInst*
4238 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4239 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
4241 if (method->klass == mono_defaults.string_class) {
4242 /* managed string allocation support */
4243 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
4244 MonoInst *iargs [2];
4245 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, cfg->error);
4246 MonoMethod *managed_alloc = NULL;
4248 mono_error_assert_ok (cfg->error); /*Should not fail since it System.String*/
4249 #ifndef MONO_CROSS_COMPILE
4250 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
4251 #endif
4252 if (!managed_alloc)
4253 return NULL;
4254 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4255 iargs [1] = args [0];
4256 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
4259 return NULL;
4262 static void
4263 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4265 MonoInst *store, *temp;
4266 int i;
4268 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4269 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4272 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4273 * would be different than the MonoInst's used to represent arguments, and
4274 * the ldelema implementation can't deal with that.
4275 * Solution: When ldelema is used on an inline argument, create a var for
4276 * it, emit ldelema on that var, and emit the saving code below in
4277 * inline_method () if needed.
4279 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4280 cfg->args [i] = temp;
4281 /* This uses cfg->args [i] which is set by the preceeding line */
4282 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4283 store->cil_code = sp [0]->cil_code;
4284 sp++;
/* Compile in the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT debugging aids below. */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name begins with
 * the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. With no (or an empty) limit, every callee is accepted.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	int strncmp_result;
	static const char *limit = NULL;

	/* Read and cache the environment variable on first use. */
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string != NULL ? limit_string : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	{
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	}
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining inside callers whose full name begins
 * with the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable. With no (or an empty) limit, every caller is accepted.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static const char *limit = NULL;

	/* Read and cache the environment variable on first use. */
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string != NULL ? limit_string : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	{
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	}
}
#endif
4351 static void
4352 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
4354 static double r8_0 = 0.0;
4355 static float r4_0 = 0.0;
4356 MonoInst *ins;
4357 int t;
4359 rtype = mini_get_underlying_type (rtype);
4360 t = rtype->type;
4362 if (rtype->byref) {
4363 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
4364 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
4365 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4366 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
4367 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
4368 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
4369 MONO_INST_NEW (cfg, ins, OP_R4CONST);
4370 ins->type = STACK_R4;
4371 ins->inst_p0 = (void*)&r4_0;
4372 ins->dreg = dreg;
4373 MONO_ADD_INS (cfg->cbb, ins);
4374 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
4375 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4376 ins->type = STACK_R8;
4377 ins->inst_p0 = (void*)&r8_0;
4378 ins->dreg = dreg;
4379 MONO_ADD_INS (cfg->cbb, ins);
4380 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
4381 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
4382 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
4383 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
4384 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
4385 } else {
4386 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
4390 static void
4391 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
4393 int t;
4395 rtype = mini_get_underlying_type (rtype);
4396 t = rtype->type;
4398 if (rtype->byref) {
4399 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
4400 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
4401 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
4402 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
4403 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
4404 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
4405 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
4406 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
4407 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
4408 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
4409 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
4410 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
4411 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
4412 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
4413 } else {
4414 emit_init_rvar (cfg, dreg, rtype);
4418 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
4419 static void
4420 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
4422 MonoInst *var = cfg->locals [local];
4423 if (COMPILE_SOFT_FLOAT (cfg)) {
4424 MonoInst *store;
4425 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
4426 emit_init_rvar (cfg, reg, type);
4427 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
4428 } else {
4429 if (init)
4430 emit_init_rvar (cfg, var->dreg, type);
4431 else
4432 emit_dummy_init_rvar (cfg, var->dreg, type);
4437 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
4439 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
4443 * inline_method:
4445 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
4447 static int
4448 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4449 guchar *ip, guint real_offset, gboolean inline_always)
4451 ERROR_DECL (error);
4452 MonoInst *ins, *rvar = NULL;
4453 MonoMethodHeader *cheader;
4454 MonoBasicBlock *ebblock, *sbblock;
4455 int i, costs;
4456 MonoInst **prev_locals, **prev_args;
4457 MonoType **prev_arg_types;
4458 guint prev_real_offset;
4459 GHashTable *prev_cbb_hash;
4460 MonoBasicBlock **prev_cil_offset_to_bb;
4461 MonoBasicBlock *prev_cbb;
4462 const guchar *prev_ip;
4463 guchar *prev_cil_start;
4464 guint32 prev_cil_offset_to_bb_len;
4465 MonoMethod *prev_current_method;
4466 MonoGenericContext *prev_generic_context;
4467 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
4469 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4471 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4472 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4473 return 0;
4474 #endif
4475 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4476 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4477 return 0;
4478 #endif
4480 if (!fsig)
4481 fsig = mono_method_signature_internal (cmethod);
4483 if (cfg->verbose_level > 2)
4484 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4486 if (!cmethod->inline_info) {
4487 cfg->stat_inlineable_methods++;
4488 cmethod->inline_info = 1;
4491 /* allocate local variables */
4492 cheader = mono_method_get_header_checked (cmethod, error);
4493 if (!cheader) {
4494 if (inline_always) {
4495 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
4496 mono_error_move (cfg->error, error);
4497 } else {
4498 mono_error_cleanup (error);
4500 return 0;
4503 /*Must verify before creating locals as it can cause the JIT to assert.*/
4504 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4505 mono_metadata_free_mh (cheader);
4506 return 0;
4509 /* allocate space to store the return value */
4510 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4511 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4514 prev_locals = cfg->locals;
4515 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4516 for (i = 0; i < cheader->num_locals; ++i)
4517 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4519 /* allocate start and end blocks */
4520 /* This is needed so if the inline is aborted, we can clean up */
4521 NEW_BBLOCK (cfg, sbblock);
4522 sbblock->real_offset = real_offset;
4524 NEW_BBLOCK (cfg, ebblock);
4525 ebblock->block_num = cfg->num_bblocks++;
4526 ebblock->real_offset = real_offset;
4528 prev_args = cfg->args;
4529 prev_arg_types = cfg->arg_types;
4530 prev_ret_var_set = cfg->ret_var_set;
4531 prev_real_offset = cfg->real_offset;
4532 prev_cbb_hash = cfg->cbb_hash;
4533 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4534 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4535 prev_cil_start = cfg->cil_start;
4536 prev_ip = cfg->ip;
4537 prev_cbb = cfg->cbb;
4538 prev_current_method = cfg->current_method;
4539 prev_generic_context = cfg->generic_context;
4540 prev_disable_inline = cfg->disable_inline;
4542 cfg->ret_var_set = FALSE;
4543 cfg->inline_depth ++;
4545 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
4546 virtual_ = TRUE;
4548 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
4550 ret_var_set = cfg->ret_var_set;
4552 cfg->real_offset = prev_real_offset;
4553 cfg->cbb_hash = prev_cbb_hash;
4554 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4555 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4556 cfg->cil_start = prev_cil_start;
4557 cfg->ip = prev_ip;
4558 cfg->locals = prev_locals;
4559 cfg->args = prev_args;
4560 cfg->arg_types = prev_arg_types;
4561 cfg->current_method = prev_current_method;
4562 cfg->generic_context = prev_generic_context;
4563 cfg->ret_var_set = prev_ret_var_set;
4564 cfg->disable_inline = prev_disable_inline;
4565 cfg->inline_depth --;
4567 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
4568 if (cfg->verbose_level > 2)
4569 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4571 mono_error_assert_ok (cfg->error);
4573 cfg->stat_inlined_methods++;
4575 /* always add some code to avoid block split failures */
4576 MONO_INST_NEW (cfg, ins, OP_NOP);
4577 MONO_ADD_INS (prev_cbb, ins);
4579 prev_cbb->next_bb = sbblock;
4580 link_bblock (cfg, prev_cbb, sbblock);
4583 * Get rid of the begin and end bblocks if possible to aid local
4584 * optimizations.
4586 if (prev_cbb->out_count == 1)
4587 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4589 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4590 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4592 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4593 MonoBasicBlock *prev = ebblock->in_bb [0];
4595 if (prev->next_bb == ebblock) {
4596 mono_merge_basic_blocks (cfg, prev, ebblock);
4597 cfg->cbb = prev;
4598 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4599 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4600 cfg->cbb = prev_cbb;
4602 } else {
4603 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
4604 cfg->cbb = ebblock;
4606 } else {
4608 * Its possible that the rvar is set in some prev bblock, but not in others.
4609 * (#1835).
4611 if (rvar) {
4612 MonoBasicBlock *bb;
4614 for (i = 0; i < ebblock->in_count; ++i) {
4615 bb = ebblock->in_bb [i];
4617 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
4618 cfg->cbb = bb;
4620 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
4625 cfg->cbb = ebblock;
4628 if (rvar) {
4630 * If the inlined method contains only a throw, then the ret var is not
4631 * set, so set it to a dummy value.
4633 if (!ret_var_set)
4634 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
4636 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4637 *sp++ = ins;
4639 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4640 return costs + 1;
4641 } else {
4642 if (cfg->verbose_level > 2) {
4643 const char *msg = mono_error_get_message (cfg->error);
4644 printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod, TRUE), costs, msg ? msg : "");
4646 cfg->exception_type = MONO_EXCEPTION_NONE;
4648 clear_cfg_error (cfg);
4650 /* This gets rid of the newly added bblocks */
4651 cfg->cbb = prev_cbb;
4653 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4654 return 0;
4658 * Some of these comments may well be out-of-date.
4659 * Design decisions: we do a single pass over the IL code (and we do bblock
4660 * splitting/merging in the few cases when it's required: a back jump to an IL
4661 * address that was not already seen as bblock starting point).
4662 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4663 * Complex operations are decomposed in simpler ones right away. We need to let the
4664 * arch-specific code peek and poke inside this process somehow (except when the
4665 * optimizations can take advantage of the full semantic info of coarse opcodes).
4666 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4667 * MonoInst->opcode initially is the IL opcode or some simplification of that
4668 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4669 * opcode with value bigger than OP_LAST.
4670 * At this point the IR can be handed over to an interpreter, a dumb code generator
4671 * or to the optimizing code generator that will translate it to SSA form.
4673 * Profiling directed optimizations.
4674 * We may compile by default with few or no optimizations and instrument the code
4675 * or the user may indicate what methods to optimize the most either in a config file
4676 * or through repeated runs where the compiler applies offline the optimizations to
4677 * each method and then decides if it was worth it.
/* Operand-stack / operand verification helpers used by mono_method_to_ir ().
 * They reference locals of that function (sp, stack_start, header, num_args,
 * ip, end) and jump to its 'unverified' handling via UNVERIFIED. */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4692 static gboolean
4693 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4695 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4697 return b == NULL || b == bb;
4700 static int
4701 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, guchar *start, guchar *end, guchar **pos)
4703 guchar *ip = start;
4704 guchar *target;
4705 int i;
4706 guint cli_addr;
4707 MonoBasicBlock *bblock;
4708 const MonoOpcode *opcode;
4710 while (ip < end) {
4711 cli_addr = ip - start;
4712 i = mono_opcode_value ((const guint8 **)&ip, end);
4713 if (i < 0)
4714 UNVERIFIED;
4715 opcode = &mono_opcodes [i];
4716 switch (opcode->argument) {
4717 case MonoInlineNone:
4718 ip++;
4719 break;
4720 case MonoInlineString:
4721 case MonoInlineType:
4722 case MonoInlineField:
4723 case MonoInlineMethod:
4724 case MonoInlineTok:
4725 case MonoInlineSig:
4726 case MonoShortInlineR:
4727 case MonoInlineI:
4728 ip += 5;
4729 break;
4730 case MonoInlineVar:
4731 ip += 3;
4732 break;
4733 case MonoShortInlineVar:
4734 case MonoShortInlineI:
4735 ip += 2;
4736 break;
4737 case MonoShortInlineBrTarget:
4738 target = start + cli_addr + 2 + (signed char)ip [1];
4739 GET_BBLOCK (cfg, bblock, target);
4740 ip += 2;
4741 if (ip < end)
4742 GET_BBLOCK (cfg, bblock, ip);
4743 break;
4744 case MonoInlineBrTarget:
4745 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4746 GET_BBLOCK (cfg, bblock, target);
4747 ip += 5;
4748 if (ip < end)
4749 GET_BBLOCK (cfg, bblock, ip);
4750 break;
4751 case MonoInlineSwitch: {
4752 guint32 n = read32 (ip + 1);
4753 guint32 j;
4754 ip += 5;
4755 cli_addr += 5 + 4 * n;
4756 target = start + cli_addr;
4757 GET_BBLOCK (cfg, bblock, target);
4759 for (j = 0; j < n; ++j) {
4760 target = start + cli_addr + (gint32)read32 (ip);
4761 GET_BBLOCK (cfg, bblock, target);
4762 ip += 4;
4764 break;
4766 case MonoInlineR:
4767 case MonoInlineI8:
4768 ip += 9;
4769 break;
4770 default:
4771 g_assert_not_reached ();
4774 if (i == CEE_THROW) {
4775 guchar *bb_start = ip - 1;
4777 /* Find the start of the bblock containing the throw */
4778 bblock = NULL;
4779 while ((bb_start >= start) && !bblock) {
4780 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4781 bb_start --;
4783 if (bblock)
4784 bblock->out_of_line = 1;
4787 return 0;
4788 unverified:
4789 exception_exit:
4790 *pos = ip;
4791 return 1;
4794 static MonoMethod *
4795 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
4797 MonoMethod *method;
4799 error_init (error);
4801 if (m->wrapper_type != MONO_WRAPPER_NONE) {
4802 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
4803 if (context) {
4804 method = mono_class_inflate_generic_method_checked (method, context, error);
4806 } else {
4807 method = mono_get_method_checked (m_class_get_image (m->klass), token, klass, context, error);
4810 return method;
4813 static MonoMethod *
4814 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4816 ERROR_DECL (error);
4817 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? cfg->error : error);
4819 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) {
4820 mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Method with open type while not compiling gshared");
4821 method = NULL;
4824 if (!method && !cfg)
4825 mono_error_cleanup (error); /* FIXME don't swallow the error */
4827 return method;
4830 static MonoMethodSignature*
4831 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
4833 MonoMethodSignature *fsig;
4835 error_init (error);
4836 if (method->wrapper_type != MONO_WRAPPER_NONE) {
4837 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
4838 } else {
4839 fsig = mono_metadata_parse_signature_checked (m_class_get_image (method->klass), token, error);
4840 return_val_if_nok (error, NULL);
4842 if (context) {
4843 fsig = mono_inflate_generic_signature(fsig, context, error);
4845 return fsig;
4848 static MonoMethod*
4849 throw_exception (void)
4851 static MonoMethod *method = NULL;
4853 if (!method) {
4854 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4855 method = get_method_nofail (secman->securitymanager, "ThrowException", 1, 0);
4857 g_assert (method);
4858 return method;
4861 static void
4862 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4864 MonoMethod *thrower = throw_exception ();
4865 MonoInst *args [1];
4867 EMIT_NEW_PCONST (cfg, args [0], ex);
4868 mono_emit_method_call (cfg, thrower, args, NULL);
4872 * Return the original method is a wrapper is specified. We can only access
4873 * the custom attributes from the original method.
4875 static MonoMethod*
4876 get_original_method (MonoMethod *method)
4878 if (method->wrapper_type == MONO_WRAPPER_NONE)
4879 return method;
4881 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4882 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4883 return NULL;
4885 /* in other cases we need to find the original method */
4886 return mono_marshal_method_from_wrapper (method);
4889 static void
4890 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
4892 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4893 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4894 if (ex)
4895 emit_throw_exception (cfg, ex);
4898 static void
4899 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4901 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4902 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4903 if (ex)
4904 emit_throw_exception (cfg, ex);
4907 static guchar*
4908 il_read_op (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op)
4909 // If ip is desired_il_op, return the next ip, else NULL.
4911 if (G_LIKELY (ip < end) && G_UNLIKELY (*ip == first_byte)) {
4912 MonoOpcodeEnum il_op = MonoOpcodeEnum_Invalid;
4913 // mono_opcode_value_and_size updates ip, but not in the expected way.
4914 const guchar *temp_ip = ip;
4915 const int size = mono_opcode_value_and_size (&temp_ip, end, &il_op);
4916 return (G_LIKELY (size > 0) && G_UNLIKELY (il_op == desired_il_op)) ? (ip + size) : NULL;
4918 return NULL;
4921 static guchar*
4922 il_read_op_and_token (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, guint32 *token)
4924 ip = il_read_op (ip, end, first_byte, desired_il_op);
4925 if (ip)
4926 *token = read32 (ip - 4); // could be +1 or +2 from start
4927 return ip;
4930 static guchar*
4931 il_read_branch_and_target (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, int size, guchar **target)
4933 ip = il_read_op (ip, end, first_byte, desired_il_op);
4934 if (ip) {
4935 gint32 delta = 0;
4936 switch (size) {
4937 case 1:
4938 delta = (signed char)ip [-1];
4939 break;
4940 case 4:
4941 delta = (gint32)read32 (ip - 4);
4942 break;
4944 // FIXME verify it is within the function and start of an instruction.
4945 *target = ip + delta;
4946 return ip;
4948 return NULL;
/*
 * Single-opcode IL readers.  Each returns the IP just past the instruction
 * when the next instruction matches, or NULL otherwise.  The *_and_token
 * variants also output the instruction's 32-bit metadata token; the branch
 * variants also output the branch target (unverified).
 */
#define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target))
#define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target))
#define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target))
#define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target))
#define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP))
#define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token))
#define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token))
#define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token))
#define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token))
#define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token))
#define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token))
#define il_read_unbox_any(ip, end, token) (il_read_op_and_token (ip, end, CEE_UNBOX_ANY, MONO_CEE_UNBOX_ANY, token))
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * Returns NULL when the pattern does not match or cannot be optimized;
 * on success also outputs the data size, the field token, the consumed
 * il_op (MONO_CEE_CALL) and the IP past the sequence.
 */
static const char*
initialize_array_data (MonoCompile *cfg, MonoMethod *method, gboolean aot, guchar *ip,
		guchar *end, MonoClass *klass, guint32 len, int *out_size,
		guint32 *out_field_token, MonoOpcodeEnum *il_op, guchar **next_ip)
{
	/*
	 * Expected IL shape (the newarr has already been consumed by the caller):
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	guint32 token;
	guint32 field_token;

	if ((ip = il_read_dup (ip, end))
			&& ip_in_bb (cfg, cfg->cbb, ip)
			&& (ip = il_read_ldtoken (ip, end, &field_token))
			&& IS_FIELD_DEF (field_token)
			&& ip_in_bb (cfg, cfg->cbb, ip)
			&& (ip = il_read_call (ip, end, &token))) {
		ERROR_DECL (error);
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token_checked (m_class_get_image (method->klass), field_token, &dummy_class, NULL, error);
		int dummy_align;

		if (!field) {
			mono_error_cleanup (error); /* FIXME don't swallow the error */
			return NULL;
		}

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* Only RuntimeHelpers.InitializeArray from corlib triggers the optimization. */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (m_class_get_name (cmethod->klass), "RuntimeHelpers") || m_class_get_image (cmethod->klass) != mono_defaults.corlib)
			return NULL;
		switch (mini_get_underlying_type (m_class_get_byval_arg (klass))->type) {
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The RVA blob must be at least as large as the array contents. */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		MonoImage *method_klass_image = m_class_get_image (method->klass);
		if (!image_is_dynamic (method_klass_image)) {
			guint32 field_index = mono_metadata_token_index (field_token);
			mono_metadata_field_info (method_klass_image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method_klass_image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				data_ptr = (const char *)GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		if (!data_ptr)
			return NULL;
		*il_op = MONO_CEE_CALL;
		*next_ip = ip;
		return data_ptr;
	}
	return NULL;
}
5059 static void
5060 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, guchar *ip)
5062 ERROR_DECL (error);
5063 char *method_fname = mono_method_full_name (method, TRUE);
5064 char *method_code;
5065 MonoMethodHeader *header = mono_method_get_header_checked (method, error);
5067 if (!header) {
5068 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error));
5069 mono_error_cleanup (error);
5070 } else if (header->code_size == 0)
5071 method_code = g_strdup ("method body is empty.");
5072 else
5073 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5074 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
5075 g_free (method_fname);
5076 g_free (method_code);
5077 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
5080 guint32
5081 mono_type_to_stloc_coerce (MonoType *type)
5083 if (type->byref)
5084 return 0;
5086 type = mini_get_underlying_type (type);
5087 handle_enum:
5088 switch (type->type) {
5089 case MONO_TYPE_I1:
5090 return OP_ICONV_TO_I1;
5091 case MONO_TYPE_U1:
5092 return OP_ICONV_TO_U1;
5093 case MONO_TYPE_I2:
5094 return OP_ICONV_TO_I2;
5095 case MONO_TYPE_U2:
5096 return OP_ICONV_TO_U2;
5097 case MONO_TYPE_I4:
5098 case MONO_TYPE_U4:
5099 case MONO_TYPE_I:
5100 case MONO_TYPE_U:
5101 case MONO_TYPE_PTR:
5102 case MONO_TYPE_FNPTR:
5103 case MONO_TYPE_CLASS:
5104 case MONO_TYPE_STRING:
5105 case MONO_TYPE_OBJECT:
5106 case MONO_TYPE_SZARRAY:
5107 case MONO_TYPE_ARRAY:
5108 case MONO_TYPE_I8:
5109 case MONO_TYPE_U8:
5110 case MONO_TYPE_R4:
5111 case MONO_TYPE_R8:
5112 case MONO_TYPE_TYPEDBYREF:
5113 case MONO_TYPE_GENERICINST:
5114 return 0;
5115 case MONO_TYPE_VALUETYPE:
5116 if (m_class_is_enumtype (type->data.klass)) {
5117 type = mono_class_enum_basetype_internal (type->data.klass);
5118 goto handle_enum;
5120 return 0;
5121 case MONO_TYPE_VAR:
5122 case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be match and, for example, u1 is not covariant to u32
5123 return 0;
5124 default:
5125 g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
5127 return -1;
/*
 * emit_stloc_ir:
 *
 *   Emit IR to store the top-of-stack value *SP into local N, inserting a
 * narrowing conversion first when the local's type requires one, and
 * optimizing away the store when the value is a freshly emitted constant.
 */
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);

	if (coerce_op) {
		/* Skip the conversion if the previous instruction already is that exact conversion. */
		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
			if (cfg->verbose_level > 2)
				printf ("Found existing coercing is enough for stloc\n");
		} else {
			MONO_INST_NEW (cfg, ins, coerce_op);
			ins->dreg = alloc_ireg (cfg);
			ins->sreg1 = sp [0]->dreg;
			ins->type = STACK_I4;
			ins->klass = mono_class_from_mono_type_internal (header->locals [n]);
			MONO_ADD_INS (cfg->cbb, ins);
			/* The coerced value replaces the stack slot for the store below. */
			*sp = mono_decompose_opcode (cfg, ins);
		}
	}

	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * emit_starg_ir:
 *
 *   Emit IR to store the top-of-stack value *SP into argument N, inserting a
 * narrowing conversion first when the argument's type requires one.
 * Mirrors emit_stloc_ir, but always emits the store (no constant shortcut).
 */
static void
emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
{
	MonoInst *ins;
	guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);

	if (coerce_op) {
		/* Skip the conversion if the previous instruction already is that exact conversion. */
		if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
			if (cfg->verbose_level > 2)
				printf ("Found existing coercing is enough for starg\n");
		} else {
			MONO_INST_NEW (cfg, ins, coerce_op);
			ins->dreg = alloc_ireg (cfg);
			ins->sreg1 = sp [0]->dreg;
			ins->type = STACK_I4;
			ins->klass = mono_class_from_mono_type_internal (cfg->arg_types [n]);
			MONO_ADD_INS (cfg->cbb, ins);
			/* The coerced value replaces the stack slot for the store below. */
			*sp = mono_decompose_opcode (cfg, ins);
		}
	}

	EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 * Currently handles only "ldloca N; initobj T" by emitting the local
 * initialization directly.  Returns the IP past the consumed initobj, or
 * NULL when the pattern does not match (or the type fails to load, which
 * also records the error on CFG via CHECK_TYPELOAD).
 */
static guchar *
emit_optimized_ldloca_ir (MonoCompile *cfg, guchar *ip, guchar *end, int local)
{
	guint32 token;
	MonoClass *klass;
	MonoType *type;

	guchar *start = ip;
	if ((ip = il_read_initobj (ip, end, &token)) && ip_in_bb (cfg, cfg->cbb, start + 1)) {
		/* From the INITOBJ case */
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass); /* NOTE: jumps to exception_exit on failure */
		type = mini_get_underlying_type (m_class_get_byval_arg (klass));
		emit_init_local (cfg, local, type, TRUE);
		return ip;
	}
 exception_exit:
	return NULL;
}
/*
 * handle_call_res_devirt:
 *
 *   Post-process the result of a call to CMETHOD.  For
 * EqualityComparer<T>.get_Default with suitable T, wrap CALL_RES in an
 * OP_TYPED_OBJREF carrying the concrete GenericEqualityComparer<T> type so
 * later passes can devirtualize Equals () calls on it.  Returns the
 * (possibly replaced) call result.
 */
static MonoInst*
handle_call_res_devirt (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *call_res)
{
	/*
	 * Devirt EqualityComparer.Default.Equals () calls for some types.
	 * The corefx code expects these calls to be devirtualized.
	 * This depends on the implementation of EqualityComparer.Default, which is
	 * in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs
	 */
	if (m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
			!strcmp (m_class_get_name (cmethod->klass), "EqualityComparer`1") &&
			!strcmp (cmethod->name, "get_Default")) {
		MonoType *param_type = mono_class_get_generic_class (cmethod->klass)->context.class_inst->type_argv [0];
		MonoClass *inst;
		MonoGenericContext ctx;
		MonoType *args [16];
		ERROR_DECL (error);

		memset (&ctx, 0, sizeof (ctx));

		/* Build IEquatable<T> for the comparer's type argument T. */
		args [0] = param_type;
		ctx.class_inst = mono_metadata_get_generic_inst (1, args);

		inst = mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx, error);
		mono_error_assert_ok (error);

		/* EqualityComparer<T>.Default returns specific types depending on T */
		// FIXME: Add more
		/* 1. Implements IEquatable<T> */
		/*
		 * Can't use this for string/byte as it might use a different comparer:
		 *
		 * // Specialize type byte for performance reasons
		 * if (t == typeof(byte)) {
		 *     return (EqualityComparer<T>)(object)(new ByteEqualityComparer());
		 * }
		 * #if MOBILE
		 * // Breaks .net serialization compatibility
		 * if (t == typeof (string))
		 *     return (EqualityComparer<T>)(object)new InternalStringComparer ();
		 * #endif
		 */
		if (mono_class_is_assignable_from_internal (inst, mono_class_from_mono_type_internal (param_type)) && param_type->type != MONO_TYPE_U1 && param_type->type != MONO_TYPE_STRING) {
			MonoInst *typed_objref;
			MonoClass *gcomparer_inst;

			memset (&ctx, 0, sizeof (ctx));

			args [0] = param_type;
			ctx.class_inst = mono_metadata_get_generic_inst (1, args);

			MonoClass *gcomparer = mono_class_get_geqcomparer_class ();
			g_assert (gcomparer);
			gcomparer_inst = mono_class_inflate_generic_class_checked (gcomparer, &ctx, error);
			mono_error_assert_ok (error);

			/* Re-type the call result without changing the value. */
			MONO_INST_NEW (cfg, typed_objref, OP_TYPED_OBJREF);
			typed_objref->type = STACK_OBJ;
			typed_objref->dreg = alloc_ireg_ref (cfg);
			typed_objref->sreg1 = call_res->dreg;
			typed_objref->klass = gcomparer_inst;
			MONO_ADD_INS (cfg->cbb, typed_objref);

			call_res = typed_objref;

			/* Force decompose */
			cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
			cfg->cbb->needs_decompose = TRUE;
		}
	}

	return call_res;
}
5289 static gboolean
5290 is_exception_class (MonoClass *klass)
5292 if (G_LIKELY (m_class_get_supertypes (klass)))
5293 return mono_class_has_parent_fast (klass, mono_defaults.exception_class);
5294 while (klass) {
5295 if (klass == mono_defaults.exception_class)
5296 return TRUE;
5297 klass = m_class_get_parent (klass);
5299 return FALSE;
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whenever M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.  The result is cached on the assembly;
 * the memory barrier publishes the value before the inited flag.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	ERROR_DECL (error);
	MonoAssembly *ass = m_class_get_image (m->klass)->assembly;
	MonoCustomAttrInfo* attrs;
	MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	klass = mono_class_try_get_debuggable_attribute_class ();

	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, error);
	mono_error_cleanup (error); /* FIXME don't swallow the error */
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			p = (const char*)attr->data;
			/* Custom attribute blobs start with the 0x0001 prolog. */
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature_internal (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments */
			/* NOTE(review): the second bool is read as IsJITOptimizerDisabled — confirm against the ctor's parameter order. */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
5365 gboolean
5366 mono_is_supported_tailcall_helper (gboolean value, const char *svalue)
5368 if (!value)
5369 mono_tailcall_print ("%s %s\n", __func__, svalue);
5370 return value;
5373 static gboolean
5374 mono_is_not_supported_tailcall_helper (gboolean value, const char *svalue, MonoMethod *method, MonoMethod *cmethod)
5376 // Return value, printing if it inhibits tailcall.
5378 if (value && mono_tailcall_print_enabled ()) {
5379 const char *lparen = strchr (svalue, ' ') ? "(" : "";
5380 const char *rparen = *lparen ? ")" : "";
5381 mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__, method->name, cmethod->name, lparen, svalue, rparen, value);
5383 return value;
5386 #define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod))
/*
 * is_supported_tailcall:
 *
 *   Decide whether a tail. call at IP from METHOD to CMETHOD (NULL for
 * calli) with signature FSIG can actually be emitted as a tailcall.
 * Returns the answer for regular calls; *PTAILCALL_CALLI receives the
 * answer for calli.  Each rejected condition is logged via
 * IS_NOT_SUPPORTED_TAILCALL.
 */
static gboolean
is_supported_tailcall (MonoCompile *cfg, const guint8 *ip, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig,
	gboolean virtual_, gboolean extra_arg, gboolean *ptailcall_calli)
{
	// Some checks apply to "regular", some to "calli", some to both.
	// To ease burden on caller, always compute regular and calli.

	gboolean tailcall = TRUE;
	gboolean tailcall_calli = TRUE;

	if (IS_NOT_SUPPORTED_TAILCALL (virtual_ && !cfg->backend->have_op_tailcall_membase))
		tailcall = FALSE;

	if (IS_NOT_SUPPORTED_TAILCALL (!cfg->backend->have_op_tailcall_reg))
		tailcall_calli = FALSE;

	if (!tailcall && !tailcall_calli)
		goto exit;

	// FIXME in calli, there is no type for for the this parameter,
	// so we assume it might be valuetype; in future we should issue a range
	// check, so rule out pointing to frame (for other reference parameters also)

	if ( IS_NOT_SUPPORTED_TAILCALL (cmethod && fsig->hasthis && m_class_is_valuetype (cmethod->klass)) // This might point to the current method's stack. Emit range check?
		|| IS_NOT_SUPPORTED_TAILCALL (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
		|| IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) // i.e. if !cmethod (calli)
		|| IS_NOT_SUPPORTED_TAILCALL (cfg->method->save_lmf)
		|| IS_NOT_SUPPORTED_TAILCALL (!cmethod && fsig->hasthis) // FIXME could be valuetype to current frame; range check
		|| IS_NOT_SUPPORTED_TAILCALL (cmethod && cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)

		// http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/
		//
		// 1. Non-generic non-static methods of reference types have access to the
		//    RGCTX via the “this” argument (this->vtable->rgctx).
		// 2. a Non-generic static methods of reference types and b. non-generic methods
		//    of value types need to be passed a pointer to the caller’s class’s VTable in the MONO_ARCH_RGCTX_REG register.
		// 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register
		//
		// That is what vtable_arg is here (always?).
		//
		// Passing vtable_arg uses (requires?) a volatile non-parameter register,
		// such as AMD64 rax, r10, r11, or the return register on many architectures.
		// ARM32 does not always clearly have such a register. ARM32's return register
		// is a parameter register.
		// iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly
		// important. Linux/arm32 is less clear.
		// ARM32's scratch r12 might work but only with much collateral change.
		//
		// Imagine F1 calls F2, and F2 tailcalls F3.
		// F2 and F3 are managed. F1 is native.
		// Without a tailcall, F2 can save and restore everything needed for F1.
		// However if the extra parameter were in a non-volatile, such as ARM32 V5/R8,
		// F3 cannot easily restore it for F1, in the current scheme. The current
		// scheme where the extra parameter is not merely an extra parameter, but
		// passed "outside of the ABI".
		//
		// If all native to managed transitions are intercepted and wrapped (w/o tailcall),
		// then they can preserve this register and the rest of the managed callgraph
		// treat it as volatile.
		//
		// Interface method dispatch has the same problem (imt_arg).

		|| IS_NOT_SUPPORTED_TAILCALL (extra_arg && !cfg->backend->have_volatile_non_param_register)
		|| IS_NOT_SUPPORTED_TAILCALL (cfg->gsharedvt)
		) {
		tailcall_calli = FALSE;
		tailcall = FALSE;
		goto exit;
	}

	for (int i = 0; i < fsig->param_count; ++i) {
		if (IS_NOT_SUPPORTED_TAILCALL (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)) {
			tailcall_calli = FALSE;
			tailcall = FALSE; // These can point to the current method's stack. Emit range check?
			goto exit;
		}
	}

	MonoMethodSignature *caller_signature;
	MonoMethodSignature *callee_signature;
	caller_signature = mono_method_signature_internal (method);
	callee_signature = cmethod ? mono_method_signature_internal (cmethod) : fsig;

	g_assert (caller_signature);
	g_assert (callee_signature);

	// Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped.
	// The main troublesome conversions are double <=> float.
	// CoreCLR allows some conversions here, such as integer truncation.
	// As well I <=> I[48] and U <=> U[48] would be ok, for matching size.
	if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature->ret)->type != mini_get_underlying_type (callee_signature->ret)->type)
		|| IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg, caller_signature, callee_signature, virtual_))) {
		tailcall_calli = FALSE;
		tailcall = FALSE;
		goto exit;
	}

	/* Debugging support */
#if 0
	if (!mono_debug_count ()) {
		tailcall_calli = FALSE;
		tailcall = FALSE;
		goto exit;
	}
#endif
	// See check_sp in mini_emit_calli_full.
	if (tailcall_calli && IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg)))
		tailcall_calli = FALSE;
exit:
	mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n",
			mono_opcode_name (*ip), method->name, cmethod ? cmethod->name : "calli", tailcall, tailcall_calli,
			cfg->gshared, extra_arg, virtual_);

	*ptailcall_calli = tailcall_calli;
	return tailcall;
}
5506 * is_addressable_valuetype_load
5508 * Returns true if a previous load can be done without doing an extra copy, given the new instruction ip and the type of the object being loaded ldtype
5510 static gboolean
5511 is_addressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype)
5513 /* Avoid loading a struct just to load one of its fields */
5514 gboolean is_load_instruction = (*ip == CEE_LDFLD);
5515 gboolean is_in_previous_bb = ip_in_bb(cfg, cfg->cbb, ip);
5516 gboolean is_struct = MONO_TYPE_ISSTRUCT(ldtype);
5517 return is_load_instruction && is_in_previous_bb && is_struct;
/*
 * handle_ctor_call:
 *
 *   Handle calls made to ctors from NEWOBJ opcodes.
 * SP points to the already-pushed arguments (sp [0] is `this`).  Emits
 * either an inlined body, a gsharedvt/generic indirect call, or a normal
 * ctor call.  On type-load or cfg errors jumps to the exit labels with the
 * error recorded on CFG.
 */
static void
handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
				  MonoInst **sp, guint8 *ip, int *inline_costs)
{
	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;

	/* Shared valuetype ctors need an extra vtable/rgctx argument. */
	if (m_class_is_valuetype (cmethod->klass) && mono_class_generic_sharing_enabled (cmethod->klass) &&
			mono_method_is_generic_sharable (cmethod, TRUE)) {
		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
			mono_class_vtable_checked (cfg->domain, cmethod->klass, cfg->error);
			CHECK_CFG_ERROR;
			CHECK_TYPELOAD (cmethod->klass);

			vtable_arg = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
		} else {
			if (context_used) {
				vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
														cmethod->klass, MONO_RGCTX_INFO_VTABLE);
			} else {
				MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, cfg->error);
				CHECK_CFG_ERROR;
				CHECK_TYPELOAD (cmethod->klass);
				EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
			}
		}
	}

	/* Avoid virtual calls to ctors if possible */
	if (mono_class_is_marshalbyref (cmethod->klass))
		callvirt_this_arg = sp [0];

	if (cmethod && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
		/* Intrinsic replaced the whole ctor call. */
		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
		CHECK_CFG_EXCEPTION;
	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
			   mono_method_check_inlining (cfg, cmethod) &&
			   !mono_class_is_subclass_of_internal (cmethod->klass, mono_defaults.exception_class, FALSE)) {
		int costs;

		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
			cfg->real_offset += 5;

			*inline_costs += costs - 5;
		} else {
			INLINE_FAILURE ("inline failure");
			// FIXME-VT: Clean this up
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
				GSHAREDVT_FAILURE(*ip);
			mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
		}
	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
		MonoInst *addr;

		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);

		if (cfg->llvm_only) {
			// FIXME: Avoid initializing vtable_arg
			mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
		} else {
			mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
		}
	} else if (context_used &&
			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
		MonoInst *cmethod_addr;

		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */

		if (cfg->llvm_only) {
			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
													MONO_RGCTX_INFO_METHOD_FTNDESC);
			mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
		} else {
			cmethod_addr = emit_get_rgctx_method (cfg, context_used,
												  cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

			mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
		}
	} else {
		INLINE_FAILURE ("ctor call");
		ins = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
										  callvirt_this_arg, NULL, vtable_arg);
	}
 exception_exit:
 mono_error_exit:
	return;
}
/* Context shared by the call-handling helpers below. */
typedef struct {
	MonoMethod *method;        /* the method being compiled (the caller) */
	gboolean inst_tailcall;    /* whether the IL call carried a tail. prefix */
} HandleCallData;
/*
 * handle_constrained_call:
 *
 *   Handle constrained calls. Return a MonoInst* representing the call or NULL.
 * May overwrite sp [0] and modify the ref_... parameters.
 * A NULL return with no recorded exception means the caller should proceed
 * with a normal (possibly devirtualized) call using the adjusted sp [0].
 */
static MonoInst*
handle_constrained_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoClass *constrained_class, MonoInst **sp,
						 HandleCallData *cdata, MonoMethod **ref_cmethod, gboolean *ref_virtual, gboolean *ref_emit_widen)
{
	MonoInst *ins, *addr;
	MonoMethod *method = cdata->method;
	gboolean constrained_partial_call = FALSE;
	gboolean constrained_is_generic_param =
		m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
		m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;

	if (constrained_is_generic_param && cfg->gshared) {
		if (!mini_is_gsharedvt_klass (constrained_class)) {
			g_assert (!m_class_is_valuetype (cmethod->klass));
			if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class)))
				constrained_partial_call = TRUE;
		}
	}

	if (mini_is_gsharedvt_klass (constrained_class)) {
		if ((cmethod->klass != mono_defaults.object_class) && m_class_is_valuetype (constrained_class) && m_class_is_valuetype (cmethod->klass)) {
			/* The 'Own method' case below */
		} else if (m_class_get_image (cmethod->klass) != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !m_class_is_valuetype (cmethod->klass)) {
			/* 'The type parameter is instantiated as a reference type' case below. */
		} else {
			ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, ref_emit_widen);
			CHECK_CFG_EXCEPTION;
			g_assert (ins);
			if (cdata->inst_tailcall) // FIXME
				mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method->name, cmethod->name);
			return ins;
		}
	}

	if (constrained_partial_call) {
		gboolean need_box = TRUE;

		/*
		 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
		 * called method is not known at compile time either. The called method could end up being
		 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
		 * to box the receiver.
		 * A simple solution would be to box always and make a normal virtual call, but that would
		 * be bad performance wise.
		 */
		if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass) &&
				(cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) {
			/*
			 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
			 */
			/* If the method is not abstract, it's a default interface method, and we need to box */
			need_box = FALSE;
		}

		if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) {
			/* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
			EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
			ins->klass = constrained_class;
			sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
			CHECK_CFG_EXCEPTION;
		} else if (need_box) {
			MonoInst *box_type;
			MonoBasicBlock *is_ref_bb, *end_bb;
			MonoInst *nonbox_call, *addr;

			/*
			 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
			 * if needed.
			 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
			 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
			 */
			addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);

			NEW_BBLOCK (cfg, is_ref_bb);
			NEW_BBLOCK (cfg, end_bb);

			box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

			/* Non-ref case */
			if (cfg->llvm_only)
				/* addr is an ftndesc in this case */
				nonbox_call = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
			else
				nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);

			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

			/* Ref case */
			MONO_START_BB (cfg, is_ref_bb);
			EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
			ins->klass = constrained_class;
			sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
			CHECK_CFG_EXCEPTION;
			if (cfg->llvm_only)
				ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
			else
				ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);

			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

			MONO_START_BB (cfg, end_bb);
			cfg->cbb = end_bb;

			/* Both call paths must deliver their result in the same vreg. */
			nonbox_call->dreg = ins->dreg;
			if (cdata->inst_tailcall) // FIXME
				mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method->name, cmethod->name);
			return ins;
		} else {
			g_assert (mono_class_is_interface (cmethod->klass));
			addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
			if (cfg->llvm_only)
				ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
			else
				ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
			if (cdata->inst_tailcall) // FIXME
				mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method->name, cmethod->name);
			return ins;
		}
	} else if (!m_class_is_valuetype (constrained_class)) {
		int dreg = alloc_ireg_ref (cfg);

		/*
		 * The type parameter is instantiated as a reference
		 * type. We have a managed pointer on the stack, so
		 * we need to dereference it here.
		 */
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
		ins->type = STACK_OBJ;
		sp [0] = ins;
	} else if (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class) {
		/*
		 * The type parameter is instantiated as a valuetype,
		 * but that type doesn't override the method we're
		 * calling, so we need to box `this'.
		 */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
		ins->klass = constrained_class;
		sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
		CHECK_CFG_EXCEPTION;
	} else {
		if (cmethod->klass != constrained_class) {
			/* Enums/default interface methods */
			EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
			ins->klass = constrained_class;
			sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
			CHECK_CFG_EXCEPTION;
		}
		/* The receiver's own method is being called: the call is non-virtual. */
		*ref_virtual = FALSE;
	}

 exception_exit:
	return NULL;
}
5781 static void
5782 emit_setret (MonoCompile *cfg, MonoInst *val)
5784 MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
5785 MonoInst *ins;
5787 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
5788 MonoInst *ret_addr;
5790 if (!cfg->vret_addr) {
5791 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
5792 } else {
5793 EMIT_NEW_RETLOADA (cfg, ret_addr);
5795 MonoClass *ret_class = mono_class_from_mono_type_internal (ret_type);
5796 if (MONO_CLASS_IS_SIMD (cfg, ret_class))
5797 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ret_addr->dreg, 0, val->dreg);
5798 else
5799 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
5800 ins->klass = ret_class;
5802 } else {
5803 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5804 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
5805 MonoInst *iargs [1];
5806 MonoInst *conv;
5808 iargs [0] = val;
5809 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
5810 mono_arch_emit_setret (cfg, cfg->method, conv);
5811 } else {
5812 mono_arch_emit_setret (cfg, cfg->method, val);
5814 #else
5815 mono_arch_emit_setret (cfg, cfg->method, val);
5816 #endif
/*
 * Decoded inline operand of one IL opcode.  mono_opcode_decode () fills in the
 * member matching the opcode's operand kind: i32 for 8/16/32-bit immediates,
 * variable indices and metadata tokens, i64 for 64-bit immediates (raw bits of
 * r8 constants included), and branch_target for branch opcodes.  The f/d
 * members give typed views of the same storage — presumably for callers that
 * reinterpret the raw constant bits; the decoder itself never writes them.
 */
typedef union _MonoOpcodeParameter {
	gint32 i32;
	gint64 i64;
	float f;
	double d;
	guchar *branch_target;
} MonoOpcodeParameter;
/*
 * Static per-opcode metadata generated from mono/cil/opcode.def and indexed by
 * MonoOpcodeEnum (see the table in mono_opcode_decode ()).  Field order and
 * bitfield widths are layout-significant; do not reorder.
 */
typedef struct _MonoOpcodeInfo {
	guint constant : 4; // private: inline constant for MonoInlineNone opcodes, stored biased by +1 (0 = no constant)
	gint pops : 3; // public -1 means variable
	gint pushes : 3; // public -1 means variable
} MonoOpcodeInfo;
/*
 * mono_opcode_decode:
 *
 *   Decode the inline operand of IL_OP, whose full encoding starts at IP and is
 * OP_SIZE bytes long, into PARAMETER.  Operand bytes are read backwards from
 * NEXT_IP (= ip + op_size), so the opcode prefix length never matters here.
 * Returns the static stack-behavior info for the opcode (pops/pushes of -1
 * mean the count depends on the operand, e.g. call/ret).
 */
static const MonoOpcodeInfo*
mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter)
{
	/* Map the stack-behavior symbols used inside opcode.def to plain
	 * push/pop counts so the OPDEF expansion below can build the table. */
#define Push0 (0)
#define Pop0 (0)
#define Push1 (1)
#define Pop1 (1)
#define PushI (1)
#define PopI (1)
#define PushI8 (1)
#define PopI8 (1)
#define PushRef (1)
#define PopRef (1)
#define PushR4 (1)
#define PopR4 (1)
#define PushR8 (1)
#define PopR8 (1)
#define VarPush (-1)
#define VarPop (-1)

	/* One entry per opcode, indexed by MonoOpcodeEnum.  The inline constant
	 * (e.g. the 3 in ldc.i4.3) is stored biased by +1 so 0 means "none". */
	static const MonoOpcodeInfo mono_opcode_info [ ] = {
#define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes },
#include "mono/cil/opcode.def"
#undef OPDEF
	};

	/* The helper symbols are only needed for the table expansion above. */
#undef Push0
#undef Pop0
#undef Push1
#undef Pop1
#undef PushI
#undef PopI
#undef PushI8
#undef PopI8
#undef PushRef
#undef PopRef
#undef PushR4
#undef PopR4
#undef PushR8
#undef PopR8
#undef VarPush
#undef VarPop

	gint32 delta;
	guchar *next_ip = ip + op_size;

	const MonoOpcodeInfo *info = &mono_opcode_info [il_op];

	switch (mono_opcodes [il_op].argument) {
	case MonoInlineNone:
		/* No operand bytes: recover the table constant (undo the +1 bias). */
		parameter->i32 = (int)info->constant - 1;
		break;
	case MonoInlineString:
	case MonoInlineType:
	case MonoInlineField:
	case MonoInlineMethod:
	case MonoInlineTok:
	case MonoInlineSig:
	case MonoShortInlineR:
	case MonoInlineI:
		/* 32-bit operand: a metadata token, an i4 immediate, or the raw bits of an r4. */
		parameter->i32 = read32 (next_ip - 4);
		// FIXME check token type?
		break;
	case MonoShortInlineI:
		/* Sign-extend the 8-bit immediate. */
		parameter->i32 = (signed char)next_ip [-1];
		break;
	case MonoInlineVar:
		parameter->i32 = read16 (next_ip - 2);
		break;
	case MonoShortInlineVar:
		/* Zero-extended 8-bit variable/argument index. */
		parameter->i32 = next_ip [-1];
		break;
	case MonoInlineR:
	case MonoInlineI8:
		/* 64-bit operand: an i8 immediate or the raw bits of an r8. */
		parameter->i64 = read64 (next_ip - 8);
		break;
	case MonoShortInlineBrTarget:
		delta = (signed char)next_ip [-1];
		goto branch_target;
	case MonoInlineBrTarget:
		delta = (gint32)read32 (next_ip - 4);
branch_target:
		/* Branch targets are relative to the start of the next instruction. */
		parameter->branch_target = delta + next_ip;
		break;
	case MonoInlineSwitch: // complicated
		break;
	default:
		g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument);
	}
	return info;
}
5927 * mono_method_to_ir:
5929 * Translate the .net IL into linear IR.
5931 * @start_bblock: if not NULL, the starting basic block, used during inlining.
5932 * @end_bblock: if not NULL, the ending basic block, used during inlining.
5933 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
5934 * @inline_args: if not NULL, contains the arguments to the inline call
5935 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
5936 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
5938 * This method is used to turn ECMA IL into Mono's internal Linear IR
 * representation. It is used both for entire methods, as well as
5940 * inlining existing methods. In the former case, the @start_bblock,
5941 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
5942 * inline_offset is set to zero.
5944 * Returns: the inline cost, or -1 if there was an error processing this method.
5947 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5948 MonoInst *return_var, MonoInst **inline_args,
5949 guint inline_offset, gboolean is_virtual_call)
5951 ERROR_DECL (error);
5952 // Buffer to hold parameters to mono_new_array, instead of varargs.
5953 MonoInst *array_new_localalloc_ins = NULL;
5954 MonoInst *ins, **sp, **stack_start;
5955 MonoBasicBlock *tblock = NULL;
5956 MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL;
5957 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5958 MonoMethod *method_definition;
5959 MonoInst **arg_array;
5960 MonoMethodHeader *header;
5961 MonoImage *image;
5962 guint32 token, ins_flag;
5963 MonoClass *klass;
5964 MonoClass *constrained_class = NULL;
5965 gboolean save_last_error = FALSE;
5966 guchar *ip, *end, *target, *err_pos;
5967 MonoMethodSignature *sig;
5968 MonoGenericContext *generic_context = NULL;
5969 MonoGenericContainer *generic_container = NULL;
5970 MonoType **param_types;
5971 int i, n, start_new_bblock, dreg;
5972 int num_calls = 0, inline_costs = 0;
5973 int breakpoint_id = 0;
5974 guint num_args;
5975 GSList *class_inits = NULL;
5976 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5977 int context_used;
5978 gboolean init_locals, seq_points, skip_dead_blocks;
5979 gboolean sym_seq_points = FALSE;
5980 MonoDebugMethodInfo *minfo;
5981 MonoBitSet *seq_point_locs = NULL;
5982 MonoBitSet *seq_point_set_locs = NULL;
5983 gboolean emitted_funccall_seq_point = FALSE;
5985 cfg->disable_inline = is_jit_optimizer_disabled (method);
5987 image = m_class_get_image (method->klass);
5989 /* serialization and xdomain stuff may need access to private fields and methods */
5990 dont_verify = image->assembly->corlib_internal? TRUE: FALSE;
5991 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5992 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5993 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5994 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5995 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5997 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5998 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5999 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_OTHER;
6000 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6001 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6003 header = mono_method_get_header_checked (method, cfg->error);
6004 if (!header) {
6005 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6006 goto exception_exit;
6007 } else {
6008 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
6011 generic_container = mono_method_get_generic_container (method);
6012 sig = mono_method_signature_internal (method);
6013 num_args = sig->hasthis + sig->param_count;
6014 ip = (guchar*)header->code;
6015 cfg->cil_start = ip;
6016 end = ip + header->code_size;
6017 cfg->stat_cil_code_size += header->code_size;
6019 seq_points = cfg->gen_seq_points && cfg->method == method;
6021 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6022 /* We could hit a seq point before attaching to the JIT (#8338) */
6023 seq_points = FALSE;
6026 if (cfg->prof_coverage) {
6027 if (cfg->compile_aot)
6028 g_error ("Coverage profiling is not supported with AOT.");
6030 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6033 if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->prof_coverage) {
6034 minfo = mono_debug_lookup_method (method);
6035 if (minfo) {
6036 MonoSymSeqPoint *sps;
6037 int i, n_il_offsets;
6039 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
6040 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6041 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6042 sym_seq_points = TRUE;
6043 for (i = 0; i < n_il_offsets; ++i) {
6044 if (sps [i].il_offset < header->code_size)
6045 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
6047 g_free (sps);
6049 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
6050 if (asyncMethod) {
6051 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
6053 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
6054 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
6056 mono_debug_free_method_async_debug_info (asyncMethod);
6058 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) {
6059 /* Methods without line number info like auto-generated property accessors */
6060 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6061 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6062 sym_seq_points = TRUE;
6067 * Methods without init_locals set could cause asserts in various passes
6068 * (#497220). To work around this, we emit dummy initialization opcodes
6069 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6070 * on some platforms.
6072 if (cfg->opt & MONO_OPT_UNSAFE)
6073 init_locals = header->init_locals;
6074 else
6075 init_locals = TRUE;
6077 method_definition = method;
6078 while (method_definition->is_inflated) {
6079 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6080 method_definition = imethod->declaring;
6083 /* SkipVerification is not allowed if core-clr is enabled */
6084 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6085 dont_verify = TRUE;
6086 dont_verify_stloc = TRUE;
6089 if (sig->is_inflated)
6090 generic_context = mono_method_get_context (method);
6091 else if (generic_container)
6092 generic_context = &generic_container->context;
6093 cfg->generic_context = generic_context;
6095 if (!cfg->gshared)
6096 g_assert (!sig->has_type_parameters);
6098 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6099 g_assert (method->is_inflated);
6100 g_assert (mono_method_get_context (method)->method_inst);
6102 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6103 g_assert (sig->generic_param_count);
6105 if (cfg->method == method) {
6106 cfg->real_offset = 0;
6107 } else {
6108 cfg->real_offset = inline_offset;
6111 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6112 cfg->cil_offset_to_bb_len = header->code_size;
6114 cfg->current_method = method;
6116 if (cfg->verbose_level > 2)
6117 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6119 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6120 if (sig->hasthis)
6121 param_types [0] = m_class_is_valuetype (method->klass) ? m_class_get_this_arg (method->klass) : m_class_get_byval_arg (method->klass);
6122 for (n = 0; n < sig->param_count; ++n)
6123 param_types [n + sig->hasthis] = sig->params [n];
6124 cfg->arg_types = param_types;
6126 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
6127 if (cfg->method == method) {
6128 /* ENTRY BLOCK */
6129 NEW_BBLOCK (cfg, start_bblock);
6130 cfg->bb_entry = start_bblock;
6131 start_bblock->cil_code = NULL;
6132 start_bblock->cil_length = 0;
6134 /* EXIT BLOCK */
6135 NEW_BBLOCK (cfg, end_bblock);
6136 cfg->bb_exit = end_bblock;
6137 end_bblock->cil_code = NULL;
6138 end_bblock->cil_length = 0;
6139 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6140 g_assert (cfg->num_bblocks == 2);
6142 arg_array = cfg->args;
6144 if (header->num_clauses) {
6145 cfg->spvars = g_hash_table_new (NULL, NULL);
6146 cfg->exvars = g_hash_table_new (NULL, NULL);
6148 /* handle exception clauses */
6149 for (i = 0; i < header->num_clauses; ++i) {
6150 MonoBasicBlock *try_bb;
6151 MonoExceptionClause *clause = &header->clauses [i];
6152 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6154 try_bb->real_offset = clause->try_offset;
6155 try_bb->try_start = TRUE;
6156 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6157 tblock->real_offset = clause->handler_offset;
6158 tblock->flags |= BB_EXCEPTION_HANDLER;
6160 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
6161 mono_create_exvar_for_offset (cfg, clause->handler_offset);
6163 * Linking the try block with the EH block hinders inlining as we won't be able to
6164 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6166 if (COMPILE_LLVM (cfg))
6167 link_bblock (cfg, try_bb, tblock);
6169 if (*(ip + clause->handler_offset) == CEE_POP)
6170 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6172 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6173 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6174 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6175 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6176 MONO_ADD_INS (tblock, ins);
6178 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
6179 /* finally clauses already have a seq point */
6180 /* seq points for filter clauses are emitted below */
6181 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6182 MONO_ADD_INS (tblock, ins);
6185 /* todo: is a fault block unsafe to optimize? */
6186 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6187 tblock->flags |= BB_EXCEPTION_UNSAFE;
6190 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6191 while (p < end) {
6192 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6194 /* catch and filter blocks get the exception object on the stack */
6195 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6196 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6198 /* mostly like handle_stack_args (), but just sets the input args */
6199 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6200 tblock->in_scount = 1;
6201 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6202 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6204 cfg->cbb = tblock;
6206 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
6207 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
6208 if (!cfg->compile_llvm) {
6209 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
6210 ins->dreg = tblock->in_stack [0]->dreg;
6211 MONO_ADD_INS (tblock, ins);
6213 #else
6214 MonoInst *dummy_use;
6217 * Add a dummy use for the exvar so its liveness info will be
6218 * correct.
6220 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6221 #endif
6223 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6224 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6225 MONO_ADD_INS (tblock, ins);
6228 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6229 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6230 tblock->flags |= BB_EXCEPTION_HANDLER;
6231 tblock->real_offset = clause->data.filter_offset;
6232 tblock->in_scount = 1;
6233 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6234 /* The filter block shares the exvar with the handler block */
6235 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6236 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6237 MONO_ADD_INS (tblock, ins);
6241 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6242 clause->data.catch_class &&
6243 cfg->gshared &&
6244 mono_class_check_context_used (clause->data.catch_class)) {
6246 * In shared generic code with catch
6247 * clauses containing type variables
6248 * the exception handling code has to
6249 * be able to get to the rgctx.
6250 * Therefore we have to make sure that
6251 * the vtable/mrgctx argument (for
6252 * static or generic methods) or the
6253 * "this" argument (for non-static
6254 * methods) are live.
6256 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6257 mini_method_get_context (method)->method_inst ||
6258 m_class_is_valuetype (method->klass)) {
6259 mono_get_vtable_var (cfg);
6260 } else {
6261 MonoInst *dummy_use;
6263 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6267 } else {
6268 arg_array = g_newa (MonoInst*, num_args);
6269 cfg->cbb = start_bblock;
6270 cfg->args = arg_array;
6271 mono_save_args (cfg, sig, inline_args);
6274 /* FIRST CODE BLOCK */
6275 NEW_BBLOCK (cfg, tblock);
6276 tblock->cil_code = ip;
6277 cfg->cbb = tblock;
6278 cfg->ip = ip;
6280 ADD_BBLOCK (cfg, tblock);
6282 if (cfg->method == method) {
6283 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6284 if (breakpoint_id) {
6285 MONO_INST_NEW (cfg, ins, OP_BREAK);
6286 MONO_ADD_INS (cfg->cbb, ins);
6290 /* we use a separate basic block for the initialization code */
6291 NEW_BBLOCK (cfg, init_localsbb);
6292 if (cfg->method == method)
6293 cfg->bb_init = init_localsbb;
6294 init_localsbb->real_offset = cfg->real_offset;
6295 start_bblock->next_bb = init_localsbb;
6296 init_localsbb->next_bb = cfg->cbb;
6297 link_bblock (cfg, start_bblock, init_localsbb);
6298 link_bblock (cfg, init_localsbb, cfg->cbb);
6299 init_localsbb2 = init_localsbb;
6300 cfg->cbb = init_localsbb;
6302 if (cfg->gsharedvt && cfg->method == method) {
6303 MonoGSharedVtMethodInfo *info;
6304 MonoInst *var, *locals_var;
6305 int dreg;
6307 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6308 info->method = cfg->method;
6309 info->count_entries = 16;
6310 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
6311 cfg->gsharedvt_info = info;
6313 var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
6314 /* prevent it from being register allocated */
6315 //var->flags |= MONO_INST_VOLATILE;
6316 cfg->gsharedvt_info_var = var;
6318 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6319 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6321 /* Allocate locals */
6322 locals_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
6323 /* prevent it from being register allocated */
6324 //locals_var->flags |= MONO_INST_VOLATILE;
6325 cfg->gsharedvt_locals_var = locals_var;
6327 dreg = alloc_ireg (cfg);
6328 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6330 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6331 ins->dreg = locals_var->dreg;
6332 ins->sreg1 = dreg;
6333 MONO_ADD_INS (cfg->cbb, ins);
6334 cfg->gsharedvt_locals_var_ins = ins;
6336 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6338 if (init_locals)
6339 ins->flags |= MONO_INST_INIT;
6343 if (mono_security_core_clr_enabled ()) {
6344 /* check if this is native code, e.g. an icall or a p/invoke */
6345 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6346 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6347 if (wrapped) {
6348 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6349 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6351 /* if this ia a native call then it can only be JITted from platform code */
6352 if ((icall || pinvk) && method->klass && m_class_get_image (method->klass)) {
6353 if (!mono_security_core_clr_is_platform_image (m_class_get_image (method->klass))) {
6354 MonoException *ex = icall ? mono_get_exception_security () :
6355 mono_get_exception_method_access ();
6356 emit_throw_exception (cfg, ex);
6363 CHECK_CFG_EXCEPTION;
6365 if (header->code_size == 0)
6366 UNVERIFIED;
6368 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6369 ip = err_pos;
6370 UNVERIFIED;
6373 if (cfg->method == method)
6374 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
6376 for (n = 0; n < header->num_locals; ++n) {
6377 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6378 UNVERIFIED;
6380 class_inits = NULL;
6382 /* We force the vtable variable here for all shared methods
6383 for the possibility that they might show up in a stack
6384 trace where their exact instantiation is needed. */
6385 if (cfg->gshared && method == cfg->method) {
6386 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6387 mini_method_get_context (method)->method_inst ||
6388 m_class_is_valuetype (method->klass)) {
6389 mono_get_vtable_var (cfg);
6390 } else {
6391 /* FIXME: Is there a better way to do this?
6392 We need the variable live for the duration
6393 of the whole method. */
6394 cfg->args [0]->flags |= MONO_INST_VOLATILE;
6398 /* add a check for this != NULL to inlined methods */
6399 if (is_virtual_call) {
6400 MonoInst *arg_ins;
6402 NEW_ARGLOAD (cfg, arg_ins, 0);
6403 MONO_ADD_INS (cfg->cbb, arg_ins);
6404 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6407 skip_dead_blocks = !dont_verify;
6408 if (skip_dead_blocks) {
6409 original_bb = bb = mono_basic_block_split (method, cfg->error, header);
6410 CHECK_CFG_ERROR;
6411 g_assert (bb);
6414 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6415 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6417 ins_flag = 0;
6418 start_new_bblock = 0;
6419 MonoOpcodeEnum il_op; il_op = MonoOpcodeEnum_Invalid;
6421 for (guchar *next_ip = ip; ip < end; ip = next_ip) {
6422 MonoOpcodeEnum previous_il_op = il_op;
6423 const guchar *tmp_ip = ip;
6424 const int op_size = mono_opcode_value_and_size (&tmp_ip, end, &il_op);
6425 CHECK_OPSIZE (op_size);
6426 next_ip += op_size;
6428 if (cfg->method == method)
6429 cfg->real_offset = ip - header->code;
6430 else
6431 cfg->real_offset = inline_offset;
6432 cfg->ip = ip;
6434 context_used = 0;
6436 if (start_new_bblock) {
6437 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
6438 if (start_new_bblock == 2) {
6439 g_assert (ip == tblock->cil_code);
6440 } else {
6441 GET_BBLOCK (cfg, tblock, ip);
6443 cfg->cbb->next_bb = tblock;
6444 cfg->cbb = tblock;
6445 start_new_bblock = 0;
6446 for (i = 0; i < cfg->cbb->in_scount; ++i) {
6447 if (cfg->verbose_level > 3)
6448 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
6449 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
6450 *sp++ = ins;
6452 if (class_inits)
6453 g_slist_free (class_inits);
6454 class_inits = NULL;
6455 } else {
6456 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
6457 link_bblock (cfg, cfg->cbb, tblock);
6458 if (sp != stack_start) {
6459 handle_stack_args (cfg, stack_start, sp - stack_start);
6460 sp = stack_start;
6461 CHECK_UNVERIFIABLE (cfg);
6463 cfg->cbb->next_bb = tblock;
6464 cfg->cbb = tblock;
6465 for (i = 0; i < cfg->cbb->in_scount; ++i) {
6466 if (cfg->verbose_level > 3)
6467 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
6468 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
6469 *sp++ = ins;
6471 g_slist_free (class_inits);
6472 class_inits = NULL;
6476 if (skip_dead_blocks) {
6477 int ip_offset = ip - header->code;
6479 if (ip_offset == bb->end)
6480 bb = bb->next;
6482 if (bb->dead) {
6483 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6485 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6487 if (ip_offset + op_size == bb->end) {
6488 MONO_INST_NEW (cfg, ins, OP_NOP);
6489 MONO_ADD_INS (cfg->cbb, ins);
6490 start_new_bblock = 1;
6492 continue;
6496 * Sequence points are points where the debugger can place a breakpoint.
6497 * Currently, we generate these automatically at points where the IL
6498 * stack is empty.
6500 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6502 * Make methods interruptable at the beginning, and at the targets of
6503 * backward branches.
6504 * Also, do this at the start of every bblock in methods with clauses too,
6505 * to be able to handle instructions with inprecise control flow like
6506 * throw/endfinally.
6507 * Backward branches are handled at the end of method-to-ir ().
6509 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6510 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
6512 /* Avoid sequence points on empty IL like .volatile */
6513 // FIXME: Enable this
6514 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6515 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6516 if ((sp != stack_start) && !sym_seq_point)
6517 ins->flags |= MONO_INST_NONEMPTY_STACK;
6518 MONO_ADD_INS (cfg->cbb, ins);
6520 if (sym_seq_points)
6521 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
6523 if (cfg->prof_coverage) {
6524 guint32 cil_offset = ip - header->code;
6525 gpointer counter = &cfg->coverage_info->data [cil_offset].count;
6526 cfg->coverage_info->data [cil_offset].cil_code = ip;
6528 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
6529 MonoInst *one_ins, *load_ins;
6531 EMIT_NEW_PCONST (cfg, load_ins, counter);
6532 EMIT_NEW_ICONST (cfg, one_ins, 1);
6533 MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
6534 ins->dreg = mono_alloc_ireg (cfg);
6535 ins->inst_basereg = load_ins->dreg;
6536 ins->inst_offset = 0;
6537 ins->sreg2 = one_ins->dreg;
6538 ins->type = STACK_I4;
6539 MONO_ADD_INS (cfg->cbb, ins);
6540 } else {
6541 EMIT_NEW_PCONST (cfg, ins, counter);
6542 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6547 cfg->cbb->real_offset = cfg->real_offset;
6549 if (cfg->verbose_level > 3)
6550 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6552 // Variables shared by CEE_CALLI CEE_CALL CEE_CALLVIRT CEE_JMP.
6553 // Initialize to either what they all need or zero.
6554 gboolean emit_widen = TRUE;
6555 gboolean tailcall = FALSE;
6556 gboolean common_call = FALSE;
6557 MonoInst *keep_this_alive = NULL;
6558 MonoMethod *cmethod = NULL;
6559 MonoMethodSignature *fsig = NULL;
6561 // These are used only in CALL/CALLVIRT but must be initialized also for CALLI,
6562 // since it jumps into CALL/CALLVIRT.
6563 gboolean need_seq_point = FALSE;
6564 gboolean push_res = TRUE;
6565 gboolean skip_ret = FALSE;
6566 gboolean tailcall_remove_ret = FALSE;
6568 // FIXME split 500 lines load/store field into separate file/function.
6570 MonoOpcodeParameter parameter;
6571 const MonoOpcodeInfo* info = mono_opcode_decode (ip, op_size, il_op, &parameter);
6572 g_assert (info);
6573 n = parameter.i32;
6574 token = parameter.i32;
6575 target = parameter.branch_target;
6577 // Check stack size for push/pop except variable cases -- -1 like call/ret/newobj.
6578 const int pushes = info->pushes;
6579 const int pops = info->pops;
6580 if (pushes >= 0 && pops >= 0) {
6581 g_assert (pushes - pops <= 1);
6582 if (pushes - pops == 1)
6583 CHECK_STACK_OVF ();
6585 if (pops >= 0)
6586 CHECK_STACK (pops);
6588 switch (il_op) {
6589 case MONO_CEE_NOP:
6590 if (seq_points && !sym_seq_points && sp != stack_start) {
6592 * The C# compiler uses these nops to notify the JIT that it should
6593 * insert seq points.
6595 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6596 MONO_ADD_INS (cfg->cbb, ins);
6598 if (cfg->keep_cil_nops)
6599 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6600 else
6601 MONO_INST_NEW (cfg, ins, OP_NOP);
6602 MONO_ADD_INS (cfg->cbb, ins);
6603 emitted_funccall_seq_point = FALSE;
6604 break;
6605 case MONO_CEE_BREAK:
6606 if (mini_should_insert_breakpoint (cfg->method)) {
6607 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6608 } else {
6609 MONO_INST_NEW (cfg, ins, OP_NOP);
6610 MONO_ADD_INS (cfg->cbb, ins);
6612 break;
6613 case MONO_CEE_LDARG_0:
6614 case MONO_CEE_LDARG_1:
6615 case MONO_CEE_LDARG_2:
6616 case MONO_CEE_LDARG_3:
6617 case MONO_CEE_LDARG_S:
6618 case MONO_CEE_LDARG:
6619 CHECK_ARG (n);
6620 if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, cfg->arg_types[n])) {
6621 EMIT_NEW_ARGLOADA (cfg, ins, n);
6622 } else {
6623 EMIT_NEW_ARGLOAD (cfg, ins, n);
6625 *sp++ = ins;
6626 break;
6628 case MONO_CEE_LDLOC_0:
6629 case MONO_CEE_LDLOC_1:
6630 case MONO_CEE_LDLOC_2:
6631 case MONO_CEE_LDLOC_3:
6632 case MONO_CEE_LDLOC_S:
6633 case MONO_CEE_LDLOC:
6634 CHECK_LOCAL (n);
6635 if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, header->locals[n])) {
6636 EMIT_NEW_LOCLOADA (cfg, ins, n);
6637 } else {
6638 EMIT_NEW_LOCLOAD (cfg, ins, n);
6640 *sp++ = ins;
6641 break;
6643 case MONO_CEE_STLOC_0:
6644 case MONO_CEE_STLOC_1:
6645 case MONO_CEE_STLOC_2:
6646 case MONO_CEE_STLOC_3:
6647 case MONO_CEE_STLOC_S:
6648 case MONO_CEE_STLOC:
6649 CHECK_LOCAL (n);
6650 --sp;
6651 *sp = convert_value (cfg, header->locals [n], *sp);
6652 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6653 UNVERIFIED;
6654 emit_stloc_ir (cfg, sp, header, n);
6655 inline_costs += 1;
6656 break;
6657 case MONO_CEE_LDARGA_S:
6658 case MONO_CEE_LDARGA:
6659 CHECK_ARG (n);
6660 NEW_ARGLOADA (cfg, ins, n);
6661 MONO_ADD_INS (cfg->cbb, ins);
6662 *sp++ = ins;
6663 break;
6664 case MONO_CEE_STARG_S:
6665 case MONO_CEE_STARG:
6666 --sp;
6667 CHECK_ARG (n);
6668 *sp = convert_value (cfg, param_types [n], *sp);
6669 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
6670 UNVERIFIED;
6671 emit_starg_ir (cfg, sp, n);
6672 break;
6673 case MONO_CEE_LDLOCA:
6674 case MONO_CEE_LDLOCA_S: {
6675 guchar *tmp_ip;
6676 CHECK_LOCAL (n);
6678 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, next_ip, end, n))) {
6679 next_ip = tmp_ip;
6680 il_op = MONO_CEE_INITOBJ;
6681 inline_costs += 1;
6682 break;
6685 EMIT_NEW_LOCLOADA (cfg, ins, n);
6686 *sp++ = ins;
6687 break;
6689 case MONO_CEE_LDNULL:
6690 EMIT_NEW_PCONST (cfg, ins, NULL);
6691 ins->type = STACK_OBJ;
6692 *sp++ = ins;
6693 break;
6694 case MONO_CEE_LDC_I4_M1:
6695 case MONO_CEE_LDC_I4_0:
6696 case MONO_CEE_LDC_I4_1:
6697 case MONO_CEE_LDC_I4_2:
6698 case MONO_CEE_LDC_I4_3:
6699 case MONO_CEE_LDC_I4_4:
6700 case MONO_CEE_LDC_I4_5:
6701 case MONO_CEE_LDC_I4_6:
6702 case MONO_CEE_LDC_I4_7:
6703 case MONO_CEE_LDC_I4_8:
6704 case MONO_CEE_LDC_I4_S:
6705 case MONO_CEE_LDC_I4:
6706 EMIT_NEW_ICONST (cfg, ins, n);
6707 *sp++ = ins;
6708 break;
6709 case MONO_CEE_LDC_I8:
6710 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6711 ins->type = STACK_I8;
6712 ins->dreg = alloc_dreg (cfg, STACK_I8);
6713 ins->inst_l = parameter.i64;
6714 MONO_ADD_INS (cfg->cbb, ins);
6715 *sp++ = ins;
6716 break;
6717 case MONO_CEE_LDC_R4: {
6718 float *f;
6719 gboolean use_aotconst = FALSE;
6721 #ifdef TARGET_POWERPC
6722 /* FIXME: Clean this up */
6723 if (cfg->compile_aot)
6724 use_aotconst = TRUE;
6725 #endif
6726 /* FIXME: we should really allocate this only late in the compilation process */
6727 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
6729 if (use_aotconst) {
6730 MonoInst *cons;
6731 int dreg;
6733 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6735 dreg = alloc_freg (cfg);
6736 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6737 ins->type = cfg->r4_stack_type;
6738 } else {
6739 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6740 ins->type = cfg->r4_stack_type;
6741 ins->dreg = alloc_dreg (cfg, STACK_R8);
6742 ins->inst_p0 = f;
6743 MONO_ADD_INS (cfg->cbb, ins);
6745 *f = parameter.f;
6746 *sp++ = ins;
6747 break;
6749 case MONO_CEE_LDC_R8: {
6750 double *d;
6751 gboolean use_aotconst = FALSE;
6753 #ifdef TARGET_POWERPC
6754 /* FIXME: Clean this up */
6755 if (cfg->compile_aot)
6756 use_aotconst = TRUE;
6757 #endif
6759 /* FIXME: we should really allocate this only late in the compilation process */
6760 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
6762 if (use_aotconst) {
6763 MonoInst *cons;
6764 int dreg;
6766 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6768 dreg = alloc_freg (cfg);
6769 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6770 ins->type = STACK_R8;
6771 } else {
6772 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6773 ins->type = STACK_R8;
6774 ins->dreg = alloc_dreg (cfg, STACK_R8);
6775 ins->inst_p0 = d;
6776 MONO_ADD_INS (cfg->cbb, ins);
6778 *d = parameter.d;
6779 *sp++ = ins;
6780 break;
6782 case MONO_CEE_DUP: {
6783 MonoInst *temp, *store;
6784 sp--;
6785 ins = *sp;
6787 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6788 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6790 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6791 *sp++ = ins;
6793 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6794 *sp++ = ins;
6796 inline_costs += 2;
6797 break;
6799 case MONO_CEE_POP:
6800 --sp;
6802 #ifdef TARGET_X86
6803 if (sp [0]->type == STACK_R8)
6804 /* we need to pop the value from the x86 FP stack */
6805 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6806 #endif
6807 break;
6808 case MONO_CEE_JMP: {
6809 MonoCallInst *call;
6810 int i, n;
6812 INLINE_FAILURE ("jmp");
6813 GSHAREDVT_FAILURE (il_op);
6815 if (stack_start != sp)
6816 UNVERIFIED;
6817 /* FIXME: check the signature matches */
6818 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6819 CHECK_CFG_ERROR;
6821 if (cfg->gshared && mono_method_check_context_used (cmethod))
6822 GENERIC_SHARING_FAILURE (CEE_JMP);
6824 mini_profiler_emit_tail_call (cfg, cmethod);
6826 fsig = mono_method_signature_internal (cmethod);
6827 n = fsig->param_count + fsig->hasthis;
6828 if (cfg->llvm_only) {
6829 MonoInst **args;
6831 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6832 for (i = 0; i < n; ++i)
6833 EMIT_NEW_ARGLOAD (cfg, args [i], i);
6834 ins = mini_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
6836 * The code in mono-basic-block.c treats the rest of the code as dead, but we
6837 * have to emit a normal return since llvm expects it.
6839 if (cfg->ret)
6840 emit_setret (cfg, ins);
6841 MONO_INST_NEW (cfg, ins, OP_BR);
6842 ins->inst_target_bb = end_bblock;
6843 MONO_ADD_INS (cfg->cbb, ins);
6844 link_bblock (cfg, cfg->cbb, end_bblock);
6845 break;
6846 } else {
6847 /* Handle tailcalls similarly to calls */
6848 DISABLE_AOT (cfg);
6850 mini_emit_tailcall_parameters (cfg, fsig);
6851 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6852 call->method = cmethod;
6853 // FIXME Other initialization of the tailcall field occurs after
6854 // it is used. So this is the only "real" use and needs more attention.
6855 call->tailcall = TRUE;
6856 call->signature = fsig;
6857 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6858 call->inst.inst_p0 = cmethod;
6859 for (i = 0; i < n; ++i)
6860 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6862 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
6863 call->vret_var = cfg->vret_addr;
6865 mono_arch_emit_call (cfg, call);
6866 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
6867 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
6870 start_new_bblock = 1;
6871 break;
6873 case MONO_CEE_CALLI: {
6874 // FIXME tail.calli is problemetic because the this pointer's type
6875 // is not in the signature, and we cannot check for a byref valuetype.
6876 MonoInst *addr;
6877 MonoInst *callee = NULL;
6879 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
6880 common_call = TRUE; // i.e. skip_ret/push_res/seq_point logic
6881 cmethod = NULL;
6883 gboolean const inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
6884 ? (next_ip < end && next_ip [0] == CEE_RET)
6885 : ((ins_flag & MONO_INST_TAILCALL) != 0));
6886 ins = NULL;
6888 //GSHAREDVT_FAILURE (il_op);
6889 CHECK_STACK (1);
6890 --sp;
6891 addr = *sp;
6892 g_assert (addr);
6893 fsig = mini_get_signature (method, token, generic_context, cfg->error);
6894 CHECK_CFG_ERROR;
6896 if (method->dynamic && fsig->pinvoke) {
6897 MonoInst *args [3];
6900 * This is a call through a function pointer using a pinvoke
6901 * signature. Have to create a wrapper and call that instead.
6902 * FIXME: This is very slow, need to create a wrapper at JIT time
6903 * instead based on the signature.
6905 EMIT_NEW_IMAGECONST (cfg, args [0], m_class_get_image (method->klass));
6906 EMIT_NEW_PCONST (cfg, args [1], fsig);
6907 args [2] = addr;
6908 // FIXME tailcall?
6909 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6912 n = fsig->param_count + fsig->hasthis;
6914 CHECK_STACK (n);
6916 //g_assert (!virtual_ || fsig->hasthis);
6918 sp -= n;
6920 if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp)) {
6921 if (break_on_unverified ())
6922 check_call_signature (cfg, fsig, sp); // Again, step through it.
6923 UNVERIFIED;
6926 inline_costs += CALL_COST * MIN(10, num_calls++);
6929 * Making generic calls out of gsharedvt methods.
6930 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
6931 * patching gshared method addresses into a gsharedvt method.
6933 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
6935 * We pass the address to the gsharedvt trampoline in the rgctx reg
6937 callee = addr;
6938 g_assert (addr); // Doubles as boolean after tailcall check.
6941 inst_tailcall && is_supported_tailcall (cfg, ip, method, NULL, fsig,
6942 FALSE/*virtual irrelevant*/, addr != NULL, &tailcall);
6944 if (callee) {
6945 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
6946 /* Not tested */
6947 GSHAREDVT_FAILURE (il_op);
6949 if (cfg->llvm_only)
6950 // FIXME:
6951 GSHAREDVT_FAILURE (il_op);
6953 addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
6954 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, callee, tailcall);
6955 goto calli_end;
6958 /* Prevent inlining of methods with indirect calls */
6959 INLINE_FAILURE ("indirect call");
6961 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
6962 MonoJumpInfoType info_type;
6963 gpointer info_data;
6966 * Instead of emitting an indirect call, emit a direct call
6967 * with the contents of the aotconst as the patch info.
6969 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
6970 info_type = (MonoJumpInfoType)addr->inst_c1;
6971 info_data = addr->inst_p0;
6972 } else {
6973 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
6974 info_data = addr->inst_right->inst_left;
6977 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
6978 // non-JIT icall, mostly builtin, but also user-extensible
6979 tailcall = FALSE;
6980 ins = (MonoInst*)mini_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
6981 NULLIFY_INS (addr);
6982 goto calli_end;
6983 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR
6984 || info_type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR) {
6985 tailcall = FALSE;
6986 ins = (MonoInst*)mini_emit_abs_call (cfg, info_type, info_data, fsig, sp);
6987 NULLIFY_INS (addr);
6988 goto calli_end;
6991 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, NULL, tailcall);
6992 goto calli_end;
6994 case MONO_CEE_CALL:
6995 case MONO_CEE_CALLVIRT: {
6996 MonoInst *addr; addr = NULL;
6997 int array_rank; array_rank = 0;
6998 gboolean virtual_; virtual_ = il_op == MONO_CEE_CALLVIRT;
6999 gboolean pass_imt_from_rgctx; pass_imt_from_rgctx = FALSE;
7000 MonoInst *imt_arg; imt_arg = NULL;
7001 gboolean pass_vtable; pass_vtable = FALSE;
7002 gboolean pass_mrgctx; pass_mrgctx = FALSE;
7003 MonoInst *vtable_arg; vtable_arg = NULL;
7004 gboolean check_this; check_this = FALSE;
7005 gboolean delegate_invoke; delegate_invoke = FALSE;
7006 gboolean direct_icall; direct_icall = FALSE;
7007 gboolean tailcall_calli; tailcall_calli = FALSE;
7008 gboolean noreturn; noreturn = FALSE;
7010 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
7011 common_call = FALSE;
7013 // variables to help in assertions
7014 gboolean called_is_supported_tailcall; called_is_supported_tailcall = FALSE;
7015 MonoMethod *tailcall_method; tailcall_method = NULL;
7016 MonoMethod *tailcall_cmethod; tailcall_cmethod = NULL;
7017 MonoMethodSignature *tailcall_fsig; tailcall_fsig = NULL;
7018 gboolean tailcall_virtual; tailcall_virtual = FALSE;
7019 gboolean tailcall_extra_arg; tailcall_extra_arg = FALSE;
7021 gboolean inst_tailcall; inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
7022 ? (next_ip < end && next_ip [0] == CEE_RET)
7023 : ((ins_flag & MONO_INST_TAILCALL) != 0));
7024 ins = NULL;
7026 /* Used to pass arguments to called functions */
7027 HandleCallData cdata;
7028 memset (&cdata, 0, sizeof (HandleCallData));
7030 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7031 CHECK_CFG_ERROR;
7033 if (cfg->verbose_level > 3)
7034 printf ("cmethod = %s\n", mono_method_get_full_name (cmethod));
7036 MonoMethod *cil_method; cil_method = cmethod;
7038 if (constrained_class) {
7039 gboolean constrained_is_generic_param =
7040 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
7041 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
7043 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7044 if (cfg->verbose_level > 2)
7045 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
7046 if (!(constrained_is_generic_param &&
7047 cfg->gshared)) {
7048 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, cfg->error);
7049 CHECK_CFG_ERROR;
7051 } else {
7052 if (cfg->verbose_level > 2)
7053 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
7055 if (constrained_is_generic_param && cfg->gshared) {
7057 * This is needed since get_method_constrained can't find
7058 * the method in klass representing a type var.
7059 * The type var is guaranteed to be a reference type in this
7060 * case.
7062 if (!mini_is_gsharedvt_klass (constrained_class))
7063 g_assert (!m_class_is_valuetype (cmethod->klass));
7064 } else {
7065 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, cfg->error);
7066 CHECK_CFG_ERROR;
7070 if (m_class_is_enumtype (constrained_class) && !strcmp (cmethod->name, "GetHashCode")) {
7071 /* Use the corresponding method from the base type to avoid boxing */
7072 MonoType *base_type = mono_class_enum_basetype_internal (constrained_class);
7073 g_assert (base_type);
7074 constrained_class = mono_class_from_mono_type_internal (base_type);
7075 cmethod = get_method_nofail (constrained_class, cmethod->name, 0, 0);
7076 g_assert (cmethod);
7080 if (!dont_verify && !cfg->skip_visibility) {
7081 MonoMethod *target_method = cil_method;
7082 if (method->is_inflated) {
7083 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), cfg->error);
7084 CHECK_CFG_ERROR;
7086 if (!mono_method_can_access_method (method_definition, target_method) &&
7087 !mono_method_can_access_method (method, cil_method))
7088 emit_method_access_failure (cfg, method, cil_method);
7091 if (mono_security_core_clr_enabled ())
7092 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
7094 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) {
7095 if (!mono_class_is_interface (method->klass))
7096 emit_bad_image_failure (cfg, method, cil_method);
7097 else
7098 virtual_ = TRUE;
7103 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7104 * converts to a callvirt.
7106 * tests/bug-515884.il is an example of this behavior
7108 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7109 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7110 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7111 virtual_ = TRUE;
7114 if (!m_class_is_inited (cmethod->klass))
7115 if (!mono_class_init_internal (cmethod->klass))
7116 TYPE_LOAD_ERROR (cmethod->klass);
7118 fsig = mono_method_signature_internal (cmethod);
7119 if (!fsig)
7120 LOAD_ERROR;
7121 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7122 mini_class_is_system_array (cmethod->klass)) {
7123 array_rank = m_class_get_rank (cmethod->klass);
7124 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg, cmethod)) {
7125 direct_icall = TRUE;
7126 } else if (fsig->pinvoke) {
7127 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
7128 fsig = mono_method_signature_internal (wrapper);
7129 } else if (constrained_class) {
7130 } else {
7131 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
7132 CHECK_CFG_ERROR;
7135 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
7136 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
7138 /* See code below */
7139 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
7140 MonoBasicBlock *tbb;
7142 GET_BBLOCK (cfg, tbb, next_ip);
7143 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7145 * We want to extend the try block to cover the call, but we can't do it if the
7146 * call is made directly since its followed by an exception check.
7148 direct_icall = FALSE;
7152 mono_save_token_info (cfg, image, token, cil_method);
7154 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
7155 need_seq_point = TRUE;
7157 /* Don't support calls made using type arguments for now */
7159 if (cfg->gsharedvt) {
7160 if (mini_is_gsharedvt_signature (fsig))
7161 GSHAREDVT_FAILURE (il_op);
7165 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7166 g_assert_not_reached ();
7168 n = fsig->param_count + fsig->hasthis;
7170 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
7171 UNVERIFIED;
7173 if (!cfg->gshared)
7174 g_assert (!mono_method_check_context_used (cmethod));
7176 CHECK_STACK (n);
7178 //g_assert (!virtual_ || fsig->hasthis);
7180 sp -= n;
7182 if (virtual_ && cmethod && sp [0]->opcode == OP_TYPED_OBJREF) {
7183 ERROR_DECL (error);
7185 MonoMethod *new_cmethod = mono_class_get_virtual_method (sp [0]->klass, cmethod, FALSE, error);
7186 mono_error_assert_ok (error);
7187 cmethod = new_cmethod;
7188 virtual_ = FALSE;
7191 if (cmethod && method_does_not_return (cmethod)) {
7192 cfg->cbb->out_of_line = TRUE;
7193 noreturn = TRUE;
7196 cdata.method = method;
7197 cdata.inst_tailcall = inst_tailcall;
7200 * We have the `constrained.' prefix opcode.
7202 if (constrained_class) {
7203 ins = handle_constrained_call (cfg, cmethod, fsig, constrained_class, sp, &cdata, &cmethod, &virtual_, &emit_widen);
7204 CHECK_CFG_EXCEPTION;
7205 constrained_class = NULL;
7206 if (ins)
7207 goto call_end;
7210 for (int i = 0; i < fsig->param_count; ++i)
7211 sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
7213 if (check_call_signature (cfg, fsig, sp)) {
7214 if (break_on_unverified ())
7215 check_call_signature (cfg, fsig, sp); // Again, step through it.
7216 UNVERIFIED;
7219 if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7220 delegate_invoke = TRUE;
7222 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7223 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7224 mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
7225 emit_widen = FALSE;
7228 if (inst_tailcall) // FIXME
7229 mono_tailcall_print ("missed tailcall intrins_sharable %s -> %s\n", method->name, cmethod->name);
7230 goto call_end;
7234 * Implement a workaround for the inherent races involved in locking:
7235 * Monitor.Enter ()
7236 * try {
7237 * } finally {
7238 * Monitor.Exit ()
7240 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7241 * try block, the Exit () won't be executed, see:
7242 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7243 * To work around this, we extend such try blocks to include the last x bytes
7244 * of the Monitor.Enter () call.
7246 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
7247 MonoBasicBlock *tbb;
7249 GET_BBLOCK (cfg, tbb, next_ip);
7251 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7252 * from Monitor.Enter like ArgumentNullException.
7254 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7255 /* Mark this bblock as needing to be extended */
7256 tbb->extend_try_block = TRUE;
7260 /* Conversion to a JIT intrinsic */
7261 if ((ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7262 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7263 mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
7264 emit_widen = FALSE;
7266 // FIXME This is only missed if in fact the intrinsic involves a call.
7267 if (inst_tailcall) // FIXME
7268 mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method->name, cmethod->name);
7269 goto call_end;
7271 CHECK_CFG_ERROR;
7274 * If the callee is a shared method, then its static cctor
7275 * might not get called after the call was patched.
7277 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7278 emit_class_init (cfg, cmethod->klass);
7279 CHECK_TYPELOAD (cmethod->klass);
7282 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7284 if (cfg->gshared) {
7285 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7287 context_used = mini_method_check_context_used (cfg, cmethod);
7289 if (context_used && mono_class_is_interface (cmethod->klass)) {
7290 /* Generic method interface
7291 calls are resolved via a
7292 helper function and don't
7293 need an imt. */
7294 if (!cmethod_context || !cmethod_context->method_inst)
7295 pass_imt_from_rgctx = TRUE;
7299 * If a shared method calls another
7300 * shared method then the caller must
7301 * have a generic sharing context
7302 * because the magic trampoline
7303 * requires it. FIXME: We shouldn't
7304 * have to force the vtable/mrgctx
7305 * variable here. Instead there
7306 * should be a flag in the cfg to
7307 * request a generic sharing context.
7309 if (context_used &&
7310 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cfg->method->klass)))
7311 mono_get_vtable_var (cfg);
7314 if (pass_vtable) {
7315 if (context_used) {
7316 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7317 } else {
7318 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, cfg->error);
7319 CHECK_CFG_ERROR;
7321 CHECK_TYPELOAD (cmethod->klass);
7322 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7326 if (pass_mrgctx) {
7327 g_assert (!vtable_arg);
7329 if (!cfg->compile_aot) {
7331 * emit_get_rgctx_method () calls mono_class_vtable () so check
7332 * for type load errors before.
7334 mono_class_setup_vtable (cmethod->klass);
7335 CHECK_TYPELOAD (cmethod->klass);
7338 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7340 /* !marshalbyref is needed to properly handle generic methods + remoting */
7341 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7342 MONO_METHOD_IS_FINAL (cmethod)) &&
7343 !mono_class_is_marshalbyref (cmethod->klass)) {
7344 if (virtual_)
7345 check_this = TRUE;
7346 virtual_ = FALSE;
7350 if (pass_imt_from_rgctx) {
7351 g_assert (!pass_vtable);
7353 imt_arg = emit_get_rgctx_method (cfg, context_used,
7354 cmethod, MONO_RGCTX_INFO_METHOD);
7355 g_assert (imt_arg);
7358 if (check_this)
7359 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7361 /* Calling virtual generic methods */
7363 // These temporaries help detangle "pure" computation of
7364 // inputs to is_supported_tailcall from side effects, so that
7365 // is_supported_tailcall can be computed just once.
7366 gboolean virtual_generic; virtual_generic = FALSE;
7367 gboolean virtual_generic_imt; virtual_generic_imt = FALSE;
7369 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7370 !(MONO_METHOD_IS_FINAL (cmethod) &&
7371 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7372 fsig->generic_param_count &&
7373 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
7374 !cfg->llvm_only) {
7376 g_assert (fsig->is_inflated);
7378 virtual_generic = TRUE;
7380 /* Prevent inlining of methods that contain indirect calls */
7381 INLINE_FAILURE ("virtual generic call");
7383 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7384 GSHAREDVT_FAILURE (il_op);
7386 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
7387 virtual_generic_imt = TRUE;
7388 g_assert (!imt_arg);
7389 if (!context_used)
7390 g_assert (cmethod->is_inflated);
7392 imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
7393 g_assert (imt_arg);
7395 virtual_ = TRUE;
7396 vtable_arg = NULL;
7400 // Capture some intent before computing tailcall.
7402 gboolean make_generic_call_out_of_gsharedvt_method;
7403 gboolean will_have_imt_arg;
7405 make_generic_call_out_of_gsharedvt_method = FALSE;
7406 will_have_imt_arg = FALSE;
7409 * Making generic calls out of gsharedvt methods.
7410 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7411 * patching gshared method addresses into a gsharedvt method.
7413 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
7414 !(m_class_get_rank (cmethod->klass) && m_class_get_byval_arg (cmethod->klass)->type != MONO_TYPE_SZARRAY) &&
7415 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
7417 make_generic_call_out_of_gsharedvt_method = TRUE;
7419 if (virtual_) {
7420 if (fsig->generic_param_count) {
7421 will_have_imt_arg = TRUE;
7422 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
7423 will_have_imt_arg = TRUE;
7428 #ifdef ENABLE_NETCORE
7429 if (save_last_error) {
7430 mono_emit_jit_icall (cfg, mono_marshal_clear_last_error, NULL);
7432 #endif
7434 /* Tail prefix / tailcall optimization */
7436 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests.
7437 Inlining and stack traces are not guaranteed however. */
7438 /* FIXME: runtime generic context pointer for jumps? */
7439 /* FIXME: handle this for generic sharing eventually */
7441 // tailcall means "the backend can and will handle it".
7442 // inst_tailcall means the tail. prefix is present.
7443 tailcall_extra_arg = vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass);
7444 tailcall = inst_tailcall && is_supported_tailcall (cfg, ip, method, cmethod, fsig,
7445 virtual_, tailcall_extra_arg, &tailcall_calli);
7446 // Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall).
7447 // Capture values to later assert they don't change.
7448 called_is_supported_tailcall = TRUE;
7449 tailcall_method = method;
7450 tailcall_cmethod = cmethod;
7451 tailcall_fsig = fsig;
7452 tailcall_virtual = virtual_;
7454 if (virtual_generic) {
7455 if (virtual_generic_imt) {
7456 if (tailcall) {
7457 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7458 INLINE_FAILURE ("tailcall");
7460 common_call = TRUE;
7461 goto call_end;
7464 MonoInst *this_temp, *this_arg_temp, *store;
7465 MonoInst *iargs [4];
7467 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7468 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7469 MONO_ADD_INS (cfg->cbb, store);
7471 /* FIXME: This should be a managed pointer */
7472 this_arg_temp = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
7474 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7475 iargs [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
7477 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7478 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
7480 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7482 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7484 if (inst_tailcall) // FIXME
7485 mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method->name, cmethod->name);
7486 goto call_end;
7488 CHECK_CFG_ERROR;
7490 /* Inlining */
7491 if ((cfg->opt & MONO_OPT_INLINE) &&
7492 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7493 mono_method_check_inlining (cfg, cmethod)) {
7494 int costs;
7495 gboolean always = FALSE;
7497 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7498 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7499 /* Prevent inlining of methods that call wrappers */
7500 INLINE_FAILURE ("wrapper call");
7501 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7502 // Neither pinvoke or icall are likely to be tailcalled.
7503 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
7504 always = TRUE;
7507 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
7508 if (costs) {
7509 cfg->real_offset += 5;
7511 if (!MONO_TYPE_IS_VOID (fsig->ret))
7512 /* *sp is already set by inline_method */
7513 ins = *sp;
7515 inline_costs += costs;
7516 // FIXME This is missed if the inlinee contains tail calls that
7517 // would work, but not once inlined into caller.
7518 // This matchingness could be a factor in inlining.
7519 // i.e. Do not inline if it hurts tailcall, do inline
7520 // if it helps and/or or is neutral, and helps performance
7521 // using usual heuristics.
7522 // Note that inlining will expose multiple tailcall opportunities
7523 // so the tradeoff is not obvious. If we can tailcall anything
7524 // like desktop, then this factor mostly falls away, except
7525 // that inlining can affect tailcall performance due to
7526 // signature match/mismatch.
7527 if (inst_tailcall) // FIXME
7528 mono_tailcall_print ("missed tailcall inline %s -> %s\n", method->name, cmethod->name);
7529 goto call_end;
7533 /* Tail recursion elimination */
7534 if (((cfg->opt & MONO_OPT_TAILCALL) || inst_tailcall) && il_op == MONO_CEE_CALL && cmethod == method && next_ip < end && next_ip [0] == CEE_RET && !vtable_arg) {
7535 gboolean has_vtargs = FALSE;
7536 int i;
7538 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7539 INLINE_FAILURE ("tailcall");
7541 /* keep it simple */
7542 for (i = fsig->param_count - 1; !has_vtargs && i >= 0; i--)
7543 has_vtargs = MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod)->params [i]);
7545 if (!has_vtargs) {
7546 if (need_seq_point) {
7547 emit_seq_point (cfg, method, ip, FALSE, TRUE);
7548 need_seq_point = FALSE;
7550 for (i = 0; i < n; ++i)
7551 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7553 mini_profiler_emit_tail_call (cfg, cmethod);
7555 MONO_INST_NEW (cfg, ins, OP_BR);
7556 MONO_ADD_INS (cfg->cbb, ins);
7557 tblock = start_bblock->out_bb [0];
7558 link_bblock (cfg, cfg->cbb, tblock);
7559 ins->inst_target_bb = tblock;
7560 start_new_bblock = 1;
7562 /* skip the CEE_RET, too */
7563 if (ip_in_bb (cfg, cfg->cbb, next_ip))
7564 skip_ret = TRUE;
7565 push_res = FALSE;
7566 need_seq_point = FALSE;
7567 goto call_end;
/* Account call cost for the inliner; cost grows with the number of calls seen so far (capped at 10x) */
7571 inline_costs += CALL_COST * MIN(10, num_calls++);
7574 * Synchronized wrappers.
7575 * It's hard to determine where to replace a method with its synchronized
7576 * wrapper without causing an infinite recursion. The current solution is
7577 * to add the synchronized wrapper in the trampolines, and to
7578 * change the called method to a dummy wrapper, and resolve that wrapper
7579 * to the real method in mono_jit_compile_method ().
7581 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
7582 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
7583 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig)) {
7584 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7585 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7590 * Making generic calls out of gsharedvt methods.
7591 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7592 * patching gshared method addresses into a gsharedvt method.
7594 if (make_generic_call_out_of_gsharedvt_method) {
7595 if (virtual_) {
7596 //if (mono_class_is_interface (cmethod->klass))
7597 //GSHAREDVT_FAILURE (il_op);
7598 // disable for possible remoting calls
7599 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
7600 GSHAREDVT_FAILURE (il_op);
7601 if (fsig->generic_param_count) {
7602 /* virtual generic call */
7603 g_assert (!imt_arg);
7604 g_assert (will_have_imt_arg);
7605 /* Same as the virtual generic case above */
7606 imt_arg = emit_get_rgctx_method (cfg, context_used,
7607 cmethod, MONO_RGCTX_INFO_METHOD);
7608 g_assert (imt_arg);
7609 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7610 vtable_arg = NULL;
7611 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
7612 /* This can happen when we call a fully instantiated iface method */
7613 g_assert (will_have_imt_arg);
7614 imt_arg = emit_get_rgctx_method (cfg, context_used,
7615 cmethod, MONO_RGCTX_INFO_METHOD);
7616 g_assert (imt_arg);
7617 vtable_arg = NULL;
/* Keep the delegate alive across the Invoke call (see mini_emit_method_call_full) */
7621 if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
7622 keep_this_alive = sp [0];
7624 MonoRgctxInfoType info_type;
/* Pick the virtual or non-virtual out-trampoline rgctx slot, then call through it */
7626 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7627 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
7628 else
7629 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
7630 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
7632 if (cfg->llvm_only) {
7633 // FIXME: Avoid initializing vtable_arg
7634 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
7635 if (inst_tailcall) // FIXME
7636 mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method->name, cmethod->name);
7637 } else {
7638 tailcall = tailcall_calli;
7639 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
7640 tailcall_remove_ret |= tailcall;
7642 goto call_end;
7645 /* Generic sharing */
7648 * Use this if the callee is gsharedvt sharable too, since
7649 * at runtime we might find an instantiation so the call cannot
7650 * be patched (the 'no_patch' code path in mini-trampolines.c).
/*
 * Calling a generic method from shared code when the callee is not itself
 * sharable (or sharing is disabled for its class): look up the concrete
 * method address in the rgctx and make an indirect call.
 */
7652 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
7653 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7654 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7655 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
7656 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7657 INLINE_FAILURE ("gshared");
7659 g_assert (cfg->gshared && cmethod);
7660 g_assert (!addr);
7663 * We are compiling a call to a
7664 * generic method from shared code,
7665 * which means that we have to look up
7666 * the method in the rgctx and do an
7667 * indirect call.
/* The indirect call bypasses the usual null check on 'this', so emit one explicitly */
7669 if (fsig->hasthis)
7670 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7672 if (cfg->llvm_only) {
7673 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
7674 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
7675 else
7676 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC);
7677 // FIXME: Avoid initializing imt_arg/vtable_arg
7678 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
7679 if (inst_tailcall) // FIXME
7680 mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method->name, cmethod->name);
7681 } else {
7682 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7683 if (inst_tailcall)
7684 mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli ? "making" : "missed", method->name, cmethod->name);
7685 tailcall = tailcall_calli;
7686 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
7687 tailcall_remove_ret |= tailcall;
7689 goto call_end;
7692 /* Direct calls to icalls */
7693 if (direct_icall) {
7694 MonoMethod *wrapper;
7695 int costs;
7697 /* Inline the wrapper */
7698 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
7700 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
7701 g_assert (costs > 0);
7702 cfg->real_offset += 5;
7704 if (!MONO_TYPE_IS_VOID (fsig->ret))
7705 /* *sp is already set by inline_method */
7706 ins = *sp;
7708 inline_costs += costs;
7710 if (inst_tailcall) // FIXME
7711 mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method->name, cmethod->name);
7712 goto call_end;
7715 /* Array methods */
/*
 * Intrinsify the runtime-provided multi-dimensional array accessors
 * (Set/Get/Address) into direct element-address computation plus a
 * load/store, instead of a real call.
 */
7716 if (array_rank) {
7717 MonoInst *addr;
7719 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7720 MonoInst *val = sp [fsig->param_count];
/* Storing an object reference: emit the array-covariance store check first */
7722 if (val->type == STACK_OBJ) {
7723 MonoInst *iargs [2];
7725 iargs [0] = sp [0];
7726 iargs [1] = val;
7728 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7731 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7732 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7733 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
7734 mini_emit_write_barrier (cfg, addr, val);
7735 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
7736 GSHAREDVT_FAILURE (il_op);
7737 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7738 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7740 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7741 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
/* Unless a readonly. prefix was seen, verify the array's element type first */
7742 if (!m_class_is_valuetype (m_class_get_element_class (cmethod->klass)) && !readonly)
7743 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7744 CHECK_TYPELOAD (cmethod->klass);
7746 readonly = FALSE;
7747 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7748 ins = addr;
7749 } else {
7750 g_assert_not_reached ();
7753 emit_widen = FALSE;
7754 if (inst_tailcall) // FIXME
7755 mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method->name, cmethod->name);
7756 goto call_end;
/* Allow the backend to redirect/intrinsify well-known methods */
7759 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
7760 if (ins) {
7761 if (inst_tailcall) // FIXME
7762 mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method->name, cmethod->name);
7763 goto call_end;
7766 /* Tail prefix / tailcall optimization */
7768 if (tailcall) {
7769 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7770 INLINE_FAILURE ("tailcall");
7774 * Virtual calls in llvm-only mode.
7776 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
7777 ins = mini_emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
7778 goto call_end;
7781 /* Common call */
7782 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
7783 INLINE_FAILURE ("call");
7784 common_call = TRUE;
/*
 * Shared epilogue for all the call-emission paths above. Verifies that the
 * early tailcall decision is still consistent, emits the common call if no
 * special path handled it, pushes the result, and deals with last-error
 * capture, skip-ret and sequence points.
 */
7786 call_end:
7787 // Check that the decision to tailcall would not have changed.
7788 g_assert (!called_is_supported_tailcall || tailcall_method == method);
7789 // FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway.
7790 // If this still fails, restructure the code, or call tailcall_supported again and assert no change.
7791 g_assert (!called_is_supported_tailcall || !tailcall || tailcall_cmethod == cmethod);
7792 g_assert (!called_is_supported_tailcall || tailcall_fsig == fsig);
7793 g_assert (!called_is_supported_tailcall || tailcall_virtual == virtual_);
7794 g_assert (!called_is_supported_tailcall || tailcall_extra_arg == (vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass)));
7796 if (common_call) // FIXME goto call_end && !common_call often skips tailcall processing.
7797 ins = mini_emit_method_call_full (cfg, cmethod, fsig, tailcall, sp, virtual_ ? sp [0] : NULL,
7798 imt_arg, vtable_arg);
7801 * Handle devirt of some A.B.C calls by replacing the result of A.B with a OP_TYPED_OBJREF instruction, so the .C
7802 * call can be devirtualized above.
7804 if (cmethod)
7805 ins = handle_call_res_devirt (cfg, cmethod, ins);
/* Methods marked as never returning: mark everything after the call unreachable */
7807 if (noreturn) {
7808 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
7809 MONO_ADD_INS (cfg->cbb, ins);
7811 calli_end:
/* A real tailcall ends the basic block; nothing after it executes */
7812 if ((tailcall_remove_ret || (common_call && tailcall)) && !cfg->llvm_only) {
7813 link_bblock (cfg, cfg->cbb, end_bblock);
7814 start_new_bblock = 1;
7816 // FIXME: Eliminate unreachable epilogs
7819 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7820 * only reachable from this call.
7822 GET_BBLOCK (cfg, tblock, next_ip);
7823 if (tblock == cfg->cbb || tblock->in_count == 0)
7824 skip_ret = TRUE;
7825 push_res = FALSE;
7826 need_seq_point = FALSE;
7829 if (ins_flag & MONO_INST_TAILCALL)
7830 mini_test_tailcall (cfg, tailcall);
7832 /* End of call, INS should contain the result of the call, if any */
7834 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
7835 g_assert (ins);
7836 if (emit_widen)
7837 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7838 else
7839 *sp++ = ins;
/* SetLastError=true pinvoke: capture the OS error right after the call */
7842 if (save_last_error) {
7843 save_last_error = FALSE;
7844 #ifdef TARGET_WIN32
7845 // Making icalls etc could clobber the value so emit inline code
7846 // to read last error on Windows.
7847 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
7848 ins->dreg = alloc_dreg (cfg, STACK_I4);
7849 ins->type = STACK_I4;
7850 MONO_ADD_INS (cfg->cbb, ins);
7851 mono_emit_jit_icall (cfg, mono_marshal_set_last_error_windows, &ins);
7852 #else
7853 mono_emit_jit_icall (cfg, mono_marshal_set_last_error, NULL);
7854 #endif
7857 if (keep_this_alive) {
7858 MonoInst *dummy_use;
7860 /* See mini_emit_method_call_full () */
7861 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
7864 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
7866 * Clang can convert these calls to tailcalls which screw up the stack
7867 * walk. This happens even when the -fno-optimize-sibling-calls
7868 * option is passed to clang.
7869 * Work around this by emitting a dummy call.
7871 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
7874 CHECK_CFG_EXCEPTION;
7876 if (skip_ret) {
7877 // FIXME When not followed by CEE_RET, correct behavior is to raise an exception.
7878 g_assert (next_ip [0] == CEE_RET);
7879 next_ip += 1;
7880 il_op = MonoOpcodeEnum_Invalid; // Call or ret? Unclear.
7882 ins_flag = 0;
7883 constrained_class = NULL;
7885 if (need_seq_point) {
7886 //check if this is a nested call and remove the non_empty_stack of the last call, only for non-native methods
7887 if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) {
7888 if (emitted_funccall_seq_point) {
7889 if (cfg->last_seq_point)
7890 cfg->last_seq_point->flags |= MONO_INST_NESTED_CALL;
7892 else
7893 emitted_funccall_seq_point = TRUE;
7895 emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
7897 break;
/*
 * CEE_RET: either store the return value into the inline return variable
 * (when this method is being inlined into another), or emit the real
 * method-return sequence and branch to the epilogue block.
 */
7899 case MONO_CEE_RET:
7900 mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
7902 g_assert (!method_does_not_return (method));
7904 if (cfg->method != method) {
7905 /* return from inlined method */
7907 * If in_count == 0, that means the ret is unreachable due to
7908 * being preceeded by a throw. In that case, inline_method () will
7909 * handle setting the return value
7910 * (test case: test_0_inline_throw ()).
7912 if (return_var && cfg->cbb->in_count) {
7913 MonoType *ret_type = mono_method_signature_internal (method)->ret;
7915 MonoInst *store;
7916 CHECK_STACK (1);
7917 --sp;
7918 *sp = convert_value (cfg, ret_type, *sp);
7920 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7921 UNVERIFIED;
7923 //g_assert (returnvar != -1);
7924 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7925 cfg->ret_var_set = TRUE;
7927 } else {
/* Real (non-inlined) return: pop the LMF if one was pushed in the prolog */
7928 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
7929 emit_pop_lmf (cfg);
7931 if (cfg->ret) {
7932 MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
7934 if (seq_points && !sym_seq_points) {
7936 * Place a seq point here too even through the IL stack is not
7937 * empty, so a step over on
7938 * call <FOO>
7939 * ret
7940 * will work correctly.
7942 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7943 MONO_ADD_INS (cfg->cbb, ins);
7946 g_assert (!return_var);
7947 CHECK_STACK (1);
7948 --sp;
7949 *sp = convert_value (cfg, ret_type, *sp);
7951 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7952 UNVERIFIED;
7954 emit_setret (cfg, *sp);
/* The IL stack must be empty at a ret */
7957 if (sp != stack_start)
7958 UNVERIFIED;
7959 MONO_INST_NEW (cfg, ins, OP_BR);
7960 ins->inst_target_bb = end_bblock;
7961 MONO_ADD_INS (cfg->cbb, ins);
7962 link_bblock (cfg, cfg->cbb, end_bblock);
7963 start_new_bblock = 1;
7964 break;
/* Unconditional short branch: spill the IL stack to the target's in-stack if non-empty */
7965 case MONO_CEE_BR_S:
7966 MONO_INST_NEW (cfg, ins, OP_BR);
7967 GET_BBLOCK (cfg, tblock, target);
7968 link_bblock (cfg, cfg->cbb, tblock);
7969 ins->inst_target_bb = tblock;
7970 if (sp != stack_start) {
7971 handle_stack_args (cfg, stack_start, sp - stack_start);
7972 sp = stack_start;
7973 CHECK_UNVERIFIABLE (cfg);
7975 MONO_ADD_INS (cfg->cbb, ins);
7976 start_new_bblock = 1;
7977 inline_costs += BRANCH_COST;
7978 break;
/* Short conditional branches: promoted to the long form via BIG_BRANCH_OFFSET */
7979 case MONO_CEE_BEQ_S:
7980 case MONO_CEE_BGE_S:
7981 case MONO_CEE_BGT_S:
7982 case MONO_CEE_BLE_S:
7983 case MONO_CEE_BLT_S:
7984 case MONO_CEE_BNE_UN_S:
7985 case MONO_CEE_BGE_UN_S:
7986 case MONO_CEE_BGT_UN_S:
7987 case MONO_CEE_BLE_UN_S:
7988 case MONO_CEE_BLT_UN_S:
7989 MONO_INST_NEW (cfg, ins, il_op + BIG_BRANCH_OFFSET);
7991 ADD_BINCOND (NULL);
7993 sp = stack_start;
7994 inline_costs += BRANCH_COST;
7995 break;
/* Unconditional long branch, same handling as BR_S */
7996 case MONO_CEE_BR:
7997 MONO_INST_NEW (cfg, ins, OP_BR);
7999 GET_BBLOCK (cfg, tblock, target);
8000 link_bblock (cfg, cfg->cbb, tblock);
8001 ins->inst_target_bb = tblock;
8002 if (sp != stack_start) {
8003 handle_stack_args (cfg, stack_start, sp - stack_start);
8004 sp = stack_start;
8005 CHECK_UNVERIFIABLE (cfg);
8008 MONO_ADD_INS (cfg->cbb, ins);
8010 start_new_bblock = 1;
8011 inline_costs += BRANCH_COST;
8012 break;
/*
 * BRTRUE/BRFALSE: lowered to a compare-against-zero followed by a
 * conditional branch (BNE_UN for brtrue, BEQ for brfalse).
 */
8013 case MONO_CEE_BRFALSE_S:
8014 case MONO_CEE_BRTRUE_S:
8015 case MONO_CEE_BRFALSE:
8016 case MONO_CEE_BRTRUE: {
8017 MonoInst *cmp;
8018 gboolean is_true = il_op == MONO_CEE_BRTRUE_S || il_op == MONO_CEE_BRTRUE;
8020 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8021 UNVERIFIED;
8023 sp--;
8025 GET_BBLOCK (cfg, tblock, target);
8026 link_bblock (cfg, cfg->cbb, tblock);
8027 GET_BBLOCK (cfg, tblock, next_ip);
8028 link_bblock (cfg, cfg->cbb, tblock);
8030 if (sp != stack_start) {
8031 handle_stack_args (cfg, stack_start, sp - stack_start);
8032 CHECK_UNVERIFIABLE (cfg);
8035 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8036 cmp->sreg1 = sp [0]->dreg;
8037 type_from_op (cfg, cmp, sp [0], NULL);
8038 CHECK_TYPE (cmp);
8040 #if SIZEOF_REGISTER == 4
/* On 32-bit, a 64-bit compare-with-immediate needs a materialized zero constant */
8041 if (cmp->opcode == OP_LCOMPARE_IMM) {
8042 /* Convert it to OP_LCOMPARE */
8043 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8044 ins->type = STACK_I8;
8045 ins->dreg = alloc_dreg (cfg, STACK_I8);
8046 ins->inst_l = 0;
8047 MONO_ADD_INS (cfg->cbb, ins);
8048 cmp->opcode = OP_LCOMPARE;
8049 cmp->sreg2 = ins->dreg;
8051 #endif
8052 MONO_ADD_INS (cfg->cbb, cmp);
8054 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8055 type_from_op (cfg, ins, sp [0], NULL);
8056 MONO_ADD_INS (cfg->cbb, ins);
8057 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
8058 GET_BBLOCK (cfg, tblock, target);
8059 ins->inst_true_bb = tblock;
8060 GET_BBLOCK (cfg, tblock, next_ip);
8061 ins->inst_false_bb = tblock;
8062 start_new_bblock = 2;
8064 sp = stack_start;
8065 inline_costs += BRANCH_COST;
8066 break;
/* Long-form two-operand conditional branches, emitted directly via ADD_BINCOND */
8068 case MONO_CEE_BEQ:
8069 case MONO_CEE_BGE:
8070 case MONO_CEE_BGT:
8071 case MONO_CEE_BLE:
8072 case MONO_CEE_BLT:
8073 case MONO_CEE_BNE_UN:
8074 case MONO_CEE_BGE_UN:
8075 case MONO_CEE_BGT_UN:
8076 case MONO_CEE_BLE_UN:
8077 case MONO_CEE_BLT_UN:
8078 MONO_INST_NEW (cfg, ins, il_op);
8080 ADD_BINCOND (NULL);
8082 sp = stack_start;
8083 inline_costs += BRANCH_COST;
8084 break;
/*
 * CEE_SWITCH: bounds-check the selector against the table size, then either
 * emit OP_SWITCH (ARM / LLVM) or compute the target address from a jump
 * table (index * pointer-size + table base) and branch through a register.
 */
8085 case MONO_CEE_SWITCH: {
8086 MonoInst *src1;
8087 MonoBasicBlock **targets;
8088 MonoBasicBlock *default_bblock;
8089 MonoJumpInfoBBTable *table;
8090 int offset_reg = alloc_preg (cfg);
8091 int target_reg = alloc_preg (cfg);
8092 int table_reg = alloc_preg (cfg);
8093 int sum_reg = alloc_preg (cfg);
8094 gboolean use_op_switch;
8096 n = read32 (ip + 1);
8097 --sp;
8098 src1 = sp [0];
8099 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8100 UNVERIFIED;
8102 ip += 5;
8104 GET_BBLOCK (cfg, default_bblock, next_ip);
8105 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
/* Read the n branch offsets following the opcode; all targets become indirect-jump targets */
8107 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8108 for (i = 0; i < n; ++i) {
8109 GET_BBLOCK (cfg, tblock, next_ip + (gint32)read32 (ip));
8110 targets [i] = tblock;
8111 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8112 ip += 4;
8115 if (sp != stack_start) {
8117 * Link the current bb with the targets as well, so handle_stack_args
8118 * will set their in_stack correctly.
8120 link_bblock (cfg, cfg->cbb, default_bblock);
8121 for (i = 0; i < n; ++i)
8122 link_bblock (cfg, cfg->cbb, targets [i]);
8124 handle_stack_args (cfg, stack_start, sp - stack_start);
8125 sp = stack_start;
8126 CHECK_UNVERIFIABLE (cfg);
8128 /* Undo the links */
8129 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
8130 for (i = 0; i < n; ++i)
8131 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
/* Unsigned compare also catches negative selectors: fall through to default */
8134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8137 for (i = 0; i < n; ++i)
8138 link_bblock (cfg, cfg->cbb, targets [i]);
8140 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8141 table->table = targets;
8142 table->table_size = n;
8144 use_op_switch = FALSE;
8145 #ifdef TARGET_ARM
8146 /* ARM implements SWITCH statements differently */
8147 /* FIXME: Make it use the generic implementation */
8148 if (!cfg->compile_aot)
8149 use_op_switch = TRUE;
8150 #endif
8152 if (COMPILE_LLVM (cfg))
8153 use_op_switch = TRUE;
8155 cfg->cbb->has_jump_table = 1;
8157 if (use_op_switch) {
8158 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8159 ins->sreg1 = src1->dreg;
8160 ins->inst_p0 = table;
8161 ins->inst_many_bb = targets;
8162 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
8163 MONO_ADD_INS (cfg->cbb, ins);
8164 } else {
/* Scale the selector by the pointer size (shift by 3 on 64-bit, 2 on 32-bit) */
8165 if (TARGET_SIZEOF_VOID_P == 8)
8166 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8167 else
8168 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8170 #if SIZEOF_REGISTER == 8
8171 /* The upper word might not be zero, and we add it to a 64 bit address later */
8172 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8173 #endif
8175 if (cfg->compile_aot) {
8176 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8177 } else {
8178 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8179 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8180 ins->inst_p0 = table;
8181 ins->dreg = table_reg;
8182 MONO_ADD_INS (cfg->cbb, ins);
8185 /* FIXME: Use load_memindex */
8186 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8187 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8188 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8190 start_new_bblock = 1;
8191 inline_costs += BRANCH_COST * 2;
8192 break;
/* Indirect loads: delegate to mini_emit_memory_load, which honors volatile/unaligned flags */
8194 case MONO_CEE_LDIND_I1:
8195 case MONO_CEE_LDIND_U1:
8196 case MONO_CEE_LDIND_I2:
8197 case MONO_CEE_LDIND_U2:
8198 case MONO_CEE_LDIND_I4:
8199 case MONO_CEE_LDIND_U4:
8200 case MONO_CEE_LDIND_I8:
8201 case MONO_CEE_LDIND_I:
8202 case MONO_CEE_LDIND_R4:
8203 case MONO_CEE_LDIND_R8:
8204 case MONO_CEE_LDIND_REF:
8205 --sp;
8207 ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (ldind_to_type (il_op)), sp [0], 0, ins_flag);
8208 *sp++ = ins;
8209 ins_flag = 0;
8210 break;
/* Indirect stores: sp[0] is the address, sp[1] the value */
8211 case MONO_CEE_STIND_REF:
8212 case MONO_CEE_STIND_I1:
8213 case MONO_CEE_STIND_I2:
8214 case MONO_CEE_STIND_I4:
8215 case MONO_CEE_STIND_I8:
8216 case MONO_CEE_STIND_R4:
8217 case MONO_CEE_STIND_R8:
8218 case MONO_CEE_STIND_I: {
8219 sp -= 2;
8221 if (ins_flag & MONO_INST_VOLATILE) {
8222 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8223 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
/* stind.r4 may receive an R8 from the evaluation stack; narrow it first */
8226 if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8)
8227 sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]);
8228 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (il_op), sp [0]->dreg, 0, sp [1]->dreg);
8229 ins->flags |= ins_flag;
8230 ins_flag = 0;
8232 MONO_ADD_INS (cfg->cbb, ins);
8234 if (il_op == MONO_CEE_STIND_REF) {
8235 /* stind.ref must only be used with object references. */
8236 if (sp [1]->type != STACK_OBJ)
8237 UNVERIFIED;
/* GC write barrier for reference stores (skipped for null constants and inside the WB wrapper itself) */
8238 if (cfg->gen_write_barriers && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
8239 mini_emit_write_barrier (cfg, sp [0], sp [1]);
8242 inline_costs += 1;
8243 break;
/*
 * CEE_MUL: like the generic binop case below, but without add_widen_op;
 * folds a constant right operand into the immediate form when the
 * architecture supports it.
 */
8245 case MONO_CEE_MUL:
8246 MONO_INST_NEW (cfg, ins, il_op);
8247 sp -= 2;
8248 ins->sreg1 = sp [0]->dreg;
8249 ins->sreg2 = sp [1]->dreg;
8250 type_from_op (cfg, ins, sp [0], sp [1]);
8251 CHECK_TYPE (ins);
8252 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
8254 /* Use the immediate opcodes if possible */
8255 int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8257 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->inst_c0)) {
8258 if (imm_opcode != -1) {
8259 ins->opcode = imm_opcode;
8260 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8261 ins->sreg2 = -1;
/* The constant was folded into the immediate; drop the now-dead const instruction */
8263 NULLIFY_INS (sp [1]);
8267 MONO_ADD_INS ((cfg)->cbb, (ins));
8269 *sp++ = mono_decompose_opcode (cfg, ins);
8270 break;
/*
 * Generic two-operand arithmetic/logic ops: type the result from the
 * operands, insert widening conversions if needed, and fold I4/I8
 * constant right operands into immediate opcodes where legal.
 */
8271 case MONO_CEE_ADD:
8272 case MONO_CEE_SUB:
8273 case MONO_CEE_DIV:
8274 case MONO_CEE_DIV_UN:
8275 case MONO_CEE_REM:
8276 case MONO_CEE_REM_UN:
8277 case MONO_CEE_AND:
8278 case MONO_CEE_OR:
8279 case MONO_CEE_XOR:
8280 case MONO_CEE_SHL:
8281 case MONO_CEE_SHR:
8282 case MONO_CEE_SHR_UN: {
8283 MONO_INST_NEW (cfg, ins, il_op);
8284 sp -= 2;
8285 ins->sreg1 = sp [0]->dreg;
8286 ins->sreg2 = sp [1]->dreg;
8287 type_from_op (cfg, ins, sp [0], sp [1]);
8288 CHECK_TYPE (ins);
8289 add_widen_op (cfg, ins, &sp [0], &sp [1]);
8290 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
8292 /* Use the immediate opcodes if possible */
8293 int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8295 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) &&
8296 mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8297 if (imm_opcode != -1) {
8298 ins->opcode = imm_opcode;
8299 if (sp [1]->opcode == OP_I8CONST) {
8300 #if SIZEOF_REGISTER == 8
8301 ins->inst_imm = sp [1]->inst_l;
8302 #else
8303 ins->inst_l = sp [1]->inst_l;
8304 #endif
8305 } else {
8306 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8308 ins->sreg2 = -1;
8310 /* Might be followed by an instruction added by add_widen_op */
8311 if (sp [1]->next == NULL)
8312 NULLIFY_INS (sp [1]);
8315 MONO_ADD_INS ((cfg)->cbb, (ins));
8317 *sp++ = mono_decompose_opcode (cfg, ins);
8318 break;
/* Unary ops and widening conversions */
8320 case MONO_CEE_NEG:
8321 case MONO_CEE_NOT:
8322 case MONO_CEE_CONV_I1:
8323 case MONO_CEE_CONV_I2:
8324 case MONO_CEE_CONV_I4:
8325 case MONO_CEE_CONV_R4:
8326 case MONO_CEE_CONV_R8:
8327 case MONO_CEE_CONV_U4:
8328 case MONO_CEE_CONV_I8:
8329 case MONO_CEE_CONV_U8:
8330 case MONO_CEE_CONV_OVF_I8:
8331 case MONO_CEE_CONV_OVF_U8:
8332 case MONO_CEE_CONV_R_UN:
8333 /* Special case this earlier so we have long constants in the IR */
8334 if ((il_op == MONO_CEE_CONV_I8 || il_op == MONO_CEE_CONV_U8) && (sp [-1]->opcode == OP_ICONST)) {
8335 int data = sp [-1]->inst_c0;
/* Rewrite the I4 constant in place as an I8 constant (zero- or sign-extended) */
8336 sp [-1]->opcode = OP_I8CONST;
8337 sp [-1]->type = STACK_I8;
8338 #if SIZEOF_REGISTER == 8
8339 if (il_op == MONO_CEE_CONV_U8)
8340 sp [-1]->inst_c0 = (guint32)data;
8341 else
8342 sp [-1]->inst_c0 = data;
8343 #else
8344 if (il_op == MONO_CEE_CONV_U8)
8345 sp [-1]->inst_l = (guint32)data;
8346 else
8347 sp [-1]->inst_l = data;
8348 #endif
8349 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8351 else {
8352 ADD_UNOP (il_op);
8354 break;
/* Overflow-checked narrowing from floating point goes through I8/U8 first */
8355 case MONO_CEE_CONV_OVF_I4:
8356 case MONO_CEE_CONV_OVF_I1:
8357 case MONO_CEE_CONV_OVF_I2:
8358 case MONO_CEE_CONV_OVF_I:
8359 case MONO_CEE_CONV_OVF_U:
8360 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
8361 ADD_UNOP (CEE_CONV_OVF_I8);
8362 ADD_UNOP (il_op);
8363 } else {
8364 ADD_UNOP (il_op);
8366 break;
8367 case MONO_CEE_CONV_OVF_U1:
8368 case MONO_CEE_CONV_OVF_U2:
8369 case MONO_CEE_CONV_OVF_U4:
8370 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
8371 ADD_UNOP (CEE_CONV_OVF_U8);
8372 ADD_UNOP (il_op);
8373 } else {
8374 ADD_UNOP (il_op);
8376 break;
/* Remaining conversions, emitted directly as unops */
8377 case MONO_CEE_CONV_OVF_I1_UN:
8378 case MONO_CEE_CONV_OVF_I2_UN:
8379 case MONO_CEE_CONV_OVF_I4_UN:
8380 case MONO_CEE_CONV_OVF_I8_UN:
8381 case MONO_CEE_CONV_OVF_U1_UN:
8382 case MONO_CEE_CONV_OVF_U2_UN:
8383 case MONO_CEE_CONV_OVF_U4_UN:
8384 case MONO_CEE_CONV_OVF_U8_UN:
8385 case MONO_CEE_CONV_OVF_I_UN:
8386 case MONO_CEE_CONV_OVF_U_UN:
8387 case MONO_CEE_CONV_U2:
8388 case MONO_CEE_CONV_U1:
8389 case MONO_CEE_CONV_I:
8390 case MONO_CEE_CONV_U:
8391 ADD_UNOP (il_op);
8392 CHECK_CFG_EXCEPTION;
8393 break;
/* Overflow-checked arithmetic */
8394 case MONO_CEE_ADD_OVF:
8395 case MONO_CEE_ADD_OVF_UN:
8396 case MONO_CEE_MUL_OVF:
8397 case MONO_CEE_MUL_OVF_UN:
8398 case MONO_CEE_SUB_OVF:
8399 case MONO_CEE_SUB_OVF_UN:
8400 ADD_BINOP (il_op);
8401 break;
/* CEE_CPOBJ: copy a value type between two addresses (dest = sp[0], src = sp[1]) */
8402 case MONO_CEE_CPOBJ:
8403 GSHAREDVT_FAILURE (il_op);
8404 GSHAREDVT_FAILURE (*ip);
8405 klass = mini_get_class (method, token, generic_context);
8406 CHECK_TYPELOAD (klass);
8407 sp -= 2;
8408 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
8409 ins_flag = 0;
8410 break;
/*
 * CEE_LDOBJ: load a value type from an address. Peephole-optimizes the
 * common ldobj+stloc pair (load straight into the local's vreg) and the
 * ldobj+stobj pair (turn it into a memory copy), when the following
 * instruction is in the same basic block.
 */
8411 case MONO_CEE_LDOBJ: {
8412 int loc_index = -1;
8413 int stloc_len = 0;
8415 --sp;
8416 klass = mini_get_class (method, token, generic_context);
8417 CHECK_TYPELOAD (klass);
8419 /* Optimize the common ldobj+stloc combination */
8420 if (next_ip < end) {
8421 switch (next_ip [0]) {
8422 case MONO_CEE_STLOC_S:
8423 CHECK_OPSIZE (7);
8424 loc_index = next_ip [1];
8425 stloc_len = 2;
8426 break;
8427 case MONO_CEE_STLOC_0:
8428 case MONO_CEE_STLOC_1:
8429 case MONO_CEE_STLOC_2:
8430 case MONO_CEE_STLOC_3:
8431 loc_index = next_ip [0] - CEE_STLOC_0;
8432 stloc_len = 1;
8433 break;
8434 default:
8435 break;
8439 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, next_ip)) {
8440 CHECK_LOCAL (loc_index);
/* Load directly into the local's vreg and consume the following stloc */
8442 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), sp [0]->dreg, 0);
8443 ins->dreg = cfg->locals [loc_index]->dreg;
8444 ins->flags |= ins_flag;
8445 il_op = (MonoOpcodeEnum)next_ip [0];
8446 next_ip += stloc_len;
8447 if (ins_flag & MONO_INST_VOLATILE) {
8448 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8449 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
8451 ins_flag = 0;
8452 break;
8455 /* Optimize the ldobj+stobj combination */
8456 if (next_ip + 4 < end && next_ip [0] == CEE_STOBJ && ip_in_bb (cfg, cfg->cbb, next_ip) && read32 (next_ip + 1) == token) {
8457 CHECK_STACK (1);
8459 sp --;
8461 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
8463 il_op = (MonoOpcodeEnum)next_ip [0];
8464 next_ip += 5;
8465 ins_flag = 0;
8466 break;
/* Generic case: plain typed load from the address on the stack */
8469 ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (klass), sp [0], 0, ins_flag);
8470 *sp++ = ins;
8472 ins_flag = 0;
8473 inline_costs += 1;
8474 break;
/*
 * CEE_LDSTR: push a string literal. Dynamic methods embed the already
 * interned object; other wrappers resolve from wrapper data; normal
 * methods either intern at compile time, or emit a runtime lookup for
 * shared/AOT/out-of-line code.
 */
8476 case MONO_CEE_LDSTR:
8477 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8478 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8479 ins->type = STACK_OBJ;
8480 *sp = ins;
8482 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8483 MonoInst *iargs [1];
8484 char *str = (char *)mono_method_get_wrapper_data (method, n);
8486 if (cfg->compile_aot)
8487 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
8488 else
8489 EMIT_NEW_PCONST (cfg, iargs [0], str);
8490 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper_internal, iargs);
8491 } else {
8492 if (cfg->opt & MONO_OPT_SHARED) {
8493 MonoInst *iargs [3];
8495 if (cfg->compile_aot) {
8496 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
/* Runtime lookup by (domain, image, token) for domain-shared code */
8498 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8499 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8500 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8501 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
8502 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), cfg->error);
8503 CHECK_CFG_ERROR;
8504 } else {
8505 if (cfg->cbb->out_of_line) {
8506 MonoInst *iargs [2];
8508 if (image == mono_defaults.corlib) {
8510 * Avoid relocations in AOT and save some space by using a
8511 * version of helper_ldstr specialized to mscorlib.
8513 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8514 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8515 } else {
8516 /* Avoid creating the string object */
8517 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8518 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8519 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8522 else
8523 if (cfg->compile_aot) {
8524 NEW_LDSTRCONST (cfg, ins, image, n);
8525 *sp = ins;
8526 MONO_ADD_INS (cfg->cbb, ins);
8528 else {
/* JIT case: intern the string now and embed the object pointer */
8529 NEW_PCONST (cfg, ins, NULL);
8530 ins->type = STACK_OBJ;
8531 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), cfg->error);
8532 CHECK_CFG_ERROR;
8534 if (!ins->inst_p0)
8535 OUT_OF_MEMORY_FAILURE;
8537 *sp = ins;
8538 MONO_ADD_INS (cfg->cbb, ins);
8543 sp++;
8544 break;
/*
 * CEE_NEWOBJ: allocate an object and call its constructor. Handles
 * access checks, a fast path for corlib exception constructors in
 * out-of-line code, and array constructors via the mono_array_new_N
 * icalls. (NOTE(review): this case continues past the end of this chunk.)
 */
8545 case MONO_CEE_NEWOBJ: {
8546 MonoInst *iargs [2];
8547 MonoMethodSignature *fsig;
8548 MonoInst this_ins;
8549 MonoInst *alloc;
8550 MonoInst *vtable_arg = NULL;
8552 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8553 CHECK_CFG_ERROR;
8555 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
8556 CHECK_CFG_ERROR;
8558 mono_save_token_info (cfg, image, token, cmethod);
8560 if (!mono_class_init_internal (cmethod->klass))
8561 TYPE_LOAD_ERROR (cmethod->klass);
8563 context_used = mini_method_check_context_used (cfg, cmethod);
/* Visibility check: emit a MethodAccessException throw when the caller may not access the ctor */
8565 if (!dont_verify && !cfg->skip_visibility) {
8566 MonoMethod *cil_method = cmethod;
8567 MonoMethod *target_method = cil_method;
8569 if (method->is_inflated) {
8570 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), cfg->error);
8571 CHECK_CFG_ERROR;
8574 if (!mono_method_can_access_method (method_definition, target_method) &&
8575 !mono_method_can_access_method (method, cil_method))
8576 emit_method_access_failure (cfg, method, cil_method);
8579 if (mono_security_core_clr_enabled ())
8580 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
/* Run the target class's cctor-init when a shared generic ctor of another class needs it */
8582 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8583 emit_class_init (cfg, cmethod->klass);
8584 CHECK_TYPELOAD (cmethod->klass);
8588 if (cfg->gsharedvt) {
8589 if (mini_is_gsharedvt_variable_signature (sig))
8590 GSHAREDVT_FAILURE (il_op);
8594 n = fsig->param_count;
8595 CHECK_STACK (n);
8598 * Generate smaller code for the common newobj <exception> instruction in
8599 * argument checking code.
8601 if (cfg->cbb->out_of_line && m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
8602 is_exception_class (cmethod->klass) && n <= 2 &&
8603 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8604 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8605 MonoInst *iargs [3];
8607 sp -= n;
8609 EMIT_NEW_ICONST (cfg, iargs [0], m_class_get_type_token (cmethod->klass));
8610 switch (n) {
8611 case 0:
8612 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8613 break;
8614 case 1:
8615 iargs [1] = sp [0];
8616 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8617 break;
8618 case 2:
8619 iargs [1] = sp [0];
8620 iargs [2] = sp [1];
8621 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8622 break;
8623 default:
8624 g_assert_not_reached ();
8627 inline_costs += 5;
8628 break;
8631 /* move the args to allow room for 'this' in the first position */
8632 while (n--) {
8633 --sp;
8634 sp [1] = sp [0];
8637 for (int i = 0; i < fsig->param_count; ++i)
8638 sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
8640 /* check_call_signature () requires sp[0] to be set */
8641 this_ins.type = STACK_OBJ;
8642 sp [0] = &this_ins;
8643 if (check_call_signature (cfg, fsig, sp))
8644 UNVERIFIED;
8646 iargs [0] = NULL;
/* SZ/MD array constructors: route through the mono_array_new_N icalls */
8648 if (mini_class_is_system_array (cmethod->klass)) {
8649 *sp = emit_get_rgctx_method (cfg, context_used,
8650 cmethod, MONO_RGCTX_INFO_METHOD);
8651 /* Optimize the common cases */
8652 MonoJitICallId function = MONO_JIT_ICALL_ZeroIsReserved;;
8653 int n = fsig->param_count;
8654 switch (n) {
8655 case 1: function = MONO_JIT_ICALL_mono_array_new_1;
8656 break;
8657 case 2: function = MONO_JIT_ICALL_mono_array_new_2;
8658 break;
8659 case 3: function = MONO_JIT_ICALL_mono_array_new_3;
8660 break;
8661 case 4: function = MONO_JIT_ICALL_mono_array_new_4;
8662 break;
8663 default:
8664 // FIXME Maximum value of param_count? Realistically 64. Fits in imm?
8665 if (!array_new_localalloc_ins) {
8666 MONO_INST_NEW (cfg, array_new_localalloc_ins, OP_LOCALLOC_IMM);
8667 array_new_localalloc_ins->dreg = alloc_preg (cfg);
8668 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8669 MONO_ADD_INS (init_localsbb, array_new_localalloc_ins);
8671 array_new_localalloc_ins->inst_imm = MAX (array_new_localalloc_ins->inst_imm, n * sizeof (target_mgreg_t));
8672 int dreg = array_new_localalloc_ins->dreg;
8673 for (int i = 0; i < n; ++i) {
8674 NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
8675 MONO_ADD_INS (cfg->cbb, ins);
8677 EMIT_NEW_ICONST (cfg, ins, n);
8678 sp [1] = ins;
8679 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), dreg);
8680 ins->type = STACK_PTR;
8681 sp [2] = ins;
8682 // FIXME Adjust sp by n - 3? Attempts failed.
8683 function = MONO_JIT_ICALL_mono_array_new_n_icall;
8684 break;
8686 alloc = mono_emit_jit_icall_id (cfg, function, sp);
8687 } else if (cmethod->string_ctor) {
8688 g_assert (!context_used);
8689 g_assert (!vtable_arg);
8690 /* we simply pass a null pointer */
8691 EMIT_NEW_PCONST (cfg, *sp, NULL);
8692 /* now call the string ctor */
8693 alloc = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
8694 } else {
8695 if (m_class_is_valuetype (cmethod->klass)) {
8696 iargs [0] = mono_compile_create_var (cfg, m_class_get_byval_arg (cmethod->klass), OP_LOCAL);
8697 emit_init_rvar (cfg, iargs [0]->dreg, m_class_get_byval_arg (cmethod->klass));
8698 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8700 alloc = NULL;
8703 * The code generated by mini_emit_virtual_call () expects
8704 * iargs [0] to be a boxed instance, but luckily the vcall
8705 * will be transformed into a normal call there.
8707 } else if (context_used) {
8708 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8709 *sp = alloc;
8710 } else {
8711 MonoVTable *vtable = NULL;
8713 if (!cfg->compile_aot)
8714 vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, cfg->error);
8715 CHECK_CFG_ERROR;
8716 CHECK_TYPELOAD (cmethod->klass);
8719 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8720 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8721 * As a workaround, we call class cctors before allocating objects.
8723 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
8724 emit_class_init (cfg, cmethod->klass);
8725 if (cfg->verbose_level > 2)
8726 printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod->klass), m_class_get_name (cmethod->klass));
8727 class_inits = g_slist_prepend (class_inits, cmethod->klass);
8730 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8731 *sp = alloc;
8733 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8735 if (alloc)
8736 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8738 /* Now call the actual ctor */
8739 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
8740 CHECK_CFG_EXCEPTION;
8743 if (alloc == NULL) {
8744 /* Valuetype */
8745 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8746 mini_type_to_eval_stack_type (cfg, m_class_get_byval_arg (ins->klass), ins);
8747 *sp++= ins;
8748 } else {
8749 *sp++ = alloc;
8752 inline_costs += 5;
8753 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
8754 emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
8755 break;
8757 case MONO_CEE_CASTCLASS:
8758 case MONO_CEE_ISINST: {
8759 --sp;
8760 klass = mini_get_class (method, token, generic_context);
8761 CHECK_TYPELOAD (klass);
8762 if (sp [0]->type != STACK_OBJ)
8763 UNVERIFIED;
8765 MONO_INST_NEW (cfg, ins, (il_op == MONO_CEE_ISINST) ? OP_ISINST : OP_CASTCLASS);
8766 ins->dreg = alloc_preg (cfg);
8767 ins->sreg1 = (*sp)->dreg;
8768 ins->klass = klass;
8769 ins->type = STACK_OBJ;
8770 MONO_ADD_INS (cfg->cbb, ins);
8772 CHECK_CFG_EXCEPTION;
8773 *sp++ = ins;
8775 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
8776 break;
8778 case MONO_CEE_UNBOX_ANY: {
8779 MonoInst *res, *addr;
8781 --sp;
8782 klass = mini_get_class (method, token, generic_context);
8783 CHECK_TYPELOAD (klass);
8785 mono_save_token_info (cfg, image, token, klass);
8787 context_used = mini_class_check_context_used (cfg, klass);
8789 if (mini_is_gsharedvt_klass (klass)) {
8790 res = handle_unbox_gsharedvt (cfg, klass, *sp);
8791 inline_costs += 2;
8792 } else if (mini_class_is_reference (klass)) {
8793 if (MONO_INS_IS_PCONST_NULL (*sp)) {
8794 EMIT_NEW_PCONST (cfg, res, NULL);
8795 res->type = STACK_OBJ;
8796 } else {
8797 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
8798 res->dreg = alloc_preg (cfg);
8799 res->sreg1 = (*sp)->dreg;
8800 res->klass = klass;
8801 res->type = STACK_OBJ;
8802 MONO_ADD_INS (cfg->cbb, res);
8803 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
8805 } else if (mono_class_is_nullable (klass)) {
8806 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
8807 } else {
8808 addr = handle_unbox (cfg, klass, sp, context_used);
8809 /* LDOBJ */
8810 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
8811 res = ins;
8812 inline_costs += 2;
8815 *sp ++ = res;
8816 break;
8818 case MONO_CEE_BOX: {
8819 MonoInst *val;
8820 MonoClass *enum_class;
8821 MonoMethod *has_flag;
8823 --sp;
8824 val = *sp;
8825 klass = mini_get_class (method, token, generic_context);
8826 CHECK_TYPELOAD (klass);
8828 mono_save_token_info (cfg, image, token, klass);
8830 context_used = mini_class_check_context_used (cfg, klass);
8832 if (mini_class_is_reference (klass)) {
8833 *sp++ = val;
8834 break;
8837 val = convert_value (cfg, m_class_get_byval_arg (klass), val);
8839 if (klass == mono_defaults.void_class)
8840 UNVERIFIED;
8841 if (target_type_is_incompatible (cfg, m_class_get_byval_arg (klass), val))
8842 UNVERIFIED;
8843 /* frequent check in generic code: box (struct), brtrue */
8846 * Look for:
8848 * <push int/long ptr>
8849 * <push int/long>
8850 * box MyFlags
8851 * constrained. MyFlags
8852 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
8854 * If we find this sequence and the operand types on box and constrained
8855 * are equal, we can emit a specialized instruction sequence instead of
8856 * the very slow HasFlag () call.
8857 * This code sequence is generated by older mcs/csc, the newer one is handled in
8858 * emit_inst_for_method ().
8860 guint32 constrained_token;
8861 guint32 callvirt_token;
8863 if ((cfg->opt & MONO_OPT_INTRINS) &&
8864 // FIXME ip_in_bb as we go?
8865 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8866 (ip = il_read_constrained (next_ip, end, &constrained_token)) &&
8867 ip_in_bb (cfg, cfg->cbb, ip) &&
8868 (ip = il_read_callvirt (ip, end, &callvirt_token)) &&
8869 ip_in_bb (cfg, cfg->cbb, ip) &&
8870 m_class_is_enumtype (klass) &&
8871 (enum_class = mini_get_class (method, constrained_token, generic_context)) &&
8872 (has_flag = mini_get_method (cfg, method, callvirt_token, NULL, generic_context)) &&
8873 has_flag->klass == mono_defaults.enum_class &&
8874 !strcmp (has_flag->name, "HasFlag") &&
8875 has_flag->signature->hasthis &&
8876 has_flag->signature->param_count == 1) {
8877 CHECK_TYPELOAD (enum_class);
8879 if (enum_class == klass) {
8880 MonoInst *enum_this, *enum_flag;
8882 next_ip = ip;
8883 il_op = MONO_CEE_CALLVIRT;
8884 --sp;
8886 enum_this = sp [0];
8887 enum_flag = sp [1];
8889 *sp++ = mini_handle_enum_has_flag (cfg, klass, enum_this, -1, enum_flag);
8890 break;
8894 guint32 unbox_any_token;
8897 * Common in generic code:
8898 * box T1, unbox.any T2.
8900 if ((cfg->opt & MONO_OPT_INTRINS) &&
8901 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8902 (ip = il_read_unbox_any (next_ip, end, &unbox_any_token))) {
8903 MonoClass *unbox_klass = mini_get_class (method, unbox_any_token, generic_context);
8904 CHECK_TYPELOAD (unbox_klass);
8906 if (klass == unbox_klass) {
8907 next_ip = ip;
8908 *sp++ = val;
8909 break;
8913 gboolean is_true;
8915 // FIXME: LLVM can't handle the inconsistent bb linking
8916 if (!mono_class_is_nullable (klass) &&
8917 !mini_is_gsharedvt_klass (klass) &&
8918 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8919 ( (is_true = !!(ip = il_read_brtrue (next_ip, end, &target))) ||
8920 (is_true = !!(ip = il_read_brtrue_s (next_ip, end, &target))) ||
8921 (ip = il_read_brfalse (next_ip, end, &target)) ||
8922 (ip = il_read_brfalse_s (next_ip, end, &target)))) {
8924 int dreg;
8925 MonoBasicBlock *true_bb, *false_bb;
8927 il_op = (MonoOpcodeEnum)next_ip [0];
8928 next_ip = ip;
8930 if (cfg->verbose_level > 3) {
8931 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8932 printf ("<box+brtrue opt>\n");
8936 * We need to link both bblocks, since it is needed for handling stack
8937 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8938 * Branching to only one of them would lead to inconsistencies, so
8939 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8941 GET_BBLOCK (cfg, true_bb, target);
8942 GET_BBLOCK (cfg, false_bb, next_ip);
8944 mono_link_bblock (cfg, cfg->cbb, true_bb);
8945 mono_link_bblock (cfg, cfg->cbb, false_bb);
8947 if (sp != stack_start) {
8948 handle_stack_args (cfg, stack_start, sp - stack_start);
8949 sp = stack_start;
8950 CHECK_UNVERIFIABLE (cfg);
8953 if (COMPILE_LLVM (cfg)) {
8954 dreg = alloc_ireg (cfg);
8955 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8956 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8958 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8959 } else {
8960 /* The JIT can't eliminate the iconst+compare */
8961 MONO_INST_NEW (cfg, ins, OP_BR);
8962 ins->inst_target_bb = is_true ? true_bb : false_bb;
8963 MONO_ADD_INS (cfg->cbb, ins);
8966 start_new_bblock = 1;
8967 break;
8970 if (m_class_is_enumtype (klass) && !mini_is_gsharedvt_klass (klass) && !(val->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4)) {
8971 /* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is ran after the long decomp pass */
8972 if (val->opcode == OP_ICONST) {
8973 MONO_INST_NEW (cfg, ins, OP_BOX_ICONST);
8974 ins->type = STACK_OBJ;
8975 ins->klass = klass;
8976 ins->inst_c0 = val->inst_c0;
8977 ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
8978 } else {
8979 MONO_INST_NEW (cfg, ins, OP_BOX);
8980 ins->type = STACK_OBJ;
8981 ins->klass = klass;
8982 ins->sreg1 = val->dreg;
8983 ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
8985 MONO_ADD_INS (cfg->cbb, ins);
8986 *sp++ = ins;
8987 /* Create domainvar early so it gets initialized earlier than this code */
8988 if (cfg->opt & MONO_OPT_SHARED)
8989 mono_get_domainvar (cfg);
8990 } else {
8991 *sp++ = mini_emit_box (cfg, val, klass, context_used);
8993 CHECK_CFG_EXCEPTION;
8994 inline_costs += 1;
8995 break;
8997 case MONO_CEE_UNBOX: {
8998 --sp;
8999 klass = mini_get_class (method, token, generic_context);
9000 CHECK_TYPELOAD (klass);
9002 mono_save_token_info (cfg, image, token, klass);
9004 context_used = mini_class_check_context_used (cfg, klass);
9006 if (mono_class_is_nullable (klass)) {
9007 MonoInst *val;
9009 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9010 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), m_class_get_byval_arg (val->klass));
9012 *sp++= ins;
9013 } else {
9014 ins = handle_unbox (cfg, klass, sp, context_used);
9015 *sp++ = ins;
9017 inline_costs += 2;
9018 break;
9020 case MONO_CEE_LDFLD:
9021 case MONO_CEE_LDFLDA:
9022 case MONO_CEE_STFLD:
9023 case MONO_CEE_LDSFLD:
9024 case MONO_CEE_LDSFLDA:
9025 case MONO_CEE_STSFLD: {
9026 MonoClassField *field;
9027 #ifndef DISABLE_REMOTING
9028 int costs;
9029 #endif
9030 guint foffset;
9031 gboolean is_instance;
9032 gpointer addr = NULL;
9033 gboolean is_special_static;
9034 MonoType *ftype;
9035 MonoInst *store_val = NULL;
9036 MonoInst *thread_ins;
9038 is_instance = (il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDFLDA || il_op == MONO_CEE_STFLD);
9039 if (is_instance) {
9040 if (il_op == MONO_CEE_STFLD) {
9041 sp -= 2;
9042 store_val = sp [1];
9043 } else {
9044 --sp;
9046 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9047 UNVERIFIED;
9048 if (il_op != MONO_CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9049 UNVERIFIED;
9050 } else {
9051 if (il_op == MONO_CEE_STSFLD) {
9052 sp--;
9053 store_val = sp [0];
9057 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9058 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
9059 klass = field->parent;
9061 else {
9062 field = mono_field_from_token_checked (image, token, &klass, generic_context, cfg->error);
9063 CHECK_CFG_ERROR;
9065 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9066 FIELD_ACCESS_FAILURE (method, field);
9067 mono_class_init_internal (klass);
9069 /* if the class is Critical then transparent code cannot access it's fields */
9070 if (!is_instance && mono_security_core_clr_enabled ())
9071 ensure_method_is_allowed_to_access_field (cfg, method, field);
9073 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9074 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9075 if (mono_security_core_clr_enabled ())
9076 ensure_method_is_allowed_to_access_field (cfg, method, field);
9079 ftype = mono_field_get_type_internal (field);
9082 * LDFLD etc. is usable on static fields as well, so convert those cases to
9083 * the static case.
9085 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
9086 switch (il_op) {
9087 case MONO_CEE_LDFLD:
9088 il_op = MONO_CEE_LDSFLD;
9089 break;
9090 case MONO_CEE_STFLD:
9091 il_op = MONO_CEE_STSFLD;
9092 break;
9093 case MONO_CEE_LDFLDA:
9094 il_op = MONO_CEE_LDSFLDA;
9095 break;
9096 default:
9097 g_assert_not_reached ();
9099 is_instance = FALSE;
9102 context_used = mini_class_check_context_used (cfg, klass);
9104 if (il_op == MONO_CEE_LDSFLD) {
9105 ins = mini_emit_inst_for_field_load (cfg, field);
9106 if (ins) {
9107 *sp++ = ins;
9108 goto field_access_end;
9112 /* INSTANCE CASE */
9114 foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset;
9115 if (il_op == MONO_CEE_STFLD) {
9116 sp [1] = convert_value (cfg, field->type, sp [1]);
9117 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9118 UNVERIFIED;
9119 #ifndef DISABLE_REMOTING
9120 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9121 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9122 MonoInst *iargs [5];
9124 GSHAREDVT_FAILURE (il_op);
9126 iargs [0] = sp [0];
9127 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9128 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9129 EMIT_NEW_ICONST (cfg, iargs [3], m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) :
9130 field->offset);
9131 iargs [4] = sp [1];
9133 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9134 costs = inline_method (cfg, stfld_wrapper, mono_method_signature_internal (stfld_wrapper),
9135 iargs, ip, cfg->real_offset, TRUE);
9136 CHECK_CFG_EXCEPTION;
9137 g_assert (costs > 0);
9139 cfg->real_offset += 5;
9141 inline_costs += costs;
9142 } else {
9143 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9145 } else
9146 #endif
9148 MonoInst *store;
9150 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
9152 if (ins_flag & MONO_INST_VOLATILE) {
9153 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9154 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9157 if (mini_is_gsharedvt_klass (klass)) {
9158 MonoInst *offset_ins;
9160 context_used = mini_class_check_context_used (cfg, klass);
9162 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9163 /* The value is offset by 1 */
9164 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9165 dreg = alloc_ireg_mp (cfg);
9166 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9167 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
9168 store = mini_emit_storing_write_barrier (cfg, ins, sp [1]);
9169 } else {
9170 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
9171 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9173 } else {
9174 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
9175 /* insert call to write barrier */
9176 MonoInst *ptr;
9177 int dreg;
9179 dreg = alloc_ireg_mp (cfg);
9180 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9181 store = mini_emit_storing_write_barrier (cfg, ptr, sp [1]);
9182 } else {
9183 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9187 if (sp [0]->opcode != OP_LDADDR)
9188 store->flags |= MONO_INST_FAULT;
9190 store->flags |= ins_flag;
9192 goto field_access_end;
9195 #ifndef DISABLE_REMOTING
9196 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9197 MonoMethod *wrapper = (il_op == MONO_CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9198 MonoInst *iargs [4];
9200 GSHAREDVT_FAILURE (il_op);
9202 iargs [0] = sp [0];
9203 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9204 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9205 EMIT_NEW_ICONST (cfg, iargs [3], m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset);
9206 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9207 costs = inline_method (cfg, wrapper, mono_method_signature_internal (wrapper),
9208 iargs, ip, cfg->real_offset, TRUE);
9209 CHECK_CFG_EXCEPTION;
9210 g_assert (costs > 0);
9212 cfg->real_offset += 5;
9214 *sp++ = iargs [0];
9216 inline_costs += costs;
9217 } else {
9218 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9219 *sp++ = ins;
9221 } else
9222 #endif
9223 if (is_instance) {
9224 if (sp [0]->type == STACK_VTYPE) {
9225 MonoInst *var;
9227 /* Have to compute the address of the variable */
9229 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9230 if (!var)
9231 var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, sp [0]->dreg);
9232 else
9233 g_assert (var->klass == klass);
9235 EMIT_NEW_VARLOADA (cfg, ins, var, m_class_get_byval_arg (var->klass));
9236 sp [0] = ins;
9239 if (il_op == MONO_CEE_LDFLDA) {
9240 if (sp [0]->type == STACK_OBJ) {
9241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9242 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9245 dreg = alloc_ireg_mp (cfg);
9247 if (mini_is_gsharedvt_klass (klass)) {
9248 MonoInst *offset_ins;
9250 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9251 /* The value is offset by 1 */
9252 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9253 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9254 } else {
9255 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9257 ins->klass = mono_class_from_mono_type_internal (field->type);
9258 ins->type = STACK_MP;
9259 *sp++ = ins;
9260 } else {
9261 MonoInst *load;
9263 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
9265 #ifdef MONO_ARCH_SIMD_INTRINSICS
9266 if (sp [0]->opcode == OP_LDADDR && m_class_is_simd_type (klass) && cfg->opt & MONO_OPT_SIMD) {
9267 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
9268 if (ins) {
9269 *sp++ = ins;
9270 goto field_access_end;
9273 #endif
9275 MonoInst *field_add_inst = sp [0];
9276 if (mini_is_gsharedvt_klass (klass)) {
9277 MonoInst *offset_ins;
9279 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9280 /* The value is offset by 1 */
9281 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9282 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
9283 foffset = 0;
9286 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
9288 if (sp [0]->opcode != OP_LDADDR)
9289 load->flags |= MONO_INST_FAULT;
9290 *sp++ = load;
9294 if (is_instance)
9295 goto field_access_end;
9297 /* STATIC CASE */
9298 context_used = mini_class_check_context_used (cfg, klass);
9300 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
9301 mono_error_set_field_missing (cfg->error, field->parent, field->name, NULL, "Using static instructions with literal field");
9302 CHECK_CFG_ERROR;
9305 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9306 * to be called here.
9308 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9309 mono_class_vtable_checked (cfg->domain, klass, cfg->error);
9310 CHECK_CFG_ERROR;
9311 CHECK_TYPELOAD (klass);
9313 mono_domain_lock (cfg->domain);
9314 if (cfg->domain->special_static_fields)
9315 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9316 mono_domain_unlock (cfg->domain);
9318 is_special_static = mono_class_field_is_special_static (field);
9320 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
9321 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
9322 else
9323 thread_ins = NULL;
9325 /* Generate IR to compute the field address */
9326 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9328 * Fast access to TLS data
9329 * Inline version of get_thread_static_data () in
9330 * threads.c.
9332 guint32 offset;
9333 int idx, static_data_reg, array_reg, dreg;
9335 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
9336 GSHAREDVT_FAILURE (il_op);
9338 static_data_reg = alloc_ireg (cfg);
9339 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
9341 if (cfg->compile_aot) {
9342 int offset_reg, offset2_reg, idx_reg;
9344 /* For TLS variables, this will return the TLS offset */
9345 EMIT_NEW_SFLDACONST (cfg, ins, field);
9346 offset_reg = ins->dreg;
9347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9348 idx_reg = alloc_ireg (cfg);
9349 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
9350 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2);
9351 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9352 array_reg = alloc_ireg (cfg);
9353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9354 offset2_reg = alloc_ireg (cfg);
9355 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
9356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
9357 dreg = alloc_ireg (cfg);
9358 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9359 } else {
9360 offset = (gsize)addr & 0x7fffffff;
9361 idx = offset & 0x3f;
9363 array_reg = alloc_ireg (cfg);
9364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * TARGET_SIZEOF_VOID_P);
9365 dreg = alloc_ireg (cfg);
9366 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
9368 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9369 (cfg->compile_aot && is_special_static) ||
9370 (context_used && is_special_static)) {
9371 MonoInst *iargs [2];
9373 g_assert (field->parent);
9374 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9375 if (context_used) {
9376 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9377 field, MONO_RGCTX_INFO_CLASS_FIELD);
9378 } else {
9379 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9381 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9382 } else if (context_used) {
9383 MonoInst *static_data;
9386 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9387 method->klass->name_space, method->klass->name, method->name,
9388 depth, field->offset);
9391 if (mono_class_needs_cctor_run (klass, method))
9392 emit_class_init (cfg, klass);
9395 * The pointer we're computing here is
9397 * super_info.static_data + field->offset
9399 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
9400 klass, MONO_RGCTX_INFO_STATIC_DATA);
9402 if (mini_is_gsharedvt_klass (klass)) {
9403 MonoInst *offset_ins;
9405 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9406 /* The value is offset by 1 */
9407 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9408 dreg = alloc_ireg_mp (cfg);
9409 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9410 } else if (field->offset == 0) {
9411 ins = static_data;
9412 } else {
9413 int addr_reg = mono_alloc_preg (cfg);
9414 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9416 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9417 MonoInst *iargs [2];
9419 g_assert (field->parent);
9420 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9421 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9422 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9423 } else {
9424 MonoVTable *vtable = NULL;
9426 if (!cfg->compile_aot)
9427 vtable = mono_class_vtable_checked (cfg->domain, klass, cfg->error);
9428 CHECK_CFG_ERROR;
9429 CHECK_TYPELOAD (klass);
9431 if (!addr) {
9432 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
9433 if (!(g_slist_find (class_inits, klass))) {
9434 emit_class_init (cfg, klass);
9435 if (cfg->verbose_level > 2)
9436 printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass), m_class_get_name (klass), mono_field_get_name (field));
9437 class_inits = g_slist_prepend (class_inits, klass);
9439 } else {
9440 if (cfg->run_cctors) {
9441 /* This makes so that inline cannot trigger */
9442 /* .cctors: too many apps depend on them */
9443 /* running with a specific order... */
9444 g_assert (vtable);
9445 if (!vtable->initialized && m_class_has_cctor (vtable->klass))
9446 INLINE_FAILURE ("class init");
9447 if (!mono_runtime_class_init_full (vtable, cfg->error)) {
9448 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9449 goto exception_exit;
9453 if (cfg->compile_aot)
9454 EMIT_NEW_SFLDACONST (cfg, ins, field);
9455 else {
9456 g_assert (vtable);
9457 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9458 g_assert (addr);
9459 EMIT_NEW_PCONST (cfg, ins, addr);
9461 } else {
9462 MonoInst *iargs [1];
9463 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9464 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9468 /* Generate IR to do the actual load/store operation */
9470 if ((il_op == MONO_CEE_STFLD || il_op == MONO_CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
9471 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9472 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9475 if (il_op == MONO_CEE_LDSFLDA) {
9476 ins->klass = mono_class_from_mono_type_internal (ftype);
9477 ins->type = STACK_PTR;
9478 *sp++ = ins;
9479 } else if (il_op == MONO_CEE_STSFLD) {
9480 MonoInst *store;
9482 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9483 store->flags |= ins_flag;
9484 } else {
9485 gboolean is_const = FALSE;
9486 MonoVTable *vtable = NULL;
9487 gpointer addr = NULL;
9489 if (!context_used) {
9490 vtable = mono_class_vtable_checked (cfg->domain, klass, cfg->error);
9491 CHECK_CFG_ERROR;
9492 CHECK_TYPELOAD (klass);
9494 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9495 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9496 int ro_type = ftype->type;
9497 if (!addr)
9498 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9499 if (ro_type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (ftype->data.klass)) {
9500 ro_type = mono_class_enum_basetype_internal (ftype->data.klass)->type;
9503 GSHAREDVT_FAILURE (il_op);
9505 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9506 is_const = TRUE;
9507 switch (ro_type) {
9508 case MONO_TYPE_BOOLEAN:
9509 case MONO_TYPE_U1:
9510 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9511 sp++;
9512 break;
9513 case MONO_TYPE_I1:
9514 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9515 sp++;
9516 break;
9517 case MONO_TYPE_CHAR:
9518 case MONO_TYPE_U2:
9519 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9520 sp++;
9521 break;
9522 case MONO_TYPE_I2:
9523 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9524 sp++;
9525 break;
9526 break;
9527 case MONO_TYPE_I4:
9528 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9529 sp++;
9530 break;
9531 case MONO_TYPE_U4:
9532 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9533 sp++;
9534 break;
9535 case MONO_TYPE_I:
9536 case MONO_TYPE_U:
9537 case MONO_TYPE_PTR:
9538 case MONO_TYPE_FNPTR:
9539 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9540 mini_type_to_eval_stack_type ((cfg), field->type, *sp);
9541 sp++;
9542 break;
9543 case MONO_TYPE_STRING:
9544 case MONO_TYPE_OBJECT:
9545 case MONO_TYPE_CLASS:
9546 case MONO_TYPE_SZARRAY:
9547 case MONO_TYPE_ARRAY:
9548 if (!mono_gc_is_moving ()) {
9549 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9550 mini_type_to_eval_stack_type ((cfg), field->type, *sp);
9551 sp++;
9552 } else {
9553 is_const = FALSE;
9555 break;
9556 case MONO_TYPE_I8:
9557 case MONO_TYPE_U8:
9558 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9559 sp++;
9560 break;
9561 case MONO_TYPE_R4:
9562 case MONO_TYPE_R8:
9563 case MONO_TYPE_VALUETYPE:
9564 default:
9565 is_const = FALSE;
9566 break;
9570 if (!is_const) {
9571 MonoInst *load;
9573 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9574 load->flags |= ins_flag;
9575 *sp++ = load;
9579 field_access_end:
9580 if ((il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
9581 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9582 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9585 ins_flag = 0;
9586 break;
9588 case MONO_CEE_STOBJ:
9589 sp -= 2;
9590 klass = mini_get_class (method, token, generic_context);
9591 CHECK_TYPELOAD (klass);
9593 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9594 mini_emit_memory_store (cfg, m_class_get_byval_arg (klass), sp [0], sp [1], ins_flag);
9595 ins_flag = 0;
9596 inline_costs += 1;
9597 break;
9600 * Array opcodes
9602 case MONO_CEE_NEWARR: {
9603 MonoInst *len_ins;
9604 const char *data_ptr;
9605 int data_size = 0;
9606 guint32 field_token;
9608 --sp;
9610 klass = mini_get_class (method, token, generic_context);
9611 CHECK_TYPELOAD (klass);
9612 if (m_class_get_byval_arg (klass)->type == MONO_TYPE_VOID)
9613 UNVERIFIED;
9615 context_used = mini_class_check_context_used (cfg, klass);
9617 if (sp [0]->type == STACK_I8 || (TARGET_SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9618 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
9619 ins->sreg1 = sp [0]->dreg;
9620 ins->type = STACK_I4;
9621 ins->dreg = alloc_ireg (cfg);
9622 MONO_ADD_INS (cfg->cbb, ins);
9623 *sp = mono_decompose_opcode (cfg, ins);
9626 if (context_used) {
9627 MonoInst *args [3];
9628 MonoClass *array_class = mono_class_create_array (klass, 1);
9629 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
9631 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
9633 /* vtable */
9634 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
9635 array_class, MONO_RGCTX_INFO_VTABLE);
9636 /* array len */
9637 args [1] = sp [0];
9639 if (managed_alloc)
9640 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9641 else
9642 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
9643 } else {
9644 if (cfg->opt & MONO_OPT_SHARED) {
9645 /* Decompose now to avoid problems with references to the domainvar */
9646 MonoInst *iargs [3];
9648 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9649 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9650 iargs [2] = sp [0];
9652 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
9653 } else {
9654 /* Decompose later since it is needed by abcrem */
9655 MonoClass *array_type = mono_class_create_array (klass, 1);
9656 mono_class_vtable_checked (cfg->domain, array_type, cfg->error);
9657 CHECK_CFG_ERROR;
9658 CHECK_TYPELOAD (array_type);
9660 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9661 ins->dreg = alloc_ireg_ref (cfg);
9662 ins->sreg1 = sp [0]->dreg;
9663 ins->inst_newa_class = klass;
9664 ins->type = STACK_OBJ;
9665 ins->klass = array_type;
9666 MONO_ADD_INS (cfg->cbb, ins);
9667 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
9668 cfg->cbb->needs_decompose = TRUE;
9670 /* Needed so mono_emit_load_get_addr () gets called */
9671 mono_get_got_var (cfg);
9675 len_ins = sp [0];
9676 ip += 5;
9677 *sp++ = ins;
9678 inline_costs += 1;
9681 * we inline/optimize the initialization sequence if possible.
9682 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9683 * for small sizes open code the memcpy
9684 * ensure the rva field is big enough
9686 if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end
9687 && ip_in_bb (cfg, cfg->cbb, next_ip)
9688 && (len_ins->opcode == OP_ICONST)
9689 && (data_ptr = initialize_array_data (cfg, method,
9690 cfg->compile_aot, next_ip, end, klass,
9691 len_ins->inst_c0, &data_size, &field_token,
9692 &il_op, &next_ip))) {
9693 MonoMethod *memcpy_method = mini_get_memcpy_method ();
9694 MonoInst *iargs [3];
9695 int add_reg = alloc_ireg_mp (cfg);
9697 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
9698 if (cfg->compile_aot) {
9699 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, m_class_get_image (method->klass), GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9700 } else {
9701 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9703 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9704 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9707 break;
9709 case MONO_CEE_LDLEN:
9710 --sp;
9711 if (sp [0]->type != STACK_OBJ)
9712 UNVERIFIED;
9714 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9715 ins->dreg = alloc_preg (cfg);
9716 ins->sreg1 = sp [0]->dreg;
9717 ins->inst_imm = MONO_STRUCT_OFFSET (MonoArray, max_length);
9718 ins->type = STACK_I4;
9719 /* This flag will be inherited by the decomposition */
9720 ins->flags |= MONO_INST_FAULT | MONO_INST_INVARIANT_LOAD;
9721 MONO_ADD_INS (cfg->cbb, ins);
9722 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
9723 cfg->cbb->needs_decompose = TRUE;
9724 *sp++ = ins;
9725 break;
9726 case MONO_CEE_LDELEMA:
9727 sp -= 2;
9728 if (sp [0]->type != STACK_OBJ)
9729 UNVERIFIED;
9731 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9733 klass = mini_get_class (method, token, generic_context);
9734 CHECK_TYPELOAD (klass);
9735 /* we need to make sure that this array is exactly the type it needs
9736 * to be for correctness. the wrappers are lax with their usage
9737 * so we need to ignore them here
9739 if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9740 MonoClass *array_class = mono_class_create_array (klass, 1);
9741 mini_emit_check_array_type (cfg, sp [0], array_class);
9742 CHECK_TYPELOAD (array_class);
9745 readonly = FALSE;
9746 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9747 *sp++ = ins;
9748 break;
9749 case MONO_CEE_LDELEM:
9750 case MONO_CEE_LDELEM_I1:
9751 case MONO_CEE_LDELEM_U1:
9752 case MONO_CEE_LDELEM_I2:
9753 case MONO_CEE_LDELEM_U2:
9754 case MONO_CEE_LDELEM_I4:
9755 case MONO_CEE_LDELEM_U4:
9756 case MONO_CEE_LDELEM_I8:
9757 case MONO_CEE_LDELEM_I:
9758 case MONO_CEE_LDELEM_R4:
9759 case MONO_CEE_LDELEM_R8:
9760 case MONO_CEE_LDELEM_REF: {
9761 MonoInst *addr;
9763 sp -= 2;
9765 if (il_op == MONO_CEE_LDELEM) {
9766 klass = mini_get_class (method, token, generic_context);
9767 CHECK_TYPELOAD (klass);
9768 mono_class_init_internal (klass);
9770 else
9771 klass = array_access_to_klass (il_op);
9773 if (sp [0]->type != STACK_OBJ)
9774 UNVERIFIED;
9776 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9778 if (mini_is_gsharedvt_variable_klass (klass)) {
9779 // FIXME-VT: OP_ICONST optimization
9780 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9781 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
9782 ins->opcode = OP_LOADV_MEMBASE;
9783 } else if (sp [1]->opcode == OP_ICONST) {
9784 int array_reg = sp [0]->dreg;
9785 int index_reg = sp [1]->dreg;
9786 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
9788 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
9789 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
9791 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9792 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset);
9793 } else {
9794 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9795 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
9797 *sp++ = ins;
9798 break;
9800 case MONO_CEE_STELEM_I:
9801 case MONO_CEE_STELEM_I1:
9802 case MONO_CEE_STELEM_I2:
9803 case MONO_CEE_STELEM_I4:
9804 case MONO_CEE_STELEM_I8:
9805 case MONO_CEE_STELEM_R4:
9806 case MONO_CEE_STELEM_R8:
9807 case MONO_CEE_STELEM_REF:
9808 case MONO_CEE_STELEM: {
9809 sp -= 3;
9811 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9813 if (il_op == MONO_CEE_STELEM) {
9814 klass = mini_get_class (method, token, generic_context);
9815 CHECK_TYPELOAD (klass);
9816 mono_class_init_internal (klass);
9818 else
9819 klass = array_access_to_klass (il_op);
9821 if (sp [0]->type != STACK_OBJ)
9822 UNVERIFIED;
9824 sp [2] = convert_value (cfg, m_class_get_byval_arg (klass), sp [2]);
9825 mini_emit_array_store (cfg, klass, sp, TRUE);
9827 inline_costs += 1;
9828 break;
9830 case MONO_CEE_CKFINITE: {
9831 --sp;
9833 if (cfg->llvm_only) {
9834 MonoInst *iargs [1];
9836 iargs [0] = sp [0];
9837 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
9838 } else {
9839 sp [0] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.double_class), sp [0]);
9840 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9841 ins->sreg1 = sp [0]->dreg;
9842 ins->dreg = alloc_freg (cfg);
9843 ins->type = STACK_R8;
9844 MONO_ADD_INS (cfg->cbb, ins);
9846 *sp++ = mono_decompose_opcode (cfg, ins);
9849 break;
9851 case MONO_CEE_REFANYVAL: {
9852 MonoInst *src_var, *src;
9854 int klass_reg = alloc_preg (cfg);
9855 int dreg = alloc_preg (cfg);
9857 GSHAREDVT_FAILURE (il_op);
9859 MONO_INST_NEW (cfg, ins, il_op);
9860 --sp;
9861 klass = mini_get_class (method, token, generic_context);
9862 CHECK_TYPELOAD (klass);
9864 context_used = mini_class_check_context_used (cfg, klass);
9866 // FIXME:
9867 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9868 if (!src_var)
9869 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
9870 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9871 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
9873 if (context_used) {
9874 MonoInst *klass_ins;
9876 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
9877 klass, MONO_RGCTX_INFO_KLASS);
9879 // FIXME:
9880 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9881 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9882 } else {
9883 mini_emit_class_check (cfg, klass_reg, klass);
9885 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
9886 ins->type = STACK_MP;
9887 ins->klass = klass;
9888 *sp++ = ins;
9889 break;
9891 case MONO_CEE_MKREFANY: {
9892 MonoInst *loc, *addr;
9894 GSHAREDVT_FAILURE (il_op);
9896 MONO_INST_NEW (cfg, ins, il_op);
9897 --sp;
9898 klass = mini_get_class (method, token, generic_context);
9899 CHECK_TYPELOAD (klass);
9901 context_used = mini_class_check_context_used (cfg, klass);
9903 loc = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL);
9904 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9906 MonoInst *const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9907 int type_reg = alloc_preg (cfg);
9909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9910 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, m_class_offsetof_byval_arg ());
9911 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9915 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9916 ins->type = STACK_VTYPE;
9917 ins->klass = mono_defaults.typed_reference_class;
9918 *sp++ = ins;
9919 break;
9921 case MONO_CEE_LDTOKEN: {
9922 gpointer handle;
9923 MonoClass *handle_class;
9925 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9926 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9927 handle = mono_method_get_wrapper_data (method, n);
9928 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
9929 if (handle_class == mono_defaults.typehandle_class)
9930 handle = m_class_get_byval_arg ((MonoClass*)handle);
9932 else {
9933 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, cfg->error);
9934 CHECK_CFG_ERROR;
9936 if (!handle)
9937 LOAD_ERROR;
9938 mono_class_init_internal (handle_class);
9939 if (cfg->gshared) {
9940 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9941 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9942 /* This case handles ldtoken
9943 of an open type, like for
9944 typeof(Gen<>). */
9945 context_used = 0;
9946 } else if (handle_class == mono_defaults.typehandle_class) {
9947 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type_internal ((MonoType *)handle));
9948 } else if (handle_class == mono_defaults.fieldhandle_class)
9949 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
9950 else if (handle_class == mono_defaults.methodhandle_class)
9951 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
9952 else
9953 g_assert_not_reached ();
9956 if ((cfg->opt & MONO_OPT_SHARED) &&
9957 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9958 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9959 MonoInst *addr, *vtvar, *iargs [3];
9960 int method_context_used;
9962 method_context_used = mini_method_check_context_used (cfg, method);
9964 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
9966 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9967 EMIT_NEW_ICONST (cfg, iargs [1], n);
9968 if (method_context_used) {
9969 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9970 method, MONO_RGCTX_INFO_METHOD);
9971 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9972 } else {
9973 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9974 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9976 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9978 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9980 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9981 } else {
9982 if ((next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) &&
9983 ((next_ip [0] == CEE_CALL) || (next_ip [0] == CEE_CALLVIRT)) &&
9984 (cmethod = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context)) &&
9985 (cmethod->klass == mono_defaults.systemtype_class) &&
9986 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9987 MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle);
9989 mono_class_init_internal (tclass);
9990 if (context_used) {
9991 ins = mini_emit_get_rgctx_klass (cfg, context_used,
9992 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9993 } else if (cfg->compile_aot) {
9994 if (method->wrapper_type) {
9995 error_init (error); //got to do it since there are multiple conditionals below
9996 if (mono_class_get_checked (m_class_get_image (tclass), m_class_get_type_token (tclass), error) == tclass && !generic_context) {
9997 /* Special case for static synchronized wrappers */
9998 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (tclass), m_class_get_type_token (tclass), generic_context);
9999 } else {
10000 mono_error_cleanup (error); /* FIXME don't swallow the error */
10001 /* FIXME: n is not a normal token */
10002 DISABLE_AOT (cfg);
10003 EMIT_NEW_PCONST (cfg, ins, NULL);
10005 } else {
10006 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10008 } else {
10009 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, cfg->error);
10010 CHECK_CFG_ERROR;
10011 EMIT_NEW_PCONST (cfg, ins, rt);
10013 ins->type = STACK_OBJ;
10014 ins->klass = cmethod->klass;
10015 il_op = (MonoOpcodeEnum)next_ip [0];
10016 next_ip += 5;
10017 } else {
10018 MonoInst *addr, *vtvar;
10020 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
10022 if (context_used) {
10023 if (handle_class == mono_defaults.typehandle_class) {
10024 ins = mini_emit_get_rgctx_klass (cfg, context_used,
10025 mono_class_from_mono_type_internal ((MonoType *)handle),
10026 MONO_RGCTX_INFO_TYPE);
10027 } else if (handle_class == mono_defaults.methodhandle_class) {
10028 ins = emit_get_rgctx_method (cfg, context_used,
10029 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
10030 } else if (handle_class == mono_defaults.fieldhandle_class) {
10031 ins = emit_get_rgctx_field (cfg, context_used,
10032 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
10033 } else {
10034 g_assert_not_reached ();
10036 } else if (cfg->compile_aot) {
10037 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10038 } else {
10039 EMIT_NEW_PCONST (cfg, ins, handle);
10041 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10042 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10043 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10047 *sp++ = ins;
10048 break;
10050 case MONO_CEE_THROW:
10051 if (sp [-1]->type != STACK_OBJ)
10052 UNVERIFIED;
10054 MONO_INST_NEW (cfg, ins, OP_THROW);
10055 --sp;
10056 ins->sreg1 = sp [0]->dreg;
10057 cfg->cbb->out_of_line = TRUE;
10058 MONO_ADD_INS (cfg->cbb, ins);
10059 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10060 MONO_ADD_INS (cfg->cbb, ins);
10061 sp = stack_start;
10063 link_bblock (cfg, cfg->cbb, end_bblock);
10064 start_new_bblock = 1;
10065 /* This can complicate code generation for llvm since the return value might not be defined */
10066 if (COMPILE_LLVM (cfg))
10067 INLINE_FAILURE ("throw");
10068 break;
10069 case MONO_CEE_ENDFINALLY:
10070 if (!ip_in_finally_clause (cfg, ip - header->code))
10071 UNVERIFIED;
10072 /* mono_save_seq_point_info () depends on this */
10073 if (sp != stack_start)
10074 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10075 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10076 MONO_ADD_INS (cfg->cbb, ins);
10077 start_new_bblock = 1;
10080 * Control will leave the method so empty the stack, otherwise
10081 * the next basic block will start with a nonempty stack.
10083 while (sp != stack_start) {
10084 sp--;
10086 break;
10087 case MONO_CEE_LEAVE:
10088 case MONO_CEE_LEAVE_S: {
10089 GList *handlers;
10091 /* empty the stack */
10092 g_assert (sp >= stack_start);
10093 sp = stack_start;
10096 * If this leave statement is in a catch block, check for a
10097 * pending exception, and rethrow it if necessary.
10098 * We avoid doing this in runtime invoke wrappers, since those are called
10099 * by native code which excepts the wrapper to catch all exceptions.
10101 for (i = 0; i < header->num_clauses; ++i) {
10102 MonoExceptionClause *clause = &header->clauses [i];
10105 * Use <= in the final comparison to handle clauses with multiple
10106 * leave statements, like in bug #78024.
10107 * The ordering of the exception clauses guarantees that we find the
10108 * innermost clause.
10110 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((il_op == MONO_CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10111 MonoInst *exc_ins;
10112 MonoBasicBlock *dont_throw;
10115 MonoInst *load;
10117 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10120 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10122 NEW_BBLOCK (cfg, dont_throw);
10125 * Currently, we always rethrow the abort exception, despite the
10126 * fact that this is not correct. See thread6.cs for an example.
10127 * But propagating the abort exception is more important than
10128 * getting the semantics right.
10130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10131 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10132 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10134 MONO_START_BB (cfg, dont_throw);
10138 #ifdef ENABLE_LLVM
10139 cfg->cbb->try_end = (intptr_t)(ip - header->code);
10140 #endif
10142 if ((handlers = mono_find_leave_clauses (cfg, ip, target))) {
10143 GList *tmp;
10145 * For each finally clause that we exit we need to invoke the finally block.
10146 * After each invocation we need to add try holes for all the clauses that
10147 * we already exited.
10149 for (tmp = handlers; tmp; tmp = tmp->next) {
10150 MonoLeaveClause *leave = (MonoLeaveClause *) tmp->data;
10151 MonoExceptionClause *clause = leave->clause;
10153 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
10154 continue;
10156 MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
10157 MonoBasicBlock *dont_throw;
10160 * Emit instrumentation code before linking the basic blocks below as this
10161 * will alter cfg->cbb.
10163 mini_profiler_emit_call_finally (cfg, header, ip, leave->index, clause);
10165 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10166 g_assert (tblock);
10167 link_bblock (cfg, cfg->cbb, tblock);
10169 MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
10171 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10172 ins->inst_target_bb = tblock;
10173 ins->inst_eh_blocks = tmp;
10174 MONO_ADD_INS (cfg->cbb, ins);
10175 cfg->cbb->has_call_handler = 1;
10177 /* Throw exception if exvar is set */
10178 /* FIXME Do we need this for calls from catch/filter ? */
10179 NEW_BBLOCK (cfg, dont_throw);
10180 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
10181 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10182 mono_emit_jit_icall (cfg, ves_icall_thread_finish_async_abort, NULL);
10183 cfg->cbb->clause_holes = tmp;
10185 MONO_START_BB (cfg, dont_throw);
10186 cfg->cbb->clause_holes = tmp;
10188 if (COMPILE_LLVM (cfg)) {
10189 MonoBasicBlock *target_bb;
10192 * Link the finally bblock with the target, since it will
10193 * conceptually branch there.
10195 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
10196 GET_BBLOCK (cfg, target_bb, target);
10197 link_bblock (cfg, tblock, target_bb);
10202 MONO_INST_NEW (cfg, ins, OP_BR);
10203 MONO_ADD_INS (cfg->cbb, ins);
10204 GET_BBLOCK (cfg, tblock, target);
10205 link_bblock (cfg, cfg->cbb, tblock);
10206 ins->inst_target_bb = tblock;
10208 start_new_bblock = 1;
10209 break;
10213 * Mono specific opcodes
10216 case MONO_CEE_MONO_ICALL: {
10217 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10218 const MonoJitICallId jit_icall_id = (MonoJitICallId)token;
10219 MonoJitICallInfo * const info = mono_find_jit_icall_info (jit_icall_id);
10221 CHECK_STACK (info->sig->param_count);
10222 sp -= info->sig->param_count;
10224 if (token == MONO_JIT_ICALL_mono_threads_attach_coop) {
10225 MonoInst *addr;
10226 MonoBasicBlock *next_bb;
10228 if (cfg->compile_aot) {
10230 * This is called on unattached threads, so it cannot go through the trampoline
10231 * infrastructure. Use an indirect call through a got slot initialized at load time
10232 * instead.
10234 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
10235 ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
10236 } else {
10237 ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
10241 * Parts of the initlocals code needs to come after this, since it might call methods like memset.
10243 init_localsbb2 = cfg->cbb;
10244 NEW_BBLOCK (cfg, next_bb);
10245 MONO_START_BB (cfg, next_bb);
10246 } else {
10247 ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
10250 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10251 *sp++ = ins;
10253 inline_costs += CALL_COST * MIN(10, num_calls++);
10254 break;
10257 MonoJumpInfoType ldptr_type;
10259 case MONO_CEE_MONO_LDPTR_CARD_TABLE:
10260 ldptr_type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
10261 goto mono_ldptr;
10262 case MONO_CEE_MONO_LDPTR_NURSERY_START:
10263 ldptr_type = MONO_PATCH_INFO_GC_NURSERY_START;
10264 goto mono_ldptr;
10265 case MONO_CEE_MONO_LDPTR_NURSERY_BITS:
10266 ldptr_type = MONO_PATCH_INFO_GC_NURSERY_BITS;
10267 goto mono_ldptr;
10268 case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG:
10269 ldptr_type = MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG;
10270 goto mono_ldptr;
10271 case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
10272 ldptr_type = MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT;
10273 mono_ldptr:
10274 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10275 ins = mini_emit_runtime_constant (cfg, ldptr_type, NULL);
10276 *sp++ = ins;
10277 inline_costs += CALL_COST * MIN(10, num_calls++);
10278 break;
10280 case MONO_CEE_MONO_LDPTR: {
10281 gpointer ptr;
10283 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10284 ptr = mono_method_get_wrapper_data (method, token);
10285 EMIT_NEW_PCONST (cfg, ins, ptr);
10286 *sp++ = ins;
10287 inline_costs += CALL_COST * MIN(10, num_calls++);
10288 /* Can't embed random pointers into AOT code */
10289 DISABLE_AOT (cfg);
10290 break;
10292 case MONO_CEE_MONO_JIT_ICALL_ADDR:
10293 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10294 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, GUINT_TO_POINTER (token));
10295 *sp++ = ins;
10296 inline_costs += CALL_COST * MIN(10, num_calls++);
10297 break;
10299 case MONO_CEE_MONO_ICALL_ADDR: {
10300 MonoMethod *cmethod;
10301 gpointer ptr;
10303 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10305 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
10307 if (cfg->compile_aot) {
10308 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
10310 * This is generated by emit_native_wrapper () to resolve the pinvoke address
10311 * before the call, its not needed when using direct pinvoke.
10312 * This is not an optimization, but its used to avoid looking up pinvokes
10313 * on platforms which don't support dlopen ().
10315 EMIT_NEW_PCONST (cfg, ins, NULL);
10316 } else {
10317 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10319 } else {
10320 ptr = mono_lookup_internal_call (cmethod);
10321 g_assert (ptr);
10322 EMIT_NEW_PCONST (cfg, ins, ptr);
10324 *sp++ = ins;
10325 break;
10327 case MONO_CEE_MONO_VTADDR: {
10328 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10329 MonoInst *src_var, *src;
10331 --sp;
10333 // FIXME:
10334 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10335 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10336 *sp++ = src;
10337 break;
10339 case MONO_CEE_MONO_NEWOBJ: {
10340 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10341 MonoInst *iargs [2];
10343 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10344 mono_class_init_internal (klass);
10345 NEW_DOMAINCONST (cfg, iargs [0]);
10346 MONO_ADD_INS (cfg->cbb, iargs [0]);
10347 NEW_CLASSCONST (cfg, iargs [1], klass);
10348 MONO_ADD_INS (cfg->cbb, iargs [1]);
10349 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
10350 inline_costs += CALL_COST * MIN(10, num_calls++);
10351 break;
10353 case MONO_CEE_MONO_OBJADDR:
10354 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10355 --sp;
10356 MONO_INST_NEW (cfg, ins, OP_MOVE);
10357 ins->dreg = alloc_ireg_mp (cfg);
10358 ins->sreg1 = sp [0]->dreg;
10359 ins->type = STACK_MP;
10360 MONO_ADD_INS (cfg->cbb, ins);
10361 *sp++ = ins;
10362 break;
10363 case MONO_CEE_MONO_LDNATIVEOBJ:
10365 * Similar to LDOBJ, but instead load the unmanaged
10366 * representation of the vtype to the stack.
10368 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10369 --sp;
10370 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10371 g_assert (m_class_is_valuetype (klass));
10372 mono_class_init_internal (klass);
10375 MonoInst *src, *dest, *temp;
10377 src = sp [0];
10378 temp = mono_compile_create_var (cfg, m_class_get_byval_arg (klass), OP_LOCAL);
10379 temp->backend.is_pinvoke = 1;
10380 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10381 mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
10383 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10384 dest->type = STACK_VTYPE;
10385 dest->klass = klass;
10387 *sp ++ = dest;
10389 break;
10390 case MONO_CEE_MONO_RETOBJ: {
10392 * Same as RET, but return the native representation of a vtype
10393 * to the caller.
10395 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10396 g_assert (cfg->ret);
10397 g_assert (mono_method_signature_internal (method)->pinvoke);
10398 --sp;
10400 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10402 if (!cfg->vret_addr) {
10403 g_assert (cfg->ret_var_is_local);
10405 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10406 } else {
10407 EMIT_NEW_RETLOADA (cfg, ins);
10409 mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
10411 if (sp != stack_start)
10412 UNVERIFIED;
10414 mini_profiler_emit_leave (cfg, sp [0]);
10416 MONO_INST_NEW (cfg, ins, OP_BR);
10417 ins->inst_target_bb = end_bblock;
10418 MONO_ADD_INS (cfg->cbb, ins);
10419 link_bblock (cfg, cfg->cbb, end_bblock);
10420 start_new_bblock = 1;
10421 break;
10423 case MONO_CEE_MONO_SAVE_LMF:
10424 case MONO_CEE_MONO_RESTORE_LMF:
10425 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10426 break;
10427 case MONO_CEE_MONO_CLASSCONST:
10428 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10429 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10430 *sp++ = ins;
10431 inline_costs += CALL_COST * MIN(10, num_calls++);
10432 break;
10433 case MONO_CEE_MONO_NOT_TAKEN:
10434 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10435 cfg->cbb->out_of_line = TRUE;
10436 break;
10437 case MONO_CEE_MONO_TLS: {
10438 MonoTlsKey key;
10440 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10441 key = (MonoTlsKey)n;
10442 g_assert (key < TLS_KEY_NUM);
10444 ins = mono_create_tls_get (cfg, key);
10445 g_assert (ins);
10446 ins->type = STACK_PTR;
10447 *sp++ = ins;
10448 break;
10450 case MONO_CEE_MONO_DYN_CALL: {
10451 MonoCallInst *call;
10453 /* It would be easier to call a trampoline, but that would put an
10454 * extra frame on the stack, confusing exception handling. So
10455 * implement it inline using an opcode for now.
10458 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10459 if (!cfg->dyn_call_var) {
10460 cfg->dyn_call_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
10461 /* prevent it from being register allocated */
10462 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
10465 /* Has to use a call inst since local regalloc expects it */
10466 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10467 ins = (MonoInst*)call;
10468 sp -= 2;
10469 ins->sreg1 = sp [0]->dreg;
10470 ins->sreg2 = sp [1]->dreg;
10471 MONO_ADD_INS (cfg->cbb, ins);
10473 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
10474 /* OP_DYN_CALL might need to allocate a dynamically sized param area */
10475 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10477 inline_costs += CALL_COST * MIN(10, num_calls++);
10478 break;
10480 case MONO_CEE_MONO_MEMORY_BARRIER: {
10481 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10482 mini_emit_memory_barrier (cfg, (int)n);
10483 break;
10485 case MONO_CEE_MONO_ATOMIC_STORE_I4: {
10486 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10487 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
10489 sp -= 2;
10491 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
10492 ins->dreg = sp [0]->dreg;
10493 ins->sreg1 = sp [1]->dreg;
10494 ins->backend.memory_barrier_kind = (int)n;
10495 MONO_ADD_INS (cfg->cbb, ins);
10496 break;
10498 case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR: {
10499 CHECK_STACK (1);
10500 --sp;
10502 dreg = alloc_preg (cfg);
10503 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
10504 *sp++ = ins;
10505 break;
10507 case MONO_CEE_MONO_CALLI_EXTRA_ARG: {
10508 MonoInst *addr;
10509 MonoMethodSignature *fsig;
10510 MonoInst *arg;
10513 * This is the same as CEE_CALLI, but passes an additional argument
10514 * to the called method in llvmonly mode.
10515 * This is only used by delegate invoke wrappers to call the
10516 * actual delegate method.
10518 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
10520 ins = NULL;
10522 cmethod = NULL;
10523 CHECK_STACK (1);
10524 --sp;
10525 addr = *sp;
10526 fsig = mini_get_signature (method, token, generic_context, cfg->error);
10527 CHECK_CFG_ERROR;
10529 if (cfg->llvm_only)
10530 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
10532 n = fsig->param_count + fsig->hasthis + 1;
10534 CHECK_STACK (n);
10536 sp -= n;
10537 arg = sp [n - 1];
10539 if (cfg->llvm_only) {
10541 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
10542 * cconv. This is set by mono_init_delegate ().
10544 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
10545 MonoInst *callee = addr;
10546 MonoInst *call, *localloc_ins;
10547 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
10548 int low_bit_reg = alloc_preg (cfg);
10550 NEW_BBLOCK (cfg, is_gsharedvt_bb);
10551 NEW_BBLOCK (cfg, end_bb);
10553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
10554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
10555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
10557 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
10558 addr = emit_get_rgctx_sig (cfg, context_used,
10559 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
10561 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
10563 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
10564 ins->dreg = alloc_preg (cfg);
10565 ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
10566 MONO_ADD_INS (cfg->cbb, ins);
10567 localloc_ins = ins;
10568 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10569 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
10570 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
10572 call = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
10573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10575 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
10576 MONO_START_BB (cfg, is_gsharedvt_bb);
10577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
10578 ins = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
10579 ins->dreg = call->dreg;
10581 MONO_START_BB (cfg, end_bb);
10582 } else {
10583 /* Caller uses a normal calling conv */
10585 MonoInst *callee = addr;
10586 MonoInst *call, *localloc_ins;
10587 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
10588 int low_bit_reg = alloc_preg (cfg);
10590 NEW_BBLOCK (cfg, is_gsharedvt_bb);
10591 NEW_BBLOCK (cfg, end_bb);
10593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
10594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
10595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
10597 /* Normal case: callee uses a normal cconv, no conversion is needed */
10598 call = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
10599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10600 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
10601 MONO_START_BB (cfg, is_gsharedvt_bb);
10602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
10603 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
10604 MONO_ADD_INS (cfg->cbb, addr);
10606 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
10608 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
10609 ins->dreg = alloc_preg (cfg);
10610 ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
10611 MONO_ADD_INS (cfg->cbb, ins);
10612 localloc_ins = ins;
10613 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10614 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
10615 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
10617 ins = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
10618 ins->dreg = call->dreg;
10619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10621 MONO_START_BB (cfg, end_bb);
10623 } else {
10624 /* Same as CEE_CALLI */
10625 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
10627 * We pass the address to the gsharedvt trampoline in the rgctx reg
10629 MonoInst *callee = addr;
10631 addr = emit_get_rgctx_sig (cfg, context_used,
10632 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
10633 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
10634 } else {
10635 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
10639 if (!MONO_TYPE_IS_VOID (fsig->ret))
10640 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10642 CHECK_CFG_EXCEPTION;
10644 ins_flag = 0;
10645 constrained_class = NULL;
10646 break;
10648 case MONO_CEE_MONO_LDDOMAIN:
10649 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10650 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
10651 *sp++ = ins;
10652 break;
10653 case MONO_CEE_MONO_SAVE_LAST_ERROR:
10654 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10656 // Just an IL prefix, setting this flag, picked up by call instructions.
10657 save_last_error = TRUE;
10658 break;
10659 case MONO_CEE_MONO_GET_RGCTX_ARG:
10660 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10662 mono_create_rgctx_var (cfg);
10664 MONO_INST_NEW (cfg, ins, OP_MOVE);
10665 ins->dreg = alloc_dreg (cfg, STACK_PTR);
10666 ins->sreg1 = cfg->rgctx_var->dreg;
10667 ins->type = STACK_PTR;
10668 MONO_ADD_INS (cfg->cbb, ins);
10670 *sp++ = ins;
10671 break;
10673 case MONO_CEE_ARGLIST: {
10674 /* somewhat similar to LDTOKEN */
10675 MonoInst *addr, *vtvar;
10676 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.argumenthandle_class), OP_LOCAL);
10678 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10679 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10681 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10682 ins->type = STACK_VTYPE;
10683 ins->klass = mono_defaults.argumenthandle_class;
10684 *sp++ = ins;
10685 break;
10687 case MONO_CEE_CEQ:
10688 case MONO_CEE_CGT:
10689 case MONO_CEE_CGT_UN:
10690 case MONO_CEE_CLT:
10691 case MONO_CEE_CLT_UN: {
10692 MonoInst *cmp, *arg1, *arg2;
10694 sp -= 2;
10695 arg1 = sp [0];
10696 arg2 = sp [1];
10699 * The following transforms:
10700 * CEE_CEQ into OP_CEQ
10701 * CEE_CGT into OP_CGT
10702 * CEE_CGT_UN into OP_CGT_UN
10703 * CEE_CLT into OP_CLT
10704 * CEE_CLT_UN into OP_CLT_UN
10706 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10708 MONO_INST_NEW (cfg, ins, cmp->opcode);
10709 cmp->sreg1 = arg1->dreg;
10710 cmp->sreg2 = arg2->dreg;
10711 type_from_op (cfg, cmp, arg1, arg2);
10712 CHECK_TYPE (cmp);
10713 add_widen_op (cfg, cmp, &arg1, &arg2);
10714 if ((arg1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
10715 cmp->opcode = OP_LCOMPARE;
10716 else if (arg1->type == STACK_R4)
10717 cmp->opcode = OP_RCOMPARE;
10718 else if (arg1->type == STACK_R8)
10719 cmp->opcode = OP_FCOMPARE;
10720 else
10721 cmp->opcode = OP_ICOMPARE;
10722 MONO_ADD_INS (cfg->cbb, cmp);
10723 ins->type = STACK_I4;
10724 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
10725 type_from_op (cfg, ins, arg1, arg2);
10727 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
10729 * The backends expect the fceq opcodes to do the
10730 * comparison too.
10732 ins->sreg1 = cmp->sreg1;
10733 ins->sreg2 = cmp->sreg2;
10734 NULLIFY_INS (cmp);
10736 MONO_ADD_INS (cfg->cbb, ins);
10737 *sp++ = ins;
10738 break;
10740 case MONO_CEE_LDFTN: {
10741 MonoInst *argconst;
10742 MonoMethod *cil_method;
10744 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10745 CHECK_CFG_ERROR;
10747 mono_class_init_internal (cmethod->klass);
10749 mono_save_token_info (cfg, image, n, cmethod);
10751 context_used = mini_method_check_context_used (cfg, cmethod);
10753 cil_method = cmethod;
10754 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10755 emit_method_access_failure (cfg, method, cil_method);
10757 if (mono_security_core_clr_enabled ())
10758 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10761 * Optimize the common case of ldftn+delegate creation
10763 if ((sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
10764 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
10765 if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
10766 MonoInst *target_ins, *handle_ins;
10767 MonoMethod *invoke;
10768 int invoke_context_used;
10770 invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
10771 if (!invoke || !mono_method_signature_internal (invoke))
10772 LOAD_ERROR;
10774 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10776 target_ins = sp [-1];
10778 if (mono_security_core_clr_enabled ())
10779 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
10781 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10782 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10783 if (mono_method_signature_internal (invoke)->param_count == mono_method_signature_internal (cmethod)->param_count) {
10784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10785 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10789 if ((invoke_context_used == 0 || !cfg->gsharedvt) || cfg->llvm_only) {
10790 if (cfg->verbose_level > 3)
10791 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
10792 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, FALSE))) {
10793 sp --;
10794 *sp = handle_ins;
10795 CHECK_CFG_EXCEPTION;
10796 sp ++;
10797 next_ip += 5;
10798 il_op = MONO_CEE_NEWOBJ;
10799 break;
10800 } else {
10801 CHECK_CFG_ERROR;
10807 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10808 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10809 *sp++ = ins;
10811 inline_costs += CALL_COST * MIN(10, num_calls++);
10812 break;
10814 case MONO_CEE_LDVIRTFTN: {
10815 MonoInst *args [2];
10817 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10818 CHECK_CFG_ERROR;
10820 mono_class_init_internal (cmethod->klass);
10822 context_used = mini_method_check_context_used (cfg, cmethod);
10824 if (mono_security_core_clr_enabled ())
10825 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10828 * Optimize the common case of ldvirtftn+delegate creation
10830 if (previous_il_op == MONO_CEE_DUP && (sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
10831 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
10832 if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
10833 MonoInst *target_ins, *handle_ins;
10834 MonoMethod *invoke;
10835 int invoke_context_used;
10836 const gboolean is_virtual = (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) != 0;
10838 invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
10839 if (!invoke || !mono_method_signature_internal (invoke))
10840 LOAD_ERROR;
10842 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10844 target_ins = sp [-1];
10846 if (mono_security_core_clr_enabled ())
10847 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
10849 if (invoke_context_used == 0 || !cfg->gsharedvt || cfg->llvm_only) {
10850 if (cfg->verbose_level > 3)
10851 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
10852 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, is_virtual))) {
10853 sp -= 2;
10854 *sp = handle_ins;
10855 CHECK_CFG_EXCEPTION;
10856 next_ip += 5;
10857 previous_il_op = MONO_CEE_NEWOBJ;
10858 sp ++;
10859 break;
10860 } else {
10861 CHECK_CFG_ERROR;
10867 --sp;
10868 args [0] = *sp;
10870 args [1] = emit_get_rgctx_method (cfg, context_used,
10871 cmethod, MONO_RGCTX_INFO_METHOD);
10873 if (context_used)
10874 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10875 else
10876 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10878 inline_costs += CALL_COST * MIN(10, num_calls++);
10879 break;
10881 case MONO_CEE_LOCALLOC: {
10882 MonoBasicBlock *non_zero_bb, *end_bb;
10883 int alloc_ptr = alloc_preg (cfg);
10884 --sp;
10885 if (sp != stack_start)
10886 UNVERIFIED;
10887 if (cfg->method != method)
10889 * Inlining this into a loop in a parent could lead to
10890 * stack overflows which is different behavior than the
10891 * non-inlined case, thus disable inlining in this case.
10893 INLINE_FAILURE("localloc");
10895 NEW_BBLOCK (cfg, non_zero_bb);
10896 NEW_BBLOCK (cfg, end_bb);
10898 /* if size != zero */
10899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10900 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
10902 //size is zero, so result is NULL
10903 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
10904 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10906 MONO_START_BB (cfg, non_zero_bb);
10907 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10908 ins->dreg = alloc_ptr;
10909 ins->sreg1 = sp [0]->dreg;
10910 ins->type = STACK_PTR;
10911 MONO_ADD_INS (cfg->cbb, ins);
10913 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10914 if (init_locals)
10915 ins->flags |= MONO_INST_INIT;
10917 MONO_START_BB (cfg, end_bb);
10918 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
10919 ins->type = STACK_PTR;
10921 *sp++ = ins;
10922 break;
10924 case MONO_CEE_ENDFILTER: {
10925 MonoExceptionClause *clause, *nearest;
10926 int cc;
10928 --sp;
10929 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10930 UNVERIFIED;
10931 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10932 ins->sreg1 = (*sp)->dreg;
10933 MONO_ADD_INS (cfg->cbb, ins);
10934 start_new_bblock = 1;
10936 nearest = NULL;
10937 for (cc = 0; cc < header->num_clauses; ++cc) {
10938 clause = &header->clauses [cc];
10939 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10940 ((next_ip - header->code) > clause->data.filter_offset && (next_ip - header->code) <= clause->handler_offset) &&
10941 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
10942 nearest = clause;
10944 g_assert (nearest);
10945 if ((next_ip - header->code) != nearest->handler_offset)
10946 UNVERIFIED;
10948 break;
10950 case MONO_CEE_UNALIGNED_:
10951 ins_flag |= MONO_INST_UNALIGNED;
10952 /* FIXME: record alignment? we can assume 1 for now */
10953 break;
10954 case MONO_CEE_VOLATILE_:
10955 ins_flag |= MONO_INST_VOLATILE;
10956 break;
10957 case MONO_CEE_TAIL_:
10958 ins_flag |= MONO_INST_TAILCALL;
10959 cfg->flags |= MONO_CFG_HAS_TAILCALL;
10960 /* Can't inline tailcalls at this time */
10961 inline_costs += 100000;
10962 break;
10963 case MONO_CEE_INITOBJ:
10964 --sp;
10965 klass = mini_get_class (method, token, generic_context);
10966 CHECK_TYPELOAD (klass);
10967 if (mini_class_is_reference (klass))
10968 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10969 else
10970 mini_emit_initobj (cfg, *sp, NULL, klass);
10971 inline_costs += 1;
10972 break;
10973 case MONO_CEE_CONSTRAINED_:
10974 constrained_class = mini_get_class (method, token, generic_context);
10975 CHECK_TYPELOAD (constrained_class);
10976 break;
10977 case MONO_CEE_CPBLK:
10978 sp -= 3;
10979 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
10980 ins_flag = 0;
10981 inline_costs += 1;
10982 break;
10983 case MONO_CEE_INITBLK:
10984 sp -= 3;
10985 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
10986 ins_flag = 0;
10987 inline_costs += 1;
10988 break;
10989 case MONO_CEE_NO_:
10990 if (ip [2] & 1)
10991 ins_flag |= MONO_INST_NOTYPECHECK;
10992 if (ip [2] & 2)
10993 ins_flag |= MONO_INST_NORANGECHECK;
10994 /* we ignore the no-nullcheck for now since we
10995 * really do it explicitly only when doing callvirt->call
10997 break;
10998 case MONO_CEE_RETHROW: {
10999 MonoInst *load;
11000 int handler_offset = -1;
11002 for (i = 0; i < header->num_clauses; ++i) {
11003 MonoExceptionClause *clause = &header->clauses [i];
11004 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11005 handler_offset = clause->handler_offset;
11006 break;
11010 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
11012 if (handler_offset == -1)
11013 UNVERIFIED;
11015 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11016 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11017 ins->sreg1 = load->dreg;
11018 MONO_ADD_INS (cfg->cbb, ins);
11020 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11021 MONO_ADD_INS (cfg->cbb, ins);
11023 sp = stack_start;
11024 link_bblock (cfg, cfg->cbb, end_bblock);
11025 start_new_bblock = 1;
11026 break;
11028 case MONO_CEE_MONO_RETHROW: {
11029 if (sp [-1]->type != STACK_OBJ)
11030 UNVERIFIED;
11032 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11033 --sp;
11034 ins->sreg1 = sp [0]->dreg;
11035 cfg->cbb->out_of_line = TRUE;
11036 MONO_ADD_INS (cfg->cbb, ins);
11037 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11038 MONO_ADD_INS (cfg->cbb, ins);
11039 sp = stack_start;
11041 link_bblock (cfg, cfg->cbb, end_bblock);
11042 start_new_bblock = 1;
11043 /* This can complicate code generation for llvm since the return value might not be defined */
11044 if (COMPILE_LLVM (cfg))
11045 INLINE_FAILURE ("mono_rethrow");
11046 break;
11048 case MONO_CEE_SIZEOF: {
11049 guint32 val;
11050 int ialign;
11052 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) {
11053 MonoType *type = mono_type_create_from_typespec_checked (image, token, cfg->error);
11054 CHECK_CFG_ERROR;
11056 val = mono_type_size (type, &ialign);
11057 EMIT_NEW_ICONST (cfg, ins, val);
11058 } else {
11059 MonoClass *klass = mini_get_class (method, token, generic_context);
11060 CHECK_TYPELOAD (klass);
11062 if (mini_is_gsharedvt_klass (klass)) {
11063 ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_SIZEOF);
11064 ins->type = STACK_I4;
11065 } else {
11066 val = mono_type_size (m_class_get_byval_arg (klass), &ialign);
11067 EMIT_NEW_ICONST (cfg, ins, val);
11071 *sp++ = ins;
11072 break;
11074 case MONO_CEE_REFANYTYPE: {
11075 MonoInst *src_var, *src;
11077 GSHAREDVT_FAILURE (il_op);
11079 --sp;
11081 // FIXME:
11082 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11083 if (!src_var)
11084 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
11085 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11086 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (mono_defaults.typehandle_class), src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
11087 *sp++ = ins;
11088 break;
11090 case MONO_CEE_READONLY_:
11091 readonly = TRUE;
11092 break;
11094 case MONO_CEE_UNUSED56:
11095 case MONO_CEE_UNUSED57:
11096 case MONO_CEE_UNUSED70:
11097 case MONO_CEE_UNUSED:
11098 case MONO_CEE_UNUSED99:
11099 case MONO_CEE_UNUSED58:
11100 case MONO_CEE_UNUSED1:
11101 UNVERIFIED;
11103 default:
11104 g_warning ("opcode 0x%02x not handled", il_op);
11105 UNVERIFIED;
11108 if (start_new_bblock != 1)
11109 UNVERIFIED;
11111 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
11112 if (cfg->cbb->next_bb) {
11113 /* This could already be set because of inlining, #693905 */
11114 MonoBasicBlock *bb = cfg->cbb;
11116 while (bb->next_bb)
11117 bb = bb->next_bb;
11118 bb->next_bb = end_bblock;
11119 } else {
11120 cfg->cbb->next_bb = end_bblock;
11123 if (cfg->method == method && cfg->domainvar) {
11124 MonoInst *store;
11125 MonoInst *get_domain;
11127 cfg->cbb = init_localsbb;
11129 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11130 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11131 MONO_ADD_INS (cfg->cbb, store);
11132 cfg->domainvar_inited = TRUE;
11135 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11136 if (cfg->compile_aot)
11137 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11138 mono_get_got_var (cfg);
11139 #endif
11141 if (cfg->method == method && cfg->got_var)
11142 mono_emit_load_got_addr (cfg);
11144 if (init_localsbb) {
11145 cfg->cbb = init_localsbb;
11146 cfg->ip = NULL;
11147 for (i = 0; i < header->num_locals; ++i) {
11149 * Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (),
11150 * which need the trampoline code to work.
11152 if (MONO_TYPE_ISSTRUCT (header->locals [i]))
11153 cfg->cbb = init_localsbb2;
11154 else
11155 cfg->cbb = init_localsbb;
11156 emit_init_local (cfg, i, header->locals [i], init_locals);
11160 if (cfg->init_ref_vars && cfg->method == method) {
11161 /* Emit initialization for ref vars */
11162 // FIXME: Avoid duplication initialization for IL locals.
11163 for (i = 0; i < cfg->num_varinfo; ++i) {
11164 MonoInst *ins = cfg->varinfo [i];
11166 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11167 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11171 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
11172 cfg->cbb = init_localsbb;
11173 emit_push_lmf (cfg);
11176 cfg->cbb = init_localsbb;
11177 mini_profiler_emit_enter (cfg);
11179 if (seq_points) {
11180 MonoBasicBlock *bb;
11183 * Make seq points at backward branch targets interruptable.
11185 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11186 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11187 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11190 /* Add a sequence point for method entry/exit events */
11191 if (seq_points && cfg->gen_sdb_seq_points) {
11192 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11193 MONO_ADD_INS (init_localsbb, ins);
11194 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11195 MONO_ADD_INS (cfg->bb_exit, ins);
11199 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11200 * the code they refer to was dead (#11880).
11202 if (sym_seq_points) {
11203 for (i = 0; i < header->code_size; ++i) {
11204 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11205 MonoInst *ins;
11207 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11208 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11213 cfg->ip = NULL;
11215 if (cfg->method == method) {
11216 compute_bb_regions (cfg);
11217 } else {
11218 MonoBasicBlock *bb;
11219 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
11220 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
11221 bb->real_offset = inline_offset;
11225 if (inline_costs < 0) {
11226 char *mname;
11228 /* Method is too large */
11229 mname = mono_method_full_name (method, TRUE);
11230 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
11231 g_free (mname);
11234 if ((cfg->verbose_level > 2) && (cfg->method == method))
11235 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11237 goto cleanup;
11239 mono_error_exit:
11240 if (cfg->verbose_level > 3)
11241 g_print ("exiting due to error");
11243 g_assert (!is_ok (cfg->error));
11244 goto cleanup;
11246 exception_exit:
11247 if (cfg->verbose_level > 3)
11248 g_print ("exiting due to exception");
11250 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11251 goto cleanup;
11253 unverified:
11254 if (cfg->verbose_level > 3)
11255 g_print ("exiting due to invalid il");
11257 set_exception_type_from_invalid_il (cfg, method, ip);
11258 goto cleanup;
11260 cleanup:
11261 g_slist_free (class_inits);
11262 mono_basic_block_free (original_bb);
11263 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
11264 if (cfg->exception_type)
11265 return -1;
11266 else
11267 return inline_costs;
11270 static int
11271 store_membase_reg_to_store_membase_imm (int opcode)
11273 switch (opcode) {
11274 case OP_STORE_MEMBASE_REG:
11275 return OP_STORE_MEMBASE_IMM;
11276 case OP_STOREI1_MEMBASE_REG:
11277 return OP_STOREI1_MEMBASE_IMM;
11278 case OP_STOREI2_MEMBASE_REG:
11279 return OP_STOREI2_MEMBASE_IMM;
11280 case OP_STOREI4_MEMBASE_REG:
11281 return OP_STOREI4_MEMBASE_IMM;
11282 case OP_STOREI8_MEMBASE_REG:
11283 return OP_STOREI8_MEMBASE_IMM;
11284 default:
11285 g_assert_not_reached ();
11288 return -1;
/*
 * mono_op_to_op_imm:
 *
 *   Return the variant of OPCODE which takes its second operand as an
 * immediate instead of a vreg, or -1 if no such variant exists on the
 * current host/backend.
 */
int
mono_op_to_op_imm (int opcode)
{
	switch (opcode) {
	case OP_IADD:
		return OP_IADD_IMM;
	case OP_ISUB:
		return OP_ISUB_IMM;
	case OP_IDIV:
		return OP_IDIV_IMM;
	case OP_IDIV_UN:
		return OP_IDIV_UN_IMM;
	case OP_IREM:
		return OP_IREM_IMM;
	case OP_IREM_UN:
		return OP_IREM_UN_IMM;
	case OP_IMUL:
		return OP_IMUL_IMM;
	case OP_IAND:
		return OP_IAND_IMM;
	case OP_IOR:
		return OP_IOR_IMM;
	case OP_IXOR:
		return OP_IXOR_IMM;
	case OP_ISHL:
		return OP_ISHL_IMM;
	case OP_ISHR:
		return OP_ISHR_IMM;
	case OP_ISHR_UN:
		return OP_ISHR_UN_IMM;

	case OP_LADD:
		return OP_LADD_IMM;
	case OP_LSUB:
		return OP_LSUB_IMM;
	case OP_LAND:
		return OP_LAND_IMM;
	case OP_LOR:
		return OP_LOR_IMM;
	case OP_LXOR:
		return OP_LXOR_IMM;
	case OP_LSHL:
		return OP_LSHL_IMM;
	case OP_LSHR:
		return OP_LSHR_IMM;
	case OP_LSHR_UN:
		return OP_LSHR_UN_IMM;
#if SIZEOF_REGISTER == 8
	/* 64 bit mul/rem with an immediate only exist on 64 bit hosts */
	case OP_LMUL:
		return OP_LMUL_IMM;
	case OP_LREM:
		return OP_LREM_IMM;
#endif

	case OP_COMPARE:
		return OP_COMPARE_IMM;
	case OP_ICOMPARE:
		return OP_ICOMPARE_IMM;
	case OP_LCOMPARE:
		return OP_LCOMPARE_IMM;

	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;

#if defined(TARGET_X86) || defined (TARGET_AMD64)
	case OP_X86_PUSH:
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#endif
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
	/* An indirect call through an immediate address is a direct call */
	case OP_VOIDCALL_REG:
		return OP_VOIDCALL;
	case OP_CALL_REG:
		return OP_CALL;
	case OP_LCALL_REG:
		return OP_LCALL;
	case OP_FCALL_REG:
		return OP_FCALL;
	case OP_LOCALLOC:
		return OP_LOCALLOC_IMM;
	}

	return -1;
}
11386 static int
11387 stind_to_store_membase (int opcode)
11389 switch (opcode) {
11390 case MONO_CEE_STIND_I1:
11391 return OP_STOREI1_MEMBASE_REG;
11392 case MONO_CEE_STIND_I2:
11393 return OP_STOREI2_MEMBASE_REG;
11394 case MONO_CEE_STIND_I4:
11395 return OP_STOREI4_MEMBASE_REG;
11396 case MONO_CEE_STIND_I:
11397 case MONO_CEE_STIND_REF:
11398 return OP_STORE_MEMBASE_REG;
11399 case MONO_CEE_STIND_I8:
11400 return OP_STOREI8_MEMBASE_REG;
11401 case MONO_CEE_STIND_R4:
11402 return OP_STORER4_MEMBASE_REG;
11403 case MONO_CEE_STIND_R8:
11404 return OP_STORER8_MEMBASE_REG;
11405 default:
11406 g_assert_not_reached ();
11409 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_*_MEM variant of the given OP_*_MEMBASE load opcode, i.e.
 * a load from an absolute address instead of base+offset, or -1 if the
 * current backend has no such opcode.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* 8 byte absolute loads only exist on 64 bit hosts */
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an opcode which performs OPCODE directly on a membase destination
 * (a read-modify-write instruction), given that the result would otherwise be
 * stored using STORE_OPCODE. Returns -1 if the load-op-store sequence cannot
 * be fused on this backend.
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only full word (32 bit) stores can be fused with an alu op */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* Once the op writes to memory, the extra move is dead */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* Full word, 32 bit and 64 bit stores can be fused with an alu op */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* Once the op writes to memory, the extra move is dead */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which computes OPCODE directly into a membase
 * destination (x86 SETcc with a memory operand), given that the result would
 * otherwise be stored with STORE_OPCODE. Returns -1 if the combination
 * cannot be fused.
 *
 * Fix: the original let OP_ICEQ fall through into OP_CNE without a `break`.
 * That happened to be benign only because both cases test the identical
 * store_opcode condition, but it is fragile and trips
 * -Wimplicit-fallthrough; terminate each case explicitly.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		/* SETcc writes a single byte, so only a 1 byte store can be fused */
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode equivalent to OPCODE which reads its first source
 * operand directly from memory, given that the operand would otherwise be
 * loaded with LOAD_OPCODE. Returns -1 if the load cannot be folded into the
 * instruction on this backend.
 */
static int
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only pointer sized / 32 bit loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* Under ILP32, OP_LOAD_MEMBASE is only 4 bytes wide, too narrow to push */
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		/* Under ILP32, pointer sized compares are 32 bit compares */
		if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
11615 static int
11616 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
11618 #ifdef TARGET_X86
11619 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11620 return -1;
11622 switch (opcode) {
11623 case OP_COMPARE:
11624 case OP_ICOMPARE:
11625 return OP_X86_COMPARE_REG_MEMBASE;
11626 case OP_IADD:
11627 return OP_X86_ADD_REG_MEMBASE;
11628 case OP_ISUB:
11629 return OP_X86_SUB_REG_MEMBASE;
11630 case OP_IAND:
11631 return OP_X86_AND_REG_MEMBASE;
11632 case OP_IOR:
11633 return OP_X86_OR_REG_MEMBASE;
11634 case OP_IXOR:
11635 return OP_X86_XOR_REG_MEMBASE;
11637 #endif
11639 #ifdef TARGET_AMD64
11640 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
11641 switch (opcode) {
11642 case OP_ICOMPARE:
11643 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11644 case OP_IADD:
11645 return OP_X86_ADD_REG_MEMBASE;
11646 case OP_ISUB:
11647 return OP_X86_SUB_REG_MEMBASE;
11648 case OP_IAND:
11649 return OP_X86_AND_REG_MEMBASE;
11650 case OP_IOR:
11651 return OP_X86_OR_REG_MEMBASE;
11652 case OP_IXOR:
11653 return OP_X86_XOR_REG_MEMBASE;
11655 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
11656 switch (opcode) {
11657 case OP_COMPARE:
11658 case OP_LCOMPARE:
11659 return OP_AMD64_COMPARE_REG_MEMBASE;
11660 case OP_LADD:
11661 return OP_AMD64_ADD_REG_MEMBASE;
11662 case OP_LSUB:
11663 return OP_AMD64_SUB_REG_MEMBASE;
11664 case OP_LAND:
11665 return OP_AMD64_AND_REG_MEMBASE;
11666 case OP_LOR:
11667 return OP_AMD64_OR_REG_MEMBASE;
11668 case OP_LXOR:
11669 return OP_AMD64_XOR_REG_MEMBASE;
11672 #endif
11674 return -1;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but refuse (-1) the conversion for opcodes
 * which the current architecture emulates in software, since the emulation
 * helpers have no immediate variants.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Division/remainder are emulated */
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	/* Multiplication is emulated as well */
	if (opcode == OP_IMUL)
		return -1;
#endif
	return mono_op_to_op_imm (opcode);
}
11704 * mono_handle_global_vregs:
11706 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11707 * for them.
11709 void
11710 mono_handle_global_vregs (MonoCompile *cfg)
11712 gint32 *vreg_to_bb;
11713 MonoBasicBlock *bb;
11714 int i, pos;
11716 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11718 #ifdef MONO_ARCH_SIMD_INTRINSICS
11719 if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION)
11720 mono_simd_simplify_indirection (cfg);
11721 #endif
11723 /* Find local vregs used in more than one bb */
11724 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11725 MonoInst *ins = bb->code;
11726 int block_num = bb->block_num;
11728 if (cfg->verbose_level > 2)
11729 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11731 cfg->cbb = bb;
11732 for (; ins; ins = ins->next) {
11733 const char *spec = INS_INFO (ins->opcode);
11734 int regtype = 0, regindex;
11735 gint32 prev_bb;
11737 if (G_UNLIKELY (cfg->verbose_level > 2))
11738 mono_print_ins (ins);
11740 g_assert (ins->opcode >= MONO_CEE_LAST);
11742 for (regindex = 0; regindex < 4; regindex ++) {
11743 int vreg = 0;
11745 if (regindex == 0) {
11746 regtype = spec [MONO_INST_DEST];
11747 if (regtype == ' ')
11748 continue;
11749 vreg = ins->dreg;
11750 } else if (regindex == 1) {
11751 regtype = spec [MONO_INST_SRC1];
11752 if (regtype == ' ')
11753 continue;
11754 vreg = ins->sreg1;
11755 } else if (regindex == 2) {
11756 regtype = spec [MONO_INST_SRC2];
11757 if (regtype == ' ')
11758 continue;
11759 vreg = ins->sreg2;
11760 } else if (regindex == 3) {
11761 regtype = spec [MONO_INST_SRC3];
11762 if (regtype == ' ')
11763 continue;
11764 vreg = ins->sreg3;
11767 #if SIZEOF_REGISTER == 4
11768 /* In the LLVM case, the long opcodes are not decomposed */
11769 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11771 * Since some instructions reference the original long vreg,
11772 * and some reference the two component vregs, it is quite hard
11773 * to determine when it needs to be global. So be conservative.
11775 if (!get_vreg_to_inst (cfg, vreg)) {
11776 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
11778 if (cfg->verbose_level > 2)
11779 printf ("LONG VREG R%d made global.\n", vreg);
11783 * Make the component vregs volatile since the optimizations can
11784 * get confused otherwise.
11786 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
11787 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
11789 #endif
11791 g_assert (vreg != -1);
11793 prev_bb = vreg_to_bb [vreg];
11794 if (prev_bb == 0) {
11795 /* 0 is a valid block num */
11796 vreg_to_bb [vreg] = block_num + 1;
11797 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11798 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11799 continue;
11801 if (!get_vreg_to_inst (cfg, vreg)) {
11802 if (G_UNLIKELY (cfg->verbose_level > 2))
11803 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11805 switch (regtype) {
11806 case 'i':
11807 if (vreg_is_ref (cfg, vreg))
11808 mono_compile_create_var_for_vreg (cfg, mono_get_object_type (), OP_LOCAL, vreg);
11809 else
11810 mono_compile_create_var_for_vreg (cfg, mono_get_int_type (), OP_LOCAL, vreg);
11811 break;
11812 case 'l':
11813 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
11814 break;
11815 case 'f':
11816 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL, vreg);
11817 break;
11818 case 'v':
11819 case 'x':
11820 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL, vreg);
11821 break;
11822 default:
11823 g_assert_not_reached ();
11827 /* Flag as having been used in more than one bb */
11828 vreg_to_bb [vreg] = -1;
11834 /* If a variable is used in only one bblock, convert it into a local vreg */
11835 for (i = 0; i < cfg->num_varinfo; i++) {
11836 MonoInst *var = cfg->varinfo [i];
11837 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11839 switch (var->type) {
11840 case STACK_I4:
11841 case STACK_OBJ:
11842 case STACK_PTR:
11843 case STACK_MP:
11844 case STACK_VTYPE:
11845 #if SIZEOF_REGISTER == 8
11846 case STACK_I8:
11847 #endif
11848 #if !defined(TARGET_X86)
11849 /* Enabling this screws up the fp stack on x86 */
11850 case STACK_R8:
11851 #endif
11852 if (mono_arch_is_soft_float ())
11853 break;
11856 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
11857 break;
11860 /* Arguments are implicitly global */
11861 /* Putting R4 vars into registers doesn't work currently */
11862 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
11863 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (m_class_get_byval_arg (var->klass)->type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
11865 * Make that the variable's liveness interval doesn't contain a call, since
11866 * that would cause the lvreg to be spilled, making the whole optimization
11867 * useless.
11869 /* This is too slow for JIT compilation */
11870 #if 0
11871 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11872 MonoInst *ins;
11873 int def_index, call_index, ins_index;
11874 gboolean spilled = FALSE;
11876 def_index = -1;
11877 call_index = -1;
11878 ins_index = 0;
11879 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11880 const char *spec = INS_INFO (ins->opcode);
11882 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11883 def_index = ins_index;
11885 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11886 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11887 if (call_index > def_index) {
11888 spilled = TRUE;
11889 break;
11893 if (MONO_IS_CALL (ins))
11894 call_index = ins_index;
11896 ins_index ++;
11899 if (spilled)
11900 break;
11902 #endif
11904 if (G_UNLIKELY (cfg->verbose_level > 2))
11905 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11906 var->flags |= MONO_INST_IS_DEAD;
11907 cfg->vreg_to_inst [var->dreg] = NULL;
11909 break;
11914 * Compress the varinfo and vars tables so the liveness computation is faster and
11915 * takes up less space.
11917 pos = 0;
11918 for (i = 0; i < cfg->num_varinfo; ++i) {
11919 MonoInst *var = cfg->varinfo [i];
11920 if (pos < i && cfg->locals_start == i)
11921 cfg->locals_start = pos;
11922 if (!(var->flags & MONO_INST_IS_DEAD)) {
11923 if (pos < i) {
11924 cfg->varinfo [pos] = cfg->varinfo [i];
11925 cfg->varinfo [pos]->inst_c0 = pos;
11926 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11927 cfg->vars [pos].idx = pos;
11928 #if SIZEOF_REGISTER == 4
11929 if (cfg->varinfo [pos]->type == STACK_I8) {
11930 /* Modify the two component vars too */
11931 MonoInst *var1;
11933 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
11934 var1->inst_c0 = pos;
11935 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
11936 var1->inst_c0 = pos;
11938 #endif
11940 pos ++;
11943 cfg->num_varinfo = pos;
11944 if (cfg->locals_start > cfg->num_varinfo)
11945 cfg->locals_start = cfg->num_varinfo;
11949 * mono_allocate_gsharedvt_vars:
11951 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
11952 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
11954 void
11955 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
11957 int i;
11959 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
11961 for (i = 0; i < cfg->num_varinfo; ++i) {
11962 MonoInst *ins = cfg->varinfo [i];
11963 int idx;
11965 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
11966 if (i >= cfg->locals_start) {
11967 /* Local */
11968 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
11969 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
11970 ins->opcode = OP_GSHAREDVT_LOCAL;
11971 ins->inst_imm = idx;
11972 } else {
11973 /* Arg */
11974 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
11975 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
/**
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 */
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
{
	MonoBasicBlock *bb;
	char spec2 [16];
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;        /* vreg -> lvreg currently caching its value (0 = none) */
	guint32 *lvregs;               /* list of vregs with a live cache entry, for bulk invalidation */
	guint32 i, lvregs_len, lvregs_size;
	gboolean dest_has_lvreg = FALSE;
	MonoStackType stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */

	/* Map INS_INFO () spec characters to stack types for alloc_dreg () */
	stacktypes [(int)'i'] = STACK_PTR;
	stacktypes [(int)'l'] = STACK_I8;
	stacktypes [(int)'f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes [(int)'x'] = STACK_VTYPE;
#endif

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			case STACK_R8:
			case STACK_I8: {
				MonoInst *tree;

				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
					break;

				g_assert (ins->opcode == OP_REGOFFSET);

				/* Point the two 32 bit component vars at the halves of the stack slot */
				tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
				break;
			}
			default:
				break;
			}
		}
	}
#endif

	if (cfg->compute_gc_maps) {
		/* registers need liveness info even for !non refs */
		for (i = 0; i < cfg->num_varinfo; i++) {
			MonoInst *ins = cfg->varinfo [i];

			if (ins->opcode == OP_REGVAR)
				ins->flags |= MONO_INST_GC_TRACK;
		}
	}

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs_size = 1024;
	lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
	lvregs_len = 0;

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array: the cache is only valid within one bblock */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;
		lvregs_len = 0;

		cfg->cbb = bb;
		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)
				continue;

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = (MonoInst *)ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					}
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					} else
						NOT_IMPLEMENTED;
				} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
					/* gsharedvt arg passed by ref */
					g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);

					ins->opcode = OP_LOAD_MEMBASE;
					ins->inst_basereg = var->inst_basereg;
					ins->inst_offset = var->inst_offset;
				} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
					MonoInst *load, *load2, *load3;
					int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
					int reg1, reg2, reg3;
					MonoInst *info_var = cfg->gsharedvt_info_var;
					MonoInst *locals_var = cfg->gsharedvt_locals_var;

					/*
					 * gsharedvt local.
					 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
					 */

					g_assert (var->opcode == OP_GSHAREDVT_LOCAL);

					g_assert (info_var);
					g_assert (locals_var);

					/* Mark the instruction used to compute the locals var as used */
					cfg->gsharedvt_locals_var_ins = NULL;

					/* Load the offset */
					if (info_var->opcode == OP_REGOFFSET) {
						reg1 = alloc_ireg (cfg);
						NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
					} else if (info_var->opcode == OP_REGVAR) {
						load = NULL;
						reg1 = info_var->dreg;
					} else {
						g_assert_not_reached ();
					}
					reg2 = alloc_ireg (cfg);
					NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
					/* Load the locals area address */
					reg3 = alloc_ireg (cfg);
					if (locals_var->opcode == OP_REGOFFSET) {
						NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
					} else if (locals_var->opcode == OP_REGVAR) {
						NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
					} else {
						g_assert_not_reached ();
					}
					/* Compute the address */
					ins->opcode = OP_PADD;
					ins->sreg1 = reg3;
					ins->sreg2 = reg2;

					mono_bblock_insert_before_ins (bb, ins, load3);
					mono_bblock_insert_before_ins (bb, load3, load2);
					if (load)
						mono_bblock_insert_before_ins (bb, load2, load);
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;
				}

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);
			}

			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();
			}

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * src register.
			 * FIXME:
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				/* Swap dreg/sreg2 so the generic dreg/sreg handling below works;
				 * swapped back at the end of the iteration (see 'if (store)'). */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < num_sregs; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");
			}

			/***************/
			/*    DREG     */
			/***************/
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;
			int dreg_using_dest_to_membase_op = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						dreg_using_dest_to_membase_op = ins->dreg;
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;
					}
					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;
					}

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

#if SIZEOF_REGISTER != 8
					if (regtype == 'l') {
						/* Store the two 32 bit halves separately */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					}
					else
#endif
					{
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;
						}
					}
				}

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;
				}

				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
					MonoInst *tmp;

					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
					tmp->inst_c1 = dreg;
					mono_bblock_insert_after_ins (bb, def_ins, tmp);
				}
			}

			/************/
			/*  SREGS   */
			/************/
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			/* Walk all 3 possible source slots; unused slots have spec ' ' */
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;

						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
							MonoInst *tmp;

							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
							/* var->dreg is a hreg */
							tmp->inst_c1 = sreg;
							mono_bblock_insert_after_ins (bb, ins, tmp);
						}

						continue;
					}

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;
					}

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;
								}
								g_assert (sreg != -1);
								if (var->dreg == dreg_using_dest_to_membase_op) {
									if (cfg->verbose_level > 2)
										printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg);
								} else {
									vreg_to_lvreg [var->dreg] = sreg;
								}

								/* Grow the invalidation list if needed */
								if (lvregs_len >= lvregs_size) {
									guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
									memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
									lvregs = new_lvregs;
									lvregs_size *= 2;
								}
								lvregs [lvregs_len ++] = var->dreg;
							}
						}

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

#if SIZEOF_REGISTER != 8
						if (regtype == 'l') {
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
						else
#endif
						{
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
					}

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;
					}

					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
						MonoInst *tmp;

						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
						tmp->inst_c1 = var->dreg;
						mono_bblock_insert_after_ins (bb, ins, tmp);
					}
				}
			}
			mono_inst_set_src_registers (ins, sregs);

			/* Deferred from the dreg handling above: now that the sregs are done,
			 * record the lvreg caching the stored value. */
			if (dest_has_lvreg) {
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				if (lvregs_len >= lvregs_size) {
					guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
					memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
					lvregs = new_lvregs;
					lvregs_size *= 2;
				}
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;
			}

			if (store) {
				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
			}

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array: calls clobber the lvregs */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);
			}

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);
		}

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;
				}

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;
				}
			}
		}
	}

	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			}
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
			}
		}
	}

	if (cfg->gsharedvt_locals_var_ins) {
		/* Nullify if unused */
		cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
		cfg->gsharedvt_locals_var_ins->inst_imm = 0;
	}

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
}
12606 * FIXME:
12607 * - use 'iadd' instead of 'int_add'
12608 * - handling ovf opcodes: decompose in method_to_ir.
12609 * - unify iregs/fregs
12610 * -> partly done, the missing parts are:
12611 * - a more complete unification would involve unifying the hregs as well, so
12612 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12613 * would no longer map to the machine hregs, so the code generators would need to
12614 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12615 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12616 * fp/non-fp branches speeds it up by about 15%.
12617 * - use sext/zext opcodes instead of shifts
12618 * - add OP_ICALL
12619 * - get rid of TEMPLOADs if possible and use vregs instead
12620 * - clean up usage of OP_P/OP_ opcodes
12621 * - cleanup usage of DUMMY_USE
12622 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12623 * stack
12624 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12625 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12626 * - make sure handle_stack_args () is called before the branch is emitted
12627 * - when the new IR is done, get rid of all unused stuff
12628 * - COMPARE/BEQ as separate instructions or unify them ?
12629 * - keeping them separate allows specialized compare instructions like
12630 * compare_imm, compare_membase
12631 * - most back ends unify fp compare+branch, fp compare+ceq
12632 * - integrate mono_save_args into inline_method
12633  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
12634 * - handle long shift opts on 32 bit platforms somehow: they require
12635 * 3 sregs (2 for arg1 and 1 for arg2)
12636 * - make byref a 'normal' type.
12637 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12638 * variable if needed.
12639 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12640 * like inline_method.
12641 * - remove inlining restrictions
12642 * - fix LNEG and enable cfold of INEG
12643 * - generalize x86 optimizations like ldelema as a peephole optimization
12644 * - add store_mem_imm for amd64
12645 * - optimize the loading of the interruption flag in the managed->native wrappers
12646 * - avoid special handling of OP_NOP in passes
12647 * - move code inserting instructions into one function/macro.
12648 * - try a coalescing phase after liveness analysis
12649 * - add float -> vreg conversion + local optimizations on !x86
12650 * - figure out how to handle decomposed branches during optimizations, ie.
12651 * compare+branch, op_jump_table+op_br etc.
12652 * - promote RuntimeXHandles to vregs
12653 * - vtype cleanups:
12654 * - add a NEW_VARLOADA_VREG macro
12655 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12656 * accessing vtype fields.
12657 * - get rid of I8CONST on 64 bit platforms
12658 * - dealing with the increase in code size due to branches created during opcode
12659 * decomposition:
12660 * - use extended basic blocks
12661 * - all parts of the JIT
12662 * - handle_global_vregs () && local regalloc
12663 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12664 * - sources of increase in code size:
12665 * - vtypes
12666 * - long compares
12667 * - isinst and castclass
12668 * - lvregs not allocated to global registers even if used multiple times
12669 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12670 * meaningful.
12671 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12672 * - add all micro optimizations from the old JIT
12673 * - put tree optimizations into the deadce pass
12674 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12675 * specific function.
12676 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12677 * fcompare + branchCC.
12678 * - create a helper function for allocating a stack slot, taking into account
12679 * MONO_CFG_HAS_SPILLUP.
12680 * - merge r68207.
12681 * - optimize mono_regstate2_alloc_int/float.
12682 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12683 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12684 * parts of the tree could be separated by other instructions, killing the tree
12685 * arguments, or stores killing loads etc. Also, should we fold loads into other
12686 * instructions if the result of the load is used multiple times ?
12687 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12688 * - LAST MERGE: 108395.
12689 * - when returning vtypes in registers, generate IR and append it to the end of the
12690 * last bb instead of doing it in the epilog.
12691 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12696 NOTES
12697 -----
12699 - When to decompose opcodes:
12700 - earlier: this makes some optimizations hard to implement, since the low level IR
12701     no longer contains the necessary information. But it is easier to do.
12702 - later: harder to implement, enables more optimizations.
12703 - Branches inside bblocks:
12704 - created when decomposing complex opcodes.
12705 - branches to another bblock: harmless, but not tracked by the branch
12706 optimizations, so need to branch to a label at the start of the bblock.
12707   - branches to inside the same bblock: very problematic, trips up the local
12708     reg allocator. Can be fixed by splitting the current bblock, but that is a
12709     complex operation, since some local vregs can become global vregs etc.
12710 - Local/global vregs:
12711 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12712 local register allocator.
12713 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12714 structure, created by mono_create_var (). Assigned to hregs or the stack by
12715 the global register allocator.
12716 - When to do optimizations like alu->alu_imm:
12717 - earlier -> saves work later on since the IR will be smaller/simpler
12718 - later -> can work on more instructions
12719 - Handling of valuetypes:
12720 - When a vtype is pushed on the stack, a new temporary is created, an
12721 instruction computing its address (LDADDR) is emitted and pushed on
12722 the stack. Need to optimize cases when the vtype is used immediately as in
12723 argument passing, stloc etc.
12724 - Instead of the to_end stuff in the old JIT, simply call the function handling
12725 the values on the stack before emitting the last instruction of the bb.
12727 #else /* !DISABLE_JIT */
12729 MONO_EMPTY_SOURCE_FILE (method_to_ir);
12730 #endif /* !DISABLE_JIT */