/**
 * \file
 * Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>
#include <glib.h>
#include <mono/utils/mono-compiler.h>
#include "mini.h"

#ifndef DISABLE_JIT

#include <signal.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <math.h>
#include <string.h>
#include <ctype.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif

#include <mono/utils/memcheck.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/exception-internals.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/metadata/mono-basic-block.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/mono-utils-debug.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/icall-decl.h>
#include "mono/metadata/icall-signatures.h"

#include "trace.h"

#include "ir-emit.h"

#include "jit-icalls.h"
#include "jit.h"
#include "debugger-agent.h"
#include "seq-points.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#include "llvmonly-runtime.h"
#include "mono/utils/mono-tls-inline.h"
#define BRANCH_COST 10
#define CALL_COST 10
/* Used for the JIT */
#define INLINE_LENGTH_LIMIT 20
/*
 * The aot and jit inline limits should be different,
 * since aot sees the whole program so we can let opt inline methods for us,
 * while the jit only sees one method, so we have to inline things ourselves.
 */
/* Used by LLVM AOT */
#define LLVM_AOT_INLINE_LENGTH_LIMIT 30
/* Used by the LLVM JIT */
#define LLVM_JIT_INLINE_LENGTH_LIMIT 100
static const gboolean debug_tailcall = FALSE;         // logging
static const gboolean debug_tailcall_try_all = FALSE; // consider any call followed by ret

gboolean
mono_tailcall_print_enabled (void)
{
	return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL);
}

void
mono_tailcall_print (const char *format, ...)
{
	if (!mono_tailcall_print_enabled ())
		return;

	va_list args;
	va_start (args, format);
	g_printv (format, args);
	va_end (args);
}
/* These have 'cfg' as an implicit argument */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
		inline_failure (cfg, msg); \
		goto exception_exit; \
	} \
} while (0)

#define CHECK_CFG_EXCEPTION do {\
	if (cfg->exception_type != MONO_EXCEPTION_NONE) \
		goto exception_exit; \
} while (0)

#define FIELD_ACCESS_FAILURE(method, field) do { \
	field_access_failure ((cfg), (method), (field)); \
	goto exception_exit; \
} while (0)

#define GENERIC_SHARING_FAILURE(opcode) do { \
	if (cfg->gshared) { \
		gshared_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
} while (0)

#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
} while (0)

#define OUT_OF_MEMORY_FAILURE do { \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
	mono_error_set_out_of_memory (cfg->error, ""); \
	goto exception_exit; \
} while (0)

#define DISABLE_AOT(cfg) do { \
	if ((cfg)->verbose_level >= 2) \
		printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
	(cfg)->disable_aot = TRUE; \
} while (0)

#define LOAD_ERROR do { \
	break_on_unverified (); \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
	goto exception_exit; \
} while (0)

#define TYPE_LOAD_ERROR(klass) do { \
	cfg->exception_ptr = klass; \
	LOAD_ERROR; \
} while (0)

#define CHECK_CFG_ERROR do {\
	if (!is_ok (cfg->error)) { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
		goto mono_error_exit; \
	} \
} while (0)
static int stind_to_store_membase (int opcode);

int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);

static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
			  guchar *ip, guint real_offset, gboolean inline_always);
static MonoInst*
convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins);

/* helper methods signatures */

/* type loading helpers */
static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1")
static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1");
/*
 * Instruction metadata
 */
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P
#define LREG IREG
#else
#define LREG 'l'
#endif
/* keep in sync with the enum in mini.h */
const char
mini_ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3

#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
 */
const gint8 mini_ins_sreg_counts[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
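
/*
 * Illustrative sketch (not part of the build): given a hypothetical
 * mini-ops.h entry such as
 *
 *   MINI_OP(OP_IADD, "int_add", IREG, IREG, IREG)
 *
 * the first MINI_OP definition above expands it to the four chars
 * 'i', 'i', 'i', ' ' in mini_ins_info (dest/src1/src2/unused src3),
 * while the second expands it to 2 in mini_ins_sreg_counts, since
 * src2 is the last non-NONE source register.
 */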
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
	return alloc_ireg (cfg);
}

guint32
mono_alloc_lreg (MonoCompile *cfg)
{
	return alloc_lreg (cfg);
}

guint32
mono_alloc_freg (MonoCompile *cfg)
{
	return alloc_freg (cfg);
}

guint32
mono_alloc_preg (MonoCompile *cfg)
{
	return alloc_preg (cfg);
}

guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
	return alloc_dreg (cfg, stack_type);
}

/**
 * mono_alloc_ireg_ref:
 *
 * Allocate an IREG, and mark it as holding a GC ref.
 */
guint32
mono_alloc_ireg_ref (MonoCompile *cfg)
{
	return alloc_ireg_ref (cfg);
}

/**
 * mono_alloc_ireg_mp:
 *
 * Allocate an IREG, and mark it as holding a managed pointer.
 */
guint32
mono_alloc_ireg_mp (MonoCompile *cfg)
{
	return alloc_ireg_mp (cfg);
}

/**
 * mono_alloc_ireg_copy:
 *
 * Allocate an IREG with the same GC type as VREG.
 */
guint32
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
{
	if (vreg_is_ref (cfg, vreg))
		return alloc_ireg_ref (cfg);
	else if (vreg_is_mp (cfg, vreg))
		return alloc_ireg_mp (cfg);
	else
		return alloc_ireg (cfg);
}
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
	if (type->byref)
		return OP_MOVE;

	type = mini_get_underlying_type (type);
handle_enum:
	switch (type->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
		return OP_MOVE;
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
		return OP_MOVE;
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return OP_MOVE;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return OP_MOVE;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return OP_MOVE;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
		return OP_MOVE;
#else
		return OP_LMOVE;
#endif
	case MONO_TYPE_R4:
		return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
	case MONO_TYPE_R8:
		return OP_FMOVE;
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass)) {
			type = mono_class_enum_basetype_internal (type->data.klass);
			goto handle_enum;
		}
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
			return OP_XMOVE;
		return OP_VMOVE;
	case MONO_TYPE_TYPEDBYREF:
		return OP_VMOVE;
	case MONO_TYPE_GENERICINST:
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
			return OP_XMOVE;
		type = m_class_get_byval_arg (type->data.generic_class->container_class);
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (cfg->gshared);
		if (mini_type_var_is_vt (type))
			return OP_VMOVE;
		else
			return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
	default:
		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
	}
	return -1;
}
void
mono_print_bb (MonoBasicBlock *bb, const char *msg)
{
	int i;
	MonoInst *tree;
	GString *str = g_string_new ("");

	g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	g_string_append_printf (str, ", OUT: ");
	for (i = 0; i < bb->out_count; ++i)
		g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	g_string_append_printf (str, " ]\n");

	g_print ("%s", str->str);
	g_string_free (str, TRUE);

	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
}
static MONO_NEVER_INLINE gboolean
break_on_unverified (void)
{
	if (mini_debug_options.break_on_unverified) {
		G_BREAKPOINT ();
		return TRUE;
	}
	return FALSE;
}

static void
clear_cfg_error (MonoCompile *cfg)
{
	mono_error_cleanup (cfg->error);
	error_init (cfg->error);
}

static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
{
	char *method_fname = mono_method_full_name (method, TRUE);
	char *field_fname = mono_field_full_name (field);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
	mono_error_set_generic_error (cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
	g_free (method_fname);
	g_free (field_fname);
}

static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
{
	if (cfg->verbose_level >= 2)
		printf ("inline failed: %s\n", msg);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}

static MONO_NEVER_INLINE void
gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
	if (cfg->verbose_level > 2)
		printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}

static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
	cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
	if (cfg->verbose_level >= 2)
		printf ("%s\n", cfg->exception_message);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}

void
mini_set_inline_failure (MonoCompile *cfg, const char *msg)
{
	if (cfg->verbose_level >= 2)
		printf ("inline failed: %s\n", msg);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
/*
 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2) \
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	break_on_unverified (); \
	goto unverified; \
} while (0)

#define GET_BBLOCK(cfg,tblock,ip) do { \
	(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
	if (!(tblock)) { \
		if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
		NEW_BBLOCK (cfg, (tblock)); \
		(tblock)->cil_code = (ip); \
		ADD_BBLOCK (cfg, (tblock)); \
	} \
} while (0)
/* Emit conversions so both operands of a binary opcode are of the same type */
static void
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
{
	MonoInst *arg1 = *arg1_ref;
	MonoInst *arg2 = *arg2_ref;

	if (cfg->r4fp &&
		((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
		 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
		MonoInst *conv;

		/* Mixing r4/r8 is allowed by the spec */
		if (arg1->type == STACK_R4) {
			int dreg = alloc_freg (cfg);

			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
			conv->type = STACK_R8;
			ins->sreg1 = dreg;
			*arg1_ref = conv;
		}
		if (arg2->type == STACK_R4) {
			int dreg = alloc_freg (cfg);

			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
			conv->type = STACK_R8;
			ins->sreg2 = dreg;
			*arg2_ref = conv;
		}
	}

#if SIZEOF_REGISTER == 8
	/* FIXME: Need to add many more cases */
	if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
		MonoInst *widen;

		int dr = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
		(ins)->sreg2 = widen->dreg;
	}
#endif
}
#define ADD_BINOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	sp -= 2; \
	ins->sreg1 = sp [0]->dreg; \
	ins->sreg2 = sp [1]->dreg; \
	type_from_op (cfg, ins, sp [0], sp [1]); \
	CHECK_TYPE (ins); \
	/* Have to insert a widening op */ \
	add_widen_op (cfg, ins, &sp [0], &sp [1]); \
	ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	*sp++ = mono_decompose_opcode ((cfg), (ins)); \
} while (0)

#define ADD_UNOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	sp--; \
	ins->sreg1 = sp [0]->dreg; \
	type_from_op (cfg, ins, sp [0], NULL); \
	CHECK_TYPE (ins); \
	(ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	*sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)

#define ADD_BINCOND(next_block) do { \
	MonoInst *cmp; \
	sp -= 2; \
	MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
	cmp->sreg1 = sp [0]->dreg; \
	cmp->sreg2 = sp [1]->dreg; \
	add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
	type_from_op (cfg, cmp, sp [0], sp [1]); \
	CHECK_TYPE (cmp); \
	type_from_op (cfg, ins, sp [0], sp [1]); \
	ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
	GET_BBLOCK (cfg, tblock, target); \
	link_bblock (cfg, cfg->cbb, tblock); \
	ins->inst_true_bb = tblock; \
	if ((next_block)) { \
		link_bblock (cfg, cfg->cbb, (next_block)); \
		ins->inst_false_bb = (next_block); \
		start_new_bblock = 1; \
	} else { \
		GET_BBLOCK (cfg, tblock, next_ip); \
		link_bblock (cfg, cfg->cbb, tblock); \
		ins->inst_false_bb = tblock; \
		start_new_bblock = 2; \
	} \
	if (sp != stack_start) { \
		handle_stack_args (cfg, stack_start, sp - stack_start); \
		CHECK_UNVERIFIABLE (cfg); \
	} \
	MONO_ADD_INS (cfg->cbb, cmp); \
	MONO_ADD_INS (cfg->cbb, ins); \
} while (0)
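
/*
 * Note: like the failure macros earlier in this file, ADD_BINOP, ADD_UNOP and
 * ADD_BINCOND rely on locals of the enclosing function ('cfg', 'ins', 'sp',
 * and for ADD_BINCOND also 'tblock', 'target', 'next_ip', 'stack_start' and
 * 'start_new_bblock'), so they can only be expanded where those names are in
 * scope.
 */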
/**
 * link_bblock: Links two basic blocks
 *
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block and the 'to' argument is the block
 * control flow reaches after 'from'.
 */
static void
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	MonoBasicBlock **newa;
	int i, found;

#if 0
	if (from->cil_code) {
		if (to->cil_code)
			printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
		else
			printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
	} else {
		if (to->cil_code)
			printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
		else
			printf ("edge from entry to exit\n");
	}
#endif

	found = FALSE;
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
		for (i = 0; i < from->out_count; ++i) {
			newa [i] = from->out_bb [i];
		}
		newa [i] = to;
		from->out_count++;
		from->out_bb = newa;
	}

	found = FALSE;
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
		for (i = 0; i < to->in_count; ++i) {
			newa [i] = to->in_bb [i];
		}
		newa [i] = from;
		to->in_count++;
		to->in_bb = newa;
	}
}

void
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	link_bblock (cfg, from, to);
}
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region);

static void
mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end)
{
	MonoBasicBlock *bb = cfg->cil_offset_to_bb [start];

	//start must exist in cil_offset_to_bb as those are il offsets used by EH which should have GET_BBLOCK early.
	g_assert (bb);

	if (cfg->verbose_level > 1)
		g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num);
	for (; bb && bb->real_offset < end; bb = bb->next_bb) {
		//no one claimed this bb, take it.
		if (bb->region == -1) {
			bb->region = region;
			continue;
		}

		//current region is an early handler, bail
		if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) {
			continue;
		}

		//current region is a try, only overwrite if new region is a handler
		if ((region & (0xf << 4)) != MONO_REGION_TRY) {
			bb->region = region;
		}
	}

	if (cfg->spvars)
		mono_create_spvar_for_region (cfg, region);
}

static void
compute_bb_regions (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	MonoMethodHeader *header = cfg->header;
	int i;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
		bb->region = -1;

	for (i = 0; i < header->num_clauses; ++i) {
		MonoExceptionClause *clause = &header->clauses [i];

		if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
			mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset);

		guint handler_region;
		if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
			handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
		else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
			handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
		else
			handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;

		mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len);
		mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len);
	}

	if (cfg->verbose_level > 2) {
		MonoBasicBlock *bb;
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
	}
}
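
/*
 * Region encoding sketch, inferred from the code above: bits 8 and up hold
 * the 1-based clause index, bits 4-7 hold the region kind (MONO_REGION_TRY,
 * _FILTER, _FINALLY, _FAULT or _CATCH), the low bits carry the original
 * clause flags, and -1 marks a basic block that belongs to no clause.
 */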
static gboolean
ip_in_finally_clause (MonoCompile *cfg, int offset)
{
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	int i;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
			continue;

		if (MONO_OFFSET_IN_HANDLER (clause, offset))
			return TRUE;
	}
	return FALSE;
}

/* Find clauses between ip and target, from inner to outer */
static GList*
mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target)
{
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	int i;
	GList *res = NULL;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause));
			leave->index = i;
			leave->clause = clause;

			res = g_list_append_mempool (cfg->mempool, res, leave);
		}
	}
	return res;
}
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region)
{
	MonoInst *var;

	var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
	if (var)
		return;

	var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;

	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
}

MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
	return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}

static MonoInst*
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
{
	MonoInst *var;

	var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
	if (var)
		return var;

	var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;

	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);

	return var;
}
/*
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 */
void
mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
{
	MonoClass *klass;

	type = mini_get_underlying_type (type);
	inst->klass = klass = mono_class_from_mono_type_internal (type);
	if (type->byref) {
		inst->type = STACK_MP;
		return;
	}

handle_enum:
	switch (type->type) {
	case MONO_TYPE_VOID:
		inst->type = STACK_INV;
		return;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		inst->type = STACK_I4;
		return;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
		return;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		return;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		inst->type = STACK_I8;
		return;
	case MONO_TYPE_R4:
		inst->type = cfg->r4_stack_type;
		break;
	case MONO_TYPE_R8:
		inst->type = STACK_R8;
		return;
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass)) {
			type = mono_class_enum_basetype_internal (type->data.klass);
			goto handle_enum;
		} else {
			inst->klass = klass;
			inst->type = STACK_VTYPE;
			return;
		}
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
		return;
	case MONO_TYPE_GENERICINST:
		type = m_class_get_byval_arg (type->data.generic_class->container_class);
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (cfg->gshared);
		if (mini_is_gsharedvt_type (type)) {
			g_assert (cfg->gsharedvt);
			inst->type = STACK_VTYPE;
		} else {
			mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
		}
		return;
	default:
		g_error ("unknown type 0x%02x in eval stack type", type->type);
	}
}
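
/*
 * For example, following the switch above: a value of type System.Int16 is
 * widened to STACK_I4 on the eval stack, a System.String becomes STACK_OBJ,
 * and a non-enum struct becomes STACK_VTYPE with inst->klass recording the
 * concrete class.
 */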
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 */
#define IF_P8(v) (SIZEOF_VOID_P == 8 ? v : STACK_INV)
#define IF_P8_I8 IF_P8(STACK_I8)
#define IF_P8_PTR IF_P8(STACK_PTR)

static const char
bin_num_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
	{STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
	{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
};

static const char
neg_table [] = {
	STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
};

/* reduce the size of this table */
static const char
bin_int_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, IF_P8_I8, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, IF_P8_I8, STACK_I8, IF_P8_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, IF_P8_PTR, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};

#define P1 (SIZEOF_VOID_P == 8)
static const char
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/*	Inv i  L  p  F  &  O  vt r4 */
	{0},
	{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
	{0, 0, 1,P1, 0, 0, 0, 0}, /* L, int64 */
	{0, 1,P1, 1, 0, 2, 4, 0}, /* p, ptr */
	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
	{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
	{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
	{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
};
#undef P1

/* reduce the size of this table */
static const char
shift_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode.
 */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
static const guint16
binops_op_map [STACK_MAX] = {
	0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
};

/* handles from CEE_NEG to CEE_CONV_U8 */
static const guint16
unops_op_map [STACK_MAX] = {
	0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
};

/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
static const guint16
ovfops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
};

/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
static const guint16
ovf2ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
};

/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
static const guint16
ovf3ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
};

/* handles from CEE_BEQ to CEE_BLT_UN */
static const guint16
beqops_op_map [STACK_MAX] = {
	0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
};

/* handles from CEE_CEQ to CEE_CLT_UN */
static const guint16
ceqops_op_map [STACK_MAX] = {
	0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
};
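
/*
 * How the maps are used: the tables store deltas rather than opcodes, so
 * "ins->opcode += binops_op_map [ins->type]" rewrites a generic CIL opcode
 * into its type-specific form. E.g. with STACK_I4 the delta is
 * OP_IADD-CEE_ADD, turning CEE_ADD into OP_IADD; because the generic and
 * type-specific opcode ranges are laid out in parallel, the same delta also
 * turns CEE_SUB into OP_ISUB, and so on through CEE_SHR_UN.
 */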
/*
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 */
static void
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
	switch (ins->opcode) {
	/* binops */
	case MONO_CEE_ADD:
	case MONO_CEE_SUB:
	case MONO_CEE_MUL:
	case MONO_CEE_DIV:
	case MONO_CEE_REM:
		/* FIXME: check unverifiable args for STACK_MP */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case MONO_CEE_DIV_UN:
	case MONO_CEE_REM_UN:
	case MONO_CEE_AND:
	case MONO_CEE_OR:
	case MONO_CEE_XOR:
		ins->type = bin_int_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case MONO_CEE_SHL:
	case MONO_CEE_SHR:
	case MONO_CEE_SHR_UN:
		ins->type = shift_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
	case OP_ICOMPARE:
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE;
		else if (src1->type == STACK_R4)
			ins->opcode = OP_RCOMPARE;
		else if (src1->type == STACK_R8)
			ins->opcode = OP_FCOMPARE;
		else
			ins->opcode = OP_ICOMPARE;
		break;
	case OP_ICOMPARE_IMM:
		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE_IMM;
		break;
	case MONO_CEE_BEQ:
	case MONO_CEE_BGE:
	case MONO_CEE_BGT:
	case MONO_CEE_BLE:
	case MONO_CEE_BLT:
	case MONO_CEE_BNE_UN:
	case MONO_CEE_BGE_UN:
	case MONO_CEE_BGT_UN:
	case MONO_CEE_BLE_UN:
	case MONO_CEE_BLT_UN:
		ins->opcode += beqops_op_map [src1->type];
		break;
	case OP_CEQ:
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		break;
	case OP_CGT:
	case OP_CGT_UN:
	case OP_CLT:
	case OP_CLT_UN:
		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		break;
	/* unops */
	case MONO_CEE_NEG:
		ins->type = neg_table [src1->type];
		ins->opcode += unops_op_map [ins->type];
		break;
	case MONO_CEE_NOT:
		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
			ins->type = src1->type;
		else
			ins->type = STACK_INV;
		ins->opcode += unops_op_map [ins->type];
		break;
	case MONO_CEE_CONV_I1:
	case MONO_CEE_CONV_I2:
	case MONO_CEE_CONV_I4:
	case MONO_CEE_CONV_U4:
		ins->type = STACK_I4;
		ins->opcode += unops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_R_UN:
		ins->type = STACK_R8;
		switch (src1->type) {
		case STACK_I4:
		case STACK_PTR:
			ins->opcode = OP_ICONV_TO_R_UN;
			break;
		case STACK_I8:
			ins->opcode = OP_LCONV_TO_R_UN;
			break;
		case STACK_R8:
			ins->opcode = OP_FMOVE;
			break;
		}
		break;
	case MONO_CEE_CONV_OVF_I1:
	case MONO_CEE_CONV_OVF_U1:
	case MONO_CEE_CONV_OVF_I2:
	case MONO_CEE_CONV_OVF_U2:
	case MONO_CEE_CONV_OVF_I4:
	case MONO_CEE_CONV_OVF_U4:
		ins->type = STACK_I4;
		ins->opcode += ovf3ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_OVF_I_UN:
	case MONO_CEE_CONV_OVF_U_UN:
		ins->type = STACK_PTR;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_OVF_I1_UN:
	case MONO_CEE_CONV_OVF_I2_UN:
	case MONO_CEE_CONV_OVF_I4_UN:
	case MONO_CEE_CONV_OVF_U1_UN:
	case MONO_CEE_CONV_OVF_U2_UN:
	case MONO_CEE_CONV_OVF_U4_UN:
		ins->type = STACK_I4;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_U:
		ins->type = STACK_PTR;
		switch (src1->type) {
		case STACK_I4:
			ins->opcode = OP_ICONV_TO_U;
			break;
		case STACK_PTR:
		case STACK_MP:
		case STACK_OBJ:
#if TARGET_SIZEOF_VOID_P == 8
			ins->opcode = OP_LCONV_TO_U;
#else
			ins->opcode = OP_MOVE;
#endif
			break;
		case STACK_I8:
			ins->opcode = OP_LCONV_TO_U;
			break;
		case STACK_R8:
			ins->opcode = OP_FCONV_TO_U;
			break;
		case STACK_R4:
			if (TARGET_SIZEOF_VOID_P == 8)
				ins->opcode = OP_RCONV_TO_U8;
			else
				ins->opcode = OP_RCONV_TO_U4;
			break;
		}
		break;
	case MONO_CEE_CONV_I8:
	case MONO_CEE_CONV_U8:
		ins->type = STACK_I8;
		ins->opcode += unops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_OVF_I8:
	case MONO_CEE_CONV_OVF_U8:
		ins->type = STACK_I8;
		ins->opcode += ovf3ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_OVF_U8_UN:
	case MONO_CEE_CONV_OVF_I8_UN:
		ins->type = STACK_I8;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_R4:
		ins->type = cfg->r4_stack_type;
		ins->opcode += unops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_R8:
		ins->type = STACK_R8;
		ins->opcode += unops_op_map [src1->type];
		break;
	case OP_CKFINITE:
		ins->type = STACK_R8;
		break;
	case MONO_CEE_CONV_U2:
	case MONO_CEE_CONV_U1:
		ins->type = STACK_I4;
		ins->opcode += ovfops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_I:
	case MONO_CEE_CONV_OVF_I:
	case MONO_CEE_CONV_OVF_U:
		ins->type = STACK_PTR;
		ins->opcode += ovfops_op_map [src1->type];
		break;
	case MONO_CEE_ADD_OVF:
	case MONO_CEE_ADD_OVF_UN:
	case MONO_CEE_MUL_OVF:
	case MONO_CEE_MUL_OVF_UN:
	case MONO_CEE_SUB_OVF:
	case MONO_CEE_SUB_OVF_UN:
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += ovfops_op_map [src1->type];
		if (ins->type == STACK_R8)
			ins->type = STACK_INV;
		break;
	case OP_LOAD_MEMBASE:
		ins->type = STACK_PTR;
		break;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
	case OP_LOADI4_MEMBASE:
	case OP_LOADU4_MEMBASE:
		ins->type = STACK_PTR;
		break;
	case OP_LOADI8_MEMBASE:
		ins->type = STACK_I8;
		break;
	case OP_LOADR4_MEMBASE:
		ins->type = cfg->r4_stack_type;
		break;
	case OP_LOADR8_MEMBASE:
		ins->type = STACK_R8;
		break;
	default:
		g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
		break;
	}

	if (ins->type == STACK_MP) {
		if (src1->type == STACK_MP)
			ins->klass = src1->klass;
		else
			ins->klass = mono_defaults.object_class;
	}
}
void
mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
	type_from_op (cfg, ins, src1, src2);
}

static MonoClass*
ldind_to_type (int op)
{
	switch (op) {
	case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class;
	case MONO_CEE_LDIND_U1: return mono_defaults.byte_class;
	case MONO_CEE_LDIND_I2: return mono_defaults.int16_class;
	case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class;
	case MONO_CEE_LDIND_I4: return mono_defaults.int32_class;
	case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class;
	case MONO_CEE_LDIND_I8: return mono_defaults.int64_class;
	case MONO_CEE_LDIND_I:  return mono_defaults.int_class;
	case MONO_CEE_LDIND_R4: return mono_defaults.single_class;
	case MONO_CEE_LDIND_R8: return mono_defaults.double_class;
	case MONO_CEE_LDIND_REF: return mono_defaults.object_class; //FIXME we should try to return a more specific type
	default: g_error ("Unknown ldind type %d", op);
	}
}
#if 0

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

static int
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
{
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}

#endif
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
{
	if (!cfg->domainvar) {
		/* Make sure we don't generate references after checking whether to init this */
		g_assert (!cfg->domainvar_inited);
		cfg->domainvar = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		/* Avoid optimizing it away */
		cfg->domainvar->flags |= MONO_INST_VOLATILE;
	}
	return cfg->domainvar;
}

/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling.
 */
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
	if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
		return NULL;
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
	}
	return cfg->got_var;
}

static void
mono_create_rgctx_var (MonoCompile *cfg)
{
	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
	}
}

static MonoInst *
mono_get_vtable_var (MonoCompile *cfg)
{
	g_assert (cfg->gshared);

	mono_create_rgctx_var (cfg);

	return cfg->rgctx_var;
}
static MonoType*
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return mono_get_int32_type ();
	case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
	case STACK_PTR: return mono_get_int_type ();
	case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class);
	case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
	case STACK_MP:
		return m_class_get_this_arg (ins->klass);
	case STACK_OBJ: return mono_get_object_type ();
	case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
	default:
		g_error ("stack type %d to monotype not handled\n", ins->type);
	}
	return NULL;
}

static G_GNUC_UNUSED int
type_to_stack_type (MonoCompile *cfg, MonoType *t)
{
	t = mono_type_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return STACK_I4;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return STACK_OBJ;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return STACK_I8;
	case MONO_TYPE_R4:
		return cfg->r4_stack_type;
	case MONO_TYPE_R8:
		return STACK_R8;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		return STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (t))
			return STACK_VTYPE;
		else
			return STACK_OBJ;
		break;
	default:
		g_assert_not_reached ();
	}
	return -1;
}
static MonoClass*
array_access_to_klass (int opcode)
{
	switch (opcode) {
	case MONO_CEE_LDELEM_U1:
		return mono_defaults.byte_class;
	case MONO_CEE_LDELEM_U2:
		return mono_defaults.uint16_class;
	case MONO_CEE_LDELEM_I:
	case MONO_CEE_STELEM_I:
		return mono_defaults.int_class;
	case MONO_CEE_LDELEM_I1:
	case MONO_CEE_STELEM_I1:
		return mono_defaults.sbyte_class;
	case MONO_CEE_LDELEM_I2:
	case MONO_CEE_STELEM_I2:
		return mono_defaults.int16_class;
	case MONO_CEE_LDELEM_I4:
	case MONO_CEE_STELEM_I4:
		return mono_defaults.int32_class;
	case MONO_CEE_LDELEM_U4:
		return mono_defaults.uint32_class;
	case MONO_CEE_LDELEM_I8:
	case MONO_CEE_STELEM_I8:
		return mono_defaults.int64_class;
	case MONO_CEE_LDELEM_R4:
	case MONO_CEE_STELEM_R4:
		return mono_defaults.single_class;
	case MONO_CEE_LDELEM_R8:
	case MONO_CEE_STELEM_R8:
		return mono_defaults.double_class;
	case MONO_CEE_LDELEM_REF:
	case MONO_CEE_STELEM_REF:
		return mono_defaults.object_class;
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * We try to share variables when possible
 */
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;
	MonoType *type;

	type = type_from_stack_type (ins);

	/* inlining can result in deeper stacks */
	if (cfg->inline_depth || slot >= cfg->header->max_stack)
		return mono_compile_create_var (cfg, type, OP_LOCAL);

	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type, OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		res = mono_compile_create_var (cfg, type, OP_LOCAL);
	}
	return res;
}
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
	}
}
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single join point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the join point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpose, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]);
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]);
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
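
/*
 * Worked example: for IL equivalent to "x = cond ? a : b", both arms leave
 * one value on the stack when they branch to the join block. Each arm calls
 * handle_stack_args () before its branch, storing its value into the shared
 * interface variable (bb->out_stack aliases the join block's in_stack), and
 * the join block reloads it, so the merged stack is consistent regardless of
 * which arm executed.
 */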
MonoInst*
mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
{
	MonoInst *ins;

	if (cfg->compile_aot) {
MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size
		EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
MONO_RESTORE_WARNING
	} else {
		MonoJumpInfo ji;
		gpointer target;
		ERROR_DECL (error);

		ji.type = patch_type;
		ji.data.target = data;
		target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, error);
		mono_error_assert_ok (error);

		EMIT_NEW_PCONST (cfg, ins, target);
	}
	return ins;
}
static MonoInst*
mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
{
	int tls_offset = mono_tls_get_tls_offset (key);

	if (cfg->compile_aot)
		return NULL;

	if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_TLS_GET);
		ins->dreg = mono_alloc_preg (cfg);
		ins->inst_offset = tls_offset;
		return ins;
	}
	return NULL;
}

static MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
	MonoInst *fast_tls = NULL;

	if (!mini_debug_options.use_fallback_tls)
		fast_tls = mono_create_fast_tls_getter (cfg, key);

	if (fast_tls) {
		MONO_ADD_INS (cfg->cbb, fast_tls);
		return fast_tls;
	}

	const MonoJitICallId jit_icall_id = mono_get_tls_key_to_jit_icall_id (key);

	if (cfg->compile_aot) {
		MonoInst *addr;
		/*
		 * tls getters are critical pieces of code and we don't want to resolve them
		 * through the standard plt/tramp mechanism since we might expose ourselves
		 * to crashes and infinite recursions.
		 * Therefore the NOCALL part of MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, FALSE in is_plt_patch.
		 */
		EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
		return mini_emit_calli (cfg, mono_icall_sig_ptr, NULL, addr, NULL, NULL);
	} else {
		return mono_emit_jit_icall_id (cfg, jit_icall_id, NULL);
	}
}
/*
 * emit_push_lmf:
 *
 * Emit IR to push the current LMF onto the LMF stack.
 */
static void
emit_push_lmf (MonoCompile *cfg)
{
	/*
	 * Emit IR to push the LMF:
	 *   lmf_addr = <lmf_addr from tls>
	 *   lmf->lmf_addr = lmf_addr
	 *   lmf->prev_lmf = *lmf_addr
	 *   *lmf_addr = lmf
	 */
	MonoInst *ins, *lmf_ins;

	if (!cfg->lmf_ir)
		return;

	int lmf_reg, prev_lmf_reg;
	/*
	 * Store lmf_addr in a variable, so it can be allocated to a global register.
	 */
	if (!cfg->lmf_addr_var)
		cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);

	lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
	g_assert (lmf_ins);

	lmf_ins->dreg = cfg->lmf_addr_var->dreg;

	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
	lmf_reg = ins->dreg;

	prev_lmf_reg = alloc_preg (cfg);
	/* Save previous_lmf */
	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
	/* Set new lmf */
	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}

/*
 * emit_pop_lmf:
 *
 * Emit IR to pop the current LMF from the LMF stack.
 */
static void
emit_pop_lmf (MonoCompile *cfg)
{
	int lmf_reg, lmf_addr_reg;
	MonoInst *ins;

	if (!cfg->lmf_ir)
		return;

	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
	lmf_reg = ins->dreg;

	int prev_lmf_reg;
	/*
	 * Emit IR to pop the LMF:
	 *   *(lmf->lmf_addr) = lmf->prev_lmf
	 */
	/* This could be called before emit_push_lmf () */
	if (!cfg->lmf_addr_var)
		cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
	lmf_addr_reg = cfg->lmf_addr_var->dreg;

	prev_lmf_reg = alloc_preg (cfg);
	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
}
1813 * target_type_is_incompatible:
1814 * @cfg: MonoCompile context
1816 * Check that the item @arg on the evaluation stack can be stored
1817 * in the target type (can be a local, or field, etc).
1818 * The cfg arg can be used to check if we need verification or just
1819 * validity checks.
1821 * Returns: non-0 value if arg can't be stored on a target.
1823 static int
1824 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1826 MonoType *simple_type;
1827 MonoClass *klass;
1829 if (target->byref) {
1830 /* FIXME: check that the pointed to types match */
1831 if (arg->type == STACK_MP) {
1832 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1833 MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target))));
1834 MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)));
1836 /* if the target is native int& or X* or same type */
1837 if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered)
1838 return 0;
1840 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1841 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) &&
1842 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1843 return 0;
1844 return 1;
1846 if (arg->type == STACK_PTR)
1847 return 0;
1848 return 1;
1851 simple_type = mini_get_underlying_type (target);
1852 switch (simple_type->type) {
1853 case MONO_TYPE_VOID:
1854 return 1;
1855 case MONO_TYPE_I1:
1856 case MONO_TYPE_U1:
1857 case MONO_TYPE_I2:
1858 case MONO_TYPE_U2:
1859 case MONO_TYPE_I4:
1860 case MONO_TYPE_U4:
1861 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1862 return 1;
1863 return 0;
1864 case MONO_TYPE_PTR:
1865 /* STACK_MP is needed when setting pinned locals */
1866 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1867 #if SIZEOF_VOID_P == 8
1868 if (arg->type != STACK_I8)
1869 #endif
1870 return 1;
1871 return 0;
1872 case MONO_TYPE_I:
1873 case MONO_TYPE_U:
1874 case MONO_TYPE_FNPTR:
1876 	 * Some opcodes like ldloca return 'transient pointers' which can be stored
1877 	 * in native int. (#688008).
1879 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1880 return 1;
1881 return 0;
1882 case MONO_TYPE_CLASS:
1883 case MONO_TYPE_STRING:
1884 case MONO_TYPE_OBJECT:
1885 case MONO_TYPE_SZARRAY:
1886 case MONO_TYPE_ARRAY:
1887 if (arg->type != STACK_OBJ)
1888 return 1;
1889 /* FIXME: check type compatibility */
1890 return 0;
1891 case MONO_TYPE_I8:
1892 case MONO_TYPE_U8:
1893 if (arg->type != STACK_I8)
1894 #if SIZEOF_VOID_P == 8
1895 if (arg->type != STACK_PTR)
1896 #endif
1897 return 1;
1898 return 0;
1899 case MONO_TYPE_R4:
1900 if (arg->type != cfg->r4_stack_type)
1901 return 1;
1902 return 0;
1903 case MONO_TYPE_R8:
1904 if (arg->type != STACK_R8)
1905 return 1;
1906 return 0;
1907 case MONO_TYPE_VALUETYPE:
1908 if (arg->type != STACK_VTYPE)
1909 return 1;
1910 klass = mono_class_from_mono_type_internal (simple_type);
1911 if (klass != arg->klass)
1912 return 1;
1913 return 0;
1914 case MONO_TYPE_TYPEDBYREF:
1915 if (arg->type != STACK_VTYPE)
1916 return 1;
1917 klass = mono_class_from_mono_type_internal (simple_type);
1918 if (klass != arg->klass)
1919 return 1;
1920 return 0;
1921 case MONO_TYPE_GENERICINST:
1922 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1923 MonoClass *target_class;
1924 if (arg->type != STACK_VTYPE)
1925 return 1;
1926 klass = mono_class_from_mono_type_internal (simple_type);
1927 target_class = mono_class_from_mono_type_internal (target);
1928 		/* The second case is needed when doing partial sharing */
1929 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))))
1930 return 1;
1931 return 0;
1932 } else {
1933 if (arg->type != STACK_OBJ)
1934 return 1;
1935 /* FIXME: check type compatibility */
1936 return 0;
1938 case MONO_TYPE_VAR:
1939 case MONO_TYPE_MVAR:
1940 g_assert (cfg->gshared);
1941 if (mini_type_var_is_vt (simple_type)) {
1942 if (arg->type != STACK_VTYPE)
1943 return 1;
1944 } else {
1945 if (arg->type != STACK_OBJ)
1946 return 1;
1948 return 0;
1949 default:
1950 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1952 return 1;
1956 * convert_value:
1958 * Emit some implicit conversions which are not part of the .net spec, but are allowed by MS.NET.
1960 static MonoInst*
1961 convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins)
1963 if (!cfg->r4fp)
1964 return ins;
1965 type = mini_get_underlying_type (type);
1966 switch (type->type) {
1967 case MONO_TYPE_R4:
1968 if (ins->type == STACK_R8) {
1969 int dreg = alloc_freg (cfg);
1970 MonoInst *conv;
1971 EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg);
1972 conv->type = STACK_R4;
1973 return conv;
1975 break;
1976 case MONO_TYPE_R8:
1977 if (ins->type == STACK_R4) {
1978 int dreg = alloc_freg (cfg);
1979 MonoInst *conv;
1980 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg);
1981 conv->type = STACK_R8;
1982 return conv;
1984 break;
1985 default:
1986 break;
1988 return ins;
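	/*
	 * Usage note (added, not original source): with cfg->r4fp enabled this keeps the
	 * IR consistently typed, e.g. an R8 value being stored into an R4 local is first
	 * narrowed via OP_FCONV_TO_R4 instead of relying on an implicit conversion.
	 */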
1992 * Prepare arguments for passing to a function call.
1993 * Return a non-zero value if the arguments can't be passed to the given
1994 * signature.
1995 * The type checks are not yet complete and some conversions may need
1996 * casts on 32 or 64 bit architectures.
1998 * FIXME: implement this using target_type_is_incompatible ()
2000 static gboolean
2001 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2003 MonoType *simple_type;
2004 int i;
2006 if (sig->hasthis) {
2007 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2008 return TRUE;
2009 args++;
2011 for (i = 0; i < sig->param_count; ++i) {
2012 if (sig->params [i]->byref) {
2013 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2014 return TRUE;
2015 continue;
2017 simple_type = mini_get_underlying_type (sig->params [i]);
2018 handle_enum:
2019 switch (simple_type->type) {
2020 case MONO_TYPE_VOID:
2021 return TRUE;
2022 case MONO_TYPE_I1:
2023 case MONO_TYPE_U1:
2024 case MONO_TYPE_I2:
2025 case MONO_TYPE_U2:
2026 case MONO_TYPE_I4:
2027 case MONO_TYPE_U4:
2028 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2029 return TRUE;
2030 continue;
2031 case MONO_TYPE_I:
2032 case MONO_TYPE_U:
2033 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2034 return TRUE;
2035 continue;
2036 case MONO_TYPE_PTR:
2037 case MONO_TYPE_FNPTR:
2038 if (args [i]->type != STACK_I4 && !(SIZEOF_VOID_P == 8 && args [i]->type == STACK_I8) &&
2039 args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2040 return TRUE;
2041 continue;
2042 case MONO_TYPE_CLASS:
2043 case MONO_TYPE_STRING:
2044 case MONO_TYPE_OBJECT:
2045 case MONO_TYPE_SZARRAY:
2046 case MONO_TYPE_ARRAY:
2047 if (args [i]->type != STACK_OBJ)
2048 return TRUE;
2049 continue;
2050 case MONO_TYPE_I8:
2051 case MONO_TYPE_U8:
2052 if (args [i]->type != STACK_I8 &&
2053 !(SIZEOF_VOID_P == 8 && (args [i]->type == STACK_I4 || args [i]->type == STACK_PTR)))
2054 return TRUE;
2055 continue;
2056 case MONO_TYPE_R4:
2057 if (args [i]->type != cfg->r4_stack_type)
2058 return TRUE;
2059 continue;
2060 case MONO_TYPE_R8:
2061 if (args [i]->type != STACK_R8)
2062 return TRUE;
2063 continue;
2064 case MONO_TYPE_VALUETYPE:
2065 if (m_class_is_enumtype (simple_type->data.klass)) {
2066 simple_type = mono_class_enum_basetype_internal (simple_type->data.klass);
2067 goto handle_enum;
2069 if (args [i]->type != STACK_VTYPE)
2070 return TRUE;
2071 continue;
2072 case MONO_TYPE_TYPEDBYREF:
2073 if (args [i]->type != STACK_VTYPE)
2074 return TRUE;
2075 continue;
2076 case MONO_TYPE_GENERICINST:
2077 simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class);
2078 goto handle_enum;
2079 case MONO_TYPE_VAR:
2080 case MONO_TYPE_MVAR:
2081 /* gsharedvt */
2082 if (args [i]->type != STACK_VTYPE)
2083 return TRUE;
2084 continue;
2085 default:
2086 g_error ("unknown type 0x%02x in check_call_signature",
2087 simple_type->type);
2090 return FALSE;
2093 MonoJumpInfo *
2094 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2096 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2098 ji->ip.i = ip;
2099 ji->type = type;
2100 ji->data.target = target;
2102 return ji;
2106 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2108 if (cfg->gshared)
2109 return mono_class_check_context_used (klass);
2110 else
2111 return 0;
2115 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2117 if (cfg->gshared)
2118 return mono_method_check_context_used (method);
2119 else
2120 return 0;
2124 * check_method_sharing:
2126  * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2128 static void
2129 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2131 gboolean pass_vtable = FALSE;
2132 gboolean pass_mrgctx = FALSE;
2134 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) &&
2135 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2136 gboolean sharable = FALSE;
2138 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2139 sharable = TRUE;
2142 * Pass vtable iff target method might
2143 * be shared, which means that sharing
2144 * is enabled for its class and its
2145 * context is sharable (and it's not a
2146 * generic method).
2148 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2149 pass_vtable = TRUE;
2152 if (mini_method_needs_mrgctx (cmethod)) {
2153 if (mini_method_is_default_method (cmethod))
2154 pass_vtable = FALSE;
2155 else
2156 g_assert (!pass_vtable);
2158 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2159 pass_mrgctx = TRUE;
2160 } else {
2161 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod)))
2162 pass_mrgctx = TRUE;
2166 if (out_pass_vtable)
2167 *out_pass_vtable = pass_vtable;
2168 if (out_pass_mrgctx)
2169 *out_pass_mrgctx = pass_mrgctx;
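	/*
	 * Example (added note, not original source): for a shared static method on a
	 * generic class, e.g. Gen<T>.StaticM (), the vtable is passed, while a shared
	 * generic method, e.g. Foo.M<T> (), receives an mrgctx instead.
	 */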
2172 static gboolean
2173 direct_icalls_enabled (MonoCompile *cfg, MonoMethod *method)
2175 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2176 return FALSE;
2178 if (method && mono_aot_direct_icalls_enabled_for_method (cfg, method))
2179 return TRUE;
2181 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2182 #ifdef TARGET_AMD64
2183 if (cfg->compile_llvm && !cfg->llvm_only)
2184 return FALSE;
2185 #endif
2187 return FALSE;
2190 MonoInst*
2191 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2194 * Call the jit icall without a wrapper if possible.
2195 * The wrapper is needed to be able to do stack walks for asynchronously suspended
2196 * threads when debugging.
2198 if (direct_icalls_enabled (cfg, NULL)) {
2199 int costs;
2201 if (!info->wrapper_method) {
2202 info->wrapper_method = mono_marshal_get_icall_wrapper (info, TRUE);
2203 mono_memory_barrier ();
2207 * Inline the wrapper method, which is basically a call to the C icall, and
2208 * an exception check.
2210 costs = inline_method (cfg, info->wrapper_method, NULL,
2211 args, NULL, il_offset, TRUE);
2212 g_assert (costs > 0);
2213 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2215 return args [0];
2217 return mono_emit_jit_icall_id (cfg, mono_jit_icall_info_id (info), args);
2220 static MonoInst*
2221 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2223 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2224 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2225 int widen_op = -1;
2228 * Native code might return non register sized integers
2229 * without initializing the upper bits.
2231 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2232 case OP_LOADI1_MEMBASE:
2233 widen_op = OP_ICONV_TO_I1;
2234 break;
2235 case OP_LOADU1_MEMBASE:
2236 widen_op = OP_ICONV_TO_U1;
2237 break;
2238 case OP_LOADI2_MEMBASE:
2239 widen_op = OP_ICONV_TO_I2;
2240 break;
2241 case OP_LOADU2_MEMBASE:
2242 widen_op = OP_ICONV_TO_U2;
2243 break;
2244 default:
2245 break;
2248 if (widen_op != -1) {
2249 int dreg = alloc_preg (cfg);
2250 MonoInst *widen;
2252 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2253 widen->type = ins->type;
2254 ins = widen;
2259 return ins;
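	/*
	 * Explanatory sketch (added, not original source): e.g. a pinvoke declared to
	 * return 'short' may leave the upper 16 bits of the result register undefined,
	 * so the value is sign-extended with OP_ICONV_TO_I2 before managed code sees it.
	 */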
2262 static MonoInst*
2263 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2264 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2266 static void
2267 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2269 MonoInst *args [2];
2270 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2271 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2272 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2275 static void
2276 emit_bad_image_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2278 mono_emit_jit_icall (cfg, mono_throw_bad_image, NULL);
2281 static MonoMethod*
2282 get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
2284 MonoMethod *method;
2285 ERROR_DECL (error);
2286 method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
2287 mono_error_assert_ok (error);
2288 g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
2289 return method;
2292 MonoMethod*
2293 mini_get_memcpy_method (void)
2295 static MonoMethod *memcpy_method = NULL;
2296 if (!memcpy_method) {
2297 memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0);
2298 if (!memcpy_method)
2299 g_error ("Old corlib found. Install a new one");
2301 return memcpy_method;
2304 MonoInst*
2305 mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2307 MonoInst *store;
2310 * Add a release memory barrier so the object contents are flushed
2311 * to memory before storing the reference into another object.
2313 if (!mini_debug_options.weak_memory_model)
2314 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
2316 EMIT_NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, ptr->dreg, 0, value->dreg);
2318 mini_emit_write_barrier (cfg, ptr, value);
2319 return store;
2322 void
2323 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2325 int card_table_shift_bits;
2326 target_mgreg_t card_table_mask;
2327 guint8 *card_table;
2328 MonoInst *dummy_use;
2329 int nursery_shift_bits;
2330 size_t nursery_size;
2332 if (!cfg->gen_write_barriers)
2333 return;
2335 //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
2337 card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask);
2339 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2341 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2342 MonoInst *wbarrier;
2344 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2345 wbarrier->sreg1 = ptr->dreg;
2346 wbarrier->sreg2 = value->dreg;
2347 MONO_ADD_INS (cfg->cbb, wbarrier);
2348 } else if (card_table) {
2349 int offset_reg = alloc_preg (cfg);
2350 int card_reg;
2351 MonoInst *ins;
2354 	 * We emit a fast, lightweight write barrier. This always marks cards, as in the concurrent
2355 	 * collector case, so for the serial collector it might slightly slow down nursery
2356 	 * collections. We also expect that the host system and the target system have the same card
2357 	 * table configuration, which is the case if they have the same pointer size.
2360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2361 if (card_table_mask)
2362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2364 	/* We can't use PADD_IMM since the card table might end up at high addresses and amd64 doesn't
2365 	 * support IMMs larger than 32 bits.
2367 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2368 card_reg = ins->dreg;
2370 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2371 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
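		/*
		 * Explanatory sketch (added, not original source): the IR above computes
		 *   card_table [(ptr >> card_table_shift_bits) & card_table_mask] = 1;
		 * i.e. it dirties the card covering the address being written to.
		 */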
2372 } else {
2373 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2374 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2377 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2380 MonoMethod*
2381 mini_get_memset_method (void)
2383 static MonoMethod *memset_method = NULL;
2384 if (!memset_method) {
2385 memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0);
2386 if (!memset_method)
2387 g_error ("Old corlib found. Install a new one");
2389 return memset_method;
2392 void
2393 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2395 MonoInst *iargs [3];
2396 int n;
2397 guint32 align;
2398 MonoMethod *memset_method;
2399 MonoInst *size_ins = NULL;
2400 MonoInst *bzero_ins = NULL;
2401 static MonoMethod *bzero_method;
2403 /* FIXME: Optimize this for the case when dest is an LDADDR */
2404 mono_class_init_internal (klass);
2405 if (mini_is_gsharedvt_klass (klass)) {
2406 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2407 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
2408 if (!bzero_method)
2409 bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0);
2410 g_assert (bzero_method);
2411 iargs [0] = dest;
2412 iargs [1] = size_ins;
2413 mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL);
2414 return;
2417 klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));
2419 n = mono_class_value_size (klass, &align);
2421 if (n <= TARGET_SIZEOF_VOID_P * 8) {
2422 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2424 else {
2425 memset_method = mini_get_memset_method ();
2426 iargs [0] = dest;
2427 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2428 EMIT_NEW_ICONST (cfg, iargs [2], n);
2429 mono_emit_method_call (cfg, memset_method, iargs, NULL);
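	/*
	 * Explanatory note (added, not original source): value types up to eight
	 * pointer-sizes are zeroed with inline stores via mini_emit_memset (), larger
	 * ones fall back to a call to the managed memset helper.
	 */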
2433 static gboolean
2434 context_used_is_mrgctx (MonoCompile *cfg, int context_used)
2436 /* gshared dim methods use an mrgctx */
2437 if (mini_method_is_default_method (cfg->method))
2438 return context_used != 0;
2439 return context_used & MONO_GENERIC_CONTEXT_USED_METHOD;
2443 * emit_get_rgctx:
2445  * Emit IR to return either the this pointer for instance methods,
2446 * or the mrgctx for static methods.
2448 static MonoInst*
2449 emit_get_rgctx (MonoCompile *cfg, int context_used)
2451 MonoInst *this_ins = NULL;
2452 MonoMethod *method = cfg->method;
2454 g_assert (cfg->gshared);
2456 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2457 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2458 !m_class_is_valuetype (method->klass))
2459 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ());
2461 if (context_used_is_mrgctx (cfg, context_used)) {
2462 MonoInst *mrgctx_loc, *mrgctx_var;
2464 if (!mini_method_is_default_method (method)) {
2465 g_assert (!this_ins);
2466 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2469 mrgctx_loc = mono_get_vtable_var (cfg);
2470 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2472 return mrgctx_var;
2473 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || m_class_is_valuetype (method->klass)) {
2474 MonoInst *vtable_loc, *vtable_var;
2476 g_assert (!this_ins);
2478 vtable_loc = mono_get_vtable_var (cfg);
2479 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2481 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2482 MonoInst *mrgctx_var = vtable_var;
2483 int vtable_reg;
2485 vtable_reg = alloc_preg (cfg);
2486 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2487 vtable_var->type = STACK_PTR;
2490 return vtable_var;
2491 } else {
2492 MonoInst *ins;
2493 int vtable_reg;
2495 vtable_reg = alloc_preg (cfg);
2496 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2497 return ins;
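	/*
	 * Summary (added note, not original source): the rgctx is recovered from, in
	 * order, the mrgctx variable (generic/default methods), the vtable variable
	 * (static or valuetype methods), or a load of this->vtable otherwise.
	 */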
2501 static MonoJumpInfoRgctxEntry *
2502 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2504 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2505 if (in_mrgctx)
2506 res->d.method = method;
2507 else
2508 res->d.klass = method->klass;
2509 res->in_mrgctx = in_mrgctx;
2510 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2511 res->data->type = patch_type;
2512 res->data->data.target = patch_data;
2513 res->info_type = info_type;
2515 return res;
2518 static MonoInst*
2519 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2521 MonoInst *args [16];
2522 MonoInst *call;
2524 // FIXME: No fastpath since the slot is not a compile time constant
2525 args [0] = rgctx;
2526 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
2527 if (entry->in_mrgctx)
2528 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
2529 else
2530 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
2531 return call;
2532 #if 0
2534 * FIXME: This can be called during decompose, which is a problem since it creates
2535 * new bblocks.
2536 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
2538 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
2539 gboolean mrgctx;
2540 MonoBasicBlock *is_null_bb, *end_bb;
2541 MonoInst *res, *ins, *call;
2542 MonoInst *args[16];
2544 slot = mini_get_rgctx_entry_slot (entry);
2546 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
2547 index = MONO_RGCTX_SLOT_INDEX (slot);
2548 if (mrgctx)
2549 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P;
2550 for (depth = 0; ; ++depth) {
2551 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
2553 if (index < size - 1)
2554 break;
2555 index -= size - 1;
2558 NEW_BBLOCK (cfg, end_bb);
2559 NEW_BBLOCK (cfg, is_null_bb);
2561 if (mrgctx) {
2562 rgctx_reg = rgctx->dreg;
2563 } else {
2564 rgctx_reg = alloc_preg (cfg);
2566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
2567 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
2568 NEW_BBLOCK (cfg, is_null_bb);
2570 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
2571 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2574 for (i = 0; i < depth; ++i) {
2575 int array_reg = alloc_preg (cfg);
2577 /* load ptr to next array */
2578 if (mrgctx && i == 0)
2579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
2580 else
2581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
2582 rgctx_reg = array_reg;
2583 /* is the ptr null? */
2584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
2585 /* if yes, jump to actual trampoline */
2586 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2589 /* fetch slot */
2590 val_reg = alloc_preg (cfg);
2591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * TARGET_SIZEOF_VOID_P);
2592 /* is the slot null? */
2593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
2594 /* if yes, jump to actual trampoline */
2595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2597 /* Fastpath */
2598 res_reg = alloc_preg (cfg);
2599 MONO_INST_NEW (cfg, ins, OP_MOVE);
2600 ins->dreg = res_reg;
2601 ins->sreg1 = val_reg;
2602 MONO_ADD_INS (cfg->cbb, ins);
2603 res = ins;
2604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2606 /* Slowpath */
2607 MONO_START_BB (cfg, is_null_bb);
2608 args [0] = rgctx;
2609 EMIT_NEW_ICONST (cfg, args [1], index);
2610 if (mrgctx)
2611 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
2612 else
2613 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
2614 MONO_INST_NEW (cfg, ins, OP_MOVE);
2615 ins->dreg = res_reg;
2616 ins->sreg1 = call->dreg;
2617 MONO_ADD_INS (cfg->cbb, ins);
2618 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2620 MONO_START_BB (cfg, end_bb);
2622 return res;
2623 #endif
2627 * emit_rgctx_fetch:
2629 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
2630 * given by RGCTX.
2632 static MonoInst*
2633 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2635 if (cfg->llvm_only)
2636 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
2637 else
2638 return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, mono_icall_sig_ptr_ptr, &rgctx);
2642 * mini_emit_get_rgctx_klass:
2644 * Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit
2645 * normal constants, else emit a load from the rgctx.
2647 MonoInst*
2648 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2649 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2651 if (!context_used) {
2652 MonoInst *ins;
2654 switch (rgctx_type) {
2655 case MONO_RGCTX_INFO_KLASS:
2656 EMIT_NEW_CLASSCONST (cfg, ins, klass);
2657 return ins;
2658 default:
2659 g_assert_not_reached ();
2663 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2664 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2666 return emit_rgctx_fetch (cfg, rgctx, entry);
2669 static MonoInst*
2670 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
2671 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
2673 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
2674 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2676 return emit_rgctx_fetch (cfg, rgctx, entry);
2679 static MonoInst*
2680 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
2681 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2683 MonoJumpInfoGSharedVtCall *call_info;
2684 MonoJumpInfoRgctxEntry *entry;
2685 MonoInst *rgctx;
2687 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
2688 call_info->sig = sig;
2689 call_info->method = cmethod;
2691 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
2692 rgctx = emit_get_rgctx (cfg, context_used);
2694 return emit_rgctx_fetch (cfg, rgctx, entry);
2698 * emit_get_rgctx_virt_method:
2700 * Return data for method VIRT_METHOD for a receiver of type KLASS.
2702 static MonoInst*
2703 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
2704 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
2706 MonoJumpInfoVirtMethod *info;
2707 MonoJumpInfoRgctxEntry *entry;
2708 MonoInst *rgctx;
2710 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
2711 info->klass = klass;
2712 info->method = virt_method;
2714 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
2715 rgctx = emit_get_rgctx (cfg, context_used);
2717 return emit_rgctx_fetch (cfg, rgctx, entry);
2720 static MonoInst*
2721 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
2722 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
2724 MonoJumpInfoRgctxEntry *entry;
2725 MonoInst *rgctx;
2727 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
2728 rgctx = emit_get_rgctx (cfg, context_used);
2730 return emit_rgctx_fetch (cfg, rgctx, entry);
2734 * emit_get_rgctx_method:
2736 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2737 * normal constants, else emit a load from the rgctx.
2739 static MonoInst*
2740 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2741 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2743 if (context_used == -1)
2744 context_used = mono_method_check_context_used (cmethod);
2746 if (!context_used) {
2747 MonoInst *ins;
2749 switch (rgctx_type) {
2750 case MONO_RGCTX_INFO_METHOD:
2751 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2752 return ins;
2753 case MONO_RGCTX_INFO_METHOD_RGCTX:
2754 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2755 return ins;
2756 case MONO_RGCTX_INFO_METHOD_FTNDESC:
2757 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_FTNDESC, cmethod);
2758 return ins;
2759 default:
2760 g_assert_not_reached ();
2762 } else {
2763 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2764 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2766 return emit_rgctx_fetch (cfg, rgctx, entry);
2770 static MonoInst*
2771 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2772 MonoClassField *field, MonoRgctxInfoType rgctx_type)
2774 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type);
2775 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2777 return emit_rgctx_fetch (cfg, rgctx, entry);
2780 MonoInst*
2781 mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2782 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2784 return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type);
2787 static int
2788 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
2790 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
2791 MonoRuntimeGenericContextInfoTemplate *template_;
2792 int i, idx;
2794 g_assert (info);
2796 for (i = 0; i < info->num_entries; ++i) {
2797 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
2799 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
2800 return i;
2803 if (info->num_entries == info->count_entries) {
2804 MonoRuntimeGenericContextInfoTemplate *new_entries;
2805 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
2807 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
2809 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
2810 info->entries = new_entries;
2811 info->count_entries = new_count_entries;
2814 idx = info->num_entries;
2815 template_ = &info->entries [idx];
2816 template_->info_type = rgctx_type;
2817 template_->data = data;
2819 info->num_entries ++;
2821 return idx;
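	/*
	 * Explanatory note (added, not original source): entries are deduplicated by
	 * (info_type, data) except for local offsets, and the template array grows by
	 * doubling (starting at 16 entries) out of the compilation mempool.
	 */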
2825 * emit_get_gsharedvt_info:
2827 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
2829 static MonoInst*
2830 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
2832 MonoInst *ins;
2833 int idx, dreg;
2835 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
2836 /* Load info->entries [idx] */
2837 dreg = alloc_preg (cfg);
2838 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
2840 return ins;
2843 MonoInst*
2844 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
2846 return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type);
2850 * On return the caller must check @klass for load errors.
2852 static void
2853 emit_class_init (MonoCompile *cfg, MonoClass *klass)
2855 MonoInst *vtable_arg;
2856 int context_used;
2858 context_used = mini_class_check_context_used (cfg, klass);
2860 if (context_used) {
2861 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
2862 klass, MONO_RGCTX_INFO_VTABLE);
2863 } else {
2864 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, cfg->error);
2865 if (!is_ok (cfg->error)) {
2866 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2867 return;
2870 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2873 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
2874 MonoInst *ins;
2877 	 * Using an opcode instead of emitting IR here allows hiding the call inside the opcode,
2878 	 * so it doesn't have to clobber any regs and it doesn't break basic blocks.
2880 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
2881 ins->sreg1 = vtable_arg->dreg;
2882 MONO_ADD_INS (cfg->cbb, ins);
2883 } else {
2884 int inited_reg;
2885 MonoBasicBlock *inited_bb;
2887 inited_reg = alloc_ireg (cfg);
2889 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
2891 NEW_BBLOCK (cfg, inited_bb);
2893 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
2894 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
2896 mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg);
2898 MONO_START_BB (cfg, inited_bb);
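	/*
	 * Explanatory sketch (added, not original source): the fallback above expands to
	 *   if (!vtable->initialized)
	 *       mono_generic_class_init (vtable);
	 * so already-initialized classes only pay for a byte load and a branch.
	 */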
2902 static void
2903 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
2905 MonoInst *ins;
2907 if (cfg->gen_seq_points && cfg->method == method) {
2908 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
2909 if (nonempty_stack)
2910 ins->flags |= MONO_INST_NONEMPTY_STACK;
2911 MONO_ADD_INS (cfg->cbb, ins);
2912 cfg->last_seq_point = ins;
2916 void
2917 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
2919 if (mini_debug_options.better_cast_details) {
2920 int vtable_reg = alloc_preg (cfg);
2921 int klass_reg = alloc_preg (cfg);
2922 MonoBasicBlock *is_null_bb = NULL;
2923 MonoInst *tls_get;
2925 if (null_check) {
2926 NEW_BBLOCK (cfg, is_null_bb);
2928 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2929 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2932 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
2933 if (!tls_get) {
2934 		fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
2935 exit (1);
2938 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
2941 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2943 MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS);
2944 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg);
2946 if (null_check)
2947 MONO_START_BB (cfg, is_null_bb);
2951 void
2952 mini_reset_cast_details (MonoCompile *cfg)
2954 /* Reset the variables holding the cast details */
2955 if (mini_debug_options.better_cast_details) {
2956 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
2957 /* It is enough to reset the from field */
2958 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2963 * On return the caller must check @array_class for load errors
2965 static void
2966 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2968 int vtable_reg = alloc_preg (cfg);
2969 int context_used;
2971 context_used = mini_class_check_context_used (cfg, array_class);
2973 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
2975 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2977 if (cfg->opt & MONO_OPT_SHARED) {
2978 int class_reg = alloc_preg (cfg);
2979 MonoInst *ins;
2981 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
2982 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
2983 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
2984 } else if (context_used) {
2985 MonoInst *vtable_ins;
2987 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2988 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2989 } else {
2990 if (cfg->compile_aot) {
2991 int vt_reg;
2992 MonoVTable *vtable;
2994 if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, cfg->error))) {
2995 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2996 return;
2998 vt_reg = alloc_preg (cfg);
2999 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3000 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3001 } else {
3002 MonoVTable *vtable;
3003 if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, cfg->error))) {
3004 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3005 return;
3007 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable);
3011 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3013 mini_reset_cast_details (cfg);
3017  * Handles unbox of a Nullable<T>. If context_used is non-zero, then shared
3018 * generic code is generated.
3020 static MonoInst*
3021 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3023 MonoMethod* method;
3025 if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass)))
3026 method = get_method_nofail (klass, "UnboxExact", 1, 0);
3027 else
3028 method = get_method_nofail (klass, "Unbox", 1, 0);
3029 g_assert (method);
3031 if (context_used) {
3032 MonoInst *rgctx, *addr;
3034 /* FIXME: What if the class is shared? We might not
3035 have to get the address of the method from the
3036 RGCTX. */
3037 if (cfg->llvm_only) {
3038 addr = emit_get_rgctx_method (cfg, context_used, method,
3039 MONO_RGCTX_INFO_METHOD_FTNDESC);
3040 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method));
3041 return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
3042 } else {
3043 addr = emit_get_rgctx_method (cfg, context_used, method,
3044 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3045 rgctx = emit_get_rgctx (cfg, context_used);
3047 return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
3049 } else {
3050 gboolean pass_vtable, pass_mrgctx;
3051 MonoInst *rgctx_arg = NULL;
3053 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3054 g_assert (!pass_mrgctx);
3056 if (pass_vtable) {
3057 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, cfg->error);
3059 mono_error_assert_ok (cfg->error);
3060 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3063 return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3067 static MonoInst*
3068 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3070 MonoInst *add;
3071 int obj_reg;
3072 	int vtable_reg = alloc_dreg (cfg, STACK_PTR);
3073 	int klass_reg = alloc_dreg (cfg, STACK_PTR);
3074 	int eclass_reg = alloc_dreg (cfg, STACK_PTR);
3075 	int rank_reg = alloc_dreg (cfg, STACK_I4);
3077 obj_reg = sp [0]->dreg;
3078 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3079 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3081 /* FIXME: generics */
3082 g_assert (m_class_get_rank (klass) == 0);
3084 // Check rank == 0
3085 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3086 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3088 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ());
3091 if (context_used) {
3092 MonoInst *element_class;
3094 /* This assertion is from the unboxcast insn */
3095 g_assert (m_class_get_rank (klass) == 0);
3097 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3098 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3100 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3101 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3102 } else {
3103 mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE);
3104 mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass));
3105 mini_reset_cast_details (cfg);
3108 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject));
3109 MONO_ADD_INS (cfg->cbb, add);
3110 add->type = STACK_MP;
3111 add->klass = klass;
3113 return add;
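	/*
	 * Explanatory note (added, not original source): after the vtable/element-class
	 * checks pass, the unboxed address is simply obj + sizeof (MonoObject), i.e. the
	 * value payload that follows the object header.
	 */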
3116 static MonoInst*
3117 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3119 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3120 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3121 MonoInst *ins;
3122 int dreg, addr_reg;
3124 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3126 /* obj */
3127 args [0] = obj;
3129 /* klass */
3130 args [1] = klass_inst;
3132 /* CASTCLASS */
3133 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3135 NEW_BBLOCK (cfg, is_ref_bb);
3136 NEW_BBLOCK (cfg, is_nullable_bb);
3137 NEW_BBLOCK (cfg, end_bb);
3138 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3143 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3145 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3146 addr_reg = alloc_dreg (cfg, STACK_MP);
3148 /* Non-ref case */
3149 /* UNBOX */
3150 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject));
3151 MONO_ADD_INS (cfg->cbb, addr);
3153 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3155 /* Ref case */
3156 MONO_START_BB (cfg, is_ref_bb);
3158 /* Save the ref to a temporary */
3159 dreg = alloc_ireg (cfg);
3160 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass));
3161 addr->dreg = addr_reg;
3162 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3163 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3165 /* Nullable case */
3166 MONO_START_BB (cfg, is_nullable_bb);
3169 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3170 MonoInst *unbox_call;
3171 MonoMethodSignature *unbox_sig;
3173 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3174 unbox_sig->ret = m_class_get_byval_arg (klass);
3175 unbox_sig->param_count = 1;
3176 unbox_sig->params [0] = mono_get_object_type ();
3178 if (cfg->llvm_only)
3179 unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3180 else
3181 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3183 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass));
3184 addr->dreg = addr_reg;
3187 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3189 /* End */
3190 MONO_START_BB (cfg, end_bb);
3192 /* LDOBJ */
3193 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0);
3195 return ins;
3199  * Returns NULL and sets the cfg exception on error.
3201 static MonoInst*
3202 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3204 MonoInst *iargs [2];
3205 MonoJitICallId alloc_ftn;
3207 if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) {
3208 char* full_name = mono_type_get_full_name (klass);
3209 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3210 mono_error_set_member_access (cfg->error, "Cannot create an abstract class: %s", full_name);
3211 g_free (full_name);
3212 return NULL;
3215 if (context_used) {
3216 MonoInst *data;
3217 MonoRgctxInfoType rgctx_info;
3218 MonoInst *iargs [2];
3219 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3221 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3223 if (cfg->opt & MONO_OPT_SHARED)
3224 rgctx_info = MONO_RGCTX_INFO_KLASS;
3225 else
3226 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3227 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3229 if (cfg->opt & MONO_OPT_SHARED) {
3230 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3231 iargs [1] = data;
3232 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new;
3233 } else {
3234 iargs [0] = data;
3235 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
3238 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3239 if (known_instance_size) {
3240 int size = mono_class_instance_size (klass);
3241 if (size < MONO_ABI_SIZEOF (MonoObject))
3242 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3244 EMIT_NEW_ICONST (cfg, iargs [1], size);
3246 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3249 return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
3252 if (cfg->opt & MONO_OPT_SHARED) {
3253 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3254 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3256 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new;
3257 } else if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3258 /* This happens often in argument checking code, eg. throw new FooException... */
3259 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3260 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass)));
3261 alloc_ftn = MONO_JIT_ICALL_mono_helper_newobj_mscorlib;
3262 } else {
3263 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, cfg->error);
3265 if (!is_ok (cfg->error)) {
3266 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3267 return NULL;
3270 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3272 if (managed_alloc) {
3273 int size = mono_class_instance_size (klass);
3274 if (size < MONO_ABI_SIZEOF (MonoObject))
3275 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3277 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3278 EMIT_NEW_ICONST (cfg, iargs [1], size);
3279 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3281 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
3282 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3285 return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
3289  * Returns NULL and sets the cfg exception on error.
3291 MonoInst*
3292 mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3294 MonoInst *alloc, *ins;
3296 if (G_UNLIKELY (m_class_is_byreflike (klass))) {
3297 mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass));
3298 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3299 return NULL;
3302 if (mono_class_is_nullable (klass)) {
3303 MonoMethod* method = get_method_nofail (klass, "Box", 1, 0);
3305 if (context_used) {
3306 if (cfg->llvm_only && cfg->gsharedvt) {
3307 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3308 MONO_RGCTX_INFO_METHOD_FTNDESC);
3309 return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
3310 } else {
3311 /* FIXME: What if the class is shared? We might not
3312 have to get the method address from the RGCTX. */
3313 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3314 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3315 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
3317 return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
3319 } else {
3320 gboolean pass_vtable, pass_mrgctx;
3321 MonoInst *rgctx_arg = NULL;
3323 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3324 g_assert (!pass_mrgctx);
3326 if (pass_vtable) {
3327 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, cfg->error);
3329 mono_error_assert_ok (cfg->error);
3330 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3333 return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3337 if (mini_is_gsharedvt_klass (klass)) {
3338 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3339 MonoInst *res, *is_ref, *src_var, *addr;
3340 int dreg;
3342 dreg = alloc_ireg (cfg);
3344 NEW_BBLOCK (cfg, is_ref_bb);
3345 NEW_BBLOCK (cfg, is_nullable_bb);
3346 NEW_BBLOCK (cfg, end_bb);
3347 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3349 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3351 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3352 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3354 /* Non-ref case */
3355 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3356 if (!alloc)
3357 return NULL;
3358 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
3359 ins->opcode = OP_STOREV_MEMBASE;
3361 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3362 res->type = STACK_OBJ;
3363 res->klass = klass;
3364 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3366 /* Ref case */
3367 MONO_START_BB (cfg, is_ref_bb);
3369 	/* val is a vtype, so we have to load the value manually */
3370 src_var = get_vreg_to_inst (cfg, val->dreg);
3371 if (!src_var)
3372 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg);
3373 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3374 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3375 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3377 /* Nullable case */
3378 MONO_START_BB (cfg, is_nullable_bb);
3381 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
3382 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3383 MonoInst *box_call;
3384 MonoMethodSignature *box_sig;
3387 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3388 	 * construct that method at JIT time, so we have to do things by hand.
3390 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3391 box_sig->ret = mono_get_object_type ();
3392 box_sig->param_count = 1;
3393 box_sig->params [0] = m_class_get_byval_arg (klass);
3395 if (cfg->llvm_only)
3396 box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr);
3397 else
3398 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3399 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3400 res->type = STACK_OBJ;
3401 res->klass = klass;
3404 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3406 MONO_START_BB (cfg, end_bb);
3408 return res;
3411 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3412 if (!alloc)
3413 return NULL;
3415 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
3416 return alloc;
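	/*
	 * Explanatory sketch (added, not original source): the common case above is
	 *   obj = handle_alloc (klass);
	 *   *(obj + sizeof (MonoObject)) = val;
	 * with the nullable and gsharedvt shapes handled by the earlier branches.
	 */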
3419 static gboolean
3420 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3422 if (cmethod->klass == mono_defaults.systemtype_class) {
3423 if (!strcmp (cmethod->name, "GetType"))
3424 return TRUE;
3426 return FALSE;
3429 G_GNUC_UNUSED MonoInst*
3430 mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag)
3432 MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass));
3433 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
3434 gboolean is_i4;
3436 switch (enum_type->type) {
3437 case MONO_TYPE_I8:
3438 case MONO_TYPE_U8:
3439 #if SIZEOF_REGISTER == 8
3440 case MONO_TYPE_I:
3441 case MONO_TYPE_U:
3442 #endif
3443 is_i4 = FALSE;
3444 break;
3445 default:
3446 is_i4 = TRUE;
3447 break;
3451 MonoInst *load = NULL, *and_, *cmp, *ceq;
3452 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3453 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3454 int dest_reg = alloc_ireg (cfg);
3456 if (enum_this) {
3457 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
3458 } else {
3459 g_assert (enum_val_reg != -1);
3460 enum_reg = enum_val_reg;
3462 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
3463 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
3464 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
3466 ceq->type = STACK_I4;
3468 if (!is_i4) {
3469 load = load ? mono_decompose_opcode (cfg, load) : NULL;
3470 and_ = mono_decompose_opcode (cfg, and_);
3471 cmp = mono_decompose_opcode (cfg, cmp);
3472 ceq = mono_decompose_opcode (cfg, ceq);
3475 return ceq;
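	/*
	 * Explanatory note (added, not original source): the emitted IR computes
	 *   (value & flag) == flag
	 * which matches Enum.HasFlag () semantics at the enum's underlying width.
	 */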
3479 static MonoInst*
3480 emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used,
3481 MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type)
3483 MonoDelegateClassMethodPair *info;
3484 MonoJumpInfoRgctxEntry *entry;
3485 MonoInst *rgctx;
3487 info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
3488 info->klass = klass;
3489 info->method = virt_method;
3490 info->is_virtual = _virtual;
3492 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type);
3493 rgctx = emit_get_rgctx (cfg, context_used);
3495 return emit_rgctx_fetch (cfg, rgctx, entry);
3500  * Returns NULL and sets the cfg exception on error.
3502 static G_GNUC_UNUSED MonoInst*
3503 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_)
3505 MonoInst *ptr;
3506 int dreg;
3507 gpointer trampoline;
3508 MonoInst *obj, *tramp_ins;
3509 MonoDomain *domain;
3510 guint8 **code_slot;
3512 if (virtual_ && !cfg->llvm_only) {
3513 MonoMethod *invoke = mono_get_delegate_invoke_internal (klass);
3514 g_assert (invoke);
3516 //FIXME verify & fix any issue with removing invoke_context_used restriction
3517 if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? NULL : method))
3518 return NULL;
3521 obj = handle_alloc (cfg, klass, FALSE, invoke_context_used);
3522 if (!obj)
3523 return NULL;
3525 /* Inline the contents of mono_delegate_ctor */
3527 /* Set target field */
3528 /* Optimize away setting of NULL target */
3529 if (!MONO_INS_IS_PCONST_NULL (target)) {
3530 if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
3531 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
3532 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
3534 if (!mini_debug_options.weak_memory_model)
3535 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
3536 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3537 if (cfg->gen_write_barriers) {
3538 dreg = alloc_preg (cfg);
3539 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
3540 mini_emit_write_barrier (cfg, ptr, target);
3544 /* Set method field */
3545 if (!(target_method_context_used || invoke_context_used) || cfg->llvm_only) {
3546 		// If compiling with gsharing enabled, it's faster to load the method from the delegate trampoline info than to use an rgctx slot
3547 MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD);
3548 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3552 * To avoid looking up the compiled code belonging to the target method
3553 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3554 * store it, and we fill it after the method has been compiled.
3556 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3557 MonoInst *code_slot_ins;
3559 if (target_method_context_used) {
3560 code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3561 } else {
3562 domain = mono_domain_get ();
3563 mono_domain_lock (domain);
3564 if (!domain_jit_info (domain)->method_code_hash)
3565 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3566 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3567 if (!code_slot) {
3568 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
3569 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3571 mono_domain_unlock (domain);
3573 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
3575 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3578 if (cfg->llvm_only) {
3579 if (virtual_) {
3580 MonoInst *args [ ] = {
3581 obj,
3582 target,
3583 emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD)
3585 mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate_virtual, args);
3586 } else {
3587 mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate, &obj);
3590 return obj;
3592 if (target_method_context_used || invoke_context_used) {
3593 tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);
3595 //This is emitted as a constant store for the non-shared case.
3596 //We copy from the delegate trampoline info as it's faster than an rgctx fetch
3597 dreg = alloc_preg (cfg);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method));
3599 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg);
3600 } else if (cfg->compile_aot) {
3601 MonoDelegateClassMethodPair *del_tramp;
3603 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
3604 del_tramp->klass = klass;
3605 del_tramp->method = method;
3606 del_tramp->is_virtual = virtual_;
3607 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
3608 } else {
3609 if (virtual_)
3610 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, method);
3611 else
3612 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, method);
3613 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3616 /* Set invoke_impl field */
3617 if (virtual_) {
3618 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3619 } else {
3620 dreg = alloc_preg (cfg);
3621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
3622 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
3624 dreg = alloc_preg (cfg);
3625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
3626 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
3629 dreg = alloc_preg (cfg);
3630 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
3631 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
3633 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3635 return obj;
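/*
 * Illustrative: a C# expression such as 'new Func<int> (obj.M)' compiles to
 * ldftn/ldvirtftn followed by newobj on the delegate type, and that newobj
 * is lowered through handle_delegate_ctor () above.
 */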
3639 * handle_constrained_gsharedvt_call:
3641 * Handle constrained calls where the receiver is a gsharedvt type.
3642 * Return the instruction representing the call. Set the cfg exception on failure.
3644 static MonoInst*
3645 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
3646 gboolean *ref_emit_widen)
3648 MonoInst *ins = NULL;
3649 gboolean emit_widen = *ref_emit_widen;
3650 gboolean supported;
3653 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
3654 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
3655 * pack the arguments into an array, and do the rest of the work in an icall.
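 * For example (illustrative), 'constrained. T callvirt object::ToString ()'
 * where T is a gsharedvt type ends up calling
 * mono_gsharedvt_constrained_call (mp, cmethod, klass, deref_arg, args).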
3657 supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib));
3658 if (supported)
3659 supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret));
3660 if (supported) {
3661 if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) {
3662 supported = TRUE;
3663 } else {
3664 /* Allow scalar parameters and a gsharedvt first parameter */
3665 supported = MONO_TYPE_IS_PRIMITIVE (fsig->params [0]) || MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]);
3666 if (supported) {
3667 for (int i = 1; i < fsig->param_count; ++i) {
3668 if (!(fsig->params [i]->byref || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])))
3669 supported = FALSE;
3674 if (supported) {
3675 MonoInst *args [16];
3678 * This case handles calls to
3679 * - object:ToString()/Equals()/GetHashCode(),
3680 * - System.IComparable<T>:CompareTo()
3681 * - System.IEquatable<T>:Equals ()
3682 * plus enough simple interface calls to support AsyncTaskMethodBuilder.
3685 args [0] = sp [0];
3686 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
3687 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
3689 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
3690 if (fsig->hasthis && fsig->param_count) {
3691 /* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean deref_arg, gpointer *args) */
3692 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
3693 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
3694 ins->dreg = alloc_preg (cfg);
3695 ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t);
3696 MONO_ADD_INS (cfg->cbb, ins);
3697 args [4] = ins;
3699 /* Only the first argument is allowed to be gsharedvt */
3700 /* args [3] = deref_arg */
3701 if (mini_is_gsharedvt_type (fsig->params [0])) {
3702 int deref_arg_reg;
3703 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3704 deref_arg_reg = alloc_preg (cfg);
3705 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
3706 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
3707 } else {
3708 EMIT_NEW_ICONST (cfg, args [3], 0);
3711 for (int i = 0; i < fsig->param_count; ++i) {
3712 int addr_reg;
3714 if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])) {
3715 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [i + 1]->dreg, fsig->params [i]);
3716 addr_reg = ins->dreg;
3717 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg);
3718 } else {
3719 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
3722 } else {
3723 EMIT_NEW_ICONST (cfg, args [3], 0);
3724 EMIT_NEW_ICONST (cfg, args [4], 0);
3726 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
3727 emit_widen = FALSE;
3729 if (mini_is_gsharedvt_type (fsig->ret)) {
3730 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins);
3731 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) {
3732 MonoInst *add;
3734 /* Unbox */
3735 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject));
3736 MONO_ADD_INS (cfg->cbb, add);
3737 /* Load value */
3738 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
3739 MONO_ADD_INS (cfg->cbb, ins);
3740 /* ins represents the call result */
3742 } else {
3743 GSHAREDVT_FAILURE (CEE_CALLVIRT);
3746 *ref_emit_widen = emit_widen;
3748 return ins;
3750 exception_exit:
3751 return NULL;
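/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by placing an
 * OP_LOAD_GOTADDR at the start of the entry bblock, and add a dummy use in
 * the exit bblock so the variable stays alive for the whole method.
 */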
3754 static void
3755 mono_emit_load_got_addr (MonoCompile *cfg)
3757 MonoInst *getaddr, *dummy_use;
3759 if (!cfg->got_var || cfg->got_var_allocated)
3760 return;
3762 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3763 getaddr->cil_code = cfg->header->code;
3764 getaddr->dreg = cfg->got_var->dreg;
3766 /* Add it to the start of the first bblock */
3767 if (cfg->bb_entry->code) {
3768 getaddr->next = cfg->bb_entry->code;
3769 cfg->bb_entry->code = getaddr;
3771 else
3772 MONO_ADD_INS (cfg->bb_entry, getaddr);
3774 cfg->got_var_allocated = TRUE;
3777 * Add a dummy use to keep the got_var alive, since real uses might
3778 * only be generated by the back ends.
3779 * Add it to end_bblock, so the variable's lifetime covers the whole
3780 * method.
3781 * It would be better to make the usage of the got var explicit in all
3782 * cases when the backend needs it (i.e. calls, throw etc.), so this
3783 * wouldn't be needed.
3785 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3786 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3789 static gboolean
3790 method_does_not_return (MonoMethod *method)
3792 // FIXME: Under netcore, these are decorated with the [DoesNotReturn] attribute
3793 return m_class_get_image (method->klass) == mono_defaults.corlib &&
3794 !strcmp (m_class_get_name (method->klass), "ThrowHelper") &&
3795 strstr (method->name, "Throw") == method->name &&
3796 !method->is_inflated;
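/*
 * Illustrative: this matches e.g. corlib's ThrowHelper.ThrowArgumentNullException (),
 * while inflated generic Throw* helpers are excluded.
 */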
3799 static int inline_limit, llvm_jit_inline_limit, llvm_aot_inline_limit;
3800 static gboolean inline_limit_inited;
3802 static gboolean
3803 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3805 MonoMethodHeaderSummary header;
3806 MonoVTable *vtable;
3807 int limit;
3808 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3809 MonoMethodSignature *sig = mono_method_signature_internal (method);
3810 int i;
3811 #endif
3813 if (cfg->disable_inline)
3814 return FALSE;
3815 if (cfg->gsharedvt)
3816 return FALSE;
3818 if (cfg->inline_depth > 10)
3819 return FALSE;
3821 if (!mono_method_get_header_summary (method, &header))
3822 return FALSE;
3824 /* runtime, icall and pinvoke are checked by the summary call */
3825 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3826 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3827 (mono_class_is_marshalbyref (method->klass)) ||
3828 header.has_clauses)
3829 return FALSE;
3831 if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ)
3832 /* Used to mark methods containing StackCrawlMark locals */
3833 return FALSE;
3835 /* also consider num_locals? */
3836 /* Do the size check early to avoid creating vtables */
3837 if (!inline_limit_inited) {
3838 char *inlinelimit;
3839 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
3840 inline_limit = atoi (inlinelimit);
3841 llvm_jit_inline_limit = inline_limit;
3842 llvm_aot_inline_limit = inline_limit;
3843 g_free (inlinelimit);
3844 } else {
3845 inline_limit = INLINE_LENGTH_LIMIT;
3846 llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT;
3847 llvm_aot_inline_limit = LLVM_AOT_INLINE_LENGTH_LIMIT;
3849 inline_limit_inited = TRUE;
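	/*
	 * Illustrative: running with MONO_INLINELIMIT=40 in the environment
	 * overrides all three limits (JIT, LLVM JIT, LLVM AOT) with 40 IL bytes.
	 */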
3852 #ifdef ENABLE_NETCORE
3853 if (COMPILE_LLVM (cfg)) {
3854 if (cfg->compile_aot)
3855 limit = llvm_aot_inline_limit;
3856 else
3857 limit = llvm_jit_inline_limit;
3858 } else {
3859 limit = inline_limit;
3861 #else
3862 if (COMPILE_LLVM (cfg) && !cfg->compile_aot)
3863 limit = llvm_jit_inline_limit;
3864 else
3865 limit = inline_limit;
3866 #endif
3867 if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
3868 return FALSE;
3871 * If we can initialize the class of the method right away, we do.
3872 * Otherwise we don't allow inlining if the class needs initialization,
3873 * since that would mean inserting a call to mono_runtime_class_init ()
3874 * inside the inlined code.
3876 if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass))
3877 return FALSE;
3879 if (!(cfg->opt & MONO_OPT_SHARED)) {
3880 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
3881 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
3882 if (m_class_has_cctor (method->klass)) {
3883 ERROR_DECL (error);
3884 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3885 if (!is_ok (error)) {
3886 mono_error_cleanup (error);
3887 return FALSE;
3889 if (!cfg->compile_aot) {
3890 if (!mono_runtime_class_init_full (vtable, error)) {
3891 mono_error_cleanup (error);
3892 return FALSE;
3896 } else if (mono_class_is_before_field_init (method->klass)) {
3897 if (cfg->run_cctors && m_class_has_cctor (method->klass)) {
3898 ERROR_DECL (error);
3899 /* FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3900 if (!m_class_get_runtime_info (method->klass))
3901 /* No vtable created yet */
3902 return FALSE;
3903 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3904 if (!is_ok (error)) {
3905 mono_error_cleanup (error);
3906 return FALSE;
3908 /* This makes it so that inlining cannot trigger */
3909 /* .cctors: too many apps depend on them */
3910 /* running in a specific order... */
3911 if (! vtable->initialized)
3912 return FALSE;
3913 if (!mono_runtime_class_init_full (vtable, error)) {
3914 mono_error_cleanup (error);
3915 return FALSE;
3918 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3919 ERROR_DECL (error);
3920 if (!m_class_get_runtime_info (method->klass))
3921 /* No vtable created yet */
3922 return FALSE;
3923 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3924 if (!is_ok (error)) {
3925 mono_error_cleanup (error);
3926 return FALSE;
3928 if (!vtable->initialized)
3929 return FALSE;
3931 } else {
3933 * If we're compiling shared code,
3934 * the cctor will need to be run at AOT method load time, for example,
3935 * or at the end of the compilation of the inlining method.
3937 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
3938 return FALSE;
3941 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3942 if (mono_arch_is_soft_float ()) {
3943 /* FIXME: */
3944 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3945 return FALSE;
3946 for (i = 0; i < sig->param_count; ++i)
3947 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3948 return FALSE;
3950 #endif
3952 if (g_list_find (cfg->dont_inline, method))
3953 return FALSE;
3955 if (mono_profiler_get_call_instrumentation_flags (method))
3956 return FALSE;
3958 if (mono_profiler_coverage_instrumentation_enabled (method))
3959 return FALSE;
3961 if (method_does_not_return (method))
3962 return FALSE;
3964 return TRUE;
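/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return TRUE if a static field access in METHOD requires the cctor of
 * KLASS to be run first, i.e. the JIT must emit a class init check before
 * the access.
 */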
3967 static gboolean
3968 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
3970 if (!cfg->compile_aot) {
3971 g_assert (vtable);
3972 if (vtable->initialized)
3973 return FALSE;
3976 if (mono_class_is_before_field_init (klass)) {
3977 if (cfg->method == method)
3978 return FALSE;
3981 if (!mono_class_needs_cctor_run (klass, method))
3982 return FALSE;
3984 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
3985 /* The initialization is already done before the method is called */
3986 return FALSE;
3988 return TRUE;
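/*
 * mini_emit_sext_index_reg:
 *
 *   Convert an array index to pointer width: sign extend 32 bit indexes on
 * 64 bit targets and truncate i8 indexes on 32 bit ones. Return the vreg
 * holding the converted index.
 */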
3992 mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index)
3994 int index_reg = index->dreg;
3995 int index2_reg;
3997 #if SIZEOF_REGISTER == 8
3998 /* The array reg is 64 bits but the index reg is only 32 */
3999 if (COMPILE_LLVM (cfg)) {
4001 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4002 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4003 * of OP_X86_LEA for llvm.
4005 index2_reg = index_reg;
4006 } else {
4007 index2_reg = alloc_preg (cfg);
4008 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4010 #else
4011 if (index->type == STACK_I8) {
4012 index2_reg = alloc_preg (cfg);
4013 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4014 } else {
4015 index2_reg = index_reg;
4017 #endif
4019 return index2_reg;
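/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX of the rank 1 array ARR, i.e.
 * arr->vector + index * element_size, with an optional bounds check; for
 * gsharedvt element types the element size is fetched from the rgctx.
 */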
4022 MonoInst*
4023 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4025 MonoInst *ins;
4026 guint32 size;
4027 int mult_reg, add_reg, array_reg, index2_reg;
4028 int context_used;
4030 if (mini_is_gsharedvt_variable_klass (klass)) {
4031 size = -1;
4032 } else {
4033 mono_class_init_internal (klass);
4034 size = mono_class_array_element_size (klass);
4037 mult_reg = alloc_preg (cfg);
4038 array_reg = arr->dreg;
4040 index2_reg = mini_emit_sext_index_reg (cfg, index);
4042 if (bcheck)
4043 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4045 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4046 if (size == 1 || size == 2 || size == 4 || size == 8) {
4047 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4049 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4050 ins->klass = klass;
4051 ins->type = STACK_MP;
4053 return ins;
4055 #endif
4057 add_reg = alloc_ireg_mp (cfg);
4059 if (size == -1) {
4060 MonoInst *rgctx_ins;
4062 /* gsharedvt */
4063 g_assert (cfg->gshared);
4064 context_used = mini_class_check_context_used (cfg, klass);
4065 g_assert (context_used);
4066 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4067 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4068 } else {
4069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4071 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4072 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4073 ins->klass = klass;
4074 ins->type = STACK_MP;
4075 MONO_ADD_INS (cfg->cbb, ins);
4077 return ins;
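/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [INDEX1, INDEX2] of a rank 2 array, range
 * checking both indexes against the per-dimension lower bound and length
 * stored in arr->bounds.
 */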
4080 static MonoInst*
4081 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4083 int bounds_reg = alloc_preg (cfg);
4084 int add_reg = alloc_ireg_mp (cfg);
4085 int mult_reg = alloc_preg (cfg);
4086 int mult2_reg = alloc_preg (cfg);
4087 int low1_reg = alloc_preg (cfg);
4088 int low2_reg = alloc_preg (cfg);
4089 int high1_reg = alloc_preg (cfg);
4090 int high2_reg = alloc_preg (cfg);
4091 int realidx1_reg = alloc_preg (cfg);
4092 int realidx2_reg = alloc_preg (cfg);
4093 int sum_reg = alloc_preg (cfg);
4094 int index1, index2;
4095 MonoInst *ins;
4096 guint32 size;
4098 mono_class_init_internal (klass);
4099 size = mono_class_array_element_size (klass);
4101 index1 = index_ins1->dreg;
4102 index2 = index_ins2->dreg;
4104 #if SIZEOF_REGISTER == 8
4105 /* The array reg is 64 bits but the index reg is only 32 */
4106 if (COMPILE_LLVM (cfg)) {
4107 /* Not needed */
4108 } else {
4109 int tmpreg = alloc_preg (cfg);
4110 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4111 index1 = tmpreg;
4112 tmpreg = alloc_preg (cfg);
4113 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4114 index2 = tmpreg;
4116 #else
4117 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4118 #endif
4120 /* range checking */
4121 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4122 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4124 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4125 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4126 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4127 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4128 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4129 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4130 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4132 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4133 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4134 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4135 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4136 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4137 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4138 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4140 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4141 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4143 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4144 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4146 ins->type = STACK_MP;
4147 ins->klass = klass;
4148 MONO_ADD_INS (cfg->cbb, ins);
4150 return ins;
4153 static MonoInst*
4154 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, guchar *ip, gboolean is_set)
4156 int rank;
4157 MonoInst *addr;
4158 MonoMethod *addr_method;
4159 int element_size;
4160 MonoClass *eclass = m_class_get_element_class (cmethod->klass);
4162 rank = mono_method_signature_internal (cmethod)->param_count - (is_set? 1: 0);
4164 if (rank == 1)
4165 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4167 /* emit_ldelema_2 depends on OP_LMUL */
4168 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4169 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4172 if (mini_is_gsharedvt_variable_klass (eclass))
4173 element_size = 0;
4174 else
4175 element_size = mono_class_array_element_size (eclass);
4176 addr_method = mono_marshal_get_array_address (rank, element_size);
4177 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4179 return addr;
4182 static gboolean
4183 mini_class_is_reference (MonoClass *klass)
4185 return mini_type_is_reference (m_class_get_byval_arg (klass));
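/*
 * mini_emit_array_store:
 *
 *   Emit a stelem style store. For reference element types with safety
 * checks enabled this goes through the virtual stelemref helper, which also
 * performs the array covariance check; otherwise the element address is
 * computed and a plain store (plus write barrier if needed) is emitted.
 */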
4188 MonoInst*
4189 mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4191 if (safety_checks && mini_class_is_reference (klass) &&
4192 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4193 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class);
4194 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4195 MonoInst *iargs [3];
4197 if (!helper->slot)
4198 mono_class_setup_vtable (obj_array);
4199 g_assert (helper->slot);
4201 if (sp [0]->type != STACK_OBJ)
4202 return NULL;
4203 if (sp [2]->type != STACK_OBJ)
4204 return NULL;
4206 iargs [2] = sp [2];
4207 iargs [1] = sp [1];
4208 iargs [0] = sp [0];
4210 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4211 } else {
4212 MonoInst *ins;
4214 if (mini_is_gsharedvt_variable_klass (klass)) {
4215 MonoInst *addr;
4217 // FIXME-VT: OP_ICONST optimization
4218 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4219 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
4220 ins->opcode = OP_STOREV_MEMBASE;
4221 } else if (sp [1]->opcode == OP_ICONST) {
4222 int array_reg = sp [0]->dreg;
4223 int index_reg = sp [1]->dreg;
4224 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4226 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg) && sp [1]->inst_c0 < 0)
4227 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4229 if (safety_checks)
4230 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4231 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset, sp [2]->dreg);
4232 } else {
4233 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4234 if (!mini_debug_options.weak_memory_model && mini_class_is_reference (klass))
4235 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
4236 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
4237 if (mini_class_is_reference (klass))
4238 mini_emit_write_barrier (cfg, addr, sp [2]);
4240 return ins;
4244 MonoInst*
4245 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4247 MonoInst *ins = NULL;
4248 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4249 MONO_ADD_INS (cfg->cbb, ins);
4250 ins->backend.memory_barrier_kind = kind;
4252 return ins;
4256 * This entry point could be used later for arbitrary method
4257 * redirection.
4259 inline static MonoInst*
4260 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4261 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
4263 if (method->klass == mono_defaults.string_class) {
4264 /* managed string allocation support */
4265 if (strcmp (method->name, "FastAllocateString") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
4266 MonoInst *iargs [2];
4267 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, cfg->error);
4268 MonoMethod *managed_alloc = NULL;
4270 mono_error_assert_ok (cfg->error); /* Should not fail since it's System.String */
4271 #ifndef MONO_CROSS_COMPILE
4272 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
4273 #endif
4274 if (!managed_alloc)
4275 return NULL;
4276 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4277 iargs [1] = args [0];
4278 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
4281 return NULL;
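/*
 * mono_save_args:
 *
 *   Spill the arguments of a call being inlined into newly created vars, so
 * the inlined body can access them through cfg->args as usual.
 */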
4284 static void
4285 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4287 MonoInst *store, *temp;
4288 int i;
4290 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4291 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4294 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4295 * would be different than the MonoInst's used to represent arguments, and
4296 * the ldelema implementation can't deal with that.
4297 * Solution: When ldelema is used on an inline argument, create a var for
4298 * it, emit ldelema on that var, and emit the saving code below in
4299 * inline_method () if needed.
4301 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4302 cfg->args [i] = temp;
4303 /* This uses cfg->args [i] which is set by the preceding line */
4304 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4305 store->cil_code = sp [0]->cil_code;
4306 sp++;
4310 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4311 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4313 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4314 static gboolean
4315 check_inline_called_method_name_limit (MonoMethod *called_method)
4317 int strncmp_result;
4318 static const char *limit = NULL;
4320 if (limit == NULL) {
4321 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4323 if (limit_string != NULL)
4324 limit = limit_string;
4325 else
4326 limit = "";
4329 if (limit [0] != '\0') {
4330 char *called_method_name = mono_method_full_name (called_method, TRUE);
4332 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4333 g_free (called_method_name);
4335 //return (strncmp_result <= 0);
4336 return (strncmp_result == 0);
4337 } else {
4338 return TRUE;
4341 #endif
4343 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4344 static gboolean
4345 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4347 int strncmp_result;
4348 static const char *limit = NULL;
4350 if (limit == NULL) {
4351 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4352 if (limit_string != NULL) {
4353 limit = limit_string;
4354 } else {
4355 limit = "";
4359 if (limit [0] != '\0') {
4360 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4362 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4363 g_free (caller_method_name);
4365 //return (strncmp_result <= 0);
4366 return (strncmp_result == 0);
4367 } else {
4368 return TRUE;
4371 #endif
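/*
 * Illustrative: both limits match on a full-name prefix, so e.g.
 *
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT="System.Collections" mono app.exe
 *
 * restricts inlining to called methods under System.Collections, which is
 * useful for bisecting inlining bugs.
 */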
4373 static void
4374 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
4376 static double r8_0 = 0.0;
4377 static float r4_0 = 0.0;
4378 MonoInst *ins;
4379 int t;
4381 rtype = mini_get_underlying_type (rtype);
4382 t = rtype->type;
4384 if (rtype->byref) {
4385 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
4386 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
4387 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4388 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
4389 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
4390 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
4391 MONO_INST_NEW (cfg, ins, OP_R4CONST);
4392 ins->type = STACK_R4;
4393 ins->inst_p0 = (void*)&r4_0;
4394 ins->dreg = dreg;
4395 MONO_ADD_INS (cfg->cbb, ins);
4396 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
4397 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4398 ins->type = STACK_R8;
4399 ins->inst_p0 = (void*)&r8_0;
4400 ins->dreg = dreg;
4401 MONO_ADD_INS (cfg->cbb, ins);
4402 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
4403 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
4404 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
4405 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
4406 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
4407 } else {
4408 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
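/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* opcodes, which keep the IR
 * valid without generating any real initialization code.
 */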
4412 static void
4413 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
4415 int t;
4417 rtype = mini_get_underlying_type (rtype);
4418 t = rtype->type;
4420 if (rtype->byref) {
4421 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
4422 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
4423 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
4424 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
4425 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
4426 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
4427 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
4428 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
4429 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
4430 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
4431 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
4432 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
4433 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
4434 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
4435 } else {
4436 emit_init_rvar (cfg, dreg, rtype);
4440 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
4441 static void
4442 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
4444 MonoInst *var = cfg->locals [local];
4445 if (COMPILE_SOFT_FLOAT (cfg)) {
4446 MonoInst *store;
4447 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
4448 emit_init_rvar (cfg, reg, type);
4449 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
4450 } else {
4451 if (init)
4452 emit_init_rvar (cfg, var->dreg, type);
4453 else
4454 emit_dummy_init_rvar (cfg, var->dreg, type);
4459 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
4461 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
4465 * inline_method:
4467 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
4469 static int
4470 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4471 guchar *ip, guint real_offset, gboolean inline_always)
4473 ERROR_DECL (error);
4474 MonoInst *ins, *rvar = NULL;
4475 MonoMethodHeader *cheader;
4476 MonoBasicBlock *ebblock, *sbblock;
4477 int i, costs;
4478 MonoInst **prev_locals, **prev_args;
4479 MonoType **prev_arg_types;
4480 guint prev_real_offset;
4481 GHashTable *prev_cbb_hash;
4482 MonoBasicBlock **prev_cil_offset_to_bb;
4483 MonoBasicBlock *prev_cbb;
4484 const guchar *prev_ip;
4485 guchar *prev_cil_start;
4486 guint32 prev_cil_offset_to_bb_len;
4487 MonoMethod *prev_current_method;
4488 MonoGenericContext *prev_generic_context;
4489 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
4491 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4493 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4494 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4495 return 0;
4496 #endif
4497 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4498 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4499 return 0;
4500 #endif
4502 if (!fsig)
4503 fsig = mono_method_signature_internal (cmethod);
4505 if (cfg->verbose_level > 2)
4506 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4508 if (!cmethod->inline_info) {
4509 cfg->stat_inlineable_methods++;
4510 cmethod->inline_info = 1;
4513 /* allocate local variables */
4514 cheader = mono_method_get_header_checked (cmethod, error);
4515 if (!cheader) {
4516 if (inline_always) {
4517 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
4518 mono_error_move (cfg->error, error);
4519 } else {
4520 mono_error_cleanup (error);
4522 return 0;
4525 /*Must verify before creating locals as it can cause the JIT to assert.*/
4526 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4527 mono_metadata_free_mh (cheader);
4528 return 0;
4531 /* allocate space to store the return value */
4532 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4533 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4536 prev_locals = cfg->locals;
4537 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4538 for (i = 0; i < cheader->num_locals; ++i)
4539 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4541 /* allocate start and end blocks */
4542 /* This is needed so if the inline is aborted, we can clean up */
4543 NEW_BBLOCK (cfg, sbblock);
4544 sbblock->real_offset = real_offset;
4546 NEW_BBLOCK (cfg, ebblock);
4547 ebblock->block_num = cfg->num_bblocks++;
4548 ebblock->real_offset = real_offset;
4550 prev_args = cfg->args;
4551 prev_arg_types = cfg->arg_types;
4552 prev_ret_var_set = cfg->ret_var_set;
4553 prev_real_offset = cfg->real_offset;
4554 prev_cbb_hash = cfg->cbb_hash;
4555 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4556 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4557 prev_cil_start = cfg->cil_start;
4558 prev_ip = cfg->ip;
4559 prev_cbb = cfg->cbb;
4560 prev_current_method = cfg->current_method;
4561 prev_generic_context = cfg->generic_context;
4562 prev_disable_inline = cfg->disable_inline;
4564 cfg->ret_var_set = FALSE;
4565 cfg->inline_depth ++;
4567 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
4568 virtual_ = TRUE;
4570 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
4572 ret_var_set = cfg->ret_var_set;
4574 cfg->real_offset = prev_real_offset;
4575 cfg->cbb_hash = prev_cbb_hash;
4576 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4577 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4578 cfg->cil_start = prev_cil_start;
4579 cfg->ip = prev_ip;
4580 cfg->locals = prev_locals;
4581 cfg->args = prev_args;
4582 cfg->arg_types = prev_arg_types;
4583 cfg->current_method = prev_current_method;
4584 cfg->generic_context = prev_generic_context;
4585 cfg->ret_var_set = prev_ret_var_set;
4586 cfg->disable_inline = prev_disable_inline;
4587 cfg->inline_depth --;
4589 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
4590 if (cfg->verbose_level > 2)
4591 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4593 mono_error_assert_ok (cfg->error);
4595 cfg->stat_inlined_methods++;
4597 /* always add some code to avoid block split failures */
4598 MONO_INST_NEW (cfg, ins, OP_NOP);
4599 MONO_ADD_INS (prev_cbb, ins);
4601 prev_cbb->next_bb = sbblock;
4602 link_bblock (cfg, prev_cbb, sbblock);
4605 * Get rid of the begin and end bblocks if possible to aid local
4606 * optimizations.
4608 if (prev_cbb->out_count == 1)
4609 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4611 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4612 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4614 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4615 MonoBasicBlock *prev = ebblock->in_bb [0];
4617 if (prev->next_bb == ebblock) {
4618 mono_merge_basic_blocks (cfg, prev, ebblock);
4619 cfg->cbb = prev;
4620 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4621 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4622 cfg->cbb = prev_cbb;
4624 } else {
4625 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
4626 cfg->cbb = ebblock;
4628 } else {
4630 * It's possible that the rvar is set in some prev bblock, but not in others.
4631 * (#1835).
4633 if (rvar) {
4634 MonoBasicBlock *bb;
4636 for (i = 0; i < ebblock->in_count; ++i) {
4637 bb = ebblock->in_bb [i];
4639 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
4640 cfg->cbb = bb;
4642 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
4647 cfg->cbb = ebblock;
4650 if (rvar) {
4652 * If the inlined method contains only a throw, then the ret var is not
4653 * set, so set it to a dummy value.
4655 if (!ret_var_set)
4656 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
4658 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4659 *sp++ = ins;
4661 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4662 return costs + 1;
4663 } else {
4664 if (cfg->verbose_level > 2) {
4665 const char *msg = mono_error_get_message (cfg->error);
4666 printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod, TRUE), costs, msg ? msg : "");
4668 cfg->exception_type = MONO_EXCEPTION_NONE;
4670 clear_cfg_error (cfg);
4672 /* This gets rid of the newly added bblocks */
4673 cfg->cbb = prev_cbb;
4675 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4676 return 0;
4680 * Some of these comments may well be out-of-date.
4681 * Design decisions: we do a single pass over the IL code (and we do bblock
4682 * splitting/merging in the few cases when it's required: a back jump to an IL
4683 * address that was not already seen as bblock starting point).
4684 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4685 * Complex operations are decomposed into simpler ones right away. We need to let the
4686 * arch-specific code peek and poke inside this process somehow (except when the
4687 * optimizations can take advantage of the full semantic info of coarse opcodes).
4688 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4689 * MonoInst->opcode initially is the IL opcode or some simplification of that
4690 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4691 * opcode with value bigger than OP_LAST.
4692 * At this point the IR can be handed over to an interpreter, a dumb code generator
4693 * or to the optimizing code generator that will translate it to SSA form.
4695 * Profile-directed optimizations.
4696 * We may compile by default with few or no optimizations and instrument the code,
4697 * or the user may indicate which methods to optimize the most, either in a config file
4698 * or through repeated runs where the compiler applies the optimizations offline to
4699 * each method and then decides if it was worth it.
4702 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4703 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4704 #define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED
4705 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4706 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4707 #define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED
4708 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4709 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
4711 /* opcode offset from the br.s forms to the corresponding br forms */
4712 #define BIG_BRANCH_OFFSET 13
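/* Return whether the instruction at IP belongs to BB, or has not been assigned to any bblock yet. */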
4714 static gboolean
4715 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4717 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4719 return b == NULL || b == bb;
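/*
 * get_basic_blocks:
 *
 *   Walk the IL stream once, creating a bblock at every branch target, and
 * mark the bblock containing a throw as out of line. Return 0 on success,
 * 1 on invalid IL.
 */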
4722 static int
4723 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, guchar *start, guchar *end, guchar **pos)
4725 guchar *ip = start;
4726 guchar *target;
4727 int i;
4728 guint cli_addr;
4729 MonoBasicBlock *bblock;
4730 const MonoOpcode *opcode;
4732 while (ip < end) {
4733 cli_addr = ip - start;
4734 i = mono_opcode_value ((const guint8 **)&ip, end);
4735 if (i < 0)
4736 UNVERIFIED;
4737 opcode = &mono_opcodes [i];
4738 switch (opcode->argument) {
4739 case MonoInlineNone:
4740 ip++;
4741 break;
4742 case MonoInlineString:
4743 case MonoInlineType:
4744 case MonoInlineField:
4745 case MonoInlineMethod:
4746 case MonoInlineTok:
4747 case MonoInlineSig:
4748 case MonoShortInlineR:
4749 case MonoInlineI:
4750 ip += 5;
4751 break;
4752 case MonoInlineVar:
4753 ip += 3;
4754 break;
4755 case MonoShortInlineVar:
4756 case MonoShortInlineI:
4757 ip += 2;
4758 break;
4759 case MonoShortInlineBrTarget:
4760 target = start + cli_addr + 2 + (signed char)ip [1];
4761 GET_BBLOCK (cfg, bblock, target);
4762 ip += 2;
4763 if (ip < end)
4764 GET_BBLOCK (cfg, bblock, ip);
4765 break;
4766 case MonoInlineBrTarget:
4767 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4768 GET_BBLOCK (cfg, bblock, target);
4769 ip += 5;
4770 if (ip < end)
4771 GET_BBLOCK (cfg, bblock, ip);
4772 break;
4773 case MonoInlineSwitch: {
4774 guint32 n = read32 (ip + 1);
4775 guint32 j;
4776 ip += 5;
4777 cli_addr += 5 + 4 * n;
4778 target = start + cli_addr;
4779 GET_BBLOCK (cfg, bblock, target);
4781 for (j = 0; j < n; ++j) {
4782 target = start + cli_addr + (gint32)read32 (ip);
4783 GET_BBLOCK (cfg, bblock, target);
4784 ip += 4;
4786 break;
4788 case MonoInlineR:
4789 case MonoInlineI8:
4790 ip += 9;
4791 break;
4792 default:
4793 g_assert_not_reached ();
4796 if (i == CEE_THROW) {
4797 guchar *bb_start = ip - 1;
4799 /* Find the start of the bblock containing the throw */
4800 bblock = NULL;
4801 while ((bb_start >= start) && !bblock) {
4802 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4803 bb_start --;
4805 if (bblock)
4806 bblock->out_of_line = 1;
4809 return 0;
4810 unverified:
4811 exception_exit:
4812 *pos = ip;
4813 return 1;
4816 static MonoMethod *
4817 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
4819 MonoMethod *method;
4821 error_init (error);
4823 if (m->wrapper_type != MONO_WRAPPER_NONE) {
4824 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
4825 if (context) {
4826 method = mono_class_inflate_generic_method_checked (method, context, error);
4828 } else {
4829 method = mono_get_method_checked (m_class_get_image (m->klass), token, klass, context, error);
4832 return method;
4835 static MonoMethod *
4836 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4838 ERROR_DECL (error);
4839 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? cfg->error : error);
4841 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) {
4842 mono_error_set_bad_image (cfg->error, m_class_get_image (cfg->method->klass), "Method with open type while not compiling gshared");
4843 method = NULL;
4846 if (!method && !cfg)
4847 mono_error_cleanup (error); /* FIXME don't swallow the error */
4849 return method;
4852 static MonoMethodSignature*
4853 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
4855 MonoMethodSignature *fsig;
4857 error_init (error);
4858 if (method->wrapper_type != MONO_WRAPPER_NONE) {
4859 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
4860 } else {
4861 fsig = mono_metadata_parse_signature_checked (m_class_get_image (method->klass), token, error);
4862 return_val_if_nok (error, NULL);
4864 if (context) {
4865 fsig = mono_inflate_generic_signature(fsig, context, error);
4867 return fsig;
4870 static MonoMethod*
4871 throw_exception (void)
4873 static MonoMethod *method = NULL;
4875 if (!method) {
4876 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4877 method = get_method_nofail (secman->securitymanager, "ThrowException", 1, 0);
4879 g_assert (method);
4880 return method;
4883 static void
4884 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4886 MonoMethod *thrower = throw_exception ();
4887 MonoInst *args [1];
4889 EMIT_NEW_PCONST (cfg, args [0], ex);
4890 mono_emit_method_call (cfg, thrower, args, NULL);
4894 * Return the original method if a wrapper is specified. We can only access
4895 * the custom attributes from the original method.
4897 static MonoMethod*
4898 get_original_method (MonoMethod *method)
4900 if (method->wrapper_type == MONO_WRAPPER_NONE)
4901 return method;
4903 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4904 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4905 return NULL;
4907 /* in other cases we need to find the original method */
4908 return mono_marshal_method_from_wrapper (method);
4911 static void
4912 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
4914 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4915 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4916 if (ex)
4917 emit_throw_exception (cfg, ex);
4920 static void
4921 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4923 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4924 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4925 if (ex)
4926 emit_throw_exception (cfg, ex);
4929 static guchar*
4930 il_read_op (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op)
4931 // If ip is desired_il_op, return the next ip, else NULL.
4933 if (G_LIKELY (ip < end) && G_UNLIKELY (*ip == first_byte)) {
4934 MonoOpcodeEnum il_op = MonoOpcodeEnum_Invalid;
4935 // mono_opcode_value_and_size updates ip, but not in the expected way.
4936 const guchar *temp_ip = ip;
4937 const int size = mono_opcode_value_and_size (&temp_ip, end, &il_op);
4938 return (G_LIKELY (size > 0) && G_UNLIKELY (il_op == desired_il_op)) ? (ip + size) : NULL;
4940 return NULL;
4943 static guchar*
4944 il_read_op_and_token (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, guint32 *token)
4946 ip = il_read_op (ip, end, first_byte, desired_il_op);
4947 if (ip)
4948 *token = read32 (ip - 4); // could be +1 or +2 from start
4949 return ip;
4952 static guchar*
4953 il_read_branch_and_target (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, int size, guchar **target)
4955 ip = il_read_op (ip, end, first_byte, desired_il_op);
4956 if (ip) {
4957 gint32 delta = 0;
4958 switch (size) {
4959 case 1:
4960 delta = (signed char)ip [-1];
4961 break;
4962 case 4:
4963 delta = (gint32)read32 (ip - 4);
4964 break;
4966 // FIXME verify it is within the function and start of an instruction.
4967 *target = ip + delta;
4968 return ip;
4970 return NULL;
4973 #define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target))
4974 #define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target))
4975 #define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target))
4976 #define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target))
4977 #define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP))
4978 #define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token))
4979 #define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token))
4980 #define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token))
4981 #define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token))
4982 #define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token))
4983 #define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token))
4984 #define il_read_unbox_any(ip, end, token) (il_read_op_and_token (ip, end, CEE_UNBOX_ANY, MONO_CEE_UNBOX_ANY, token))
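/*
 * The il_read_* helpers return the ip past the matched instruction, or NULL
 * on a mismatch, so instruction sequences can be matched by chaining them;
 * see initialize_array_data () below for a typical use.
 */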
4987 * Check that the IL instructions at ip are the array initialization
4988 * sequence and return the pointer to the data and the size.
4990 static const char*
4991 initialize_array_data (MonoCompile *cfg, MonoMethod *method, gboolean aot, guchar *ip,
4992 guchar *end, MonoClass *klass, guint32 len, int *out_size,
4993 guint32 *out_field_token, MonoOpcodeEnum *il_op, guchar **next_ip)
4996 * newarr[System.Int32]
4997 * dup
4998 * ldtoken field valuetype ...
4999 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5002 guint32 token;
5003 guint32 field_token;
5005 if ((ip = il_read_dup (ip, end))
5006 && ip_in_bb (cfg, cfg->cbb, ip)
5007 && (ip = il_read_ldtoken (ip, end, &field_token))
5008 && IS_FIELD_DEF (field_token)
5009 && ip_in_bb (cfg, cfg->cbb, ip)
5010 && (ip = il_read_call (ip, end, &token))) {
5011 ERROR_DECL (error);
5012 guint32 rva;
5013 const char *data_ptr;
5014 int size = 0;
5015 MonoMethod *cmethod;
5016 MonoClass *dummy_class;
5017 MonoClassField *field = mono_field_from_token_checked (m_class_get_image (method->klass), field_token, &dummy_class, NULL, error);
5018 int dummy_align;
5020 if (!field) {
5021 mono_error_cleanup (error); /* FIXME don't swallow the error */
5022 return NULL;
5025 *out_field_token = field_token;
5027 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
5028 if (!cmethod)
5029 return NULL;
5030 if (strcmp (cmethod->name, "InitializeArray") || strcmp (m_class_get_name (cmethod->klass), "RuntimeHelpers") || m_class_get_image (cmethod->klass) != mono_defaults.corlib)
5031 return NULL;
5032 switch (mini_get_underlying_type (m_class_get_byval_arg (klass))->type) {
5033 case MONO_TYPE_I1:
5034 case MONO_TYPE_U1:
5035 size = 1; break;
5036 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5037 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5038 case MONO_TYPE_I2:
5039 case MONO_TYPE_U2:
5040 size = 2; break;
5041 case MONO_TYPE_I4:
5042 case MONO_TYPE_U4:
5043 case MONO_TYPE_R4:
5044 size = 4; break;
5045 case MONO_TYPE_R8:
5046 case MONO_TYPE_I8:
5047 case MONO_TYPE_U8:
5048 size = 8; break;
5049 #endif
5050 default:
5051 return NULL;
5053 size *= len;
5054 if (size > mono_type_size (field->type, &dummy_align))
5055 return NULL;
5056 *out_size = size;
5057 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5058 MonoImage *method_klass_image = m_class_get_image (method->klass);
5059 if (!image_is_dynamic (method_klass_image)) {
5060 guint32 field_index = mono_metadata_token_index (field_token);
5061 mono_metadata_field_info (method_klass_image, field_index - 1, NULL, &rva, NULL);
5062 data_ptr = mono_image_rva_map (method_klass_image, rva);
5063 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5064 /* for aot code we do the lookup on load */
5065 if (aot && data_ptr)
5066 data_ptr = (const char *)GUINT_TO_POINTER (rva);
5067 } else {
5068 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5069 g_assert (!aot);
5070 data_ptr = mono_field_get_data (field);
5072 if (!data_ptr)
5073 return NULL;
5074 *il_op = MONO_CEE_CALL;
5075 *next_ip = ip;
5076 return data_ptr;
5078 return NULL;
5081 static void
5082 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, guchar *ip)
5084 ERROR_DECL (error);
5085 char *method_fname = mono_method_full_name (method, TRUE);
5086 char *method_code;
5087 MonoMethodHeader *header = mono_method_get_header_checked (method, error);
5089 if (!header) {
5090 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error));
5091 mono_error_cleanup (error);
5092 } else if (header->code_size == 0)
5093 method_code = g_strdup ("method body is empty.");
5094 else
5095 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5096 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
5097 g_free (method_fname);
5098 g_free (method_code);
5099 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
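/*
 * mono_type_to_stloc_coerce:
 *
 *   Return the OP_ICONV_TO_* opcode needed to narrow a stack value before
 * storing it into a local or argument of TYPE, or 0 if no coercion is
 * needed.
 */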
5102 guint32
5103 mono_type_to_stloc_coerce (MonoType *type)
5105 if (type->byref)
5106 return 0;
5108 type = mini_get_underlying_type (type);
5109 handle_enum:
5110 switch (type->type) {
5111 case MONO_TYPE_I1:
5112 return OP_ICONV_TO_I1;
5113 case MONO_TYPE_U1:
5114 return OP_ICONV_TO_U1;
5115 case MONO_TYPE_I2:
5116 return OP_ICONV_TO_I2;
5117 case MONO_TYPE_U2:
5118 return OP_ICONV_TO_U2;
5119 case MONO_TYPE_I4:
5120 case MONO_TYPE_U4:
5121 case MONO_TYPE_I:
5122 case MONO_TYPE_U:
5123 case MONO_TYPE_PTR:
5124 case MONO_TYPE_FNPTR:
5125 case MONO_TYPE_CLASS:
5126 case MONO_TYPE_STRING:
5127 case MONO_TYPE_OBJECT:
5128 case MONO_TYPE_SZARRAY:
5129 case MONO_TYPE_ARRAY:
5130 case MONO_TYPE_I8:
5131 case MONO_TYPE_U8:
5132 case MONO_TYPE_R4:
5133 case MONO_TYPE_R8:
5134 case MONO_TYPE_TYPEDBYREF:
5135 case MONO_TYPE_GENERICINST:
5136 return 0;
5137 case MONO_TYPE_VALUETYPE:
5138 if (m_class_is_enumtype (type->data.klass)) {
5139 type = mono_class_enum_basetype_internal (type->data.klass);
5140 goto handle_enum;
5142 return 0;
5143 case MONO_TYPE_VAR:
5144 case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be a match and, for example, u1 is not covariant to u32
5145 return 0;
5146 default:
5147 g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
5149 return -1;
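/*
 * emit_stloc_ir:
 *
 *   Emit a store into local N, first applying the narrowing conversion
 * required when the local's type is smaller than the evaluation stack slot
 * (see mono_type_to_stloc_coerce ()), and optimizing away reg-reg moves of
 * constants.
 */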
5152 static void
5153 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5155 MonoInst *ins;
5156 guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
5158 if (coerce_op) {
5159 if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
5160 if (cfg->verbose_level > 2)
5161 printf ("Found existing coercion is enough for stloc\n");
5162 } else {
5163 MONO_INST_NEW (cfg, ins, coerce_op);
5164 ins->dreg = alloc_ireg (cfg);
5165 ins->sreg1 = sp [0]->dreg;
5166 ins->type = STACK_I4;
5167 ins->klass = mono_class_from_mono_type_internal (header->locals [n]);
5168 MONO_ADD_INS (cfg->cbb, ins);
5169 *sp = mono_decompose_opcode (cfg, ins);
5174 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5175 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5176 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5177 /* Optimize reg-reg moves away */
5179 * Can't optimize other opcodes, since sp[0] might point to
5180 * the last ins of a decomposed opcode.
5182 sp [0]->dreg = (cfg)->locals [n]->dreg;
5183 } else {
5184 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5188 static void
5189 emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
5191 MonoInst *ins;
5192 guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
5194 if (coerce_op) {
5195 if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
5196 if (cfg->verbose_level > 2)
5197 printf ("Found existing coercion is enough for starg\n");
5198 } else {
5199 MONO_INST_NEW (cfg, ins, coerce_op);
5200 ins->dreg = alloc_ireg (cfg);
5201 ins->sreg1 = sp [0]->dreg;
5202 ins->type = STACK_I4;
5203 ins->klass = mono_class_from_mono_type_internal (cfg->arg_types [n]);
5204 MONO_ADD_INS (cfg->cbb, ins);
5205 *sp = mono_decompose_opcode (cfg, ins);
5209 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5213 * ldloca inhibits many optimizations so try to get rid of it in common
5214 * cases.
5216 static guchar *
5217 emit_optimized_ldloca_ir (MonoCompile *cfg, guchar *ip, guchar *end, int local)
5219 guint32 token;
5220 MonoClass *klass;
5221 MonoType *type;
5223 guchar *start = ip;
5225 if ((ip = il_read_initobj (ip, end, &token)) && ip_in_bb (cfg, cfg->cbb, start + 1)) {
5226 /* From the INITOBJ case */
5227 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5228 CHECK_TYPELOAD (klass);
5229 type = mini_get_underlying_type (m_class_get_byval_arg (klass));
5230 emit_init_local (cfg, local, type, TRUE);
5231 return ip;
5233 exception_exit:
5234 return NULL;
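/*
 * Illustrative sketch (hypothetical IL): the pattern targeted here is the
 * ldloca + initobj sequence compilers typically emit to zero a struct local:
 *
 *     ldloca.s 0
 *     initobj  SomeStruct
 *
 * Rewriting it as a direct initialization of the local avoids exposing the
 * local's address, which would otherwise inhibit later optimizations.
 */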
5237 static MonoInst*
5238 handle_call_res_devirt (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *call_res)
5241 * Devirt EqualityComparer.Default.Equals () calls for some types.
5242 * The corefx code expects these calls to be devirtualized.
5243 * This depends on the implementation of EqualityComparer.Default, which is
5244 * in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs
5246 if (m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
5247 !strcmp (m_class_get_name (cmethod->klass), "EqualityComparer`1") &&
5248 !strcmp (cmethod->name, "get_Default")) {
5249 MonoType *param_type = mono_class_get_generic_class (cmethod->klass)->context.class_inst->type_argv [0];
5250 MonoClass *inst;
5251 MonoGenericContext ctx;
5252 MonoType *args [16];
5253 ERROR_DECL (error);
5255 memset (&ctx, 0, sizeof (ctx));
5257 args [0] = param_type;
5258 ctx.class_inst = mono_metadata_get_generic_inst (1, args);
5260 inst = mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx, error);
5261 mono_error_assert_ok (error);
5263 /* EqualityComparer<T>.Default returns specific types depending on T */
5264 // FIXME: Add more
5265 /* 1. Implements IEquatable<T> */
5267 * Can't use this for string/byte as it might use a different comparer:
5269 * // Specialize type byte for performance reasons
5270 * if (t == typeof(byte)) {
5271 * return (EqualityComparer<T>)(object)(new ByteEqualityComparer());
5273 * #if MOBILE
5274 * // Breaks .net serialization compatibility
5275 * if (t == typeof (string))
5276 * return (EqualityComparer<T>)(object)new InternalStringComparer ();
5277 * #endif
5279 if (mono_class_is_assignable_from_internal (inst, mono_class_from_mono_type_internal (param_type)) && param_type->type != MONO_TYPE_U1 && param_type->type != MONO_TYPE_STRING) {
5280 MonoInst *typed_objref;
5281 MonoClass *gcomparer_inst;
5283 memset (&ctx, 0, sizeof (ctx));
5285 args [0] = param_type;
5286 ctx.class_inst = mono_metadata_get_generic_inst (1, args);
5288 MonoClass *gcomparer = mono_class_get_geqcomparer_class ();
5289 g_assert (gcomparer);
5290 gcomparer_inst = mono_class_inflate_generic_class_checked (gcomparer, &ctx, error);
5291 mono_error_assert_ok (error);
5293 MONO_INST_NEW (cfg, typed_objref, OP_TYPED_OBJREF);
5294 typed_objref->type = STACK_OBJ;
5295 typed_objref->dreg = alloc_ireg_ref (cfg);
5296 typed_objref->sreg1 = call_res->dreg;
5297 typed_objref->klass = gcomparer_inst;
5298 MONO_ADD_INS (cfg->cbb, typed_objref);
5300 call_res = typed_objref;
5302 /* Force decompose */
5303 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
5304 cfg->cbb->needs_decompose = TRUE;
5308 return call_res;
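/*
 * Illustrative sketch: for a call such as EqualityComparer<int>.Default, the
 * check above applies (int implements IEquatable<int> and is neither byte nor
 * string), so the result is tagged with the inflated generic equality comparer
 * class, letting later passes devirtualize a following .Equals () call instead
 * of dispatching through the vtable.
 */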
5311 static gboolean
5312 is_exception_class (MonoClass *klass)
5314 if (G_LIKELY (m_class_get_supertypes (klass)))
5315 return mono_class_has_parent_fast (klass, mono_defaults.exception_class);
5316 while (klass) {
5317 if (klass == mono_defaults.exception_class)
5318 return TRUE;
5319 klass = m_class_get_parent (klass);
5321 return FALSE;
5325 * is_jit_optimizer_disabled:
5327 * Determine whether M's assembly has a DebuggableAttribute with the
5328 * IsJITOptimizerDisabled flag set.
5330 static gboolean
5331 is_jit_optimizer_disabled (MonoMethod *m)
5333 ERROR_DECL (error);
5334 MonoAssembly *ass = m_class_get_image (m->klass)->assembly;
5335 MonoCustomAttrInfo* attrs;
5336 MonoClass *klass;
5337 int i;
5338 gboolean val = FALSE;
5340 g_assert (ass);
5341 if (ass->jit_optimizer_disabled_inited)
5342 return ass->jit_optimizer_disabled;
5344 klass = mono_class_try_get_debuggable_attribute_class ();
5346 if (!klass) {
5347 /* Linked away */
5348 ass->jit_optimizer_disabled = FALSE;
5349 mono_memory_barrier ();
5350 ass->jit_optimizer_disabled_inited = TRUE;
5351 return FALSE;
5354 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, error);
5355 mono_error_cleanup (error); /* FIXME don't swallow the error */
5356 if (attrs) {
5357 for (i = 0; i < attrs->num_attrs; ++i) {
5358 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5359 const gchar *p;
5360 MonoMethodSignature *sig;
5362 if (!attr->ctor || attr->ctor->klass != klass)
5363 continue;
5364 /* Decode the attribute. See reflection.c */
5365 p = (const char*)attr->data;
5366 g_assert (read16 (p) == 0x0001);
5367 p += 2;
5369 // FIXME: Support named parameters
5370 sig = mono_method_signature_internal (attr->ctor);
5371 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5372 continue;
5373 /* Two boolean arguments */
5374 p ++;
5375 val = *p;
5377 mono_custom_attrs_free (attrs);
5380 ass->jit_optimizer_disabled = val;
5381 mono_memory_barrier ();
5382 ass->jit_optimizer_disabled_inited = TRUE;
5384 return val;
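/*
 * Illustrative sketch (hypothetical C#): the attribute decoded above is
 * roughly
 *
 *     [assembly: Debuggable (false, true)]  // second argument: IsJITOptimizerDisabled
 *
 * The custom attribute blob begins with the 0x0001 prolog, followed by the
 * two boolean ctor arguments; the code reads the second one.
 */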
5387 gboolean
5388 mono_is_supported_tailcall_helper (gboolean value, const char *svalue)
5390 if (!value)
5391 mono_tailcall_print ("%s %s\n", __func__, svalue);
5392 return value;
5395 static gboolean
5396 mono_is_not_supported_tailcall_helper (gboolean value, const char *svalue, MonoMethod *method, MonoMethod *cmethod)
5398 // Return value, printing if it inhibits tailcall.
5400 if (value && mono_tailcall_print_enabled ()) {
5401 const char *lparen = strchr (svalue, ' ') ? "(" : "";
5402 const char *rparen = *lparen ? ")" : "";
5403 mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__, method->name, cmethod->name, lparen, svalue, rparen, value);
5405 return value;
5408 #define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod))
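/*
 * Illustrative note: because the macro stringizes its argument, a failing
 * check logs the exact condition that blocked the tailcall, e.g. a trace
 * line ending in something like "fsig->pinvoke:1" (condition text and value,
 * per the helper's format string above).
 */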
5410 static gboolean
5411 is_supported_tailcall (MonoCompile *cfg, const guint8 *ip, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig,
5412 gboolean virtual_, gboolean extra_arg, gboolean *ptailcall_calli)
5414 // Some checks apply to "regular", some to "calli", some to both.
5415 // To ease burden on caller, always compute regular and calli.
5417 gboolean tailcall = TRUE;
5418 gboolean tailcall_calli = TRUE;
5420 if (IS_NOT_SUPPORTED_TAILCALL (virtual_ && !cfg->backend->have_op_tailcall_membase))
5421 tailcall = FALSE;
5423 if (IS_NOT_SUPPORTED_TAILCALL (!cfg->backend->have_op_tailcall_reg))
5424 tailcall_calli = FALSE;
5426 if (!tailcall && !tailcall_calli)
5427 goto exit;
5429 // FIXME in calli, there is no type for the this parameter,
5430 // so we assume it might be a valuetype; in future we should issue a range
5431 // check to rule out pointing into the frame (for other reference parameters also)
5433 if ( IS_NOT_SUPPORTED_TAILCALL (cmethod && fsig->hasthis && m_class_is_valuetype (cmethod->klass)) // This might point to the current method's stack. Emit range check?
5434 || IS_NOT_SUPPORTED_TAILCALL (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
5435 || IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) // i.e. if !cmethod (calli)
5436 || IS_NOT_SUPPORTED_TAILCALL (cfg->method->save_lmf)
5437 || IS_NOT_SUPPORTED_TAILCALL (!cmethod && fsig->hasthis) // FIXME could be valuetype to current frame; range check
5438 || IS_NOT_SUPPORTED_TAILCALL (cmethod && cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5440 // http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/
5442 // 1. Non-generic non-static methods of reference types have access to the
5443 // RGCTX via the “this” argument (this->vtable->rgctx).
5444 // 2. a. Non-generic static methods of reference types and b. non-generic methods
5445 // of value types need to be passed a pointer to the caller’s class’s VTable in the MONO_ARCH_RGCTX_REG register.
5446 // 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register
5448 // That is what vtable_arg is here (always?).
5450 // Passing vtable_arg uses (requires?) a volatile non-parameter register,
5451 // such as AMD64 rax, r10, r11, or the return register on many architectures.
5452 // ARM32 does not always clearly have such a register. ARM32's return register
5453 // is a parameter register.
5454 // iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly
5455 // important. Linux/arm32 is less clear.
5456 // ARM32's scratch r12 might work but only with much collateral change.
5458 // Imagine F1 calls F2, and F2 tailcalls F3.
5459 // F2 and F3 are managed. F1 is native.
5460 // Without a tailcall, F2 can save and restore everything needed for F1.
5461 // However if the extra parameter were in a non-volatile, such as ARM32 V5/R8,
5462 // F3 cannot easily restore it for F1, in the current scheme: the extra
5463 // parameter is not merely an extra parameter, but is passed
5464 // "outside of the ABI".
5466 // If all native to managed transitions are intercepted and wrapped (w/o tailcall),
5467 // then they can preserve this register and the rest of the managed callgraph
5468 // can treat it as volatile.
5470 // Interface method dispatch has the same problem (imt_arg).
5472 || IS_NOT_SUPPORTED_TAILCALL (extra_arg && !cfg->backend->have_volatile_non_param_register)
5473 || IS_NOT_SUPPORTED_TAILCALL (cfg->gsharedvt)
5475 tailcall_calli = FALSE;
5476 tailcall = FALSE;
5477 goto exit;
5480 for (int i = 0; i < fsig->param_count; ++i) {
5481 if (IS_NOT_SUPPORTED_TAILCALL (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)) {
5482 tailcall_calli = FALSE;
5483 tailcall = FALSE; // These can point to the current method's stack. Emit range check?
5484 goto exit;
5488 MonoMethodSignature *caller_signature;
5489 MonoMethodSignature *callee_signature;
5490 caller_signature = mono_method_signature_internal (method);
5491 callee_signature = cmethod ? mono_method_signature_internal (cmethod) : fsig;
5493 g_assert (caller_signature);
5494 g_assert (callee_signature);
5496 // Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped.
5497 // The main troublesome conversions are double <=> float.
5498 // CoreCLR allows some conversions here, such as integer truncation.
5499 // Likewise I <=> I4/I8 and U <=> U4/U8 would be ok when the sizes match.
5500 if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature->ret)->type != mini_get_underlying_type (callee_signature->ret)->type)
5501 || IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg, caller_signature, callee_signature, virtual_))) {
5502 tailcall_calli = FALSE;
5503 tailcall = FALSE;
5504 goto exit;
5507 /* Debugging support */
5508 #if 0
5509 if (!mono_debug_count ()) {
5510 tailcall_calli = FALSE;
5511 tailcall = FALSE;
5512 goto exit;
5514 #endif
5515 // See check_sp in mini_emit_calli_full.
5516 if (tailcall_calli && IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg)))
5517 tailcall_calli = FALSE;
5518 exit:
5519 mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n",
5520 mono_opcode_name (*ip), method->name, cmethod ? cmethod->name : "calli", tailcall, tailcall_calli,
5521 cfg->gshared, extra_arg, virtual_);
5523 *ptailcall_calli = tailcall_calli;
5524 return tailcall;
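/*
 * Illustrative sketch (hypothetical IL): the exact-return-type rule above
 * rejects, for example, a caller whose signature returns float64 ending in
 *
 *     tail. call float32 Callee ()
 *     ret
 *
 * since the float32 -> float64 widening normally performed via
 * emit_move_return_value would be skipped for a tailcall.
 */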
5528 * is_addressable_valuetype_load
5530 * Returns TRUE if a previous load can be done without an extra copy, given the new instruction ip and the type ldtype of the object being loaded.
5532 static gboolean
5533 is_addressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype)
5535 /* Avoid loading a struct just to load one of its fields */
5536 gboolean is_load_instruction = (*ip == CEE_LDFLD);
5537 gboolean is_in_previous_bb = ip_in_bb(cfg, cfg->cbb, ip);
5538 gboolean is_struct = MONO_TYPE_ISSTRUCT(ldtype);
5539 return is_load_instruction && is_in_previous_bb && is_struct;
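/*
 * Illustrative sketch (hypothetical IL): this matches sequences such as
 *
 *     ldloc.0                              // local is a struct
 *     ldfld int32 SomeStruct::some_field
 *
 * where taking the local's address and loading the field through it avoids
 * copying the whole struct onto the stack just to read one field.
 */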
5543 * handle_ctor_call:
5545 * Handle calls made to ctors from NEWOBJ opcodes.
5547 static void
5548 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
5549 MonoInst **sp, guint8 *ip, int *inline_costs)
5551 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
5553 if (m_class_is_valuetype (cmethod->klass) && mono_class_generic_sharing_enabled (cmethod->klass) &&
5554 mono_method_is_generic_sharable (cmethod, TRUE)) {
5555 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
5556 mono_class_vtable_checked (cfg->domain, cmethod->klass, cfg->error);
5557 CHECK_CFG_ERROR;
5558 CHECK_TYPELOAD (cmethod->klass);
5560 vtable_arg = emit_get_rgctx_method (cfg, context_used,
5561 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5562 } else {
5563 if (context_used) {
5564 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
5565 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5566 } else {
5567 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, cfg->error);
5568 CHECK_CFG_ERROR;
5569 CHECK_TYPELOAD (cmethod->klass);
5570 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5575 /* Avoid virtual calls to ctors if possible */
5576 if (mono_class_is_marshalbyref (cmethod->klass))
5577 callvirt_this_arg = sp [0];
5579 if (cmethod && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
5580 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
5581 CHECK_CFG_EXCEPTION;
5582 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
5583 mono_method_check_inlining (cfg, cmethod) &&
5584 !mono_class_is_subclass_of_internal (cmethod->klass, mono_defaults.exception_class, FALSE)) {
5585 int costs;
5587 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
5588 cfg->real_offset += 5;
5590 *inline_costs += costs - 5;
5591 } else {
5592 INLINE_FAILURE ("inline failure");
5593 // FIXME-VT: Clean this up
5594 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
5595 GSHAREDVT_FAILURE(*ip);
5596 mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
5598 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
5599 MonoInst *addr;
5601 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
5603 if (cfg->llvm_only) {
5604 // FIXME: Avoid initializing vtable_arg
5605 mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5606 } else {
5607 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
5609 } else if (context_used &&
5610 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
5611 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
5612 MonoInst *cmethod_addr;
5614 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
5616 if (cfg->llvm_only) {
5617 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
5618 MONO_RGCTX_INFO_METHOD_FTNDESC);
5619 mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5620 } else {
5621 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
5622 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
5624 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
5626 } else {
5627 INLINE_FAILURE ("ctor call");
5628 ins = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
5629 callvirt_this_arg, NULL, vtable_arg);
5631 exception_exit:
5632 mono_error_exit:
5633 return;
5636 typedef struct {
5637 MonoMethod *method;
5638 gboolean inst_tailcall;
5639 } HandleCallData;
5642 * handle_constrained_call:
5644 * Handle constrained calls. Return a MonoInst* representing the call or NULL.
5645 * May overwrite sp [0] and modify the ref_... parameters.
5647 static MonoInst*
5648 handle_constrained_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoClass *constrained_class, MonoInst **sp,
5649 HandleCallData *cdata, MonoMethod **ref_cmethod, gboolean *ref_virtual, gboolean *ref_emit_widen)
5651 MonoInst *ins, *addr;
5652 MonoMethod *method = cdata->method;
5653 gboolean constrained_partial_call = FALSE;
5654 gboolean constrained_is_generic_param =
5655 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
5656 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
5658 if (constrained_is_generic_param && cfg->gshared) {
5659 if (!mini_is_gsharedvt_klass (constrained_class)) {
5660 g_assert (!m_class_is_valuetype (cmethod->klass));
5661 if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class)))
5662 constrained_partial_call = TRUE;
5666 if (mini_is_gsharedvt_klass (constrained_class)) {
5667 if ((cmethod->klass != mono_defaults.object_class) && m_class_is_valuetype (constrained_class) && m_class_is_valuetype (cmethod->klass)) {
5668 /* The 'Own method' case below */
5669 } else if (m_class_get_image (cmethod->klass) != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !m_class_is_valuetype (cmethod->klass)) {
5670 /* 'The type parameter is instantiated as a reference type' case below. */
5671 } else {
5672 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, ref_emit_widen);
5673 CHECK_CFG_EXCEPTION;
5674 g_assert (ins);
5675 if (cdata->inst_tailcall) // FIXME
5676 mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method->name, cmethod->name);
5677 return ins;
5681 if (constrained_partial_call) {
5682 gboolean need_box = TRUE;
5685 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
5686 * called method is not known at compile time either. The called method could end up being
5687 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
5688 * to box the receiver.
5689 * A simple solution would be to box always and make a normal virtual call, but that would
5690 * be bad performance-wise.
5692 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass) &&
5693 (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) {
5695 * The parent classes implement no generic interfaces, so the called method will be a vtype method and no boxing is necessary.
5697 /* If the method is not abstract, it's a default interface method, and we need to box */
5698 need_box = FALSE;
5701 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) {
5702 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
5703 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5704 ins->klass = constrained_class;
5705 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5706 CHECK_CFG_EXCEPTION;
5707 } else if (need_box) {
5708 MonoInst *box_type;
5709 MonoBasicBlock *is_ref_bb, *end_bb;
5710 MonoInst *nonbox_call, *addr;
5713 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
5714 * if needed.
5715 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
5716 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
5718 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
5720 NEW_BBLOCK (cfg, is_ref_bb);
5721 NEW_BBLOCK (cfg, end_bb);
5723 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
5724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
5725 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
5727 /* Non-ref case */
5728 if (cfg->llvm_only)
5729 /* addr is an ftndesc in this case */
5730 nonbox_call = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5731 else
5732 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5736 /* Ref case */
5737 MONO_START_BB (cfg, is_ref_bb);
5738 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5739 ins->klass = constrained_class;
5740 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5741 CHECK_CFG_EXCEPTION;
5742 if (cfg->llvm_only)
5743 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5744 else
5745 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5747 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5749 MONO_START_BB (cfg, end_bb);
5750 cfg->cbb = end_bb;
5752 nonbox_call->dreg = ins->dreg;
5753 if (cdata->inst_tailcall) // FIXME
5754 mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method->name, cmethod->name);
5755 return ins;
5756 } else {
5757 g_assert (mono_class_is_interface (cmethod->klass));
5758 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
5759 if (cfg->llvm_only)
5760 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5761 else
5762 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5763 if (cdata->inst_tailcall) // FIXME
5764 mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method->name, cmethod->name);
5765 return ins;
5767 } else if (!m_class_is_valuetype (constrained_class)) {
5768 int dreg = alloc_ireg_ref (cfg);
5771 * The type parameter is instantiated as a reference
5772 * type. We have a managed pointer on the stack, so
5773 * we need to dereference it here.
5775 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5776 ins->type = STACK_OBJ;
5777 sp [0] = ins;
5778 } else if (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class) {
5780 * The type parameter is instantiated as a valuetype,
5781 * but that type doesn't override the method we're
5782 * calling, so we need to box `this'.
5784 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5785 ins->klass = constrained_class;
5786 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5787 CHECK_CFG_EXCEPTION;
5788 } else {
5789 if (cmethod->klass != constrained_class) {
5790 /* Enums/default interface methods */
5791 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5792 ins->klass = constrained_class;
5793 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5794 CHECK_CFG_EXCEPTION;
5796 *ref_virtual = FALSE;
5799 exception_exit:
5800 return NULL;
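/*
 * Illustrative sketch (hypothetical IL): the cases above handle sequences like
 *
 *     constrained. !!T
 *     callvirt instance string object::ToString ()
 *
 * emitted for a generic t.ToString (). If T is instantiated as a reference
 * type, the managed pointer is dereferenced and a normal virtual call is made;
 * if it is a valuetype overriding ToString (), the call is made directly on
 * the address; only a valuetype that does not override the method needs its
 * receiver boxed.
 */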
5803 static void
5804 emit_setret (MonoCompile *cfg, MonoInst *val)
5806 MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
5807 MonoInst *ins;
5809 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
5810 MonoInst *ret_addr;
5812 if (!cfg->vret_addr) {
5813 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
5814 } else {
5815 EMIT_NEW_RETLOADA (cfg, ret_addr);
5817 MonoClass *ret_class = mono_class_from_mono_type_internal (ret_type);
5818 if (MONO_CLASS_IS_SIMD (cfg, ret_class))
5819 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ret_addr->dreg, 0, val->dreg);
5820 else
5821 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
5822 ins->klass = ret_class;
5824 } else {
5825 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5826 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
5827 MonoInst *iargs [1];
5828 MonoInst *conv;
5830 iargs [0] = val;
5831 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
5832 mono_arch_emit_setret (cfg, cfg->method, conv);
5833 } else {
5834 mono_arch_emit_setret (cfg, cfg->method, val);
5836 #else
5837 mono_arch_emit_setret (cfg, cfg->method, val);
5838 #endif
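/*
 * Illustrative note: the CEE_STOBJ path above covers return types that cannot
 * live in registers. In C terms it is analogous to returning a large struct:
 * the caller passes a hidden pointer (cfg->vret_addr) and the callee stores
 * the value through it instead of using a return register.
 */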
5842 typedef union _MonoOpcodeParameter {
5843 gint32 i32;
5844 gint64 i64;
5845 float f;
5846 double d;
5847 guchar *branch_target;
5848 } MonoOpcodeParameter;
5850 typedef struct _MonoOpcodeInfo {
5851 guint constant : 4; // private
5852 gint pops : 3; // public -1 means variable
5853 gint pushes : 3; // public -1 means variable
5854 } MonoOpcodeInfo;
5856 static const MonoOpcodeInfo*
5857 mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter)
5859 #define Push0 (0)
5860 #define Pop0 (0)
5861 #define Push1 (1)
5862 #define Pop1 (1)
5863 #define PushI (1)
5864 #define PopI (1)
5865 #define PushI8 (1)
5866 #define PopI8 (1)
5867 #define PushRef (1)
5868 #define PopRef (1)
5869 #define PushR4 (1)
5870 #define PopR4 (1)
5871 #define PushR8 (1)
5872 #define PopR8 (1)
5873 #define VarPush (-1)
5874 #define VarPop (-1)
5876 static const MonoOpcodeInfo mono_opcode_info [ ] = {
5877 #define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes },
5878 #include "mono/cil/opcode.def"
5879 #undef OPDEF
5882 #undef Push0
5883 #undef Pop0
5884 #undef Push1
5885 #undef Pop1
5886 #undef PushI
5887 #undef PopI
5888 #undef PushI8
5889 #undef PopI8
5890 #undef PushRef
5891 #undef PopRef
5892 #undef PushR4
5893 #undef PopR4
5894 #undef PushR8
5895 #undef PopR8
5896 #undef VarPush
5897 #undef VarPop
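/*
 * Illustrative note: the Pop/Push macros above turn the symbolic
 * stack-behavior columns of opcode.def into plain counts, so a row shaped
 * roughly like OPDEF (CEE_ADD, "add", Pop1+Pop1, Push1, ...) produces an
 * entry with pops == 2 and pushes == 1.
 */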
5899 gint32 delta;
5900 guchar *next_ip = ip + op_size;
5902 const MonoOpcodeInfo *info = &mono_opcode_info [il_op];
5904 switch (mono_opcodes [il_op].argument) {
5905 case MonoInlineNone:
5906 parameter->i32 = (int)info->constant - 1;
5907 break;
5908 case MonoInlineString:
5909 case MonoInlineType:
5910 case MonoInlineField:
5911 case MonoInlineMethod:
5912 case MonoInlineTok:
5913 case MonoInlineSig:
5914 case MonoShortInlineR:
5915 case MonoInlineI:
5916 parameter->i32 = read32 (next_ip - 4);
5917 // FIXME check token type?
5918 break;
5919 case MonoShortInlineI:
5920 parameter->i32 = (signed char)next_ip [-1];
5921 break;
5922 case MonoInlineVar:
5923 parameter->i32 = read16 (next_ip - 2);
5924 break;
5925 case MonoShortInlineVar:
5926 parameter->i32 = next_ip [-1];
5927 break;
5928 case MonoInlineR:
5929 case MonoInlineI8:
5930 parameter->i64 = read64 (next_ip - 8);
5931 break;
5932 case MonoShortInlineBrTarget:
5933 delta = (signed char)next_ip [-1];
5934 goto branch_target;
5935 case MonoInlineBrTarget:
5936 delta = (gint32)read32 (next_ip - 4);
5937 branch_target:
5938 parameter->branch_target = delta + next_ip;
5939 break;
5940 case MonoInlineSwitch: // complicated
5941 break;
5942 default:
5943 g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument);
5945 return info;
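/*
 * Illustrative sketch: decoding "ldc.i4.s -3" (one opcode byte plus one
 * MonoShortInlineI operand byte, op_size == 2) yields parameter->i32 == -3,
 * with pops == 0 and pushes == 1 coming from the table above.
 */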
5949 * mono_method_to_ir:
5951 * Translate the .net IL into linear IR.
5953 * @start_bblock: if not NULL, the starting basic block, used during inlining.
5954 * @end_bblock: if not NULL, the ending basic block, used during inlining.
5955 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
5956 * @inline_args: if not NULL, contains the arguments to the inline call
5957 * @inline_offset: the real offset of the inline call site, or zero when not inlining.
5958 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
5960 * This method is used to turn ECMA IL into Mono's internal Linear IR
5961 * representation. It is used both for entire methods and for
5962 * inlining existing methods. In the former case, the @start_bblock,
5963 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
5964 * inline_offset is set to zero.
5966 * Returns: the inline cost, or -1 if there was an error processing this method.
5969 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5970 MonoInst *return_var, MonoInst **inline_args,
5971 guint inline_offset, gboolean is_virtual_call)
5973 ERROR_DECL (error);
5974 // Buffer to hold parameters to mono_new_array, instead of varargs.
5975 MonoInst *array_new_localalloc_ins = NULL;
5976 MonoInst *ins, **sp, **stack_start;
5977 MonoBasicBlock *tblock = NULL;
5978 MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL;
5979 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5980 MonoMethod *method_definition;
5981 MonoInst **arg_array;
5982 MonoMethodHeader *header;
5983 MonoImage *image;
5984 guint32 token, ins_flag;
5985 MonoClass *klass;
5986 MonoClass *constrained_class = NULL;
5987 gboolean save_last_error = FALSE;
5988 guchar *ip, *end, *target, *err_pos;
5989 MonoMethodSignature *sig;
5990 MonoGenericContext *generic_context = NULL;
5991 MonoGenericContainer *generic_container = NULL;
5992 MonoType **param_types;
5993 int i, n, start_new_bblock, dreg;
5994 int num_calls = 0, inline_costs = 0;
5995 int breakpoint_id = 0;
5996 guint num_args;
5997 GSList *class_inits = NULL;
5998 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5999 int context_used;
6000 gboolean init_locals, seq_points, skip_dead_blocks;
6001 gboolean sym_seq_points = FALSE;
6002 MonoDebugMethodInfo *minfo;
6003 MonoBitSet *seq_point_locs = NULL;
6004 MonoBitSet *seq_point_set_locs = NULL;
6005 gboolean emitted_funccall_seq_point = FALSE;
6007 cfg->disable_inline = is_jit_optimizer_disabled (method);
6008 cfg->current_method = method;
6010 image = m_class_get_image (method->klass);
6012 /* serialization and xdomain stuff may need access to private fields and methods */
6013 dont_verify = image->assembly->corlib_internal? TRUE: FALSE;
6014 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6015 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6016 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6017 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6018 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6020 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6021 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6022 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_OTHER;
6023 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6024 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6026 header = mono_method_get_header_checked (method, cfg->error);
6027 if (!header) {
6028 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6029 goto exception_exit;
6030 } else {
6031 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
6034 generic_container = mono_method_get_generic_container (method);
6035 sig = mono_method_signature_internal (method);
6036 num_args = sig->hasthis + sig->param_count;
6037 ip = (guchar*)header->code;
6038 cfg->cil_start = ip;
6039 end = ip + header->code_size;
6040 cfg->stat_cil_code_size += header->code_size;
6042 seq_points = cfg->gen_seq_points && cfg->method == method;
6044 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6045 /* We could hit a seq point before attaching to the JIT (#8338) */
6046 seq_points = FALSE;
6049 if (cfg->prof_coverage) {
6050 if (cfg->compile_aot)
6051 g_error ("Coverage profiling is not supported with AOT.");
6053 INLINE_FAILURE ("coverage profiling");
6055 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6058 if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->prof_coverage) {
6059 minfo = mono_debug_lookup_method (method);
6060 if (minfo) {
6061 MonoSymSeqPoint *sps;
6062 int i, n_il_offsets;
6064 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
6065 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6066 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6067 sym_seq_points = TRUE;
6068 for (i = 0; i < n_il_offsets; ++i) {
6069 if (sps [i].il_offset < header->code_size)
6070 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
6072 g_free (sps);
6074 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
6075 if (asyncMethod) {
6076 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
6078 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
6079 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
6081 mono_debug_free_method_async_debug_info (asyncMethod);
6083 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) {
6084 /* Methods without line number info like auto-generated property accessors */
6085 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6086 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6087 sym_seq_points = TRUE;
6092 * Methods without init_locals set could cause asserts in various passes
6093 * (#497220). To work around this, we emit dummy initialization opcodes
6094 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6095 * on some platforms.
6097 if (cfg->opt & MONO_OPT_UNSAFE)
6098 init_locals = header->init_locals;
6099 else
6100 init_locals = TRUE;
6102 method_definition = method;
6103 while (method_definition->is_inflated) {
6104 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6105 method_definition = imethod->declaring;
6108 /* SkipVerification is not allowed if core-clr is enabled */
6109 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6110 dont_verify = TRUE;
6111 dont_verify_stloc = TRUE;
6114 if (sig->is_inflated)
6115 generic_context = mono_method_get_context (method);
6116 else if (generic_container)
6117 generic_context = &generic_container->context;
6118 cfg->generic_context = generic_context;
6120 if (!cfg->gshared)
6121 g_assert (!sig->has_type_parameters);
6123 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6124 g_assert (method->is_inflated);
6125 g_assert (mono_method_get_context (method)->method_inst);
6127 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6128 g_assert (sig->generic_param_count);
6130 if (cfg->method == method) {
6131 cfg->real_offset = 0;
6132 } else {
6133 cfg->real_offset = inline_offset;
6136 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6137 cfg->cil_offset_to_bb_len = header->code_size;
6139 if (cfg->verbose_level > 2)
6140 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6142 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6143 if (sig->hasthis)
6144 param_types [0] = m_class_is_valuetype (method->klass) ? m_class_get_this_arg (method->klass) : m_class_get_byval_arg (method->klass);
6145 for (n = 0; n < sig->param_count; ++n)
6146 param_types [n + sig->hasthis] = sig->params [n];
6147 cfg->arg_types = param_types;
6149 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
6150 if (cfg->method == method) {
6151 /* ENTRY BLOCK */
6152 NEW_BBLOCK (cfg, start_bblock);
6153 cfg->bb_entry = start_bblock;
6154 start_bblock->cil_code = NULL;
6155 start_bblock->cil_length = 0;
6157 /* EXIT BLOCK */
6158 NEW_BBLOCK (cfg, end_bblock);
6159 cfg->bb_exit = end_bblock;
6160 end_bblock->cil_code = NULL;
6161 end_bblock->cil_length = 0;
6162 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6163 g_assert (cfg->num_bblocks == 2);
6165 arg_array = cfg->args;
6167 if (header->num_clauses) {
6168 cfg->spvars = g_hash_table_new (NULL, NULL);
6169 cfg->exvars = g_hash_table_new (NULL, NULL);
6171 /* handle exception clauses */
6172 for (i = 0; i < header->num_clauses; ++i) {
6173 MonoBasicBlock *try_bb;
6174 MonoExceptionClause *clause = &header->clauses [i];
6175 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6177 try_bb->real_offset = clause->try_offset;
6178 try_bb->try_start = TRUE;
6179 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6180 tblock->real_offset = clause->handler_offset;
6181 tblock->flags |= BB_EXCEPTION_HANDLER;
6183 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
6184 mono_create_exvar_for_offset (cfg, clause->handler_offset);
6186 * Linking the try block with the EH block hinders inlining, as we won't be able to
6187 * merge the bblocks from inlining, producing an artificial hole for no good reason.
6189 if (COMPILE_LLVM (cfg))
6190 link_bblock (cfg, try_bb, tblock);
6192 if (*(ip + clause->handler_offset) == CEE_POP)
6193 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6195 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6196 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6197 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6198 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6199 MONO_ADD_INS (tblock, ins);
6201 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
6202 /* finally clauses already have a seq point */
6203 /* seq points for filter clauses are emitted below */
6204 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6205 MONO_ADD_INS (tblock, ins);
6208 /* todo: is a fault block unsafe to optimize? */
6209 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6210 tblock->flags |= BB_EXCEPTION_UNSAFE;
6213 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6214 while (p < end) {
6215 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6217 /* catch and filter blocks get the exception object on the stack */
6218 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6219 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6221 /* mostly like handle_stack_args (), but just sets the input args */
6222 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6223 tblock->in_scount = 1;
6224 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6225 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6227 cfg->cbb = tblock;
6229 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
6230 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
6231 if (!cfg->compile_llvm) {
6232 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
6233 ins->dreg = tblock->in_stack [0]->dreg;
6234 MONO_ADD_INS (tblock, ins);
6236 #else
6237 MonoInst *dummy_use;
6240 * Add a dummy use for the exvar so its liveness info will be
6241 * correct.
6243 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6244 #endif
6246 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6247 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6248 MONO_ADD_INS (tblock, ins);
6251 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6252 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6253 tblock->flags |= BB_EXCEPTION_HANDLER;
6254 tblock->real_offset = clause->data.filter_offset;
6255 tblock->in_scount = 1;
6256 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6257 /* The filter block shares the exvar with the handler block */
6258 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6259 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6260 MONO_ADD_INS (tblock, ins);
6264 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6265 clause->data.catch_class &&
6266 cfg->gshared &&
6267 mono_class_check_context_used (clause->data.catch_class)) {
6269 * In shared generic code with catch
6270 * clauses containing type variables
6271 * the exception handling code has to
6272 * be able to get to the rgctx.
6273 * Therefore we have to make sure that
6274 * the vtable/mrgctx argument (for
6275 * static or generic methods) or the
6276 * "this" argument (for non-static
6277 * methods) are live.
6279 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6280 mini_method_get_context (method)->method_inst ||
6281 m_class_is_valuetype (method->klass)) {
6282 mono_get_vtable_var (cfg);
6283 } else {
6284 MonoInst *dummy_use;
6286 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6290 } else {
6291 arg_array = g_newa (MonoInst*, num_args);
6292 cfg->cbb = start_bblock;
6293 cfg->args = arg_array;
6294 mono_save_args (cfg, sig, inline_args);
6297 /* FIRST CODE BLOCK */
6298 NEW_BBLOCK (cfg, tblock);
6299 tblock->cil_code = ip;
6300 cfg->cbb = tblock;
6301 cfg->ip = ip;
6303 ADD_BBLOCK (cfg, tblock);
6305 if (cfg->method == method) {
6306 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6307 if (breakpoint_id) {
6308 MONO_INST_NEW (cfg, ins, OP_BREAK);
6309 MONO_ADD_INS (cfg->cbb, ins);
6313 /* we use a separate basic block for the initialization code */
6314 NEW_BBLOCK (cfg, init_localsbb);
6315 if (cfg->method == method)
6316 cfg->bb_init = init_localsbb;
6317 init_localsbb->real_offset = cfg->real_offset;
6318 start_bblock->next_bb = init_localsbb;
6319 init_localsbb->next_bb = cfg->cbb;
6320 link_bblock (cfg, start_bblock, init_localsbb);
6321 link_bblock (cfg, init_localsbb, cfg->cbb);
6322 init_localsbb2 = init_localsbb;
6323 cfg->cbb = init_localsbb;
6325 if (cfg->gsharedvt && cfg->method == method) {
6326 MonoGSharedVtMethodInfo *info;
6327 MonoInst *var, *locals_var;
6328 int dreg;
6330 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6331 info->method = cfg->method;
6332 info->count_entries = 16;
6333 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
6334 cfg->gsharedvt_info = info;
6336 var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
6337 /* prevent it from being register allocated */
6338 //var->flags |= MONO_INST_VOLATILE;
6339 cfg->gsharedvt_info_var = var;
6341 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6342 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6344 /* Allocate locals */
6345 locals_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
6346 /* prevent it from being register allocated */
6347 //locals_var->flags |= MONO_INST_VOLATILE;
6348 cfg->gsharedvt_locals_var = locals_var;
6350 dreg = alloc_ireg (cfg);
6351 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6353 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6354 ins->dreg = locals_var->dreg;
6355 ins->sreg1 = dreg;
6356 MONO_ADD_INS (cfg->cbb, ins);
6357 cfg->gsharedvt_locals_var_ins = ins;
6359 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6361 if (init_locals)
6362 ins->flags |= MONO_INST_INIT;
6366 if (mono_security_core_clr_enabled ()) {
6367 /* check if this is native code, e.g. an icall or a p/invoke */
6368 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6369 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6370 if (wrapped) {
6371 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6372 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6374 /* if this is a native call then it can only be JITted from platform code */
6375 if ((icall || pinvk) && method->klass && m_class_get_image (method->klass)) {
6376 if (!mono_security_core_clr_is_platform_image (m_class_get_image (method->klass))) {
6377 MonoException *ex = icall ? mono_get_exception_security () :
6378 mono_get_exception_method_access ();
6379 emit_throw_exception (cfg, ex);
6386 CHECK_CFG_EXCEPTION;
6388 if (header->code_size == 0)
6389 UNVERIFIED;
6391 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6392 ip = err_pos;
6393 UNVERIFIED;
6396 if (cfg->method == method)
6397 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
6399 for (n = 0; n < header->num_locals; ++n) {
6400 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6401 UNVERIFIED;
6403 class_inits = NULL;
6405 /* We force the vtable variable here for all shared methods
6406 for the possibility that they might show up in a stack
6407 trace where their exact instantiation is needed. */
6408 if (cfg->gshared && method == cfg->method) {
6409 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6410 mini_method_get_context (method)->method_inst ||
6411 m_class_is_valuetype (method->klass)) {
6412 mono_get_vtable_var (cfg);
6413 } else {
6414 /* FIXME: Is there a better way to do this?
6415 We need the variable live for the duration
6416 of the whole method. */
6417 cfg->args [0]->flags |= MONO_INST_VOLATILE;
6421 /* add a check for this != NULL to inlined methods */
6422 if (is_virtual_call) {
6423 MonoInst *arg_ins;
6425 NEW_ARGLOAD (cfg, arg_ins, 0);
6426 MONO_ADD_INS (cfg->cbb, arg_ins);
6427 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6430 skip_dead_blocks = !dont_verify;
6431 if (skip_dead_blocks) {
6432 original_bb = bb = mono_basic_block_split (method, cfg->error, header);
6433 CHECK_CFG_ERROR;
6434 g_assert (bb);
6437 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6438 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6440 ins_flag = 0;
6441 start_new_bblock = 0;
6442 MonoOpcodeEnum il_op; il_op = MonoOpcodeEnum_Invalid;
6444 for (guchar *next_ip = ip; ip < end; ip = next_ip) {
6445 MonoOpcodeEnum previous_il_op = il_op;
6446 const guchar *tmp_ip = ip;
6447 const int op_size = mono_opcode_value_and_size (&tmp_ip, end, &il_op);
6448 CHECK_OPSIZE (op_size);
6449 next_ip += op_size;
6451 if (cfg->method == method)
6452 cfg->real_offset = ip - header->code;
6453 else
6454 cfg->real_offset = inline_offset;
6455 cfg->ip = ip;
6457 context_used = 0;
6459 if (start_new_bblock) {
6460 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
6461 if (start_new_bblock == 2) {
6462 g_assert (ip == tblock->cil_code);
6463 } else {
6464 GET_BBLOCK (cfg, tblock, ip);
6466 cfg->cbb->next_bb = tblock;
6467 cfg->cbb = tblock;
6468 start_new_bblock = 0;
6469 for (i = 0; i < cfg->cbb->in_scount; ++i) {
6470 if (cfg->verbose_level > 3)
6471 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
6472 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
6473 *sp++ = ins;
6475 if (class_inits)
6476 g_slist_free (class_inits);
6477 class_inits = NULL;
6478 } else {
6479 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
6480 link_bblock (cfg, cfg->cbb, tblock);
6481 if (sp != stack_start) {
6482 handle_stack_args (cfg, stack_start, sp - stack_start);
6483 sp = stack_start;
6484 CHECK_UNVERIFIABLE (cfg);
6486 cfg->cbb->next_bb = tblock;
6487 cfg->cbb = tblock;
6488 for (i = 0; i < cfg->cbb->in_scount; ++i) {
6489 if (cfg->verbose_level > 3)
6490 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
6491 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
6492 *sp++ = ins;
6494 g_slist_free (class_inits);
6495 class_inits = NULL;
6499 if (skip_dead_blocks) {
6500 int ip_offset = ip - header->code;
6502 if (ip_offset == bb->end)
6503 bb = bb->next;
6505 if (bb->dead) {
6506 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6508 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6510 if (ip_offset + op_size == bb->end) {
6511 MONO_INST_NEW (cfg, ins, OP_NOP);
6512 MONO_ADD_INS (cfg->cbb, ins);
6513 start_new_bblock = 1;
6515 continue;
6519 * Sequence points are points where the debugger can place a breakpoint.
6520 * Currently, we generate these automatically at points where the IL
6521 * stack is empty.
6523 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6525 * Make methods interruptible at the beginning, and at the targets of
6526 * backward branches.
6527 * Also do this at the start of every bblock in methods with clauses,
6528 * to be able to handle instructions with imprecise control flow like
6529 * throw/endfinally.
6530 * Backward branches are handled at the end of method-to-ir ().
6532 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6533 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
6535 /* Avoid sequence points on empty IL like .volatile */
6536 // FIXME: Enable this
6537 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6538 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6539 if ((sp != stack_start) && !sym_seq_point)
6540 ins->flags |= MONO_INST_NONEMPTY_STACK;
6541 MONO_ADD_INS (cfg->cbb, ins);
6543 if (sym_seq_points)
6544 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
6546 if (cfg->prof_coverage) {
6547 guint32 cil_offset = ip - header->code;
6548 gpointer counter = &cfg->coverage_info->data [cil_offset].count;
6549 cfg->coverage_info->data [cil_offset].cil_code = ip;
6551 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
6552 MonoInst *one_ins, *load_ins;
6554 EMIT_NEW_PCONST (cfg, load_ins, counter);
6555 EMIT_NEW_ICONST (cfg, one_ins, 1);
6556 MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
6557 ins->dreg = mono_alloc_ireg (cfg);
6558 ins->inst_basereg = load_ins->dreg;
6559 ins->inst_offset = 0;
6560 ins->sreg2 = one_ins->dreg;
6561 ins->type = STACK_I4;
6562 MONO_ADD_INS (cfg->cbb, ins);
6563 } else {
6564 EMIT_NEW_PCONST (cfg, ins, counter);
6565 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6570 cfg->cbb->real_offset = cfg->real_offset;
6572 if (cfg->verbose_level > 3)
6573 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6575 // Variables shared by CEE_CALLI CEE_CALL CEE_CALLVIRT CEE_JMP.
6576 // Initialize to either what they all need or zero.
6577 gboolean emit_widen = TRUE;
6578 gboolean tailcall = FALSE;
6579 gboolean common_call = FALSE;
6580 MonoInst *keep_this_alive = NULL;
6581 MonoMethod *cmethod = NULL;
6582 MonoMethodSignature *fsig = NULL;
6584 // These are used only in CALL/CALLVIRT but must be initialized also for CALLI,
6585 // since it jumps into CALL/CALLVIRT.
6586 gboolean need_seq_point = FALSE;
6587 gboolean push_res = TRUE;
6588 gboolean skip_ret = FALSE;
6589 gboolean tailcall_remove_ret = FALSE;
6591 // FIXME: split the ~500 lines of load/store field handling into a separate file/function.
6593 MonoOpcodeParameter parameter;
6594 const MonoOpcodeInfo* info = mono_opcode_decode (ip, op_size, il_op, &parameter);
6595 g_assert (info);
6596 n = parameter.i32;
6597 token = parameter.i32;
6598 target = parameter.branch_target;
6600 // Check stack size for push/pop except variable cases -- -1 like call/ret/newobj.
6601 const int pushes = info->pushes;
6602 const int pops = info->pops;
6603 if (pushes >= 0 && pops >= 0) {
6604 g_assert (pushes - pops <= 1);
6605 if (pushes - pops == 1)
6606 CHECK_STACK_OVF ();
6608 if (pops >= 0)
6609 CHECK_STACK (pops);
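/*
 * Illustrative note: for "add" (pops == 2, pushes == 1) the net stack change
 * is -1, so only the operand count is checked; for "ldc.i4" (pops == 0,
 * pushes == 1) the net change is +1 and the overflow check runs. Opcodes with
 * variable arity such as call report -1 and are validated in their own
 * handlers below.
 */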
6611 switch (il_op) {
6612 case MONO_CEE_NOP:
6613 if (seq_points && !sym_seq_points && sp != stack_start) {
6615 * The C# compiler uses these nops to notify the JIT that it should
6616 * insert seq points.
6618 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6619 MONO_ADD_INS (cfg->cbb, ins);
6621 if (cfg->keep_cil_nops)
6622 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6623 else
6624 MONO_INST_NEW (cfg, ins, OP_NOP);
6625 MONO_ADD_INS (cfg->cbb, ins);
6626 emitted_funccall_seq_point = FALSE;
6627 break;
6628 case MONO_CEE_BREAK:
6629 if (mini_should_insert_breakpoint (cfg->method)) {
6630 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6631 } else {
6632 MONO_INST_NEW (cfg, ins, OP_NOP);
6633 MONO_ADD_INS (cfg->cbb, ins);
6635 break;
6636 case MONO_CEE_LDARG_0:
6637 case MONO_CEE_LDARG_1:
6638 case MONO_CEE_LDARG_2:
6639 case MONO_CEE_LDARG_3:
6640 case MONO_CEE_LDARG_S:
6641 case MONO_CEE_LDARG:
6642 CHECK_ARG (n);
6643 if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, cfg->arg_types[n])) {
6644 EMIT_NEW_ARGLOADA (cfg, ins, n);
6645 } else {
6646 EMIT_NEW_ARGLOAD (cfg, ins, n);
6648 *sp++ = ins;
6649 break;
6651 case MONO_CEE_LDLOC_0:
6652 case MONO_CEE_LDLOC_1:
6653 case MONO_CEE_LDLOC_2:
6654 case MONO_CEE_LDLOC_3:
6655 case MONO_CEE_LDLOC_S:
6656 case MONO_CEE_LDLOC:
6657 CHECK_LOCAL (n);
6658 if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, header->locals[n])) {
6659 EMIT_NEW_LOCLOADA (cfg, ins, n);
6660 } else {
6661 EMIT_NEW_LOCLOAD (cfg, ins, n);
6663 *sp++ = ins;
6664 break;
6666 case MONO_CEE_STLOC_0:
6667 case MONO_CEE_STLOC_1:
6668 case MONO_CEE_STLOC_2:
6669 case MONO_CEE_STLOC_3:
6670 case MONO_CEE_STLOC_S:
6671 case MONO_CEE_STLOC:
6672 CHECK_LOCAL (n);
6673 --sp;
6674 *sp = convert_value (cfg, header->locals [n], *sp);
6675 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6676 UNVERIFIED;
6677 emit_stloc_ir (cfg, sp, header, n);
6678 inline_costs += 1;
6679 break;
6680 case MONO_CEE_LDARGA_S:
6681 case MONO_CEE_LDARGA:
6682 CHECK_ARG (n);
6683 NEW_ARGLOADA (cfg, ins, n);
6684 MONO_ADD_INS (cfg->cbb, ins);
6685 *sp++ = ins;
6686 break;
6687 case MONO_CEE_STARG_S:
6688 case MONO_CEE_STARG:
6689 --sp;
6690 CHECK_ARG (n);
6691 *sp = convert_value (cfg, param_types [n], *sp);
6692 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
6693 UNVERIFIED;
6694 emit_starg_ir (cfg, sp, n);
6695 break;
6696 case MONO_CEE_LDLOCA:
6697 case MONO_CEE_LDLOCA_S: {
6698 guchar *tmp_ip;
6699 CHECK_LOCAL (n);
6701 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, next_ip, end, n))) {
6702 next_ip = tmp_ip;
6703 il_op = MONO_CEE_INITOBJ;
6704 inline_costs += 1;
6705 break;
6708 EMIT_NEW_LOCLOADA (cfg, ins, n);
6709 *sp++ = ins;
6710 break;
6712 case MONO_CEE_LDNULL:
6713 EMIT_NEW_PCONST (cfg, ins, NULL);
6714 ins->type = STACK_OBJ;
6715 *sp++ = ins;
6716 break;
6717 case MONO_CEE_LDC_I4_M1:
6718 case MONO_CEE_LDC_I4_0:
6719 case MONO_CEE_LDC_I4_1:
6720 case MONO_CEE_LDC_I4_2:
6721 case MONO_CEE_LDC_I4_3:
6722 case MONO_CEE_LDC_I4_4:
6723 case MONO_CEE_LDC_I4_5:
6724 case MONO_CEE_LDC_I4_6:
6725 case MONO_CEE_LDC_I4_7:
6726 case MONO_CEE_LDC_I4_8:
6727 case MONO_CEE_LDC_I4_S:
6728 case MONO_CEE_LDC_I4:
6729 EMIT_NEW_ICONST (cfg, ins, n);
6730 *sp++ = ins;
6731 break;
6732 case MONO_CEE_LDC_I8:
6733 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6734 ins->type = STACK_I8;
6735 ins->dreg = alloc_dreg (cfg, STACK_I8);
6736 ins->inst_l = parameter.i64;
6737 MONO_ADD_INS (cfg->cbb, ins);
6738 *sp++ = ins;
6739 break;
6740 case MONO_CEE_LDC_R4: {
6741 float *f;
6742 gboolean use_aotconst = FALSE;
6744 #ifdef TARGET_POWERPC
6745 /* FIXME: Clean this up */
6746 if (cfg->compile_aot)
6747 use_aotconst = TRUE;
6748 #endif
6749 /* FIXME: we should really allocate this only late in the compilation process */
6750 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
6752 if (use_aotconst) {
6753 MonoInst *cons;
6754 int dreg;
6756 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6758 dreg = alloc_freg (cfg);
6759 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6760 ins->type = cfg->r4_stack_type;
6761 } else {
6762 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6763 ins->type = cfg->r4_stack_type;
6764 ins->dreg = alloc_dreg (cfg, STACK_R8);
6765 ins->inst_p0 = f;
6766 MONO_ADD_INS (cfg->cbb, ins);
6767 }
6768 *f = parameter.f;
6769 *sp++ = ins;
6770 break;
6771 }
6772 case MONO_CEE_LDC_R8: {
6773 double *d;
6774 gboolean use_aotconst = FALSE;
6776 #ifdef TARGET_POWERPC
6777 /* FIXME: Clean this up */
6778 if (cfg->compile_aot)
6779 use_aotconst = TRUE;
6780 #endif
6782 /* FIXME: we should really allocate this only late in the compilation process */
6783 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
6785 if (use_aotconst) {
6786 MonoInst *cons;
6787 int dreg;
6789 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6791 dreg = alloc_freg (cfg);
6792 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6793 ins->type = STACK_R8;
6794 } else {
6795 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6796 ins->type = STACK_R8;
6797 ins->dreg = alloc_dreg (cfg, STACK_R8);
6798 ins->inst_p0 = d;
6799 MONO_ADD_INS (cfg->cbb, ins);
6800 }
6801 *d = parameter.d;
6802 *sp++ = ins;
6803 break;
6804 }
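/*
 * Added sketch: CEE_DUP is lowered through a compiler-managed temporary
 * rather than by pushing the same MonoInst twice, roughly
 *     temp = pop ();   // EMIT_NEW_TEMPSTORE
 *     push (temp);     // EMIT_NEW_TEMPLOAD
 *     push (temp);     // EMIT_NEW_TEMPLOAD
 * so each stack slot gets its own load instruction.
 */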
6805 case MONO_CEE_DUP: {
6806 MonoInst *temp, *store;
6807 sp--;
6808 ins = *sp;
6810 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6811 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6813 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6814 *sp++ = ins;
6816 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6817 *sp++ = ins;
6819 inline_costs += 2;
6820 break;
6821 }
6822 case MONO_CEE_POP:
6823 --sp;
6825 #ifdef TARGET_X86
6826 if (sp [0]->type == STACK_R8)
6827 /* we need to pop the value from the x86 FP stack */
6828 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6829 #endif
6830 break;
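/*
 * Added note: CEE_JMP transfers control to another method taking the current
 * arguments, e.g. (hypothetical IL)
 *     jmp void Foo::Bar (int32)
 * so the evaluation stack must be empty here; the arguments are reloaded with
 * EMIT_NEW_ARGLOAD before the call/OP_TAILCALL below.
 */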
6831 case MONO_CEE_JMP: {
6832 MonoCallInst *call;
6833 int i, n;
6835 INLINE_FAILURE ("jmp");
6836 GSHAREDVT_FAILURE (il_op);
6838 if (stack_start != sp)
6839 UNVERIFIED;
6840 /* FIXME: check the signature matches */
6841 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6842 CHECK_CFG_ERROR;
6844 if (cfg->gshared && mono_method_check_context_used (cmethod))
6845 GENERIC_SHARING_FAILURE (CEE_JMP);
6847 mini_profiler_emit_tail_call (cfg, cmethod);
6849 fsig = mono_method_signature_internal (cmethod);
6850 n = fsig->param_count + fsig->hasthis;
6851 if (cfg->llvm_only) {
6852 MonoInst **args;
6854 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6855 for (i = 0; i < n; ++i)
6856 EMIT_NEW_ARGLOAD (cfg, args [i], i);
6857 ins = mini_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
6858 /*
6859 * The code in mono-basic-block.c treats the rest of the code as dead, but we
6860 * have to emit a normal return since llvm expects it.
6861 */
6862 if (cfg->ret)
6863 emit_setret (cfg, ins);
6864 MONO_INST_NEW (cfg, ins, OP_BR);
6865 ins->inst_target_bb = end_bblock;
6866 MONO_ADD_INS (cfg->cbb, ins);
6867 link_bblock (cfg, cfg->cbb, end_bblock);
6868 break;
6869 } else {
6870 /* Handle tailcalls similarly to calls */
6871 DISABLE_AOT (cfg);
6873 mini_emit_tailcall_parameters (cfg, fsig);
6874 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6875 call->method = cmethod;
6876 // FIXME Other initialization of the tailcall field occurs after
6877 // it is used. So this is the only "real" use and needs more attention.
6878 call->tailcall = TRUE;
6879 call->signature = fsig;
6880 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6881 call->inst.inst_p0 = cmethod;
6882 for (i = 0; i < n; ++i)
6883 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6885 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
6886 call->vret_var = cfg->vret_addr;
6888 mono_arch_emit_call (cfg, call);
6889 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
6890 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
6891 }
6893 start_new_bblock = 1;
6894 break;
6895 }
6896 case MONO_CEE_CALLI: {
6897 // FIXME tail.calli is problematic because the this pointer's type
6898 // is not in the signature, and we cannot check for a byref valuetype.
6899 MonoInst *addr;
6900 MonoInst *callee = NULL;
6902 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
6903 common_call = TRUE; // i.e. skip_ret/push_res/seq_point logic
6904 cmethod = NULL;
6906 gboolean const inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
6907 ? (next_ip < end && next_ip [0] == CEE_RET)
6908 : ((ins_flag & MONO_INST_TAILCALL) != 0));
6909 ins = NULL;
6911 //GSHAREDVT_FAILURE (il_op);
6912 CHECK_STACK (1);
6913 --sp;
6914 addr = *sp;
6915 g_assert (addr);
6916 fsig = mini_get_signature (method, token, generic_context, cfg->error);
6917 CHECK_CFG_ERROR;
6919 if (method->dynamic && fsig->pinvoke) {
6920 MonoInst *args [3];
6922 /*
6923 * This is a call through a function pointer using a pinvoke
6924 * signature. Have to create a wrapper and call that instead.
6925 * FIXME: This is very slow, need to create a wrapper at JIT time
6926 * instead based on the signature.
6927 */
6928 EMIT_NEW_IMAGECONST (cfg, args [0], m_class_get_image (method->klass));
6929 EMIT_NEW_PCONST (cfg, args [1], fsig);
6930 args [2] = addr;
6931 // FIXME tailcall?
6932 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6933 }
6935 n = fsig->param_count + fsig->hasthis;
6937 CHECK_STACK (n);
6939 //g_assert (!virtual_ || fsig->hasthis);
6941 sp -= n;
6943 if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp)) {
6944 if (break_on_unverified ())
6945 check_call_signature (cfg, fsig, sp); // Again, step through it.
6946 UNVERIFIED;
6947 }
6949 inline_costs += CALL_COST * MIN(10, num_calls++);
6951 /*
6952 * Making generic calls out of gsharedvt methods.
6953 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
6954 * patching gshared method addresses into a gsharedvt method.
6955 */
6956 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
6957 /*
6958 * We pass the address to the gsharedvt trampoline in the rgctx reg
6959 */
6960 callee = addr;
6961 g_assert (addr); // Doubles as boolean after tailcall check.
6962 }
6964 inst_tailcall && is_supported_tailcall (cfg, ip, method, NULL, fsig,
6965 FALSE/*virtual irrelevant*/, addr != NULL, &tailcall);
6967 if (callee) {
6968 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
6969 /* Not tested */
6970 GSHAREDVT_FAILURE (il_op);
6972 if (cfg->llvm_only)
6973 // FIXME:
6974 GSHAREDVT_FAILURE (il_op);
6976 addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
6977 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, callee, tailcall);
6978 goto calli_end;
6979 }
6981 /* Prevent inlining of methods with indirect calls */
6982 INLINE_FAILURE ("indirect call");
6984 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
6985 MonoJumpInfoType info_type;
6986 gpointer info_data;
6988 /*
6989 * Instead of emitting an indirect call, emit a direct call
6990 * with the contents of the aotconst as the patch info.
6991 */
6992 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
6993 info_type = (MonoJumpInfoType)addr->inst_c1;
6994 info_data = addr->inst_p0;
6995 } else {
6996 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
6997 info_data = addr->inst_right->inst_left;
6998 }
7000 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
7001 // non-JIT icall, mostly builtin, but also user-extensible
7002 tailcall = FALSE;
7003 ins = (MonoInst*)mini_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
7004 NULLIFY_INS (addr);
7005 goto calli_end;
7006 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR
7007 || info_type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR) {
7008 tailcall = FALSE;
7009 ins = (MonoInst*)mini_emit_abs_call (cfg, info_type, info_data, fsig, sp);
7010 NULLIFY_INS (addr);
7011 goto calli_end;
7012 }
7014 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, NULL, tailcall);
7015 goto calli_end;
7016 }
7017 case MONO_CEE_CALL:
7018 case MONO_CEE_CALLVIRT: {
7019 MonoInst *addr; addr = NULL;
7020 int array_rank; array_rank = 0;
7021 gboolean virtual_; virtual_ = il_op == MONO_CEE_CALLVIRT;
7022 gboolean pass_imt_from_rgctx; pass_imt_from_rgctx = FALSE;
7023 MonoInst *imt_arg; imt_arg = NULL;
7024 gboolean pass_vtable; pass_vtable = FALSE;
7025 gboolean pass_mrgctx; pass_mrgctx = FALSE;
7026 MonoInst *vtable_arg; vtable_arg = NULL;
7027 gboolean check_this; check_this = FALSE;
7028 gboolean delegate_invoke; delegate_invoke = FALSE;
7029 gboolean direct_icall; direct_icall = FALSE;
7030 gboolean tailcall_calli; tailcall_calli = FALSE;
7031 gboolean noreturn; noreturn = FALSE;
7033 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
7034 common_call = FALSE;
7036 // variables to help in assertions
7037 gboolean called_is_supported_tailcall; called_is_supported_tailcall = FALSE;
7038 MonoMethod *tailcall_method; tailcall_method = NULL;
7039 MonoMethod *tailcall_cmethod; tailcall_cmethod = NULL;
7040 MonoMethodSignature *tailcall_fsig; tailcall_fsig = NULL;
7041 gboolean tailcall_virtual; tailcall_virtual = FALSE;
7042 gboolean tailcall_extra_arg; tailcall_extra_arg = FALSE;
7044 gboolean inst_tailcall; inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
7045 ? (next_ip < end && next_ip [0] == CEE_RET)
7046 : ((ins_flag & MONO_INST_TAILCALL) != 0));
7047 ins = NULL;
7049 /* Used to pass arguments to called functions */
7050 HandleCallData cdata;
7051 memset (&cdata, 0, sizeof (HandleCallData));
7053 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7054 CHECK_CFG_ERROR;
7056 if (cfg->verbose_level > 3)
7057 printf ("cmethod = %s\n", mono_method_get_full_name (cmethod));
7059 MonoMethod *cil_method; cil_method = cmethod;
7061 if (constrained_class) {
7062 gboolean constrained_is_generic_param =
7063 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
7064 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
7066 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7067 if (cfg->verbose_level > 2)
7068 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
7069 if (!(constrained_is_generic_param &&
7070 cfg->gshared)) {
7071 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, cfg->error);
7072 CHECK_CFG_ERROR;
7073 }
7074 } else {
7075 if (cfg->verbose_level > 2)
7076 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
7078 if (constrained_is_generic_param && cfg->gshared) {
7079 /*
7080 * This is needed since get_method_constrained can't find
7081 * the method in klass representing a type var.
7082 * The type var is guaranteed to be a reference type in this
7083 * case.
7084 */
7085 if (!mini_is_gsharedvt_klass (constrained_class))
7086 g_assert (!m_class_is_valuetype (cmethod->klass));
7087 } else {
7088 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, cfg->error);
7089 CHECK_CFG_ERROR;
7090 }
7091 }
7093 if (m_class_is_enumtype (constrained_class) && !strcmp (cmethod->name, "GetHashCode")) {
7094 /* Use the corresponding method from the base type to avoid boxing */
7095 MonoType *base_type = mono_class_enum_basetype_internal (constrained_class);
7096 g_assert (base_type);
7097 constrained_class = mono_class_from_mono_type_internal (base_type);
7098 cmethod = get_method_nofail (constrained_class, cmethod->name, 0, 0);
7099 g_assert (cmethod);
7100 }
7101 }
7103 if (!dont_verify && !cfg->skip_visibility) {
7104 MonoMethod *target_method = cil_method;
7105 if (method->is_inflated) {
7106 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), cfg->error);
7107 CHECK_CFG_ERROR;
7109 if (!mono_method_can_access_method (method_definition, target_method) &&
7110 !mono_method_can_access_method (method, cil_method))
7111 emit_method_access_failure (cfg, method, cil_method);
7112 }
7114 if (mono_security_core_clr_enabled ())
7115 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
7117 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) {
7118 if (!mono_class_is_interface (method->klass))
7119 emit_bad_image_failure (cfg, method, cil_method);
7120 else
7121 virtual_ = TRUE;
7122 }
7124 {
7125 /*
7126 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7127 * converts to a callvirt.
7129 * tests/bug-515884.il is an example of this behavior
7130 */
7131 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7132 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7133 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7134 virtual_ = TRUE;
7135 }
7137 if (!m_class_is_inited (cmethod->klass))
7138 if (!mono_class_init_internal (cmethod->klass))
7139 TYPE_LOAD_ERROR (cmethod->klass);
7141 fsig = mono_method_signature_internal (cmethod);
7142 if (!fsig)
7143 LOAD_ERROR;
7144 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7145 mini_class_is_system_array (cmethod->klass)) {
7146 array_rank = m_class_get_rank (cmethod->klass);
7147 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg, cmethod)) {
7148 direct_icall = TRUE;
7149 } else if (fsig->pinvoke) {
7150 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
7151 fsig = mono_method_signature_internal (wrapper);
7152 } else if (constrained_class) {
7153 } else {
7154 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
7155 CHECK_CFG_ERROR;
7156 }
7158 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
7159 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
7161 /* See code below */
7162 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
7163 MonoBasicBlock *tbb;
7165 GET_BBLOCK (cfg, tbb, next_ip);
7166 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7167 /*
7168 * We want to extend the try block to cover the call, but we can't do it if the
7169 * call is made directly since it's followed by an exception check.
7170 */
7171 direct_icall = FALSE;
7172 }
7173 }
7175 mono_save_token_info (cfg, image, token, cil_method);
7177 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
7178 need_seq_point = TRUE;
7180 /* Don't support calls made using type arguments for now */
7181 /*
7182 if (cfg->gsharedvt) {
7183 if (mini_is_gsharedvt_signature (fsig))
7184 GSHAREDVT_FAILURE (il_op);
7185 }
7186 */
7188 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7189 g_assert_not_reached ();
7191 n = fsig->param_count + fsig->hasthis;
7193 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
7194 UNVERIFIED;
7196 if (!cfg->gshared)
7197 g_assert (!mono_method_check_context_used (cmethod));
7199 CHECK_STACK (n);
7201 //g_assert (!virtual_ || fsig->hasthis);
7203 sp -= n;
7205 if (virtual_ && cmethod && sp [0]->opcode == OP_TYPED_OBJREF) {
7206 ERROR_DECL (error);
7208 MonoMethod *new_cmethod = mono_class_get_virtual_method (sp [0]->klass, cmethod, FALSE, error);
7209 mono_error_assert_ok (error);
7210 cmethod = new_cmethod;
7211 virtual_ = FALSE;
7212 }
7214 if (cmethod && method_does_not_return (cmethod)) {
7215 cfg->cbb->out_of_line = TRUE;
7216 noreturn = TRUE;
7217 }
7219 cdata.method = method;
7220 cdata.inst_tailcall = inst_tailcall;
7222 /*
7223 * We have the `constrained.' prefix opcode.
7224 */
7225 if (constrained_class) {
7226 ins = handle_constrained_call (cfg, cmethod, fsig, constrained_class, sp, &cdata, &cmethod, &virtual_, &emit_widen);
7227 CHECK_CFG_EXCEPTION;
7228 constrained_class = NULL;
7229 if (ins)
7230 goto call_end;
7231 }
7233 for (int i = 0; i < fsig->param_count; ++i)
7234 sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
7236 if (check_call_signature (cfg, fsig, sp)) {
7237 if (break_on_unverified ())
7238 check_call_signature (cfg, fsig, sp); // Again, step through it.
7239 UNVERIFIED;
7240 }
7242 if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7243 delegate_invoke = TRUE;
7245 #ifndef ENABLE_NETCORE
7246 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7247 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7248 mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
7249 emit_widen = FALSE;
7250 }
7252 if (inst_tailcall) // FIXME
7253 mono_tailcall_print ("missed tailcall intrins_sharable %s -> %s\n", method->name, cmethod->name);
7254 goto call_end;
7255 }
7256 #endif
7258 /*
7259 * Implement a workaround for the inherent races involved in locking:
7260 * Monitor.Enter ()
7261 * try {
7262 * } finally {
7263 * Monitor.Exit ()
7264 * }
7265 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7266 * try block, the Exit () won't be executed, see:
7267 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7268 * To work around this, we extend such try blocks to include the last x bytes
7269 * of the Monitor.Enter () call.
7270 */
7271 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
7272 MonoBasicBlock *tbb;
7274 GET_BBLOCK (cfg, tbb, next_ip);
7275 /*
7276 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7277 * from Monitor.Enter like ArgumentNullException.
7278 */
7279 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7280 /* Mark this bblock as needing to be extended */
7281 tbb->extend_try_block = TRUE;
7282 }
7283 }
7285 /* Conversion to a JIT intrinsic */
7286 if ((ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7287 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7288 mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
7289 emit_widen = FALSE;
7290 }
7291 // FIXME This is only missed if in fact the intrinsic involves a call.
7292 if (inst_tailcall) // FIXME
7293 mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method->name, cmethod->name);
7294 goto call_end;
7295 }
7296 CHECK_CFG_ERROR;
7298 /*
7299 * If the callee is a shared method, then its static cctor
7300 * might not get called after the call was patched.
7301 */
7302 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7303 emit_class_init (cfg, cmethod->klass);
7304 CHECK_TYPELOAD (cmethod->klass);
7305 }
7307 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7309 if (cfg->gshared) {
7310 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7312 context_used = mini_method_check_context_used (cfg, cmethod);
7314 if (context_used && mono_class_is_interface (cmethod->klass)) {
7315 /* Generic method interface
7316 calls are resolved via a
7317 helper function and don't
7318 need an imt. */
7319 if (!cmethod_context || !cmethod_context->method_inst)
7320 pass_imt_from_rgctx = TRUE;
7321 }
7323 /*
7324 * If a shared method calls another
7325 * shared method then the caller must
7326 * have a generic sharing context
7327 * because the magic trampoline
7328 * requires it. FIXME: We shouldn't
7329 * have to force the vtable/mrgctx
7330 * variable here. Instead there
7331 * should be a flag in the cfg to
7332 * request a generic sharing context.
7333 */
7334 if (context_used &&
7335 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cfg->method->klass)))
7336 mono_get_vtable_var (cfg);
7337 }
7339 if (pass_vtable) {
7340 if (context_used) {
7341 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7342 } else {
7343 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, cfg->error);
7344 CHECK_CFG_ERROR;
7346 CHECK_TYPELOAD (cmethod->klass);
7347 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7348 }
7349 }
7351 if (pass_mrgctx) {
7352 g_assert (!vtable_arg);
7354 if (!cfg->compile_aot) {
7355 /*
7356 * emit_get_rgctx_method () calls mono_class_vtable () so check
7357 * for type load errors before.
7358 */
7359 mono_class_setup_vtable (cmethod->klass);
7360 CHECK_TYPELOAD (cmethod->klass);
7361 }
7363 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7365 /* !marshalbyref is needed to properly handle generic methods + remoting */
7366 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7367 MONO_METHOD_IS_FINAL (cmethod)) &&
7368 !mono_class_is_marshalbyref (cmethod->klass)) {
7369 if (virtual_)
7370 check_this = TRUE;
7371 virtual_ = FALSE;
7372 }
7373 }
7375 if (pass_imt_from_rgctx) {
7376 g_assert (!pass_vtable);
7378 imt_arg = emit_get_rgctx_method (cfg, context_used,
7379 cmethod, MONO_RGCTX_INFO_METHOD);
7380 g_assert (imt_arg);
7381 }
7383 if (check_this)
7384 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7386 /* Calling virtual generic methods */
7388 // These temporaries help detangle "pure" computation of
7389 // inputs to is_supported_tailcall from side effects, so that
7390 // is_supported_tailcall can be computed just once.
7391 gboolean virtual_generic; virtual_generic = FALSE;
7392 gboolean virtual_generic_imt; virtual_generic_imt = FALSE;
7394 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7395 !(MONO_METHOD_IS_FINAL (cmethod) &&
7396 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7397 fsig->generic_param_count &&
7398 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
7399 !cfg->llvm_only) {
7401 g_assert (fsig->is_inflated);
7403 virtual_generic = TRUE;
7405 /* Prevent inlining of methods that contain indirect calls */
7406 INLINE_FAILURE ("virtual generic call");
7408 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7409 GSHAREDVT_FAILURE (il_op);
7411 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
7412 virtual_generic_imt = TRUE;
7413 g_assert (!imt_arg);
7414 if (!context_used)
7415 g_assert (cmethod->is_inflated);
7417 imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
7418 g_assert (imt_arg);
7420 virtual_ = TRUE;
7421 vtable_arg = NULL;
7422 }
7423 }
7425 // Capture some intent before computing tailcall.
7427 gboolean make_generic_call_out_of_gsharedvt_method;
7428 gboolean will_have_imt_arg;
7430 make_generic_call_out_of_gsharedvt_method = FALSE;
7431 will_have_imt_arg = FALSE;
7433 /*
7434 * Making generic calls out of gsharedvt methods.
7435 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7436 * patching gshared method addresses into a gsharedvt method.
7437 */
7438 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
7439 !(m_class_get_rank (cmethod->klass) && m_class_get_byval_arg (cmethod->klass)->type != MONO_TYPE_SZARRAY) &&
7440 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
7442 make_generic_call_out_of_gsharedvt_method = TRUE;
7444 if (virtual_) {
7445 if (fsig->generic_param_count) {
7446 will_have_imt_arg = TRUE;
7447 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
7448 will_have_imt_arg = TRUE;
7449 }
7450 }
7451 }
7453 #ifdef ENABLE_NETCORE
7454 if (save_last_error) {
7455 mono_emit_jit_icall (cfg, mono_marshal_clear_last_error, NULL);
7456 }
7457 #endif
7459 /* Tail prefix / tailcall optimization */
7461 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests.
7462 Inlining and stack traces are not guaranteed however. */
7463 /* FIXME: runtime generic context pointer for jumps? */
7464 /* FIXME: handle this for generic sharing eventually */
7466 // tailcall means "the backend can and will handle it".
7467 // inst_tailcall means the tail. prefix is present.
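/*
 * Added sketch: the two flags correspond to IL like (hypothetical method)
 *     tail. call int32 Foo::Bar (int32)
 *     ret
 * inst_tailcall records that the prefix was seen (or, with
 * debug_tailcall_try_all, that a ret follows), while tailcall records that
 * is_supported_tailcall agreed the backend can actually honor it.
 */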
7468 tailcall_extra_arg = vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass);
7469 tailcall = inst_tailcall && is_supported_tailcall (cfg, ip, method, cmethod, fsig,
7470 virtual_, tailcall_extra_arg, &tailcall_calli);
7471 // Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall).
7472 // Capture values to later assert they don't change.
7473 called_is_supported_tailcall = TRUE;
7474 tailcall_method = method;
7475 tailcall_cmethod = cmethod;
7476 tailcall_fsig = fsig;
7477 tailcall_virtual = virtual_;
7479 if (virtual_generic) {
7480 if (virtual_generic_imt) {
7481 if (tailcall) {
7482 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7483 INLINE_FAILURE ("tailcall");
7484 }
7485 common_call = TRUE;
7486 goto call_end;
7487 }
7489 MonoInst *this_temp, *this_arg_temp, *store;
7490 MonoInst *iargs [4];
7492 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7493 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7494 MONO_ADD_INS (cfg->cbb, store);
7496 /* FIXME: This should be a managed pointer */
7497 this_arg_temp = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
7499 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7500 iargs [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
7502 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7503 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
7505 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7507 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7509 if (inst_tailcall) // FIXME
7510 mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method->name, cmethod->name);
7511 goto call_end;
7512 }
7513 CHECK_CFG_ERROR;
7515 /* Inlining */
7516 if ((cfg->opt & MONO_OPT_INLINE) &&
7517 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7518 mono_method_check_inlining (cfg, cmethod)) {
7519 int costs;
7520 gboolean always = FALSE;
7522 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7523 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7524 /* Prevent inlining of methods that call wrappers */
7525 INLINE_FAILURE ("wrapper call");
7526 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7527 // Neither pinvoke nor icall is likely to be tailcalled.
7528 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
7529 always = TRUE;
7530 }
7532 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
7533 if (costs) {
7534 cfg->real_offset += 5;
7536 if (!MONO_TYPE_IS_VOID (fsig->ret))
7537 /* *sp is already set by inline_method */
7538 ins = *sp;
7540 inline_costs += costs;
7541 // FIXME This is missed if the inlinee contains tail calls that
7542 // would work, but not once inlined into caller.
7543 // This matchingness could be a factor in inlining.
7544 // i.e. Do not inline if it hurts tailcall, do inline
7545 // if it helps and/or is neutral, and helps performance
7546 // using usual heuristics.
7547 // Note that inlining will expose multiple tailcall opportunities
7548 // so the tradeoff is not obvious. If we can tailcall anything
7549 // like desktop, then this factor mostly falls away, except
7550 // that inlining can affect tailcall performance due to
7551 // signature match/mismatch.
7552 if (inst_tailcall) // FIXME
7553 mono_tailcall_print ("missed tailcall inline %s -> %s\n", method->name, cmethod->name);
7554 goto call_end;
7555 }
7556 }
7558 /* Tail recursion elimination */
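/*
 * Added sketch: a self tailcall such as (hypothetical C#)
 *     static int Loop (int n) { return Loop (n - 1); }
 * is rewritten below into argument stores plus a branch back to the start
 * basic block, roughly "arg0 = <new n>; goto method_start;", instead of
 * growing the native stack.
 */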
7559 if (((cfg->opt & MONO_OPT_TAILCALL) || inst_tailcall) && il_op == MONO_CEE_CALL && cmethod == method && next_ip < end && next_ip [0] == CEE_RET && !vtable_arg) {
7560 gboolean has_vtargs = FALSE;
7561 int i;
7563 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7564 INLINE_FAILURE ("tailcall");
7566 /* keep it simple */
7567 for (i = fsig->param_count - 1; !has_vtargs && i >= 0; i--)
7568 has_vtargs = MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod)->params [i]);
7570 if (!has_vtargs) {
7571 if (need_seq_point) {
7572 emit_seq_point (cfg, method, ip, FALSE, TRUE);
7573 need_seq_point = FALSE;
7574 }
7575 for (i = 0; i < n; ++i)
7576 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7578 mini_profiler_emit_tail_call (cfg, cmethod);
7580 MONO_INST_NEW (cfg, ins, OP_BR);
7581 MONO_ADD_INS (cfg->cbb, ins);
7582 tblock = start_bblock->out_bb [0];
7583 link_bblock (cfg, cfg->cbb, tblock);
7584 ins->inst_target_bb = tblock;
7585 start_new_bblock = 1;
7587 /* skip the CEE_RET, too */
7588 if (ip_in_bb (cfg, cfg->cbb, next_ip))
7589 skip_ret = TRUE;
7590 push_res = FALSE;
7591 need_seq_point = FALSE;
7592 goto call_end;
7593 }
7594 }
7596 inline_costs += CALL_COST * MIN(10, num_calls++);
7598 /*
7599 * Synchronized wrappers.
7600 * It's hard to determine where to replace a method with its synchronized
7601 * wrapper without causing an infinite recursion. The current solution is
7602 * to add the synchronized wrapper in the trampolines, and to
7603 * change the called method to a dummy wrapper, and resolve that wrapper
7604 * to the real method in mono_jit_compile_method ().
7605 */
7606 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
7607 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
7608 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig)) {
7609 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7610 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7611 }
7612 }
7614 /*
7615 * Making generic calls out of gsharedvt methods.
7616 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7617 * patching gshared method addresses into a gsharedvt method.
7618 */
7619 if (make_generic_call_out_of_gsharedvt_method) {
7620 if (virtual_) {
7621 //if (mono_class_is_interface (cmethod->klass))
7622 //GSHAREDVT_FAILURE (il_op);
7623 // disable for possible remoting calls
7624 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
7625 GSHAREDVT_FAILURE (il_op);
7626 if (fsig->generic_param_count) {
7627 /* virtual generic call */
7628 g_assert (!imt_arg);
7629 g_assert (will_have_imt_arg);
7630 /* Same as the virtual generic case above */
7631 imt_arg = emit_get_rgctx_method (cfg, context_used,
7632 cmethod, MONO_RGCTX_INFO_METHOD);
7633 g_assert (imt_arg);
7634 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
7635 /* This can happen when we call a fully instantiated iface method */
7636 g_assert (will_have_imt_arg);
7637 imt_arg = emit_get_rgctx_method (cfg, context_used,
7638 cmethod, MONO_RGCTX_INFO_METHOD);
7639 g_assert (imt_arg);
7640 }
7641 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7642 vtable_arg = NULL;
7643 }
7645 if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
7646 keep_this_alive = sp [0];
7648 MonoRgctxInfoType info_type;
7650 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7651 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
7652 else
7653 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
7654 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
7656 if (cfg->llvm_only) {
7657 // FIXME: Avoid initializing vtable_arg
7658 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
7659 if (inst_tailcall) // FIXME
7660 mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method->name, cmethod->name);
7661 } else {
7662 tailcall = tailcall_calli;
7663 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
7664 tailcall_remove_ret |= tailcall;
7665 }
7666 goto call_end;
7667 }
7669 /* Generic sharing */
7671 /*
7672 * Use this if the callee is gsharedvt sharable too, since
7673 * at runtime we might find an instantiation so the call cannot
7674 * be patched (the 'no_patch' code path in mini-trampolines.c).
7675 */
7676 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
7677 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7678 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7679 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
7680 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7681 INLINE_FAILURE ("gshared");
7683 g_assert (cfg->gshared && cmethod);
7684 g_assert (!addr);
7686 /*
7687 * We are compiling a call to a
7688 * generic method from shared code,
7689 * which means that we have to look up
7690 * the method in the rgctx and do an
7691 * indirect call.
7692 */
7693 if (fsig->hasthis)
7694 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7696 if (cfg->llvm_only) {
7697 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
7698 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
7699 else
7700 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC);
7701 // FIXME: Avoid initializing imt_arg/vtable_arg
7702 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
7703 if (inst_tailcall) // FIXME
7704 mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method->name, cmethod->name);
7705 } else {
7706 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7707 if (inst_tailcall)
7708 mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli ? "making" : "missed", method->name, cmethod->name);
7709 tailcall = tailcall_calli;
7710 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
7711 tailcall_remove_ret |= tailcall;
7712 }
7713 goto call_end;
7714 }
7716 /* Direct calls to icalls */
7717 if (direct_icall) {
7718 MonoMethod *wrapper;
7719 int costs;
7721 /* Inline the wrapper */
7722 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
7724 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
7725 g_assert (costs > 0);
7726 cfg->real_offset += 5;
7728 if (!MONO_TYPE_IS_VOID (fsig->ret))
7729 /* *sp is already set by inline_method */
7730 ins = *sp;
7732 inline_costs += costs;
7734 if (inst_tailcall) // FIXME
7735 mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method->name, cmethod->name);
7736 goto call_end;
7737 }
7739 /* Array methods */
7740 if (array_rank) {
7741 MonoInst *addr;
7743 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7744 MonoInst *val = sp [fsig->param_count];
7746 if (val->type == STACK_OBJ) {
7747 MonoInst *iargs [2];
7749 iargs [0] = sp [0];
7750 iargs [1] = val;
7752 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7753 }
7755 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7756 if (!mini_debug_options.weak_memory_model && val->type == STACK_OBJ)
7757 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
7758 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7759 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
7760 mini_emit_write_barrier (cfg, addr, val);
7761 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
7762 GSHAREDVT_FAILURE (il_op);
7763 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7764 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7766 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7767 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7768 if (!m_class_is_valuetype (m_class_get_element_class (cmethod->klass)) && !readonly)
7769 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7770 CHECK_TYPELOAD (cmethod->klass);
7772 readonly = FALSE;
7773 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7774 ins = addr;
7775 } else {
7776 g_assert_not_reached ();
7777 }
7779 emit_widen = FALSE;
7780 if (inst_tailcall) // FIXME
7781 mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method->name, cmethod->name);
7782 goto call_end;
7783 }
7785 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
7786 if (ins) {
7787 if (inst_tailcall) // FIXME
7788 mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method->name, cmethod->name);
7789 goto call_end;
7790 }
7792 /* Tail prefix / tailcall optimization */
7794 if (tailcall) {
7795 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7796 INLINE_FAILURE ("tailcall");
7797 }
7799 /*
7800 * Virtual calls in llvm-only mode.
7801 */
7802 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
7803 ins = mini_emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
7804 goto call_end;
7805 }
7807 /* Common call */
7808 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
7809 INLINE_FAILURE ("call");
7810 common_call = TRUE;
7812 call_end:
7813 // Check that the decision to tailcall would not have changed.
7814 g_assert (!called_is_supported_tailcall || tailcall_method == method);
7815 // FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway.
7816 // If this still fails, restructure the code, or call tailcall_supported again and assert no change.
7817 g_assert (!called_is_supported_tailcall || !tailcall || tailcall_cmethod == cmethod);
7818 g_assert (!called_is_supported_tailcall || tailcall_fsig == fsig);
7819 g_assert (!called_is_supported_tailcall || tailcall_virtual == virtual_);
7820 g_assert (!called_is_supported_tailcall || tailcall_extra_arg == (vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass)));
7822 if (common_call) // FIXME goto call_end && !common_call often skips tailcall processing.
7823 ins = mini_emit_method_call_full (cfg, cmethod, fsig, tailcall, sp, virtual_ ? sp [0] : NULL,
7824 imt_arg, vtable_arg);
7826 /*
7827 * Handle devirt of some A.B.C calls by replacing the result of A.B with a OP_TYPED_OBJREF instruction, so the .C
7828 * call can be devirtualized above.
7829 */
7830 if (cmethod)
7831 ins = handle_call_res_devirt (cfg, cmethod, ins);
7833 if (noreturn) {
7834 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
7835 MONO_ADD_INS (cfg->cbb, ins);
7836 }
7837 calli_end:
7838 if ((tailcall_remove_ret || (common_call && tailcall)) && !cfg->llvm_only) {
7839 link_bblock (cfg, cfg->cbb, end_bblock);
7840 start_new_bblock = 1;
7842 // FIXME: Eliminate unreachable epilogs
7844 /*
7845 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7846 * only reachable from this call.
7847 */
7848 GET_BBLOCK (cfg, tblock, next_ip);
7849 if (tblock == cfg->cbb || tblock->in_count == 0)
7850 skip_ret = TRUE;
7851 push_res = FALSE;
7852 need_seq_point = FALSE;
7853 }
7855 if (ins_flag & MONO_INST_TAILCALL)
7856 mini_test_tailcall (cfg, tailcall);
7858 /* End of call, INS should contain the result of the call, if any */
7860 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
7861 g_assert (ins);
7862 if (emit_widen)
7863 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7864 else
7865 *sp++ = ins;
7866 }
7868 if (save_last_error) {
7869 save_last_error = FALSE;
7870 #ifdef TARGET_WIN32
7871 // Making icalls etc could clobber the value so emit inline code
7872 // to read last error on Windows.
7873 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
7874 ins->dreg = alloc_dreg (cfg, STACK_I4);
7875 ins->type = STACK_I4;
7876 MONO_ADD_INS (cfg->cbb, ins);
7877 mono_emit_jit_icall (cfg, mono_marshal_set_last_error_windows, &ins);
7878 #else
7879 mono_emit_jit_icall (cfg, mono_marshal_set_last_error, NULL);
7880 #endif
7881 }
7883 if (keep_this_alive) {
7884 MonoInst *dummy_use;
7886 /* See mini_emit_method_call_full () */
7887 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
7888 }
7890 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
7891 /*
7892 * Clang can convert these calls to tailcalls which screw up the stack
7893 * walk. This happens even when the -fno-optimize-sibling-calls
7894 * option is passed to clang.
7895 * Work around this by emitting a dummy call.
7896 */
7897 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
7898 }
7900 CHECK_CFG_EXCEPTION;
7902 if (skip_ret) {
7903 // FIXME When not followed by CEE_RET, correct behavior is to raise an exception.
7904 g_assert (next_ip [0] == CEE_RET);
7905 next_ip += 1;
7906 il_op = MonoOpcodeEnum_Invalid; // Call or ret? Unclear.
7907 }
7908 ins_flag = 0;
7909 constrained_class = NULL;
7911 if (need_seq_point) {
7912 // check if this is a nested call and remove the non_empty_stack of the last call, only for non-native methods
7913 if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) {
7914 if (emitted_funccall_seq_point) {
7915 if (cfg->last_seq_point)
7916 cfg->last_seq_point->flags |= MONO_INST_NESTED_CALL;
7917 }
7918 else
7919 emitted_funccall_seq_point = TRUE;
7920 }
7921 emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
7922 }
7923 break;
7924 }
7925 case MONO_CEE_RET:
7926 mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
7928 g_assert (!method_does_not_return (method));
7930 if (cfg->method != method) {
7931 /* return from inlined method */
7932 /*
7933 * If in_count == 0, that means the ret is unreachable due to
7934 * being preceded by a throw. In that case, inline_method () will
7935 * handle setting the return value
7936 * (test case: test_0_inline_throw ()).
7937 */
7938 if (return_var && cfg->cbb->in_count) {
7939 MonoType *ret_type = mono_method_signature_internal (method)->ret;
7941 MonoInst *store;
7942 CHECK_STACK (1);
7943 --sp;
7944 *sp = convert_value (cfg, ret_type, *sp);
7946 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7947 UNVERIFIED;
7949 //g_assert (returnvar != -1);
7950 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7951 cfg->ret_var_set = TRUE;
7952 }
7953 } else {
7954 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
7955 emit_pop_lmf (cfg);
7957 if (cfg->ret) {
7958 MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
7960 if (seq_points && !sym_seq_points) {
7961 /*
7962 * Place a seq point here too even though the IL stack is not
7963 * empty, so a step over on
7964 * call <FOO>
7965 * ret
7966 * will work correctly.
7967 */
7968 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7969 MONO_ADD_INS (cfg->cbb, ins);
7970 }
7972 g_assert (!return_var);
7973 CHECK_STACK (1);
7974 --sp;
7975 *sp = convert_value (cfg, ret_type, *sp);
7977 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7978 UNVERIFIED;
7980 emit_setret (cfg, *sp);
7981 }
7982 }
7983 if (sp != stack_start)
7984 UNVERIFIED;
7985 MONO_INST_NEW (cfg, ins, OP_BR);
7986 ins->inst_target_bb = end_bblock;
7987 MONO_ADD_INS (cfg->cbb, ins);
7988 link_bblock (cfg, cfg->cbb, end_bblock);
7989 start_new_bblock = 1;
7990 break;
7991 case MONO_CEE_BR_S:
7992 MONO_INST_NEW (cfg, ins, OP_BR);
7993 GET_BBLOCK (cfg, tblock, target);
7994 link_bblock (cfg, cfg->cbb, tblock);
7995 ins->inst_target_bb = tblock;
7996 if (sp != stack_start) {
7997 handle_stack_args (cfg, stack_start, sp - stack_start);
7998 sp = stack_start;
7999 CHECK_UNVERIFIABLE (cfg);
8000 }
8001 MONO_ADD_INS (cfg->cbb, ins);
8002 start_new_bblock = 1;
8003 inline_costs += BRANCH_COST;
8004 break;
8005 case MONO_CEE_BEQ_S:
8006 case MONO_CEE_BGE_S:
8007 case MONO_CEE_BGT_S:
8008 case MONO_CEE_BLE_S:
8009 case MONO_CEE_BLT_S:
8010 case MONO_CEE_BNE_UN_S:
8011 case MONO_CEE_BGE_UN_S:
8012 case MONO_CEE_BGT_UN_S:
8013 case MONO_CEE_BLE_UN_S:
8014 case MONO_CEE_BLT_UN_S:
8015 MONO_INST_NEW (cfg, ins, il_op + BIG_BRANCH_OFFSET);
8017 ADD_BINCOND (NULL);
8019 sp = stack_start;
8020 inline_costs += BRANCH_COST;
8021 break;
8022 case MONO_CEE_BR:
8023 MONO_INST_NEW (cfg, ins, OP_BR);
8025 GET_BBLOCK (cfg, tblock, target);
8026 link_bblock (cfg, cfg->cbb, tblock);
8027 ins->inst_target_bb = tblock;
8028 if (sp != stack_start) {
8029 handle_stack_args (cfg, stack_start, sp - stack_start);
8030 sp = stack_start;
8031 CHECK_UNVERIFIABLE (cfg);
8032 }
8034 MONO_ADD_INS (cfg->cbb, ins);
8036 start_new_bblock = 1;
8037 inline_costs += BRANCH_COST;
8038 break;
8039 case MONO_CEE_BRFALSE_S:
8040 case MONO_CEE_BRTRUE_S:
8041 case MONO_CEE_BRFALSE:
8042 case MONO_CEE_BRTRUE: {
8043 MonoInst *cmp;
8044 gboolean is_true = il_op == MONO_CEE_BRTRUE_S || il_op == MONO_CEE_BRTRUE;
8046 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8047 UNVERIFIED;
8049 sp--;
8051 GET_BBLOCK (cfg, tblock, target);
8052 link_bblock (cfg, cfg->cbb, tblock);
8053 GET_BBLOCK (cfg, tblock, next_ip);
8054 link_bblock (cfg, cfg->cbb, tblock);
8056 if (sp != stack_start) {
8057 handle_stack_args (cfg, stack_start, sp - stack_start);
8058 CHECK_UNVERIFIABLE (cfg);
8059 }
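/*
 * Added note: brtrue/brfalse are lowered to an explicit compare against zero
 * followed by a conditional branch, roughly
 *     OP_ICOMPARE_IMM sreg, 0
 *     CEE_BNE_UN -> target   (brtrue)    or    CEE_BEQ -> target   (brfalse)
 * with the fallthrough block recorded as inst_false_bb.
 */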
8061 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8062 cmp->sreg1 = sp [0]->dreg;
8063 type_from_op (cfg, cmp, sp [0], NULL);
8064 CHECK_TYPE (cmp);
8066 #if SIZEOF_REGISTER == 4
8067 if (cmp->opcode == OP_LCOMPARE_IMM) {
8068 /* Convert it to OP_LCOMPARE */
8069 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8070 ins->type = STACK_I8;
8071 ins->dreg = alloc_dreg (cfg, STACK_I8);
8072 ins->inst_l = 0;
8073 MONO_ADD_INS (cfg->cbb, ins);
8074 cmp->opcode = OP_LCOMPARE;
8075 cmp->sreg2 = ins->dreg;
8076 }
8077 #endif
8078 MONO_ADD_INS (cfg->cbb, cmp);
8080 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8081 type_from_op (cfg, ins, sp [0], NULL);
8082 MONO_ADD_INS (cfg->cbb, ins);
8083 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
8084 GET_BBLOCK (cfg, tblock, target);
8085 ins->inst_true_bb = tblock;
8086 GET_BBLOCK (cfg, tblock, next_ip);
8087 ins->inst_false_bb = tblock;
8088 start_new_bblock = 2;
8090 sp = stack_start;
8091 inline_costs += BRANCH_COST;
8092 break;
8093 }
8094 case MONO_CEE_BEQ:
8095 case MONO_CEE_BGE:
8096 case MONO_CEE_BGT:
8097 case MONO_CEE_BLE:
8098 case MONO_CEE_BLT:
8099 case MONO_CEE_BNE_UN:
8100 case MONO_CEE_BGE_UN:
8101 case MONO_CEE_BGT_UN:
8102 case MONO_CEE_BLE_UN:
8103 case MONO_CEE_BLT_UN:
8104 MONO_INST_NEW (cfg, ins, il_op);
8106 ADD_BINCOND (NULL);
8108 sp = stack_start;
8109 inline_costs += BRANCH_COST;
8110 break;
8111 case MONO_CEE_SWITCH: {
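/*
 * Added note: the switch operand is a 32 bit count N followed by N 32 bit
 * branch offsets, all relative to next_ip (the instruction after the whole
 * switch); that is what the read32 () calls below decode.
 */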
8112 MonoInst *src1;
8113 MonoBasicBlock **targets;
8114 MonoBasicBlock *default_bblock;
8115 MonoJumpInfoBBTable *table;
8116 int offset_reg = alloc_preg (cfg);
8117 int target_reg = alloc_preg (cfg);
8118 int table_reg = alloc_preg (cfg);
8119 int sum_reg = alloc_preg (cfg);
8120 gboolean use_op_switch;
8122 n = read32 (ip + 1);
8123 --sp;
8124 src1 = sp [0];
8125 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8126 UNVERIFIED;
8128 ip += 5;
8130 GET_BBLOCK (cfg, default_bblock, next_ip);
8131 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8133 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8134 for (i = 0; i < n; ++i) {
8135 GET_BBLOCK (cfg, tblock, next_ip + (gint32)read32 (ip));
8136 targets [i] = tblock;
8137 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8138 ip += 4;
8139 }
8141 if (sp != stack_start) {
8142 /*
8143 * Link the current bb with the targets as well, so handle_stack_args
8144 * will set their in_stack correctly.
8145 */
8146 link_bblock (cfg, cfg->cbb, default_bblock);
8147 for (i = 0; i < n; ++i)
8148 link_bblock (cfg, cfg->cbb, targets [i]);
8150 handle_stack_args (cfg, stack_start, sp - stack_start);
8151 sp = stack_start;
8152 CHECK_UNVERIFIABLE (cfg);
8154 /* Undo the links */
8155 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
8156 for (i = 0; i < n; ++i)
8157 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
8158 }
8160 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8161 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8163 for (i = 0; i < n; ++i)
8164 link_bblock (cfg, cfg->cbb, targets [i]);
8166 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8167 table->table = targets;
8168 table->table_size = n;
8170 use_op_switch = FALSE;
8171 #ifdef TARGET_ARM
8172 /* ARM implements SWITCH statements differently */
8173 /* FIXME: Make it use the generic implementation */
8174 if (!cfg->compile_aot)
8175 use_op_switch = TRUE;
8176 #endif
8178 if (COMPILE_LLVM (cfg))
8179 use_op_switch = TRUE;
8181 cfg->cbb->has_jump_table = 1;
8183 if (use_op_switch) {
8184 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8185 ins->sreg1 = src1->dreg;
8186 ins->inst_p0 = table;
8187 ins->inst_many_bb = targets;
8188 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
8189 MONO_ADD_INS (cfg->cbb, ins);
8190 } else {
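/*
 * Added sketch of the generic lowering: the bounds check above already
 * routed out-of-range values to default_bblock, so here we compute
 *     target = *(table + src1 * sizeof (gpointer))
 * (shift by 3 on 64 bit, by 2 on 32 bit) and branch through OP_BR_REG.
 */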
8191 if (TARGET_SIZEOF_VOID_P == 8)
8192 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8193 else
8194 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8196 #if SIZEOF_REGISTER == 8
8197 /* The upper word might not be zero, and we add it to a 64 bit address later */
8198 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8199 #endif
8201 if (cfg->compile_aot) {
8202 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8203 } else {
8204 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8205 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8206 ins->inst_p0 = table;
8207 ins->dreg = table_reg;
8208 MONO_ADD_INS (cfg->cbb, ins);
8209 }
8211 /* FIXME: Use load_memindex */
8212 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8213 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8214 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8216 start_new_bblock = 1;
8217 inline_costs += BRANCH_COST * 2;
8218 break;
8219 }
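/*
 * Added note: every ldind.* variant funnels into mini_emit_memory_load with
 * the element type implied by the opcode (ldind_to_type), which is also where
 * ins_flag (e.g. a volatile. prefix) is honored; ldind.i4, for instance, pops
 * an address and pushes the sign-extended 32 bit value read from it.
 */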
8220 case MONO_CEE_LDIND_I1:
8221 case MONO_CEE_LDIND_U1:
8222 case MONO_CEE_LDIND_I2:
8223 case MONO_CEE_LDIND_U2:
8224 case MONO_CEE_LDIND_I4:
8225 case MONO_CEE_LDIND_U4:
8226 case MONO_CEE_LDIND_I8:
8227 case MONO_CEE_LDIND_I:
8228 case MONO_CEE_LDIND_R4:
8229 case MONO_CEE_LDIND_R8:
8230 case MONO_CEE_LDIND_REF:
8231 --sp;
8233 ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (ldind_to_type (il_op)), sp [0], 0, ins_flag);
8234 *sp++ = ins;
8235 ins_flag = 0;
8236 break;
8237 case MONO_CEE_STIND_REF:
8238 case MONO_CEE_STIND_I1:
8239 case MONO_CEE_STIND_I2:
8240 case MONO_CEE_STIND_I4:
8241 case MONO_CEE_STIND_I8:
8242 case MONO_CEE_STIND_R4:
8243 case MONO_CEE_STIND_R8:
8244 case MONO_CEE_STIND_I: {
8245 sp -= 2;
8247 if (ins_flag & MONO_INST_VOLATILE) {
8248 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8249 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
8250 }
8252 if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8)
8253 sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]);
8254 if (!mini_debug_options.weak_memory_model && il_op == MONO_CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8255 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
8256 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (il_op), sp [0]->dreg, 0, sp [1]->dreg);
8257 ins->flags |= ins_flag;
8258 ins_flag = 0;
8260 MONO_ADD_INS (cfg->cbb, ins);
8262 if (il_op == MONO_CEE_STIND_REF) {
8263 /* stind.ref must only be used with object references. */
8264 if (sp [1]->type != STACK_OBJ)
8265 UNVERIFIED;
8266 if (cfg->gen_write_barriers && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
8267 mini_emit_write_barrier (cfg, sp [0], sp [1]);
8268 }
8270 inline_costs += 1;
8271 break;
8272 }
8273 case MONO_CEE_MUL:
8274 MONO_INST_NEW (cfg, ins, il_op);
8275 sp -= 2;
8276 ins->sreg1 = sp [0]->dreg;
8277 ins->sreg2 = sp [1]->dreg;
8278 type_from_op (cfg, ins, sp [0], sp [1]);
8279 CHECK_TYPE (ins);
8280 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
8282 /* Use the immediate opcodes if possible */
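/*
 * Added example: when the right operand is a constant, e.g. x * 8, the
 * two-register form is rewritten into its _IMM counterpart
 * (mono_op_to_op_imm_noemul), the constant is stored in the instruction
 * itself and the now-unused OP_ICONST is nullified.
 */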
8283 int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8285 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->inst_c0)) {
8286 if (imm_opcode != -1) {
8287 ins->opcode = imm_opcode;
8288 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8289 ins->sreg2 = -1;
8291 NULLIFY_INS (sp [1]);
8292 }
8293 }
8295 MONO_ADD_INS ((cfg)->cbb, (ins));
8297 *sp++ = mono_decompose_opcode (cfg, ins);
8298 break;
8299 case MONO_CEE_ADD:
8300 case MONO_CEE_SUB:
8301 case MONO_CEE_DIV:
8302 case MONO_CEE_DIV_UN:
8303 case MONO_CEE_REM:
8304 case MONO_CEE_REM_UN:
8305 case MONO_CEE_AND:
8306 case MONO_CEE_OR:
8307 case MONO_CEE_XOR:
8308 case MONO_CEE_SHL:
8309 case MONO_CEE_SHR:
8310 case MONO_CEE_SHR_UN: {
8311 MONO_INST_NEW (cfg, ins, il_op);
8312 sp -= 2;
8313 ins->sreg1 = sp [0]->dreg;
8314 ins->sreg2 = sp [1]->dreg;
8315 type_from_op (cfg, ins, sp [0], sp [1]);
8316 CHECK_TYPE (ins);
8317 add_widen_op (cfg, ins, &sp [0], &sp [1]);
8318 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
8320 /* Use the immediate opcodes if possible */
8321 int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8323 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) &&
8324 mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8325 if (imm_opcode != -1) {
8326 ins->opcode = imm_opcode;
8327 if (sp [1]->opcode == OP_I8CONST) {
8328 #if SIZEOF_REGISTER == 8
8329 ins->inst_imm = sp [1]->inst_l;
8330 #else
8331 ins->inst_l = sp [1]->inst_l;
8332 #endif
8333 } else {
8334 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8336 ins->sreg2 = -1;
8338 /* Might be followed by an instruction added by add_widen_op */
8339 if (sp [1]->next == NULL)
8340 NULLIFY_INS (sp [1]);
8341 }
8342 }
8343 MONO_ADD_INS ((cfg)->cbb, (ins));
8345 *sp++ = mono_decompose_opcode (cfg, ins);
8346 break;
8347 }
8348 case MONO_CEE_NEG:
8349 case MONO_CEE_NOT:
8350 case MONO_CEE_CONV_I1:
8351 case MONO_CEE_CONV_I2:
8352 case MONO_CEE_CONV_I4:
8353 case MONO_CEE_CONV_R4:
8354 case MONO_CEE_CONV_R8:
8355 case MONO_CEE_CONV_U4:
8356 case MONO_CEE_CONV_I8:
8357 case MONO_CEE_CONV_U8:
8358 case MONO_CEE_CONV_OVF_I8:
8359 case MONO_CEE_CONV_OVF_U8:
8360 case MONO_CEE_CONV_R_UN:
8361 /* Special case this earlier so we have long constants in the IR */
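/*
 * Added example: "ldc.i4 5; conv.i8" collapses into a single OP_I8CONST 5
 * here, with conv.u8 zero-extending ((guint32)data) and conv.i8
 * sign-extending, so later passes see a real 64 bit constant.
 */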
8362 if ((il_op == MONO_CEE_CONV_I8 || il_op == MONO_CEE_CONV_U8) && (sp [-1]->opcode == OP_ICONST)) {
8363 int data = sp [-1]->inst_c0;
8364 sp [-1]->opcode = OP_I8CONST;
8365 sp [-1]->type = STACK_I8;
8366 #if SIZEOF_REGISTER == 8
8367 if (il_op == MONO_CEE_CONV_U8)
8368 sp [-1]->inst_c0 = (guint32)data;
8369 else
8370 sp [-1]->inst_c0 = data;
8371 #else
8372 if (il_op == MONO_CEE_CONV_U8)
8373 sp [-1]->inst_l = (guint32)data;
8374 else
8375 sp [-1]->inst_l = data;
8376 #endif
8377 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8378 }
8379 else {
8380 ADD_UNOP (il_op);
8381 }
8382 break;
8383 case MONO_CEE_CONV_OVF_I4:
8384 case MONO_CEE_CONV_OVF_I1:
8385 case MONO_CEE_CONV_OVF_I2:
8386 case MONO_CEE_CONV_OVF_I:
8387 case MONO_CEE_CONV_OVF_U:
8388 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
8389 ADD_UNOP (CEE_CONV_OVF_I8);
8390 ADD_UNOP (il_op);
8391 } else {
8392 ADD_UNOP (il_op);
8393 }
8394 break;
8395 case MONO_CEE_CONV_OVF_U1:
8396 case MONO_CEE_CONV_OVF_U2:
8397 case MONO_CEE_CONV_OVF_U4:
8398 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
8399 ADD_UNOP (CEE_CONV_OVF_U8);
8400 ADD_UNOP (il_op);
8401 } else {
8402 ADD_UNOP (il_op);
8403 }
8404 break;
8405 case MONO_CEE_CONV_OVF_I1_UN:
8406 case MONO_CEE_CONV_OVF_I2_UN:
8407 case MONO_CEE_CONV_OVF_I4_UN:
8408 case MONO_CEE_CONV_OVF_I8_UN:
8409 case MONO_CEE_CONV_OVF_U1_UN:
8410 case MONO_CEE_CONV_OVF_U2_UN:
8411 case MONO_CEE_CONV_OVF_U4_UN:
8412 case MONO_CEE_CONV_OVF_U8_UN:
8413 case MONO_CEE_CONV_OVF_I_UN:
8414 case MONO_CEE_CONV_OVF_U_UN:
8415 case MONO_CEE_CONV_U2:
8416 case MONO_CEE_CONV_U1:
8417 case MONO_CEE_CONV_I:
8418 case MONO_CEE_CONV_U:
8419 ADD_UNOP (il_op);
8420 CHECK_CFG_EXCEPTION;
8421 break;
8422 case MONO_CEE_ADD_OVF:
8423 case MONO_CEE_ADD_OVF_UN:
8424 case MONO_CEE_MUL_OVF:
8425 case MONO_CEE_MUL_OVF_UN:
8426 case MONO_CEE_SUB_OVF:
8427 case MONO_CEE_SUB_OVF_UN:
8428 ADD_BINOP (il_op);
8429 break;
8430 case MONO_CEE_CPOBJ:
8431 GSHAREDVT_FAILURE (il_op);
8432 GSHAREDVT_FAILURE (*ip);
8433 klass = mini_get_class (method, token, generic_context);
8434 CHECK_TYPELOAD (klass);
8435 sp -= 2;
8436 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
8437 ins_flag = 0;
8438 break;
8439 case MONO_CEE_LDOBJ: {
8440 int loc_index = -1;
8441 int stloc_len = 0;
8443 --sp;
8444 klass = mini_get_class (method, token, generic_context);
8445 CHECK_TYPELOAD (klass);
8447 /* Optimize the common ldobj+stloc combination */
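/*
 * Added sketch: the pair
 *     ldobj T      (address already on the stack)
 *     stloc.0
 * is fused into one EMIT_NEW_LOAD_MEMBASE_TYPE whose destination is the
 * local's dreg, and next_ip is advanced past the stloc.
 */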
8448 if (next_ip < end) {
8449 switch (next_ip [0]) {
8450 case MONO_CEE_STLOC_S:
8451 CHECK_OPSIZE (7);
8452 loc_index = next_ip [1];
8453 stloc_len = 2;
8454 break;
8455 case MONO_CEE_STLOC_0:
8456 case MONO_CEE_STLOC_1:
8457 case MONO_CEE_STLOC_2:
8458 case MONO_CEE_STLOC_3:
8459 loc_index = next_ip [0] - CEE_STLOC_0;
8460 stloc_len = 1;
8461 break;
8462 default:
8463 break;
8464 }
8465 }
8467 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, next_ip)) {
8468 CHECK_LOCAL (loc_index);
8470 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), sp [0]->dreg, 0);
8471 ins->dreg = cfg->locals [loc_index]->dreg;
8472 ins->flags |= ins_flag;
8473 il_op = (MonoOpcodeEnum)next_ip [0];
8474 next_ip += stloc_len;
8475 if (ins_flag & MONO_INST_VOLATILE) {
8476 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8477 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
8478 }
8479 ins_flag = 0;
8480 break;
8481 }
8483 /* Optimize the ldobj+stobj combination */
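/* Illustrative example (not from this file): for
 *   ldobj T
 *   stobj T
 * with a destination and a source address on the stack, the copy
 * below goes directly from source to destination without
 * materializing the intermediate value.
 */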
8484 if (next_ip + 4 < end && next_ip [0] == CEE_STOBJ && ip_in_bb (cfg, cfg->cbb, next_ip) && read32 (next_ip + 1) == token) {
8485 CHECK_STACK (1);
8487 sp --;
8489 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
8491 il_op = (MonoOpcodeEnum)next_ip [0];
8492 next_ip += 5;
8493 ins_flag = 0;
8494 break;
8495 }
8497 ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (klass), sp [0], 0, ins_flag);
8498 *sp++ = ins;
8500 ins_flag = 0;
8501 inline_costs += 1;
8502 break;
8504 case MONO_CEE_LDSTR:
8505 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8506 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8507 ins->type = STACK_OBJ;
8508 *sp = ins;
8509 }
8510 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8511 MonoInst *iargs [1];
8512 char *str = (char *)mono_method_get_wrapper_data (method, n);
8514 if (cfg->compile_aot)
8515 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
8516 else
8517 EMIT_NEW_PCONST (cfg, iargs [0], str);
8518 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper_internal, iargs);
8519 } else {
8520 if (cfg->opt & MONO_OPT_SHARED) {
8521 MonoInst *iargs [3];
8523 if (cfg->compile_aot) {
8524 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8525 }
8526 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8527 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8528 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8529 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
8530 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), cfg->error);
8531 CHECK_CFG_ERROR;
8532 } else {
8533 if (cfg->cbb->out_of_line) {
8534 MonoInst *iargs [2];
8536 if (image == mono_defaults.corlib) {
8537 /*
8538 * Avoid relocations in AOT and save some space by using a
8539 * version of helper_ldstr specialized to mscorlib.
8540 */
8541 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8542 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8543 } else {
8544 /* Avoid creating the string object */
8545 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8546 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8547 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8548 }
8549 }
8550 else
8551 if (cfg->compile_aot) {
8552 NEW_LDSTRCONST (cfg, ins, image, n);
8553 *sp = ins;
8554 MONO_ADD_INS (cfg->cbb, ins);
8555 }
8556 else {
8557 NEW_PCONST (cfg, ins, NULL);
8558 ins->type = STACK_OBJ;
8559 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), cfg->error);
8560 CHECK_CFG_ERROR;
8562 if (!ins->inst_p0)
8563 OUT_OF_MEMORY_FAILURE;
8565 *sp = ins;
8566 MONO_ADD_INS (cfg->cbb, ins);
8567 }
8568 }
8569 }
8571 sp++;
8572 break;
8573 case MONO_CEE_NEWOBJ: {
8574 MonoInst *iargs [2];
8575 MonoMethodSignature *fsig;
8576 MonoInst this_ins;
8577 MonoInst *alloc;
8578 MonoInst *vtable_arg = NULL;
8580 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8581 CHECK_CFG_ERROR;
8583 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, cfg->error);
8584 CHECK_CFG_ERROR;
8586 mono_save_token_info (cfg, image, token, cmethod);
8588 if (!mono_class_init_internal (cmethod->klass))
8589 TYPE_LOAD_ERROR (cmethod->klass);
8591 context_used = mini_method_check_context_used (cfg, cmethod);
8593 if (!dont_verify && !cfg->skip_visibility) {
8594 MonoMethod *cil_method = cmethod;
8595 MonoMethod *target_method = cil_method;
8597 if (method->is_inflated) {
8598 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), cfg->error);
8599 CHECK_CFG_ERROR;
8600 }
8602 if (!mono_method_can_access_method (method_definition, target_method) &&
8603 !mono_method_can_access_method (method, cil_method))
8604 emit_method_access_failure (cfg, method, cil_method);
8605 }
8607 if (mono_security_core_clr_enabled ())
8608 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
8610 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8611 emit_class_init (cfg, cmethod->klass);
8612 CHECK_TYPELOAD (cmethod->klass);
8613 }
8616 if (cfg->gsharedvt) {
8617 if (mini_is_gsharedvt_variable_signature (fsig))
8618 GSHAREDVT_FAILURE (il_op);
8619 }
8622 n = fsig->param_count;
8623 CHECK_STACK (n);
8625 /*
8626 * Generate smaller code for the common newobj <exception> instruction in
8627 * argument checking code.
8628 */
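/* Illustrative example (not from this file): argument-validation code
 * like
 *   throw new ArgumentNullException ("foo");
 * takes this path and becomes a direct call to
 * mono_create_corlib_exception_1 with the type token and the message
 * string, instead of a full allocation plus ctor call.
 */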
8629 if (cfg->cbb->out_of_line && m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
8630 is_exception_class (cmethod->klass) && n <= 2 &&
8631 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8632 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8633 MonoInst *iargs [3];
8635 sp -= n;
8637 EMIT_NEW_ICONST (cfg, iargs [0], m_class_get_type_token (cmethod->klass));
8638 switch (n) {
8639 case 0:
8640 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8641 break;
8642 case 1:
8643 iargs [1] = sp [0];
8644 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8645 break;
8646 case 2:
8647 iargs [1] = sp [0];
8648 iargs [2] = sp [1];
8649 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8650 break;
8651 default:
8652 g_assert_not_reached ();
8653 }
8655 inline_costs += 5;
8656 break;
8657 }
8659 /* move the args to allow room for 'this' in the first position */
8660 while (n--) {
8661 --sp;
8662 sp [1] = sp [0];
8663 }
8665 for (int i = 0; i < fsig->param_count; ++i)
8666 sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
8668 /* check_call_signature () requires sp[0] to be set */
8669 this_ins.type = STACK_OBJ;
8670 sp [0] = &this_ins;
8671 if (check_call_signature (cfg, fsig, sp))
8672 UNVERIFIED;
8674 iargs [0] = NULL;
8676 if (mini_class_is_system_array (cmethod->klass)) {
8677 *sp = emit_get_rgctx_method (cfg, context_used,
8678 cmethod, MONO_RGCTX_INFO_METHOD);
8679 /* Optimize the common cases */
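/* Illustrative example (not from this file): a two-argument array
 * ctor such as new T[x, y] is routed to mono_array_new_2 with the
 * lengths as direct arguments; ctors with more than four arguments
 * fall back to mono_array_new_n_icall with the lengths spilled to a
 * stack-allocated buffer (the OP_LOCALLOC_IMM below).
 */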
8680 MonoJitICallId function = MONO_JIT_ICALL_ZeroIsReserved;
8681 int n = fsig->param_count;
8682 switch (n) {
8683 case 1: function = MONO_JIT_ICALL_mono_array_new_1;
8684 break;
8685 case 2: function = MONO_JIT_ICALL_mono_array_new_2;
8686 break;
8687 case 3: function = MONO_JIT_ICALL_mono_array_new_3;
8688 break;
8689 case 4: function = MONO_JIT_ICALL_mono_array_new_4;
8690 break;
8691 default:
8692 // FIXME Maximum value of param_count? Realistically 64. Fits in imm?
8693 if (!array_new_localalloc_ins) {
8694 MONO_INST_NEW (cfg, array_new_localalloc_ins, OP_LOCALLOC_IMM);
8695 array_new_localalloc_ins->dreg = alloc_preg (cfg);
8696 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8697 MONO_ADD_INS (init_localsbb, array_new_localalloc_ins);
8698 }
8699 array_new_localalloc_ins->inst_imm = MAX (array_new_localalloc_ins->inst_imm, n * sizeof (target_mgreg_t));
8700 int dreg = array_new_localalloc_ins->dreg;
8701 for (int i = 0; i < n; ++i) {
8702 NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
8703 MONO_ADD_INS (cfg->cbb, ins);
8704 }
8705 EMIT_NEW_ICONST (cfg, ins, n);
8706 sp [1] = ins;
8707 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), dreg);
8708 ins->type = STACK_PTR;
8709 sp [2] = ins;
8710 // FIXME Adjust sp by n - 3? Attempts failed.
8711 function = MONO_JIT_ICALL_mono_array_new_n_icall;
8712 break;
8713 }
8714 alloc = mono_emit_jit_icall_id (cfg, function, sp);
8715 } else if (cmethod->string_ctor) {
8716 g_assert (!context_used);
8717 g_assert (!vtable_arg);
8718 /* we simply pass a null pointer */
8719 EMIT_NEW_PCONST (cfg, *sp, NULL);
8720 /* now call the string ctor */
8721 alloc = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
8722 } else {
8723 if (m_class_is_valuetype (cmethod->klass)) {
8724 iargs [0] = mono_compile_create_var (cfg, m_class_get_byval_arg (cmethod->klass), OP_LOCAL);
8725 emit_init_rvar (cfg, iargs [0]->dreg, m_class_get_byval_arg (cmethod->klass));
8726 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8728 alloc = NULL;
8730 /*
8731 * The code generated by mini_emit_virtual_call () expects
8732 * iargs [0] to be a boxed instance, but luckily the vcall
8733 * will be transformed into a normal call there.
8734 */
8735 } else if (context_used) {
8736 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8737 *sp = alloc;
8738 } else {
8739 MonoVTable *vtable = NULL;
8741 if (!cfg->compile_aot)
8742 vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, cfg->error);
8743 CHECK_CFG_ERROR;
8744 CHECK_TYPELOAD (cmethod->klass);
8746 /*
8747 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8748 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8749 * As a workaround, we call class cctors before allocating objects.
8750 */
8751 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
8752 emit_class_init (cfg, cmethod->klass);
8753 if (cfg->verbose_level > 2)
8754 printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod->klass), m_class_get_name (cmethod->klass));
8755 class_inits = g_slist_prepend (class_inits, cmethod->klass);
8756 }
8758 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8759 *sp = alloc;
8760 }
8761 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8763 if (alloc)
8764 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8766 /* Now call the actual ctor */
8767 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
8768 CHECK_CFG_EXCEPTION;
8769 }
8771 if (alloc == NULL) {
8772 /* Valuetype */
8773 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8774 mini_type_to_eval_stack_type (cfg, m_class_get_byval_arg (ins->klass), ins);
8775 *sp++= ins;
8776 } else {
8777 *sp++ = alloc;
8778 }
8780 inline_costs += 5;
8781 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
8782 emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
8783 break;
8784 }
8785 case MONO_CEE_CASTCLASS:
8786 case MONO_CEE_ISINST: {
8787 --sp;
8788 klass = mini_get_class (method, token, generic_context);
8789 CHECK_TYPELOAD (klass);
8790 if (sp [0]->type != STACK_OBJ)
8791 UNVERIFIED;
8793 MONO_INST_NEW (cfg, ins, (il_op == MONO_CEE_ISINST) ? OP_ISINST : OP_CASTCLASS);
8794 ins->dreg = alloc_preg (cfg);
8795 ins->sreg1 = (*sp)->dreg;
8796 ins->klass = klass;
8797 ins->type = STACK_OBJ;
8798 MONO_ADD_INS (cfg->cbb, ins);
8800 CHECK_CFG_EXCEPTION;
8801 *sp++ = ins;
8803 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
8804 break;
8805 }
8806 case MONO_CEE_UNBOX_ANY: {
8807 MonoInst *res, *addr;
8809 --sp;
8810 klass = mini_get_class (method, token, generic_context);
8811 CHECK_TYPELOAD (klass);
8813 mono_save_token_info (cfg, image, token, klass);
8815 context_used = mini_class_check_context_used (cfg, klass);
8817 if (mini_is_gsharedvt_klass (klass)) {
8818 res = handle_unbox_gsharedvt (cfg, klass, *sp);
8819 inline_costs += 2;
8820 } else if (mini_class_is_reference (klass)) {
8821 if (MONO_INS_IS_PCONST_NULL (*sp)) {
8822 EMIT_NEW_PCONST (cfg, res, NULL);
8823 res->type = STACK_OBJ;
8824 } else {
8825 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
8826 res->dreg = alloc_preg (cfg);
8827 res->sreg1 = (*sp)->dreg;
8828 res->klass = klass;
8829 res->type = STACK_OBJ;
8830 MONO_ADD_INS (cfg->cbb, res);
8831 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
8832 }
8833 } else if (mono_class_is_nullable (klass)) {
8834 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
8835 } else {
8836 addr = handle_unbox (cfg, klass, sp, context_used);
8837 /* LDOBJ */
8838 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
8839 res = ins;
8840 inline_costs += 2;
8841 }
8843 *sp ++ = res;
8844 break;
8845 }
8846 case MONO_CEE_BOX: {
8847 MonoInst *val;
8848 MonoClass *enum_class;
8849 MonoMethod *has_flag;
8851 --sp;
8852 val = *sp;
8853 klass = mini_get_class (method, token, generic_context);
8854 CHECK_TYPELOAD (klass);
8856 mono_save_token_info (cfg, image, token, klass);
8858 context_used = mini_class_check_context_used (cfg, klass);
8860 if (mini_class_is_reference (klass)) {
8861 *sp++ = val;
8862 break;
8863 }
8865 val = convert_value (cfg, m_class_get_byval_arg (klass), val);
8867 if (klass == mono_defaults.void_class)
8868 UNVERIFIED;
8869 if (target_type_is_incompatible (cfg, m_class_get_byval_arg (klass), val))
8870 UNVERIFIED;
8871 /* frequent check in generic code: box (struct), brtrue */
8873 /*
8874 * Look for:
8876 * <push int/long ptr>
8877 * <push int/long>
8878 * box MyFlags
8879 * constrained. MyFlags
8880 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
8882 * If we find this sequence and the operand types on box and constrained
8883 * are equal, we can emit a specialized instruction sequence instead of
8884 * the very slow HasFlag () call.
8885 * This code sequence is generated by older mcs/csc, the newer one is handled in
8886 * emit_inst_for_method ().
8887 */
8888 guint32 constrained_token;
8889 guint32 callvirt_token;
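/* When the pattern matches, mini_handle_enum_has_flag expands the
 * call to, roughly, (this_val & flag_val) == flag_val on the enum's
 * underlying integer type (an illustration; see that function for
 * the exact IR emitted).
 */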
8891 if ((cfg->opt & MONO_OPT_INTRINS) &&
8892 // FIXME ip_in_bb as we go?
8893 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8894 (ip = il_read_constrained (next_ip, end, &constrained_token)) &&
8895 ip_in_bb (cfg, cfg->cbb, ip) &&
8896 (ip = il_read_callvirt (ip, end, &callvirt_token)) &&
8897 ip_in_bb (cfg, cfg->cbb, ip) &&
8898 m_class_is_enumtype (klass) &&
8899 (enum_class = mini_get_class (method, constrained_token, generic_context)) &&
8900 (has_flag = mini_get_method (cfg, method, callvirt_token, NULL, generic_context)) &&
8901 has_flag->klass == mono_defaults.enum_class &&
8902 !strcmp (has_flag->name, "HasFlag") &&
8903 has_flag->signature->hasthis &&
8904 has_flag->signature->param_count == 1) {
8905 CHECK_TYPELOAD (enum_class);
8907 if (enum_class == klass) {
8908 MonoInst *enum_this, *enum_flag;
8910 next_ip = ip;
8911 il_op = MONO_CEE_CALLVIRT;
8912 --sp;
8914 enum_this = sp [0];
8915 enum_flag = sp [1];
8917 *sp++ = mini_handle_enum_has_flag (cfg, klass, enum_this, -1, enum_flag);
8918 break;
8919 }
8920 }
8922 guint32 unbox_any_token;
8924 /*
8925 * Common in generic code:
8926 * box T1, unbox.any T2.
8927 */
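/* Illustrative example (not from this file): C# like
 *   (T)(object)val
 * in a generic method emits box T / unbox.any T; when the two
 * tokens resolve to the same class the pair is a no-op and val is
 * pushed back unchanged.
 */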
8928 if ((cfg->opt & MONO_OPT_INTRINS) &&
8929 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8930 (ip = il_read_unbox_any (next_ip, end, &unbox_any_token))) {
8931 MonoClass *unbox_klass = mini_get_class (method, unbox_any_token, generic_context);
8932 CHECK_TYPELOAD (unbox_klass);
8934 if (klass == unbox_klass) {
8935 next_ip = ip;
8936 *sp++ = val;
8937 break;
8938 }
8939 }
8941 #ifdef ENABLE_NETCORE
8942 // Optimize
8944 // box
8945 // ldnull
8946 // ceq (or cgt.un)
8948 // to just
8950 // ldc.i4.0 (or 1)
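// This is valid because boxing a non-nullable, non-gsharedvt value
// type can never produce a null reference, so "boxed == null" is
// constant FALSE (ceq -> 0) and "boxed != null" is constant TRUE
// (cgt.un -> 1).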
8951 guchar* ldnull_ip;
8952 if ((ldnull_ip = il_read_op (next_ip, end, CEE_LDNULL, MONO_CEE_LDNULL)) && ip_in_bb (cfg, cfg->cbb, ldnull_ip)) {
8953 gboolean is_eq = FALSE, is_neq = FALSE;
8954 if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CEQ)))
8955 is_eq = TRUE;
8956 else if ((ip = il_read_op (ldnull_ip, end, CEE_PREFIX1, MONO_CEE_CGT_UN)))
8957 is_neq = TRUE;
8959 if ((is_eq || is_neq) && ip_in_bb (cfg, cfg->cbb, ip) &&
8960 !mono_class_is_nullable (klass) && !mini_is_gsharedvt_klass (klass)) {
8961 next_ip = ip;
8962 il_op = (MonoOpcodeEnum) (is_eq ? CEE_LDC_I4_0 : CEE_LDC_I4_1);
8963 EMIT_NEW_ICONST (cfg, ins, is_eq ? 0 : 1);
8964 ins->type = STACK_I4;
8965 *sp++ = ins;
8966 break;
8967 }
8968 }
8969 #endif
8971 gboolean is_true;
8973 // FIXME: LLVM can't handle the inconsistent bb linking
8974 if (!mono_class_is_nullable (klass) &&
8975 !mini_is_gsharedvt_klass (klass) &&
8976 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8977 ( (is_true = !!(ip = il_read_brtrue (next_ip, end, &target))) ||
8978 (is_true = !!(ip = il_read_brtrue_s (next_ip, end, &target))) ||
8979 (ip = il_read_brfalse (next_ip, end, &target)) ||
8980 (ip = il_read_brfalse_s (next_ip, end, &target)))) {
8982 int dreg;
8983 MonoBasicBlock *true_bb, *false_bb;
8985 il_op = (MonoOpcodeEnum)next_ip [0];
8986 next_ip = ip;
8988 if (cfg->verbose_level > 3) {
8989 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8990 printf ("<box+brtrue opt>\n");
8991 }
8993 /*
8994 * We need to link both bblocks, since it is needed for handling stack
8995 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8996 * Branching to only one of them would lead to inconsistencies, so
8997 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8998 */
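/* Illustrative example (an assumption about the producing compiler,
 * not from this file): generic code that tests a boxed value against
 * null, roughly
 *   if ((object)val != null) ...
 * can reach here as box T / brtrue; for a value type T the boxed
 * result is never null, so the branch direction is known at compile
 * time and only the constant-condition branch below is emitted.
 */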
8999 GET_BBLOCK (cfg, true_bb, target);
9000 GET_BBLOCK (cfg, false_bb, next_ip);
9002 mono_link_bblock (cfg, cfg->cbb, true_bb);
9003 mono_link_bblock (cfg, cfg->cbb, false_bb);
9005 if (sp != stack_start) {
9006 handle_stack_args (cfg, stack_start, sp - stack_start);
9007 sp = stack_start;
9008 CHECK_UNVERIFIABLE (cfg);
9011 if (COMPILE_LLVM (cfg)) {
9012 dreg = alloc_ireg (cfg);
9013 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9014 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9016 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9017 } else {
9018 /* The JIT can't eliminate the iconst+compare */
9019 MONO_INST_NEW (cfg, ins, OP_BR);
9020 ins->inst_target_bb = is_true ? true_bb : false_bb;
9021 MONO_ADD_INS (cfg->cbb, ins);
9022 }
9024 start_new_bblock = 1;
9025 break;
9026 }
9028 if (m_class_is_enumtype (klass) && !mini_is_gsharedvt_klass (klass) && !(val->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4)) {
9029 /* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is run after the long decomp pass */
9030 if (val->opcode == OP_ICONST) {
9031 MONO_INST_NEW (cfg, ins, OP_BOX_ICONST);
9032 ins->type = STACK_OBJ;
9033 ins->klass = klass;
9034 ins->inst_c0 = val->inst_c0;
9035 ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
9036 } else {
9037 MONO_INST_NEW (cfg, ins, OP_BOX);
9038 ins->type = STACK_OBJ;
9039 ins->klass = klass;
9040 ins->sreg1 = val->dreg;
9041 ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
9042 }
9043 MONO_ADD_INS (cfg->cbb, ins);
9044 *sp++ = ins;
9045 /* Create domainvar early so it gets initialized earlier than this code */
9046 if (cfg->opt & MONO_OPT_SHARED)
9047 mono_get_domainvar (cfg);
9048 } else {
9049 *sp++ = mini_emit_box (cfg, val, klass, context_used);
9050 }
9051 CHECK_CFG_EXCEPTION;
9052 inline_costs += 1;
9053 break;
9054 }
9055 case MONO_CEE_UNBOX: {
9056 --sp;
9057 klass = mini_get_class (method, token, generic_context);
9058 CHECK_TYPELOAD (klass);
9060 mono_save_token_info (cfg, image, token, klass);
9062 context_used = mini_class_check_context_used (cfg, klass);
9064 if (mono_class_is_nullable (klass)) {
9065 MonoInst *val;
9067 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9068 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), m_class_get_byval_arg (val->klass));
9070 *sp++= ins;
9071 } else {
9072 ins = handle_unbox (cfg, klass, sp, context_used);
9073 *sp++ = ins;
9074 }
9075 inline_costs += 2;
9076 break;
9077 }
9078 case MONO_CEE_LDFLD:
9079 case MONO_CEE_LDFLDA:
9080 case MONO_CEE_STFLD:
9081 case MONO_CEE_LDSFLD:
9082 case MONO_CEE_LDSFLDA:
9083 case MONO_CEE_STSFLD: {
9084 MonoClassField *field;
9085 #ifndef DISABLE_REMOTING
9086 int costs;
9087 #endif
9088 guint foffset;
9089 gboolean is_instance;
9090 gpointer addr = NULL;
9091 gboolean is_special_static;
9092 MonoType *ftype;
9093 MonoInst *store_val = NULL;
9094 MonoInst *thread_ins;
9096 is_instance = (il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDFLDA || il_op == MONO_CEE_STFLD);
9097 if (is_instance) {
9098 if (il_op == MONO_CEE_STFLD) {
9099 sp -= 2;
9100 store_val = sp [1];
9101 } else {
9102 --sp;
9104 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9105 UNVERIFIED;
9106 if (il_op != MONO_CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9107 UNVERIFIED;
9108 } else {
9109 if (il_op == MONO_CEE_STSFLD) {
9110 sp--;
9111 store_val = sp [0];
9112 }
9113 }
9115 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9116 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
9117 klass = field->parent;
9118 }
9119 else {
9120 field = mono_field_from_token_checked (image, token, &klass, generic_context, cfg->error);
9121 CHECK_CFG_ERROR;
9122 }
9123 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9124 FIELD_ACCESS_FAILURE (method, field);
9125 mono_class_init_internal (klass);
9126 mono_class_setup_fields (klass);
9128 /* if the class is Critical then transparent code cannot access its fields */
9129 if (!is_instance && mono_security_core_clr_enabled ())
9130 ensure_method_is_allowed_to_access_field (cfg, method, field);
9132 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9133 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9134 if (mono_security_core_clr_enabled ())
9135 ensure_method_is_allowed_to_access_field (cfg, method, field);
9136 */
9138 ftype = mono_field_get_type_internal (field);
9140 /*
9141 * LDFLD etc. is usable on static fields as well, so convert those cases to
9142 * the static case.
9143 */
9144 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
9145 switch (il_op) {
9146 case MONO_CEE_LDFLD:
9147 il_op = MONO_CEE_LDSFLD;
9148 break;
9149 case MONO_CEE_STFLD:
9150 il_op = MONO_CEE_STSFLD;
9151 break;
9152 case MONO_CEE_LDFLDA:
9153 il_op = MONO_CEE_LDSFLDA;
9154 break;
9155 default:
9156 g_assert_not_reached ();
9157 }
9158 is_instance = FALSE;
9159 }
9161 context_used = mini_class_check_context_used (cfg, klass);
9163 if (il_op == MONO_CEE_LDSFLD) {
9164 ins = mini_emit_inst_for_field_load (cfg, field);
9165 if (ins) {
9166 *sp++ = ins;
9167 goto field_access_end;
9168 }
9169 }
9171 /* INSTANCE CASE */
9173 if (is_instance)
9174 g_assert (field->offset);
9175 foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset;
9176 if (il_op == MONO_CEE_STFLD) {
9177 sp [1] = convert_value (cfg, field->type, sp [1]);
9178 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9179 UNVERIFIED;
9180 #ifndef DISABLE_REMOTING
9181 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9182 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9183 MonoInst *iargs [5];
9185 GSHAREDVT_FAILURE (il_op);
9187 iargs [0] = sp [0];
9188 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9189 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9190 EMIT_NEW_ICONST (cfg, iargs [3], m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) :
9191 field->offset);
9192 iargs [4] = sp [1];
9194 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9195 costs = inline_method (cfg, stfld_wrapper, mono_method_signature_internal (stfld_wrapper),
9196 iargs, ip, cfg->real_offset, TRUE);
9197 CHECK_CFG_EXCEPTION;
9198 g_assert (costs > 0);
9200 cfg->real_offset += 5;
9202 inline_costs += costs;
9203 } else {
9204 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9205 }
9206 } else
9207 #endif
9208 {
9209 MonoInst *store;
9211 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
9213 if (ins_flag & MONO_INST_VOLATILE) {
9214 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9215 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9216 }
9218 if (mini_is_gsharedvt_klass (klass)) {
9219 MonoInst *offset_ins;
9221 context_used = mini_class_check_context_used (cfg, klass);
9223 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9224 /* The value is offset by 1 */
9225 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9226 dreg = alloc_ireg_mp (cfg);
9227 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9228 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
9229 store = mini_emit_storing_write_barrier (cfg, ins, sp [1]);
9230 } else {
9231 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
9232 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9233 }
9234 } else {
9235 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
9236 /* insert call to write barrier */
9237 MonoInst *ptr;
9238 int dreg;
9240 dreg = alloc_ireg_mp (cfg);
9241 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9242 store = mini_emit_storing_write_barrier (cfg, ptr, sp [1]);
9243 } else {
9244 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9245 }
9246 }
9248 if (sp [0]->opcode != OP_LDADDR)
9249 store->flags |= MONO_INST_FAULT;
9251 store->flags |= ins_flag;
9253 goto field_access_end;
9254 }
9256 #ifndef DISABLE_REMOTING
9257 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9258 MonoMethod *wrapper = (il_op == MONO_CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9259 MonoInst *iargs [4];
9261 GSHAREDVT_FAILURE (il_op);
9263 iargs [0] = sp [0];
9264 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9265 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9266 EMIT_NEW_ICONST (cfg, iargs [3], m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset);
9267 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9268 costs = inline_method (cfg, wrapper, mono_method_signature_internal (wrapper),
9269 iargs, ip, cfg->real_offset, TRUE);
9270 CHECK_CFG_EXCEPTION;
9271 g_assert (costs > 0);
9273 cfg->real_offset += 5;
9275 *sp++ = iargs [0];
9277 inline_costs += costs;
9278 } else {
9279 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9280 *sp++ = ins;
9281 }
9282 } else
9283 #endif
9284 if (is_instance) {
9285 if (sp [0]->type == STACK_VTYPE) {
9286 MonoInst *var;
9288 /* Have to compute the address of the variable */
9290 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9291 if (!var)
9292 var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, sp [0]->dreg);
9293 else
9294 g_assert (var->klass == klass);
9296 EMIT_NEW_VARLOADA (cfg, ins, var, m_class_get_byval_arg (var->klass));
9297 sp [0] = ins;
9298 }
9300 if (il_op == MONO_CEE_LDFLDA) {
9301 if (sp [0]->type == STACK_OBJ) {
9302 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9303 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9304 }
9306 dreg = alloc_ireg_mp (cfg);
9308 if (mini_is_gsharedvt_klass (klass)) {
9309 MonoInst *offset_ins;
9311 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9312 /* The value is offset by 1 */
9313 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9314 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9315 } else {
9316 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9317 }
9318 ins->klass = mono_class_from_mono_type_internal (field->type);
9319 ins->type = STACK_MP;
9320 *sp++ = ins;
9321 } else {
9322 MonoInst *load;
9324 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
9326 #ifdef MONO_ARCH_SIMD_INTRINSICS
9327 if (sp [0]->opcode == OP_LDADDR && m_class_is_simd_type (klass) && cfg->opt & MONO_OPT_SIMD) {
9328 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
9329 if (ins) {
9330 *sp++ = ins;
9331 goto field_access_end;
9332 }
9333 }
9334 #endif
9336 MonoInst *field_add_inst = sp [0];
9337 if (mini_is_gsharedvt_klass (klass)) {
9338 MonoInst *offset_ins;
9340 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9341 /* The value is offset by 1 */
9342 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9343 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
9344 foffset = 0;
9345 }
9347 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
9349 if (sp [0]->opcode != OP_LDADDR)
9350 load->flags |= MONO_INST_FAULT;
9351 *sp++ = load;
9352 }
9353 }
9355 if (is_instance)
9356 goto field_access_end;
9358 /* STATIC CASE */
9359 context_used = mini_class_check_context_used (cfg, klass);
9361 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
9362 mono_error_set_field_missing (cfg->error, field->parent, field->name, NULL, "Using static instructions with literal field");
9363 CHECK_CFG_ERROR;
9364 }
9366 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9367 * to be called here.
9368 */
9369 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9370 mono_class_vtable_checked (cfg->domain, klass, cfg->error);
9371 CHECK_CFG_ERROR;
9372 CHECK_TYPELOAD (klass);
9373 }
9374 mono_domain_lock (cfg->domain);
9375 if (cfg->domain->special_static_fields)
9376 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9377 mono_domain_unlock (cfg->domain);
9379 is_special_static = mono_class_field_is_special_static (field);
9381 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
9382 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
9383 else
9384 thread_ins = NULL;
9386 /* Generate IR to compute the field address */
9387 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9388 /*
9389 * Fast access to TLS data
9390 * Inline version of get_thread_static_data () in
9391 * threads.c.
9392 */
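/* Assumed encoding of the special-static offset (an illustration of
 * the bit twiddling below, not a spec): bit 31 set means a non-TLS
 * special static handled via mono_get_special_static_data; bits 0-5
 * index the per-thread static_data chunk array and bits 6-30 are
 * the byte offset inside that chunk. E.g. 0x184 decodes to chunk 4,
 * byte offset 6.
 */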
9393 guint32 offset;
9394 int idx, static_data_reg, array_reg, dreg;
9396 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
9397 GSHAREDVT_FAILURE (il_op);
9399 static_data_reg = alloc_ireg (cfg);
9400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
9402 if (cfg->compile_aot) {
9403 int offset_reg, offset2_reg, idx_reg;
9405 /* For TLS variables, this will return the TLS offset */
9406 EMIT_NEW_SFLDACONST (cfg, ins, field);
9407 offset_reg = ins->dreg;
9408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9409 idx_reg = alloc_ireg (cfg);
9410 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
9411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2);
9412 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9413 array_reg = alloc_ireg (cfg);
9414 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9415 offset2_reg = alloc_ireg (cfg);
9416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
9417 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
9418 dreg = alloc_ireg (cfg);
9419 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9420 } else {
9421 offset = (gsize)addr & 0x7fffffff;
9422 idx = offset & 0x3f;
9424 array_reg = alloc_ireg (cfg);
9425 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * TARGET_SIZEOF_VOID_P);
9426 dreg = alloc_ireg (cfg);
9427 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
9428 }
9429 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9430 (cfg->compile_aot && is_special_static) ||
9431 (context_used && is_special_static)) {
9432 MonoInst *iargs [2];
9434 g_assert (field->parent);
9435 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9436 if (context_used) {
9437 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9438 field, MONO_RGCTX_INFO_CLASS_FIELD);
9439 } else {
9440 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9441 }
9442 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9443 } else if (context_used) {
9444 MonoInst *static_data;
9446 /*
9447 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9448 method->klass->name_space, method->klass->name, method->name,
9449 depth, field->offset);
9450 */
9452 if (mono_class_needs_cctor_run (klass, method))
9453 emit_class_init (cfg, klass);
9455 /*
9456 * The pointer we're computing here is
9457 *
9458 * super_info.static_data + field->offset
9459 */
9460 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
9461 klass, MONO_RGCTX_INFO_STATIC_DATA);
9463 if (mini_is_gsharedvt_klass (klass)) {
9464 MonoInst *offset_ins;
9466 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9467 /* The value is offset by 1 */
9468 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9469 dreg = alloc_ireg_mp (cfg);
9470 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9471 } else if (field->offset == 0) {
9472 ins = static_data;
9473 } else {
9474 int addr_reg = mono_alloc_preg (cfg);
9475 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9476 }
9477 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9478 MonoInst *iargs [2];
9480 g_assert (field->parent);
9481 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9482 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9483 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9484 } else {
9485 MonoVTable *vtable = NULL;
9487 if (!cfg->compile_aot)
9488 vtable = mono_class_vtable_checked (cfg->domain, klass, cfg->error);
9489 CHECK_CFG_ERROR;
9490 CHECK_TYPELOAD (klass);
9492 if (!addr) {
9493 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
9494 if (!(g_slist_find (class_inits, klass))) {
9495 emit_class_init (cfg, klass);
9496 if (cfg->verbose_level > 2)
9497 printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass), m_class_get_name (klass), mono_field_get_name (field));
9498 class_inits = g_slist_prepend (class_inits, klass);
9499 }
9500 } else {
9501 if (cfg->run_cctors) {
9502 /* This makes it so that inlining cannot trigger */
9503 /* .cctors: too many apps depend on them */
9504 /* running with a specific order... */
9505 g_assert (vtable);
9506 if (!vtable->initialized && m_class_has_cctor (vtable->klass))
9507 INLINE_FAILURE ("class init");
9508 if (!mono_runtime_class_init_full (vtable, cfg->error)) {
9509 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9510 goto exception_exit;
9511 }
9512 }
9513 }
9514 if (cfg->compile_aot)
9515 EMIT_NEW_SFLDACONST (cfg, ins, field);
9516 else {
9517 g_assert (vtable);
9518 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9519 g_assert (addr);
9520 EMIT_NEW_PCONST (cfg, ins, addr);
9521 }
9522 } else {
9523 MonoInst *iargs [1];
9524 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9525 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9526 }
9527 }
9529 /* Generate IR to do the actual load/store operation */
9531 if ((il_op == MONO_CEE_STFLD || il_op == MONO_CEE_STSFLD)) {
9532 if (ins_flag & MONO_INST_VOLATILE) {
9533 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9534 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9535 } else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (ftype)) {
9536 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9537 }
9538 }
9540 if (il_op == MONO_CEE_LDSFLDA) {
9541 ins->klass = mono_class_from_mono_type_internal (ftype);
9542 ins->type = STACK_PTR;
9543 *sp++ = ins;
9544 } else if (il_op == MONO_CEE_STSFLD) {
9545 MonoInst *store;
9547 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9548 store->flags |= ins_flag;
9549 } else {
9550 gboolean is_const = FALSE;
9551 MonoVTable *vtable = NULL;
9552 gpointer addr = NULL;
9554 if (!context_used) {
9555 vtable = mono_class_vtable_checked (cfg->domain, klass, cfg->error);
9556 CHECK_CFG_ERROR;
9557 CHECK_TYPELOAD (klass);
9558 }
9559 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9560 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9561 int ro_type = ftype->type;
9562 if (!addr)
9563 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9564 if (ro_type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (ftype->data.klass)) {
9565 ro_type = mono_class_enum_basetype_internal (ftype->data.klass)->type;
9566 }
9568 GSHAREDVT_FAILURE (il_op);
9570 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9571 is_const = TRUE;
9572 switch (ro_type) {
9573 case MONO_TYPE_BOOLEAN:
9574 case MONO_TYPE_U1:
9575 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9576 sp++;
9577 break;
9578 case MONO_TYPE_I1:
9579 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9580 sp++;
9581 break;
9582 case MONO_TYPE_CHAR:
9583 case MONO_TYPE_U2:
9584 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9585 sp++;
9586 break;
9587 case MONO_TYPE_I2:
9588 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9589 sp++;
9590 break;
9592 case MONO_TYPE_I4:
9593 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9594 sp++;
9595 break;
9596 case MONO_TYPE_U4:
9597 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9598 sp++;
9599 break;
9600 case MONO_TYPE_I:
9601 case MONO_TYPE_U:
9602 case MONO_TYPE_PTR:
9603 case MONO_TYPE_FNPTR:
9604 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9605 mini_type_to_eval_stack_type ((cfg), field->type, *sp);
9606 sp++;
9607 break;
9608 case MONO_TYPE_STRING:
9609 case MONO_TYPE_OBJECT:
9610 case MONO_TYPE_CLASS:
9611 case MONO_TYPE_SZARRAY:
9612 case MONO_TYPE_ARRAY:
9613 if (!mono_gc_is_moving ()) {
9614 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9615 mini_type_to_eval_stack_type ((cfg), field->type, *sp);
9616 sp++;
9617 } else {
9618 is_const = FALSE;
9619 }
9620 break;
9621 case MONO_TYPE_I8:
9622 case MONO_TYPE_U8:
9623 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9624 sp++;
9625 break;
9626 case MONO_TYPE_R4:
9627 case MONO_TYPE_R8:
9628 case MONO_TYPE_VALUETYPE:
9629 default:
9630 is_const = FALSE;
9631 break;
9632 }
9633 }
9635 if (!is_const) {
9636 MonoInst *load;
9638 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9639 load->flags |= ins_flag;
9640 *sp++ = load;
9641 }
9642 }
9644 field_access_end:
9645 if ((il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
9646 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9647 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9648 }
9650 ins_flag = 0;
9651 break;
9652 }
9653 case MONO_CEE_STOBJ:
9654 sp -= 2;
9655 klass = mini_get_class (method, token, generic_context);
9656 CHECK_TYPELOAD (klass);
9658 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9659 mini_emit_memory_store (cfg, m_class_get_byval_arg (klass), sp [0], sp [1], ins_flag);
9660 ins_flag = 0;
9661 inline_costs += 1;
9662 break;
9664 /*
9665 * Array opcodes
9666 */
9667 case MONO_CEE_NEWARR: {
9668 MonoInst *len_ins;
9669 const char *data_ptr;
9670 int data_size = 0;
9671 guint32 field_token;
9673 --sp;
9675 klass = mini_get_class (method, token, generic_context);
9676 CHECK_TYPELOAD (klass);
9677 if (m_class_get_byval_arg (klass)->type == MONO_TYPE_VOID)
9678 UNVERIFIED;
9680 context_used = mini_class_check_context_used (cfg, klass);
9682 if (sp [0]->type == STACK_I8 || (TARGET_SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9683 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
9684 ins->sreg1 = sp [0]->dreg;
9685 ins->type = STACK_I4;
9686 ins->dreg = alloc_ireg (cfg);
9687 MONO_ADD_INS (cfg->cbb, ins);
9688 *sp = mono_decompose_opcode (cfg, ins);
9689 }
9691 if (context_used) {
9692 MonoInst *args [3];
9693 MonoClass *array_class = mono_class_create_array (klass, 1);
9694 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
9696 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
9698 /* vtable */
9699 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
9700 array_class, MONO_RGCTX_INFO_VTABLE);
9701 /* array len */
9702 args [1] = sp [0];
9704 if (managed_alloc)
9705 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9706 else
9707 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
9708 } else {
9709 if (cfg->opt & MONO_OPT_SHARED) {
9710 /* Decompose now to avoid problems with references to the domainvar */
9711 MonoInst *iargs [3];
9713 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9714 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9715 iargs [2] = sp [0];
9717 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
9718 } else {
9719 /* Decompose later since it is needed by abcrem */
9720 MonoClass *array_type = mono_class_create_array (klass, 1);
9721 mono_class_vtable_checked (cfg->domain, array_type, cfg->error);
9722 CHECK_CFG_ERROR;
9723 CHECK_TYPELOAD (array_type);
9725 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9726 ins->dreg = alloc_ireg_ref (cfg);
9727 ins->sreg1 = sp [0]->dreg;
9728 ins->inst_newa_class = klass;
9729 ins->type = STACK_OBJ;
9730 ins->klass = array_type;
9731 MONO_ADD_INS (cfg->cbb, ins);
9732 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
9733 cfg->cbb->needs_decompose = TRUE;
9735 /* Needed so mono_emit_load_get_addr () gets called */
9736 mono_get_got_var (cfg);
9737 }
9738 }
9740 len_ins = sp [0];
9741 ip += 5;
9742 *sp++ = ins;
9743 inline_costs += 1;
9745 /*
9746 * we inline/optimize the initialization sequence if possible.
9747 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9748 * for small sizes open code the memcpy
9749 * ensure the rva field is big enough
9750 */
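/* Illustrative example (not from this file): C# like
 *   static readonly int[] tab = { 1, 2, 3 };
 * compiles to newarr / dup / ldtoken <rva field> / call
 * RuntimeHelpers.InitializeArray; initialize_array_data ()
 * recognizes that sequence and it is open-coded below as a memcpy
 * from the RVA data into the array's vector.
 */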
9751 if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end
9752 && ip_in_bb (cfg, cfg->cbb, next_ip)
9753 && (len_ins->opcode == OP_ICONST)
9754 && (data_ptr = initialize_array_data (cfg, method,
9755 cfg->compile_aot, next_ip, end, klass,
9756 len_ins->inst_c0, &data_size, &field_token,
9757 &il_op, &next_ip))) {
9758 MonoMethod *memcpy_method = mini_get_memcpy_method ();
9759 MonoInst *iargs [3];
9760 int add_reg = alloc_ireg_mp (cfg);
9762 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
9763 if (cfg->compile_aot) {
9764 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, m_class_get_image (method->klass), GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9765 } else {
9766 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9768 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9769 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9770 }
9772 break;
9773 }
9774 case MONO_CEE_LDLEN:
9775 --sp;
9776 if (sp [0]->type != STACK_OBJ)
9777 UNVERIFIED;
9779 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9780 ins->dreg = alloc_preg (cfg);
9781 ins->sreg1 = sp [0]->dreg;
9782 ins->inst_imm = MONO_STRUCT_OFFSET (MonoArray, max_length);
9783 ins->type = STACK_I4;
9784 /* This flag will be inherited by the decomposition */
9785 ins->flags |= MONO_INST_FAULT | MONO_INST_INVARIANT_LOAD;
9786 MONO_ADD_INS (cfg->cbb, ins);
9787 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
9788 cfg->cbb->needs_decompose = TRUE;
9789 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, sp [0]->dreg);
9790 *sp++ = ins;
9791 break;
9792 case MONO_CEE_LDELEMA:
9793 sp -= 2;
9794 if (sp [0]->type != STACK_OBJ)
9795 UNVERIFIED;
9797 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9799 klass = mini_get_class (method, token, generic_context);
9800 CHECK_TYPELOAD (klass);
9801 /* we need to make sure that this array is exactly the type it needs
9802 * to be for correctness. the wrappers are lax with their usage
9803 * so we need to ignore them here
9804 */
9805 if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
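/* Note: readonly is set by the readonly. prefix, which guarantees
 * the resulting address is only used for loads, so the exact-type
 * check can be skipped; the flag is reset right below.
 */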
9806 MonoClass *array_class = mono_class_create_array (klass, 1);
9807 mini_emit_check_array_type (cfg, sp [0], array_class);
9808 CHECK_TYPELOAD (array_class);
9809 }
9811 readonly = FALSE;
9812 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9813 *sp++ = ins;
9814 break;
9815 case MONO_CEE_LDELEM:
9816 case MONO_CEE_LDELEM_I1:
9817 case MONO_CEE_LDELEM_U1:
9818 case MONO_CEE_LDELEM_I2:
9819 case MONO_CEE_LDELEM_U2:
9820 case MONO_CEE_LDELEM_I4:
9821 case MONO_CEE_LDELEM_U4:
9822 case MONO_CEE_LDELEM_I8:
9823 case MONO_CEE_LDELEM_I:
9824 case MONO_CEE_LDELEM_R4:
9825 case MONO_CEE_LDELEM_R8:
9826 case MONO_CEE_LDELEM_REF: {
9827 MonoInst *addr;
9829 sp -= 2;
9831 if (il_op == MONO_CEE_LDELEM) {
9832 klass = mini_get_class (method, token, generic_context);
9833 CHECK_TYPELOAD (klass);
9834 mono_class_init_internal (klass);
9835 }
9836 else
9837 klass = array_access_to_klass (il_op);
9839 if (sp [0]->type != STACK_OBJ)
9840 UNVERIFIED;
9842 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9844 if (mini_is_gsharedvt_variable_klass (klass)) {
9845 // FIXME-VT: OP_ICONST optimization
9846 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9847 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
9848 ins->opcode = OP_LOADV_MEMBASE;
9849 } else if (sp [1]->opcode == OP_ICONST) {
9850 int array_reg = sp [0]->dreg;
9851 int index_reg = sp [1]->dreg;
9852 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
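/* Illustrative example (not from this file): ldelem.i4 with a
 * constant index 3 on an int[] turns into a bounds check against
 * max_length plus a direct load at offset
 * 3 * 4 + offsetof (MonoArray, vector), with no run-time address
 * computation.
 */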
9854 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
9855 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
9857 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9858 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset);
9859 } else {
9860 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9861 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
9862 }
9863 *sp++ = ins;
9864 break;
9865 }
9866 case MONO_CEE_STELEM_I:
9867 case MONO_CEE_STELEM_I1:
9868 case MONO_CEE_STELEM_I2:
9869 case MONO_CEE_STELEM_I4:
9870 case MONO_CEE_STELEM_I8:
9871 case MONO_CEE_STELEM_R4:
9872 case MONO_CEE_STELEM_R8:
9873 case MONO_CEE_STELEM_REF:
9874 case MONO_CEE_STELEM: {
9875 sp -= 3;
9877 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9879 if (il_op == MONO_CEE_STELEM) {
9880 klass = mini_get_class (method, token, generic_context);
9881 CHECK_TYPELOAD (klass);
9883 mono_class_init_internal (klass);
9884 }
9885 else
9885 klass = array_access_to_klass (il_op);
9887 if (sp [0]->type != STACK_OBJ)
9888 UNVERIFIED;
9890 sp [2] = convert_value (cfg, m_class_get_byval_arg (klass), sp [2]);
9891 mini_emit_array_store (cfg, klass, sp, TRUE);
9893 inline_costs += 1;
9894 break;
9895 }
9896 case MONO_CEE_CKFINITE: {
9897 --sp;
9899 if (cfg->llvm_only) {
9900 MonoInst *iargs [1];
9902 iargs [0] = sp [0];
9903 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
9904 } else {
9905 sp [0] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.double_class), sp [0]);
9906 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9907 ins->sreg1 = sp [0]->dreg;
9908 ins->dreg = alloc_freg (cfg);
9909 ins->type = STACK_R8;
9910 MONO_ADD_INS (cfg->cbb, ins);
9912 *sp++ = mono_decompose_opcode (cfg, ins);
9913 }
9915 break;
9916 }
9917 case MONO_CEE_REFANYVAL: {
9918 MonoInst *src_var, *src;
9920 int klass_reg = alloc_preg (cfg);
9921 int dreg = alloc_preg (cfg);
9923 GSHAREDVT_FAILURE (il_op);
9925 MONO_INST_NEW (cfg, ins, il_op);
9926 --sp;
9927 klass = mini_get_class (method, token, generic_context);
9928 CHECK_TYPELOAD (klass);
9930 context_used = mini_class_check_context_used (cfg, klass);
9932 // FIXME:
9933 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9934 if (!src_var)
9935 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
9936 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9937 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
9939 if (context_used) {
9940 MonoInst *klass_ins;
9942 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
9943 klass, MONO_RGCTX_INFO_KLASS);
9945 // FIXME:
9946 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9947 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9948 } else {
9949 mini_emit_class_check (cfg, klass_reg, klass);
9951 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
9952 ins->type = STACK_MP;
9953 ins->klass = klass;
9954 *sp++ = ins;
9955 break;
9956 }
9957 case MONO_CEE_MKREFANY: {
9958 MonoInst *loc, *addr;
9960 GSHAREDVT_FAILURE (il_op);
9962 MONO_INST_NEW (cfg, ins, il_op);
9963 --sp;
9964 klass = mini_get_class (method, token, generic_context);
9965 CHECK_TYPELOAD (klass);
9967 context_used = mini_class_check_context_used (cfg, klass);
9969 loc = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL);
9970 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
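/* The stores below fill in the MonoTypedRef local field by field:
 * klass (the MonoClass*), type (the byval MonoType*, computed by
 * adding the byval_arg offset to the class pointer) and value (the
 * address operand from the stack).
 */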
9972 MonoInst *const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9973 int type_reg = alloc_preg (cfg);
9975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, m_class_offsetof_byval_arg ());
9977 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9979 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9981 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9982 ins->type = STACK_VTYPE;
9983 ins->klass = mono_defaults.typed_reference_class;
9984 *sp++ = ins;
9985 break;
9986 }
9987 case MONO_CEE_LDTOKEN: {
9988 gpointer handle;
9989 MonoClass *handle_class;
9991 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9992 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9993 handle = mono_method_get_wrapper_data (method, n);
9994 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
9995 if (handle_class == mono_defaults.typehandle_class)
9996 handle = m_class_get_byval_arg ((MonoClass*)handle);
9997 }
9998 else {
9999 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, cfg->error);
10000 CHECK_CFG_ERROR;
10001 }
10002 if (!handle)
10003 LOAD_ERROR;
10004 mono_class_init_internal (handle_class);
10005 if (cfg->gshared) {
10006 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10007 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10008 /* This case handles ldtoken
10009 of an open type, like for
10010 typeof(Gen<>). */
10011 context_used = 0;
10012 } else if (handle_class == mono_defaults.typehandle_class) {
10013 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type_internal ((MonoType *)handle));
10014 } else if (handle_class == mono_defaults.fieldhandle_class)
10015 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10016 else if (handle_class == mono_defaults.methodhandle_class)
10017 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
10018 else
10019 g_assert_not_reached ();
10020 }
10022 if ((cfg->opt & MONO_OPT_SHARED) &&
10023 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10024 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10025 MonoInst *addr, *vtvar, *iargs [3];
10026 int method_context_used;
10028 method_context_used = mini_method_check_context_used (cfg, method);
10030 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
10032 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10033 EMIT_NEW_ICONST (cfg, iargs [1], n);
10034 if (method_context_used) {
10035 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10036 method, MONO_RGCTX_INFO_METHOD);
10037 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10038 } else {
10039 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10040 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10041 }
10042 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10046 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10047 } else {
10048 if ((next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) &&
10049 ((next_ip [0] == CEE_CALL) || (next_ip [0] == CEE_CALLVIRT)) &&
10050 (cmethod = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context)) &&
10051 (cmethod->klass == mono_defaults.systemtype_class) &&
10052 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
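/* This matches the typeof (X) pattern: ldtoken immediately followed
 * by a call to Type.GetTypeFromHandle. Both instructions are folded
 * into a single reflection-type load and next_ip is advanced past
 * the call below.
 */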
10053 MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle);
10055 mono_class_init_internal (tclass);
10056 if (context_used) {
10057 ins = mini_emit_get_rgctx_klass (cfg, context_used,
10058 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10059 } else if (cfg->compile_aot) {
10060 if (method->wrapper_type) {
10061 error_init (error); //got to do it since there are multiple conditionals below
10062 if (mono_class_get_checked (m_class_get_image (tclass), m_class_get_type_token (tclass), error) == tclass && !generic_context) {
10063 /* Special case for static synchronized wrappers */
10064 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (tclass), m_class_get_type_token (tclass), generic_context);
10065 } else {
10066 mono_error_cleanup (error); /* FIXME don't swallow the error */
10067 /* FIXME: n is not a normal token */
10068 DISABLE_AOT (cfg);
10069 EMIT_NEW_PCONST (cfg, ins, NULL);
10070 }
10071 } else {
10072 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10073 }
10074 } else {
10075 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, cfg->error);
10076 CHECK_CFG_ERROR;
10077 EMIT_NEW_PCONST (cfg, ins, rt);
10078 }
10079 ins->type = STACK_OBJ;
10080 ins->klass = cmethod->klass;
10081 il_op = (MonoOpcodeEnum)next_ip [0];
10082 next_ip += 5;
10083 } else {
10084 MonoInst *addr, *vtvar;
10086 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
10088 if (context_used) {
10089 if (handle_class == mono_defaults.typehandle_class) {
10090 ins = mini_emit_get_rgctx_klass (cfg, context_used,
10091 mono_class_from_mono_type_internal ((MonoType *)handle),
10092 MONO_RGCTX_INFO_TYPE);
10093 } else if (handle_class == mono_defaults.methodhandle_class) {
10094 ins = emit_get_rgctx_method (cfg, context_used,
10095 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
10096 } else if (handle_class == mono_defaults.fieldhandle_class) {
10097 ins = emit_get_rgctx_field (cfg, context_used,
10098 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
10099 } else {
10100 g_assert_not_reached ();
10101 }
10102 } else if (cfg->compile_aot) {
10103 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10104 } else {
10105 EMIT_NEW_PCONST (cfg, ins, handle);
10106 }
10107 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10109 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10110 }
10111 }
10113 *sp++ = ins;
10114 break;
10115 }
10116 case MONO_CEE_THROW:
10117 if (sp [-1]->type != STACK_OBJ)
10118 UNVERIFIED;
10120 MONO_INST_NEW (cfg, ins, OP_THROW);
10121 --sp;
10122 ins->sreg1 = sp [0]->dreg;
10123 cfg->cbb->out_of_line = TRUE;
10124 MONO_ADD_INS (cfg->cbb, ins);
10125 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10126 MONO_ADD_INS (cfg->cbb, ins);
10127 sp = stack_start;
10129 link_bblock (cfg, cfg->cbb, end_bblock);
10130 start_new_bblock = 1;
10131 /* This can complicate code generation for llvm since the return value might not be defined */
10132 if (COMPILE_LLVM (cfg))
10133 INLINE_FAILURE ("throw");
10134 break;
10135 case MONO_CEE_ENDFINALLY:
10136 if (!ip_in_finally_clause (cfg, ip - header->code))
10137 UNVERIFIED;
10138 /* mono_save_seq_point_info () depends on this */
10139 if (sp != stack_start)
10140 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10141 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10142 MONO_ADD_INS (cfg->cbb, ins);
10143 start_new_bblock = 1;
10145 /*
10146 * Control will leave the method so empty the stack, otherwise
10147 * the next basic block will start with a nonempty stack.
10148 */
10149 while (sp != stack_start) {
10150 sp--;
10151 }
10152 break;
10153 case MONO_CEE_LEAVE:
10154 case MONO_CEE_LEAVE_S: {
10155 GList *handlers;
10157 /* empty the stack */
10158 g_assert (sp >= stack_start);
10159 sp = stack_start;
10161 /*
10162 * If this leave statement is in a catch block, check for a
10163 * pending exception, and rethrow it if necessary.
10164 * We avoid doing this in runtime invoke wrappers, since those are called
10165 * by native code which expects the wrapper to catch all exceptions.
10166 */
10167 for (i = 0; i < header->num_clauses; ++i) {
10168 MonoExceptionClause *clause = &header->clauses [i];
10170 /*
10171 * Use <= in the final comparison to handle clauses with multiple
10172 * leave statements, like in bug #78024.
10173 * The ordering of the exception clauses guarantees that we find the
10174 * innermost clause.
10175 */
10176 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((il_op == MONO_CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10177 MonoInst *exc_ins;
10178 MonoBasicBlock *dont_throw;
10180 /*
10181 MonoInst *load;
10183 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10184 */
10186 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10188 NEW_BBLOCK (cfg, dont_throw);
10190 /*
10191 * Currently, we always rethrow the abort exception, despite the
10192 * fact that this is not correct. See thread6.cs for an example.
10193 * But propagating the abort exception is more important than
10194 * getting the semantics right.
10195 */
10196 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10197 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10198 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10200 MONO_START_BB (cfg, dont_throw);
10201 }
10202 }
10204 #ifdef ENABLE_LLVM
10205 cfg->cbb->try_end = (intptr_t)(ip - header->code);
10206 #endif
10208 if ((handlers = mono_find_leave_clauses (cfg, ip, target))) {
10209 GList *tmp;
10210 /*
10211 * For each finally clause that we exit we need to invoke the finally block.
10212 * After each invocation we need to add try holes for all the clauses that
10213 * we already exited.
10214 */
10215 for (tmp = handlers; tmp; tmp = tmp->next) {
10216 MonoLeaveClause *leave = (MonoLeaveClause *) tmp->data;
10217 MonoExceptionClause *clause = leave->clause;
10219 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
10220 continue;
10222 MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
10223 MonoBasicBlock *dont_throw;
10225 /*
10226 * Emit instrumentation code before linking the basic blocks below as this
10227 * will alter cfg->cbb.
10228 */
10229 mini_profiler_emit_call_finally (cfg, header, ip, leave->index, clause);
10231 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10232 g_assert (tblock);
10233 link_bblock (cfg, cfg->cbb, tblock);
10235 MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
10237 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10238 ins->inst_target_bb = tblock;
10239 ins->inst_eh_blocks = tmp;
10240 MONO_ADD_INS (cfg->cbb, ins);
10241 cfg->cbb->has_call_handler = 1;
10243 /* Throw exception if exvar is set */
10244 /* FIXME Do we need this for calls from catch/filter ? */
10245 NEW_BBLOCK (cfg, dont_throw);
10246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
10247 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10248 mono_emit_jit_icall (cfg, ves_icall_thread_finish_async_abort, NULL);
10249 cfg->cbb->clause_holes = tmp;
10251 MONO_START_BB (cfg, dont_throw);
10252 cfg->cbb->clause_holes = tmp;
10254 if (COMPILE_LLVM (cfg)) {
10255 MonoBasicBlock *target_bb;
10257 /*
10258 * Link the finally bblock with the target, since it will
10259 * conceptually branch there.
10260 */
10261 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
10262 GET_BBLOCK (cfg, target_bb, target);
10263 link_bblock (cfg, tblock, target_bb);
10264 }
10265 }
10266 }
10268 MONO_INST_NEW (cfg, ins, OP_BR);
10269 MONO_ADD_INS (cfg->cbb, ins);
10270 GET_BBLOCK (cfg, tblock, target);
10271 link_bblock (cfg, cfg->cbb, tblock);
10272 ins->inst_target_bb = tblock;
10274 start_new_bblock = 1;
10275 break;
10276 }
10278 /*
10279 * Mono specific opcodes
10280 */
10282 case MONO_CEE_MONO_ICALL: {
10283 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10284 const MonoJitICallId jit_icall_id = (MonoJitICallId)token;
10285 MonoJitICallInfo * const info = mono_find_jit_icall_info (jit_icall_id);
10287 CHECK_STACK (info->sig->param_count);
10288 sp -= info->sig->param_count;
10290 if (token == MONO_JIT_ICALL_mono_threads_attach_coop) {
10291 MonoInst *addr;
10292 MonoBasicBlock *next_bb;
10294 if (cfg->compile_aot) {
10295 /*
10296 * This is called on unattached threads, so it cannot go through the trampoline
10297 * infrastructure. Use an indirect call through a got slot initialized at load time
10298 * instead.
10299 */
10300 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
10301 ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
10302 } else {
10303 ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
10304 }
10306 /*
10307 * Parts of the initlocals code need to come after this, since it might call methods like memset.
10308 */
10309 init_localsbb2 = cfg->cbb;
10310 NEW_BBLOCK (cfg, next_bb);
10311 MONO_START_BB (cfg, next_bb);
10312 } else {
10313 ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
10314 }
10316 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10317 *sp++ = ins;
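/* Each emitted call makes this method a less attractive inlining candidate: the per-call weight grows with the number of calls seen so far, capped at 10. */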
10319 inline_costs += CALL_COST * MIN(10, num_calls++);
10320 break;
10321 }
10323 MonoJumpInfoType ldptr_type;
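/* The MONO_CEE_MONO_LDPTR_* opcodes below all load a runtime constant; each case only selects the patch-info type and then jumps to the shared mono_ldptr emitter. */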
10325 case MONO_CEE_MONO_LDPTR_CARD_TABLE:
10326 ldptr_type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
10327 goto mono_ldptr;
10328 case MONO_CEE_MONO_LDPTR_NURSERY_START:
10329 ldptr_type = MONO_PATCH_INFO_GC_NURSERY_START;
10330 goto mono_ldptr;
10331 case MONO_CEE_MONO_LDPTR_NURSERY_BITS:
10332 ldptr_type = MONO_PATCH_INFO_GC_NURSERY_BITS;
10333 goto mono_ldptr;
10334 case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG:
10335 ldptr_type = MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG;
10336 goto mono_ldptr;
10337 case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
10338 ldptr_type = MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT;
10339 mono_ldptr:
10340 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10341 ins = mini_emit_runtime_constant (cfg, ldptr_type, NULL);
10342 *sp++ = ins;
10343 inline_costs += CALL_COST * MIN(10, num_calls++);
10344 break;
10346 case MONO_CEE_MONO_LDPTR: {
10347 gpointer ptr;
10349 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10350 ptr = mono_method_get_wrapper_data (method, token);
10351 EMIT_NEW_PCONST (cfg, ins, ptr);
10352 *sp++ = ins;
10353 inline_costs += CALL_COST * MIN(10, num_calls++);
10354 /* Can't embed random pointers into AOT code */
10355 DISABLE_AOT (cfg);
10356 break;
10357 }
10358 case MONO_CEE_MONO_JIT_ICALL_ADDR:
10359 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10360 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, GUINT_TO_POINTER (token));
10361 *sp++ = ins;
10362 inline_costs += CALL_COST * MIN(10, num_calls++);
10363 break;
10365 case MONO_CEE_MONO_ICALL_ADDR: {
10366 MonoMethod *cmethod;
10367 gpointer ptr;
10369 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10371 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
10373 if (cfg->compile_aot) {
10374 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
10376 * This is generated by emit_native_wrapper () to resolve the pinvoke address
10377 * before the call, its not needed when using direct pinvoke.
10378 * This is not an optimization, but its used to avoid looking up pinvokes
10379 * on platforms which don't support dlopen ().
10381 EMIT_NEW_PCONST (cfg, ins, NULL);
10382 } else {
10383 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10385 } else {
10386 ptr = mono_lookup_internal_call (cmethod);
10387 g_assert (ptr);
10388 EMIT_NEW_PCONST (cfg, ins, ptr);
10390 *sp++ = ins;
10391 break;
10392 }
10393 case MONO_CEE_MONO_VTADDR: {
10394 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10395 MonoInst *src_var, *src;
10397 --sp;
10399 // FIXME:
10400 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10401 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10402 *sp++ = src;
10403 break;
10404 }
10405 case MONO_CEE_MONO_NEWOBJ: {
10406 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10407 MonoInst *iargs [2];
10409 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10410 mono_class_init_internal (klass);
10411 NEW_DOMAINCONST (cfg, iargs [0]);
10412 MONO_ADD_INS (cfg->cbb, iargs [0]);
10413 NEW_CLASSCONST (cfg, iargs [1], klass);
10414 MONO_ADD_INS (cfg->cbb, iargs [1]);
10415 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
10416 inline_costs += CALL_COST * MIN(10, num_calls++);
10417 break;
10418 }
10419 case MONO_CEE_MONO_OBJADDR:
10420 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10421 --sp;
10422 MONO_INST_NEW (cfg, ins, OP_MOVE);
10423 ins->dreg = alloc_ireg_mp (cfg);
10424 ins->sreg1 = sp [0]->dreg;
10425 ins->type = STACK_MP;
10426 MONO_ADD_INS (cfg->cbb, ins);
10427 *sp++ = ins;
10428 break;
10429 case MONO_CEE_MONO_LDNATIVEOBJ:
10430 /*
10431 * Similar to LDOBJ, but instead load the unmanaged
10432 * representation of the vtype to the stack.
10433 */
10434 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10435 --sp;
10436 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10437 g_assert (m_class_is_valuetype (klass));
10438 mono_class_init_internal (klass);
10440 {
10441 MonoInst *src, *dest, *temp;
10443 src = sp [0];
10444 temp = mono_compile_create_var (cfg, m_class_get_byval_arg (klass), OP_LOCAL);
10445 temp->backend.is_pinvoke = 1;
10446 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10447 mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
10449 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10450 dest->type = STACK_VTYPE;
10451 dest->klass = klass;
10453 *sp ++ = dest;
10454 }
10455 break;
10456 case MONO_CEE_MONO_RETOBJ: {
10457 /*
10458 * Same as RET, but return the native representation of a vtype
10459 * to the caller.
10460 */
10461 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10462 g_assert (cfg->ret);
10463 g_assert (mono_method_signature_internal (method)->pinvoke);
10464 --sp;
10466 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10468 if (!cfg->vret_addr) {
10469 g_assert (cfg->ret_var_is_local);
10471 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10472 } else {
10473 EMIT_NEW_RETLOADA (cfg, ins);
10475 mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
10477 if (sp != stack_start)
10478 UNVERIFIED;
10480 mini_profiler_emit_leave (cfg, sp [0]);
10482 MONO_INST_NEW (cfg, ins, OP_BR);
10483 ins->inst_target_bb = end_bblock;
10484 MONO_ADD_INS (cfg->cbb, ins);
10485 link_bblock (cfg, cfg->cbb, end_bblock);
10486 start_new_bblock = 1;
10487 break;
10488 }
10489 case MONO_CEE_MONO_SAVE_LMF:
10490 case MONO_CEE_MONO_RESTORE_LMF:
10491 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10492 break;
10493 case MONO_CEE_MONO_CLASSCONST:
10494 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10495 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10496 *sp++ = ins;
10497 inline_costs += CALL_COST * MIN(10, num_calls++);
10498 break;
10499 case MONO_CEE_MONO_NOT_TAKEN:
10500 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10501 cfg->cbb->out_of_line = TRUE;
10502 break;
10503 case MONO_CEE_MONO_TLS: {
10504 MonoTlsKey key;
10506 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10507 key = (MonoTlsKey)n;
10508 g_assert (key < TLS_KEY_NUM);
10510 ins = mono_create_tls_get (cfg, key);
10511 g_assert (ins);
10512 ins->type = STACK_PTR;
10513 *sp++ = ins;
10514 break;
10515 }
10516 case MONO_CEE_MONO_DYN_CALL: {
10517 MonoCallInst *call;
10519 /* It would be easier to call a trampoline, but that would put an
10520 * extra frame on the stack, confusing exception handling. So
10521 * implement it inline using an opcode for now.
10522 */
10524 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10525 if (!cfg->dyn_call_var) {
10526 cfg->dyn_call_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
10527 /* prevent it from being register allocated */
10528 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
10531 /* Has to use a call inst since local regalloc expects it */
10532 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10533 ins = (MonoInst*)call;
10534 sp -= 2;
10535 ins->sreg1 = sp [0]->dreg;
10536 ins->sreg2 = sp [1]->dreg;
10537 MONO_ADD_INS (cfg->cbb, ins);
10539 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
10540 /* OP_DYN_CALL might need to allocate a dynamically sized param area */
10541 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10543 inline_costs += CALL_COST * MIN(10, num_calls++);
10544 break;
10545 }
10546 case MONO_CEE_MONO_MEMORY_BARRIER: {
10547 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10548 mini_emit_memory_barrier (cfg, (int)n);
10549 break;
10550 }
10551 case MONO_CEE_MONO_ATOMIC_STORE_I4: {
10552 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10553 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
10555 sp -= 2;
10557 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
10558 ins->dreg = sp [0]->dreg;
10559 ins->sreg1 = sp [1]->dreg;
10560 ins->backend.memory_barrier_kind = (int)n;
10561 MONO_ADD_INS (cfg->cbb, ins);
10562 break;
10563 }
10564 case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR: {
10565 CHECK_STACK (1);
10566 --sp;
10568 dreg = alloc_preg (cfg);
10569 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
10570 *sp++ = ins;
10571 break;
10572 }
10573 case MONO_CEE_MONO_CALLI_EXTRA_ARG: {
10574 MonoInst *addr;
10575 MonoMethodSignature *fsig;
10576 MonoInst *arg;
10578 /*
10579 * This is the same as CEE_CALLI, but passes an additional argument
10580 * to the called method in llvmonly mode.
10581 * This is only used by delegate invoke wrappers to call the
10582 * actual delegate method.
10583 */
10584 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
10586 ins = NULL;
10588 cmethod = NULL;
10589 CHECK_STACK (1);
10590 --sp;
10591 addr = *sp;
10592 fsig = mini_get_signature (method, token, generic_context, cfg->error);
10593 CHECK_CFG_ERROR;
10595 if (cfg->llvm_only)
10596 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
10598 n = fsig->param_count + fsig->hasthis + 1;
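/* The wrapper pushed the explicit parameters, the this pointer (if any) and one extra argument; 'arg' below is that extra, topmost value. */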
10600 CHECK_STACK (n);
10602 sp -= n;
10603 arg = sp [n - 1];
10605 if (cfg->llvm_only) {
10606 /*
10607 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
10608 * cconv. This is set by mono_init_delegate ().
10609 */
10610 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
10611 MonoInst *callee = addr;
10612 MonoInst *call, *localloc_ins;
10613 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
10614 int low_bit_reg = alloc_preg (cfg);
10616 NEW_BBLOCK (cfg, is_gsharedvt_bb);
10617 NEW_BBLOCK (cfg, end_bb);
10619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
10620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
10621 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
10623 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
10624 addr = emit_get_rgctx_sig (cfg, context_used,
10625 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
10626 /*
10627 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
10628 */
10629 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
10630 ins->dreg = alloc_preg (cfg);
10631 ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
10632 MONO_ADD_INS (cfg->cbb, ins);
10633 localloc_ins = ins;
10634 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
10636 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
10638 call = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
10639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10641 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
10642 MONO_START_BB (cfg, is_gsharedvt_bb);
10643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
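/* Clear the tag bit so the callee sees the real argument value. */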
10644 ins = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
10645 ins->dreg = call->dreg;
10647 MONO_START_BB (cfg, end_bb);
10648 } else {
10649 /* Caller uses a normal calling conv */
10651 MonoInst *callee = addr;
10652 MonoInst *call, *localloc_ins;
10653 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
10654 int low_bit_reg = alloc_preg (cfg);
10656 NEW_BBLOCK (cfg, is_gsharedvt_bb);
10657 NEW_BBLOCK (cfg, end_bb);
10659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
10660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
10661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
10663 /* Normal case: callee uses a normal cconv, no conversion is needed */
10664 call = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
10665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10666 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
10667 MONO_START_BB (cfg, is_gsharedvt_bb);
10668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
10669 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
10670 MONO_ADD_INS (cfg->cbb, addr);
10671 /*
10672 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
10673 */
10674 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
10675 ins->dreg = alloc_preg (cfg);
10676 ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
10677 MONO_ADD_INS (cfg->cbb, ins);
10678 localloc_ins = ins;
10679 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
10681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
10683 ins = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
10684 ins->dreg = call->dreg;
10685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10687 MONO_START_BB (cfg, end_bb);
10688 }
10689 } else {
10690 /* Same as CEE_CALLI */
10691 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
10692 /*
10693 * We pass the address to the gsharedvt trampoline in the rgctx reg
10694 */
10695 MonoInst *callee = addr;
10697 addr = emit_get_rgctx_sig (cfg, context_used,
10698 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
10699 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
10700 } else {
10701 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
10702 }
10703 }
10705 if (!MONO_TYPE_IS_VOID (fsig->ret))
10706 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10708 CHECK_CFG_EXCEPTION;
10710 ins_flag = 0;
10711 constrained_class = NULL;
10712 break;
10713 }
10714 case MONO_CEE_MONO_LDDOMAIN:
10715 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10716 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
10717 *sp++ = ins;
10718 break;
10719 case MONO_CEE_MONO_SAVE_LAST_ERROR:
10720 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10722 // Just an IL prefix; it sets this flag, which is picked up by call instructions.
10723 save_last_error = TRUE;
10724 break;
10725 case MONO_CEE_MONO_GET_RGCTX_ARG:
10726 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10728 mono_create_rgctx_var (cfg);
10730 MONO_INST_NEW (cfg, ins, OP_MOVE);
10731 ins->dreg = alloc_dreg (cfg, STACK_PTR);
10732 ins->sreg1 = cfg->rgctx_var->dreg;
10733 ins->type = STACK_PTR;
10734 MONO_ADD_INS (cfg->cbb, ins);
10736 *sp++ = ins;
10737 break;
10738 case MONO_CEE_MONO_GET_SP: {
10739 /* Used by COOP only, so this is good enough */
10740 MonoInst *var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
10741 EMIT_NEW_VARLOADA (cfg, ins, var, NULL);
10742 *sp++ = ins;
10743 break;
10744 }
10746 case MONO_CEE_ARGLIST: {
10747 /* somewhat similar to LDTOKEN */
10748 MonoInst *addr, *vtvar;
10749 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.argumenthandle_class), OP_LOCAL);
10751 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10752 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10754 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10755 ins->type = STACK_VTYPE;
10756 ins->klass = mono_defaults.argumenthandle_class;
10757 *sp++ = ins;
10758 break;
10759 }
10760 case MONO_CEE_CEQ:
10761 case MONO_CEE_CGT:
10762 case MONO_CEE_CGT_UN:
10763 case MONO_CEE_CLT:
10764 case MONO_CEE_CLT_UN: {
10765 MonoInst *cmp, *arg1, *arg2;
10767 sp -= 2;
10768 arg1 = sp [0];
10769 arg2 = sp [1];
10771 /*
10772 * The following transforms:
10773 * CEE_CEQ into OP_CEQ
10774 * CEE_CGT into OP_CGT
10775 * CEE_CGT_UN into OP_CGT_UN
10776 * CEE_CLT into OP_CLT
10777 * CEE_CLT_UN into OP_CLT_UN
10778 */
10779 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
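/* The OP_C* opcodes mirror the layout of the CEE_C* opcodes, which lets the IR opcode be computed arithmetically instead of with a switch. */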
10781 MONO_INST_NEW (cfg, ins, cmp->opcode);
10782 cmp->sreg1 = arg1->dreg;
10783 cmp->sreg2 = arg2->dreg;
10784 type_from_op (cfg, cmp, arg1, arg2);
10785 CHECK_TYPE (cmp);
10786 add_widen_op (cfg, cmp, &arg1, &arg2);
10787 if ((arg1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
10788 cmp->opcode = OP_LCOMPARE;
10789 else if (arg1->type == STACK_R4)
10790 cmp->opcode = OP_RCOMPARE;
10791 else if (arg1->type == STACK_R8)
10792 cmp->opcode = OP_FCOMPARE;
10793 else
10794 cmp->opcode = OP_ICOMPARE;
10795 MONO_ADD_INS (cfg->cbb, cmp);
10796 ins->type = STACK_I4;
10797 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
10798 type_from_op (cfg, ins, arg1, arg2);
10800 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
10801 /*
10802 * The backends expect the fceq opcodes to do the
10803 * comparison too.
10804 */
10805 ins->sreg1 = cmp->sreg1;
10806 ins->sreg2 = cmp->sreg2;
10807 NULLIFY_INS (cmp);
10808 }
10809 MONO_ADD_INS (cfg->cbb, ins);
10810 *sp++ = ins;
10811 break;
10812 }
10813 case MONO_CEE_LDFTN: {
10814 MonoInst *argconst;
10815 MonoMethod *cil_method;
10817 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10818 CHECK_CFG_ERROR;
10820 mono_class_init_internal (cmethod->klass);
10822 mono_save_token_info (cfg, image, n, cmethod);
10824 context_used = mini_method_check_context_used (cfg, cmethod);
10826 cil_method = cmethod;
10827 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10828 emit_method_access_failure (cfg, method, cil_method);
10830 if (mono_security_core_clr_enabled ())
10831 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10833 /*
10834 * Optimize the common case of ldftn+delegate creation
10835 */
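/*
 * A sketch of the IL shape being matched here (illustrative names, not compiled):
 *
 *     ldftn  instance void Foo::Bar ()
 *     newobj instance void FooDelegate::.ctor (object, native int)
 *
 * When the next opcode is a newobj of a class whose parent is MulticastDelegate,
 * handle_delegate_ctor () can build the delegate directly instead of going
 * through the mono_ldftn icall.
 */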
10836 if ((sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
10837 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
10838 if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
10839 MonoInst *target_ins, *handle_ins;
10840 MonoMethod *invoke;
10841 int invoke_context_used;
10843 invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
10844 if (!invoke || !mono_method_signature_internal (invoke))
10845 LOAD_ERROR;
10847 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10849 target_ins = sp [-1];
10851 if (mono_security_core_clr_enabled ())
10852 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
10854 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10855 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10856 if (mono_method_signature_internal (invoke)->param_count == mono_method_signature_internal (cmethod)->param_count) {
10857 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10858 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10859 }
10860 }
10862 if ((invoke_context_used == 0 || !cfg->gsharedvt) || cfg->llvm_only) {
10863 if (cfg->verbose_level > 3)
10864 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
10865 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, FALSE))) {
10866 sp --;
10867 *sp = handle_ins;
10868 CHECK_CFG_EXCEPTION;
10869 sp ++;
10870 next_ip += 5;
10871 il_op = MONO_CEE_NEWOBJ;
10872 break;
10873 } else {
10874 CHECK_CFG_ERROR;
10875 }
10876 }
10877 }
10878 }
10880 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10881 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10882 *sp++ = ins;
10884 inline_costs += CALL_COST * MIN(10, num_calls++);
10885 break;
10886 }
10887 case MONO_CEE_LDVIRTFTN: {
10888 MonoInst *args [2];
10890 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10891 CHECK_CFG_ERROR;
10893 mono_class_init_internal (cmethod->klass);
10895 context_used = mini_method_check_context_used (cfg, cmethod);
10897 if (mono_security_core_clr_enabled ())
10898 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10900 /*
10901 * Optimize the common case of ldvirtftn+delegate creation
10902 */
10903 if (previous_il_op == MONO_CEE_DUP && (sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
10904 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
10905 if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
10906 MonoInst *target_ins, *handle_ins;
10907 MonoMethod *invoke;
10908 int invoke_context_used;
10909 const gboolean is_virtual = (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) != 0;
10911 invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
10912 if (!invoke || !mono_method_signature_internal (invoke))
10913 LOAD_ERROR;
10915 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10917 target_ins = sp [-1];
10919 if (mono_security_core_clr_enabled ())
10920 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
10922 if (invoke_context_used == 0 || !cfg->gsharedvt || cfg->llvm_only) {
10923 if (cfg->verbose_level > 3)
10924 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
10925 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, is_virtual))) {
10926 sp -= 2;
10927 *sp = handle_ins;
10928 CHECK_CFG_EXCEPTION;
10929 next_ip += 5;
10930 previous_il_op = MONO_CEE_NEWOBJ;
10931 sp ++;
10932 break;
10933 } else {
10934 CHECK_CFG_ERROR;
10935 }
10936 }
10937 }
10938 }
10940 --sp;
10941 args [0] = *sp;
10943 args [1] = emit_get_rgctx_method (cfg, context_used,
10944 cmethod, MONO_RGCTX_INFO_METHOD);
10946 if (context_used)
10947 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10948 else
10949 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10951 inline_costs += CALL_COST * MIN(10, num_calls++);
10952 break;
10953 }
10954 case MONO_CEE_LOCALLOC: {
10955 MonoBasicBlock *non_zero_bb, *end_bb;
10956 int alloc_ptr = alloc_preg (cfg);
10957 --sp;
10958 if (sp != stack_start)
10959 UNVERIFIED;
10960 if (cfg->method != method)
10961 /*
10962 * Inlining this into a loop in a parent could lead to
10963 * stack overflows which is different behavior than the
10964 * non-inlined case, thus disable inlining in this case.
10965 */
10966 INLINE_FAILURE("localloc");
10968 NEW_BBLOCK (cfg, non_zero_bb);
10969 NEW_BBLOCK (cfg, end_bb);
10971 /* if size != zero */
10972 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10973 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
10975 //size is zero, so result is NULL
10976 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
10977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10979 MONO_START_BB (cfg, non_zero_bb);
10980 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10981 ins->dreg = alloc_ptr;
10982 ins->sreg1 = sp [0]->dreg;
10983 ins->type = STACK_PTR;
10984 MONO_ADD_INS (cfg->cbb, ins);
10986 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10987 if (header->init_locals)
10988 ins->flags |= MONO_INST_INIT;
10990 MONO_START_BB (cfg, end_bb);
10991 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
10992 ins->type = STACK_PTR;
10994 *sp++ = ins;
10995 break;
10996 }
10997 case MONO_CEE_ENDFILTER: {
10998 MonoExceptionClause *clause, *nearest;
10999 int cc;
11001 --sp;
11002 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11003 UNVERIFIED;
11004 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11005 ins->sreg1 = (*sp)->dreg;
11006 MONO_ADD_INS (cfg->cbb, ins);
11007 start_new_bblock = 1;
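/* Find the filter clause whose filter block contains this endfilter; the check below verifies that the endfilter is the last instruction of that block. */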
11009 nearest = NULL;
11010 for (cc = 0; cc < header->num_clauses; ++cc) {
11011 clause = &header->clauses [cc];
11012 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11013 ((next_ip - header->code) > clause->data.filter_offset && (next_ip - header->code) <= clause->handler_offset) &&
11014 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
11015 nearest = clause;
11016 }
11017 g_assert (nearest);
11018 if ((next_ip - header->code) != nearest->handler_offset)
11019 UNVERIFIED;
11021 break;
11022 }
11023 case MONO_CEE_UNALIGNED_:
11024 ins_flag |= MONO_INST_UNALIGNED;
11025 /* FIXME: record alignment? we can assume 1 for now */
11026 break;
11027 case MONO_CEE_VOLATILE_:
11028 ins_flag |= MONO_INST_VOLATILE;
11029 break;
11030 case MONO_CEE_TAIL_:
11031 ins_flag |= MONO_INST_TAILCALL;
11032 cfg->flags |= MONO_CFG_HAS_TAILCALL;
11033 /* Can't inline tailcalls at this time */
11034 inline_costs += 100000;
11035 break;
11036 case MONO_CEE_INITOBJ:
11037 --sp;
11038 klass = mini_get_class (method, token, generic_context);
11039 CHECK_TYPELOAD (klass);
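/* For reference types initobj reduces to storing a single null pointer, while value types need a full zero initialization. */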
11040 if (mini_class_is_reference (klass))
11041 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11042 else
11043 mini_emit_initobj (cfg, *sp, NULL, klass);
11044 inline_costs += 1;
11045 break;
11046 case MONO_CEE_CONSTRAINED_:
11047 constrained_class = mini_get_class (method, token, generic_context);
11048 CHECK_TYPELOAD (constrained_class);
11049 break;
11050 case MONO_CEE_CPBLK:
11051 sp -= 3;
11052 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
11053 ins_flag = 0;
11054 inline_costs += 1;
11055 break;
11056 case MONO_CEE_INITBLK:
11057 sp -= 3;
11058 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
11059 ins_flag = 0;
11060 inline_costs += 1;
11061 break;
11062 case MONO_CEE_NO_:
11063 if (ip [2] & 1)
11064 ins_flag |= MONO_INST_NOTYPECHECK;
11065 if (ip [2] & 2)
11066 ins_flag |= MONO_INST_NORANGECHECK;
11067 /* we ignore the no-nullcheck for now since we
11068 * really do it explicitly only when doing callvirt->call
11069 */
11070 break;
11071 case MONO_CEE_RETHROW: {
11072 MonoInst *load;
11073 int handler_offset = -1;
11075 for (i = 0; i < header->num_clauses; ++i) {
11076 MonoExceptionClause *clause = &header->clauses [i];
11077 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11078 handler_offset = clause->handler_offset;
11079 break;
11080 }
11081 }
11083 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
11085 if (handler_offset == -1)
11086 UNVERIFIED;
11088 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11089 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11090 ins->sreg1 = load->dreg;
11091 MONO_ADD_INS (cfg->cbb, ins);
11093 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11094 MONO_ADD_INS (cfg->cbb, ins);
11096 sp = stack_start;
11097 link_bblock (cfg, cfg->cbb, end_bblock);
11098 start_new_bblock = 1;
11099 break;
11100 }
11101 case MONO_CEE_MONO_RETHROW: {
11102 if (sp [-1]->type != STACK_OBJ)
11103 UNVERIFIED;
11105 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11106 --sp;
11107 ins->sreg1 = sp [0]->dreg;
11108 cfg->cbb->out_of_line = TRUE;
11109 MONO_ADD_INS (cfg->cbb, ins);
11110 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11111 MONO_ADD_INS (cfg->cbb, ins);
11112 sp = stack_start;
11114 link_bblock (cfg, cfg->cbb, end_bblock);
11115 start_new_bblock = 1;
11116 /* This can complicate code generation for llvm since the return value might not be defined */
11117 if (COMPILE_LLVM (cfg))
11118 INLINE_FAILURE ("mono_rethrow");
11119 break;
11120 }
11121 case MONO_CEE_SIZEOF: {
11122 guint32 val;
11123 int ialign;
11125 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) {
11126 MonoType *type = mono_type_create_from_typespec_checked (image, token, cfg->error);
11127 CHECK_CFG_ERROR;
11129 val = mono_type_size (type, &ialign);
11130 EMIT_NEW_ICONST (cfg, ins, val);
11131 } else {
11132 MonoClass *klass = mini_get_class (method, token, generic_context);
11133 CHECK_TYPELOAD (klass);
11135 if (mini_is_gsharedvt_klass (klass)) {
11136 ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_SIZEOF);
11137 ins->type = STACK_I4;
11138 } else {
11139 val = mono_type_size (m_class_get_byval_arg (klass), &ialign);
11140 EMIT_NEW_ICONST (cfg, ins, val);
11141 }
11142 }
11144 *sp++ = ins;
11145 break;
11146 }
11147 case MONO_CEE_REFANYTYPE: {
11148 MonoInst *src_var, *src;
11150 GSHAREDVT_FAILURE (il_op);
11152 --sp;
11154 // FIXME:
11155 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11156 if (!src_var)
11157 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
11158 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11159 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (mono_defaults.typehandle_class), src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
11160 *sp++ = ins;
11161 break;
11162 }
11163 case MONO_CEE_READONLY_:
11164 readonly = TRUE;
11165 break;
11167 case MONO_CEE_UNUSED56:
11168 case MONO_CEE_UNUSED57:
11169 case MONO_CEE_UNUSED70:
11170 case MONO_CEE_UNUSED:
11171 case MONO_CEE_UNUSED99:
11172 case MONO_CEE_UNUSED58:
11173 case MONO_CEE_UNUSED1:
11174 UNVERIFIED;
11176 default:
11177 g_warning ("opcode 0x%02x not handled", il_op);
11178 UNVERIFIED;
11179 }
11180 }
11181 if (start_new_bblock != 1)
11182 UNVERIFIED;
11184 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
11185 if (cfg->cbb->next_bb) {
11186 /* This could already be set because of inlining, #693905 */
11187 MonoBasicBlock *bb = cfg->cbb;
11189 while (bb->next_bb)
11190 bb = bb->next_bb;
11191 bb->next_bb = end_bblock;
11192 } else {
11193 cfg->cbb->next_bb = end_bblock;
11194 }
11196 if (cfg->method == method && cfg->domainvar) {
11197 MonoInst *store;
11198 MonoInst *get_domain;
11200 cfg->cbb = init_localsbb;
11202 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11203 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11204 MONO_ADD_INS (cfg->cbb, store);
11205 cfg->domainvar_inited = TRUE;
11206 }
11208 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11209 if (cfg->compile_aot)
11210 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11211 mono_get_got_var (cfg);
11212 #endif
11214 if (cfg->method == method && cfg->got_var)
11215 mono_emit_load_got_addr (cfg);
11217 if (init_localsbb) {
11218 cfg->cbb = init_localsbb;
11219 cfg->ip = NULL;
11220 for (i = 0; i < header->num_locals; ++i) {
11221 /*
11222 * Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (),
11223 * which need the trampoline code to work.
11224 */
11225 if (MONO_TYPE_ISSTRUCT (header->locals [i]))
11226 cfg->cbb = init_localsbb2;
11227 else
11228 cfg->cbb = init_localsbb;
11229 emit_init_local (cfg, i, header->locals [i], init_locals);
11230 }
11231 }
11233 if (cfg->init_ref_vars && cfg->method == method) {
11234 /* Emit initialization for ref vars */
11235 // FIXME: Avoid duplicate initialization for IL locals.
11236 for (i = 0; i < cfg->num_varinfo; ++i) {
11237 MonoInst *ins = cfg->varinfo [i];
11239 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11240 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11241 }
11242 }
11244 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
11245 cfg->cbb = init_localsbb;
11246 emit_push_lmf (cfg);
11247 }
11249 cfg->cbb = init_localsbb;
11250 mini_profiler_emit_enter (cfg);
11252 if (seq_points) {
11253 MonoBasicBlock *bb;
11255 /*
11256 * Make seq points at backward branch targets interruptible.
11257 */
11258 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11259 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11260 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11261 }
11263 /* Add a sequence point for method entry/exit events */
11264 if (seq_points && cfg->gen_sdb_seq_points) {
11265 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11266 MONO_ADD_INS (init_localsbb, ins);
11267 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11268 MONO_ADD_INS (cfg->bb_exit, ins);
11269 }
11271 /*
11272 * Add seq points for IL offsets which have line number info, but for which no seq point was generated during JITting because
11273 * the code they refer to was dead (#11880).
11274 */
11275 if (sym_seq_points) {
11276 for (i = 0; i < header->code_size; ++i) {
11277 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11278 MonoInst *ins;
11280 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11281 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11282 }
11283 }
11284 }
11286 cfg->ip = NULL;
11288 if (cfg->method == method) {
11289 compute_bb_regions (cfg);
11290 } else {
11291 MonoBasicBlock *bb;
11292 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
11293 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
11294 bb->real_offset = inline_offset;
11295 }
11296 }
11298 if (inline_costs < 0) {
11299 char *mname;
11301 /* Method is too large */
11302 mname = mono_method_full_name (method, TRUE);
11303 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
11304 g_free (mname);
11305 }
11307 if ((cfg->verbose_level > 2) && (cfg->method == method))
11308 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11310 goto cleanup;
11312 mono_error_exit:
11313 if (cfg->verbose_level > 3)
11314 g_print ("exiting due to error");
11316 g_assert (!is_ok (cfg->error));
11317 goto cleanup;
11319 exception_exit:
11320 if (cfg->verbose_level > 3)
11321 g_print ("exiting due to exception");
11323 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11324 goto cleanup;
11326 unverified:
11327 if (cfg->verbose_level > 3)
11328 g_print ("exiting due to invalid il");
11330 set_exception_type_from_invalid_il (cfg, method, ip);
11331 goto cleanup;
11333 cleanup:
11334 g_slist_free (class_inits);
11335 mono_basic_block_free (original_bb);
11336 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
11337 if (cfg->exception_type)
11338 return -1;
11339 else
11340 return inline_costs;
11341 }
11343 static int
11344 store_membase_reg_to_store_membase_imm (int opcode)
11345 {
11346 switch (opcode) {
11347 case OP_STORE_MEMBASE_REG:
11348 return OP_STORE_MEMBASE_IMM;
11349 case OP_STOREI1_MEMBASE_REG:
11350 return OP_STOREI1_MEMBASE_IMM;
11351 case OP_STOREI2_MEMBASE_REG:
11352 return OP_STOREI2_MEMBASE_IMM;
11353 case OP_STOREI4_MEMBASE_REG:
11354 return OP_STOREI4_MEMBASE_IMM;
11355 case OP_STOREI8_MEMBASE_REG:
11356 return OP_STOREI8_MEMBASE_IMM;
11357 default:
11358 g_assert_not_reached ();
11359 }
11361 return -1;
11362 }
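/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate variant of OPCODE, or -1 if it doesn't have one.
 */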
11364 int
11365 mono_op_to_op_imm (int opcode)
11366 {
11367 switch (opcode) {
11368 case OP_IADD:
11369 return OP_IADD_IMM;
11370 case OP_ISUB:
11371 return OP_ISUB_IMM;
11372 case OP_IDIV:
11373 return OP_IDIV_IMM;
11374 case OP_IDIV_UN:
11375 return OP_IDIV_UN_IMM;
11376 case OP_IREM:
11377 return OP_IREM_IMM;
11378 case OP_IREM_UN:
11379 return OP_IREM_UN_IMM;
11380 case OP_IMUL:
11381 return OP_IMUL_IMM;
11382 case OP_IAND:
11383 return OP_IAND_IMM;
11384 case OP_IOR:
11385 return OP_IOR_IMM;
11386 case OP_IXOR:
11387 return OP_IXOR_IMM;
11388 case OP_ISHL:
11389 return OP_ISHL_IMM;
11390 case OP_ISHR:
11391 return OP_ISHR_IMM;
11392 case OP_ISHR_UN:
11393 return OP_ISHR_UN_IMM;
11395 case OP_LADD:
11396 return OP_LADD_IMM;
11397 case OP_LSUB:
11398 return OP_LSUB_IMM;
11399 case OP_LAND:
11400 return OP_LAND_IMM;
11401 case OP_LOR:
11402 return OP_LOR_IMM;
11403 case OP_LXOR:
11404 return OP_LXOR_IMM;
11405 case OP_LSHL:
11406 return OP_LSHL_IMM;
11407 case OP_LSHR:
11408 return OP_LSHR_IMM;
11409 case OP_LSHR_UN:
11410 return OP_LSHR_UN_IMM;
11411 #if SIZEOF_REGISTER == 8
11412 case OP_LMUL:
11413 return OP_LMUL_IMM;
11414 case OP_LREM:
11415 return OP_LREM_IMM;
11416 #endif
11418 case OP_COMPARE:
11419 return OP_COMPARE_IMM;
11420 case OP_ICOMPARE:
11421 return OP_ICOMPARE_IMM;
11422 case OP_LCOMPARE:
11423 return OP_LCOMPARE_IMM;
11425 case OP_STORE_MEMBASE_REG:
11426 return OP_STORE_MEMBASE_IMM;
11427 case OP_STOREI1_MEMBASE_REG:
11428 return OP_STOREI1_MEMBASE_IMM;
11429 case OP_STOREI2_MEMBASE_REG:
11430 return OP_STOREI2_MEMBASE_IMM;
11431 case OP_STOREI4_MEMBASE_REG:
11432 return OP_STOREI4_MEMBASE_IMM;
11434 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11435 case OP_X86_PUSH:
11436 return OP_X86_PUSH_IMM;
11437 case OP_X86_COMPARE_MEMBASE_REG:
11438 return OP_X86_COMPARE_MEMBASE_IMM;
11439 #endif
11440 #if defined(TARGET_AMD64)
11441 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11442 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11443 #endif
11444 case OP_VOIDCALL_REG:
11445 return OP_VOIDCALL;
11446 case OP_CALL_REG:
11447 return OP_CALL;
11448 case OP_LCALL_REG:
11449 return OP_LCALL;
11450 case OP_FCALL_REG:
11451 return OP_FCALL;
11452 case OP_LOCALLOC:
11453 return OP_LOCALLOC_IMM;
11454 }
11456 return -1;
11457 }
11459 static int
11460 stind_to_store_membase (int opcode)
11461 {
11462 switch (opcode) {
11463 case MONO_CEE_STIND_I1:
11464 return OP_STOREI1_MEMBASE_REG;
11465 case MONO_CEE_STIND_I2:
11466 return OP_STOREI2_MEMBASE_REG;
11467 case MONO_CEE_STIND_I4:
11468 return OP_STOREI4_MEMBASE_REG;
11469 case MONO_CEE_STIND_I:
11470 case MONO_CEE_STIND_REF:
11471 return OP_STORE_MEMBASE_REG;
11472 case MONO_CEE_STIND_I8:
11473 return OP_STOREI8_MEMBASE_REG;
11474 case MONO_CEE_STIND_R4:
11475 return OP_STORER4_MEMBASE_REG;
11476 case MONO_CEE_STIND_R8:
11477 return OP_STORER8_MEMBASE_REG;
11478 default:
11479 g_assert_not_reached ();
11480 }
11482 return -1;
11483 }
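/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_..._MEM variant of LOAD_OPCODE, i.e. a load from an
 * absolute address, on platforms which support it, or -1.
 */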
11485 int
11486 mono_load_membase_to_load_mem (int opcode)
11487 {
11488 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11489 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11490 switch (opcode) {
11491 case OP_LOAD_MEMBASE:
11492 return OP_LOAD_MEM;
11493 case OP_LOADU1_MEMBASE:
11494 return OP_LOADU1_MEM;
11495 case OP_LOADU2_MEMBASE:
11496 return OP_LOADU2_MEM;
11497 case OP_LOADI4_MEMBASE:
11498 return OP_LOADI4_MEM;
11499 case OP_LOADU4_MEMBASE:
11500 return OP_LOADU4_MEM;
11501 #if SIZEOF_REGISTER == 8
11502 case OP_LOADI8_MEMBASE:
11503 return OP_LOADI8_MEM;
11504 #endif
11505 }
11506 #endif
11508 return -1;
11509 }
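/*
 * op_to_op_dest_membase:
 *
 *   Return an opcode which applies OPCODE directly to the memory location
 * written by STORE_OPCODE, fusing the ALU operation with the store, or -1
 * if no such opcode exists.
 */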
11511 static int
11512 op_to_op_dest_membase (int store_opcode, int opcode)
11513 {
11514 #if defined(TARGET_X86)
11515 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
11516 return -1;
11518 switch (opcode) {
11519 case OP_IADD:
11520 return OP_X86_ADD_MEMBASE_REG;
11521 case OP_ISUB:
11522 return OP_X86_SUB_MEMBASE_REG;
11523 case OP_IAND:
11524 return OP_X86_AND_MEMBASE_REG;
11525 case OP_IOR:
11526 return OP_X86_OR_MEMBASE_REG;
11527 case OP_IXOR:
11528 return OP_X86_XOR_MEMBASE_REG;
11529 case OP_ADD_IMM:
11530 case OP_IADD_IMM:
11531 return OP_X86_ADD_MEMBASE_IMM;
11532 case OP_SUB_IMM:
11533 case OP_ISUB_IMM:
11534 return OP_X86_SUB_MEMBASE_IMM;
11535 case OP_AND_IMM:
11536 case OP_IAND_IMM:
11537 return OP_X86_AND_MEMBASE_IMM;
11538 case OP_OR_IMM:
11539 case OP_IOR_IMM:
11540 return OP_X86_OR_MEMBASE_IMM;
11541 case OP_XOR_IMM:
11542 case OP_IXOR_IMM:
11543 return OP_X86_XOR_MEMBASE_IMM;
11544 case OP_MOVE:
11545 return OP_NOP;
11546 }
11547 #endif
11549 #if defined(TARGET_AMD64)
11550 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
11551 return -1;
11553 switch (opcode) {
11554 case OP_IADD:
11555 return OP_X86_ADD_MEMBASE_REG;
11556 case OP_ISUB:
11557 return OP_X86_SUB_MEMBASE_REG;
11558 case OP_IAND:
11559 return OP_X86_AND_MEMBASE_REG;
11560 case OP_IOR:
11561 return OP_X86_OR_MEMBASE_REG;
11562 case OP_IXOR:
11563 return OP_X86_XOR_MEMBASE_REG;
11564 case OP_IADD_IMM:
11565 return OP_X86_ADD_MEMBASE_IMM;
11566 case OP_ISUB_IMM:
11567 return OP_X86_SUB_MEMBASE_IMM;
11568 case OP_IAND_IMM:
11569 return OP_X86_AND_MEMBASE_IMM;
11570 case OP_IOR_IMM:
11571 return OP_X86_OR_MEMBASE_IMM;
11572 case OP_IXOR_IMM:
11573 return OP_X86_XOR_MEMBASE_IMM;
11574 case OP_LADD:
11575 return OP_AMD64_ADD_MEMBASE_REG;
11576 case OP_LSUB:
11577 return OP_AMD64_SUB_MEMBASE_REG;
11578 case OP_LAND:
11579 return OP_AMD64_AND_MEMBASE_REG;
11580 case OP_LOR:
11581 return OP_AMD64_OR_MEMBASE_REG;
11582 case OP_LXOR:
11583 return OP_AMD64_XOR_MEMBASE_REG;
11584 case OP_ADD_IMM:
11585 case OP_LADD_IMM:
11586 return OP_AMD64_ADD_MEMBASE_IMM;
11587 case OP_SUB_IMM:
11588 case OP_LSUB_IMM:
11589 return OP_AMD64_SUB_MEMBASE_IMM;
11590 case OP_AND_IMM:
11591 case OP_LAND_IMM:
11592 return OP_AMD64_AND_MEMBASE_IMM;
11593 case OP_OR_IMM:
11594 case OP_LOR_IMM:
11595 return OP_AMD64_OR_MEMBASE_IMM;
11596 case OP_XOR_IMM:
11597 case OP_LXOR_IMM:
11598 return OP_AMD64_XOR_MEMBASE_IMM;
11599 case OP_MOVE:
11600 return OP_NOP;
11601 }
11602 #endif
11604 return -1;
11605 }
11607 static int
11608 op_to_op_store_membase (int store_opcode, int opcode)
11609 {
11610 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11611 switch (opcode) {
11612 case OP_ICEQ:
11613 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11614 return OP_X86_SETEQ_MEMBASE;
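/* Note the fall through: the OP_CNE guard below tests the same store_opcode condition, so a non-STOREI1 OP_ICEQ still falls out of the switch and returns -1. */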
11615 case OP_CNE:
11616 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11617 return OP_X86_SETNE_MEMBASE;
11618 }
11619 #endif
11621 return -1;
11622 }
11624 static int
11625 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
11626 {
11627 #ifdef TARGET_X86
11628 /* FIXME: This has sign extension issues */
11630 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11631 return OP_X86_COMPARE_MEMBASE8_IMM;
11634 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11635 return -1;
11637 switch (opcode) {
11638 case OP_X86_PUSH:
11639 return OP_X86_PUSH_MEMBASE;
11640 case OP_COMPARE_IMM:
11641 case OP_ICOMPARE_IMM:
11642 return OP_X86_COMPARE_MEMBASE_IMM;
11643 case OP_COMPARE:
11644 case OP_ICOMPARE:
11645 return OP_X86_COMPARE_MEMBASE_REG;
11646 }
11647 #endif
11649 #ifdef TARGET_AMD64
11650 /* FIXME: This has sign extension issues */
11652 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11653 return OP_X86_COMPARE_MEMBASE8_IMM;
11656 switch (opcode) {
11657 case OP_X86_PUSH:
11658 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
11659 return OP_X86_PUSH_MEMBASE;
11660 break;
11661 /* FIXME: This only works for 32 bit immediates
11662 case OP_COMPARE_IMM:
11663 case OP_LCOMPARE_IMM:
11664 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11665 return OP_AMD64_COMPARE_MEMBASE_IMM;
11666 */
11667 case OP_ICOMPARE_IMM:
11668 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11669 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11670 break;
11671 case OP_COMPARE:
11672 case OP_LCOMPARE:
11673 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
11674 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11675 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
11676 return OP_AMD64_COMPARE_MEMBASE_REG;
11677 break;
11678 case OP_ICOMPARE:
11679 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11680 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11681 break;
11682 }
11683 #endif
11685 return -1;
11686 }
11688 static int
11689 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
11690 {
11691 #ifdef TARGET_X86
11692 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11693 return -1;
11695 switch (opcode) {
11696 case OP_COMPARE:
11697 case OP_ICOMPARE:
11698 return OP_X86_COMPARE_REG_MEMBASE;
11699 case OP_IADD:
11700 return OP_X86_ADD_REG_MEMBASE;
11701 case OP_ISUB:
11702 return OP_X86_SUB_REG_MEMBASE;
11703 case OP_IAND:
11704 return OP_X86_AND_REG_MEMBASE;
11705 case OP_IOR:
11706 return OP_X86_OR_REG_MEMBASE;
11707 case OP_IXOR:
11708 return OP_X86_XOR_REG_MEMBASE;
11709 }
11710 #endif
11712 #ifdef TARGET_AMD64
11713 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
11714 switch (opcode) {
11715 case OP_ICOMPARE:
11716 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11717 case OP_IADD:
11718 return OP_X86_ADD_REG_MEMBASE;
11719 case OP_ISUB:
11720 return OP_X86_SUB_REG_MEMBASE;
11721 case OP_IAND:
11722 return OP_X86_AND_REG_MEMBASE;
11723 case OP_IOR:
11724 return OP_X86_OR_REG_MEMBASE;
11725 case OP_IXOR:
11726 return OP_X86_XOR_REG_MEMBASE;
11727 }
11728 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
11729 switch (opcode) {
11730 case OP_COMPARE:
11731 case OP_LCOMPARE:
11732 return OP_AMD64_COMPARE_REG_MEMBASE;
11733 case OP_LADD:
11734 return OP_AMD64_ADD_REG_MEMBASE;
11735 case OP_LSUB:
11736 return OP_AMD64_SUB_REG_MEMBASE;
11737 case OP_LAND:
11738 return OP_AMD64_AND_REG_MEMBASE;
11739 case OP_LOR:
11740 return OP_AMD64_OR_REG_MEMBASE;
11741 case OP_LXOR:
11742 return OP_AMD64_XOR_REG_MEMBASE;
11743 }
11744 }
11745 #endif
11747 return -1;
11748 }
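/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes which are
 * emulated in software on this configuration, since their immediate
 * variants would need to be emulated as well.
 */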
11750 int
11751 mono_op_to_op_imm_noemul (int opcode)
11752 {
11753 switch (opcode) {
11754 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11755 case OP_LSHR:
11756 case OP_LSHL:
11757 case OP_LSHR_UN:
11758 return -1;
11759 #endif
11760 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11761 case OP_IDIV:
11762 case OP_IDIV_UN:
11763 case OP_IREM:
11764 case OP_IREM_UN:
11765 return -1;
11766 #endif
11767 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
11768 case OP_IMUL:
11769 return -1;
11770 #endif
11771 default:
11772 return mono_op_to_op_imm (opcode);
11773 }
11774 }
11776 /*
11777 * mono_handle_global_vregs:
11778 *
11779 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11780 * for them.
11781 */
11782 void
11783 mono_handle_global_vregs (MonoCompile *cfg)
11784 {
11785 gint32 *vreg_to_bb;
11786 MonoBasicBlock *bb;
11787 int i, pos;
11789 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
11791 #ifdef MONO_ARCH_SIMD_INTRINSICS
11792 if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION)
11793 mono_simd_simplify_indirection (cfg);
11794 #endif
11796 /* Find local vregs used in more than one bb */
11797 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11798 MonoInst *ins = bb->code;
11799 int block_num = bb->block_num;
11801 if (cfg->verbose_level > 2)
11802 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11804 cfg->cbb = bb;
11805 for (; ins; ins = ins->next) {
11806 const char *spec = INS_INFO (ins->opcode);
11807 int regtype = 0, regindex;
11808 gint32 prev_bb;
11810 if (G_UNLIKELY (cfg->verbose_level > 2))
11811 mono_print_ins (ins);
11813 g_assert (ins->opcode >= MONO_CEE_LAST);
11815 for (regindex = 0; regindex < 4; regindex ++) {
11816 int vreg = 0;
11818 if (regindex == 0) {
11819 regtype = spec [MONO_INST_DEST];
11820 if (regtype == ' ')
11821 continue;
11822 vreg = ins->dreg;
11823 } else if (regindex == 1) {
11824 regtype = spec [MONO_INST_SRC1];
11825 if (regtype == ' ')
11826 continue;
11827 vreg = ins->sreg1;
11828 } else if (regindex == 2) {
11829 regtype = spec [MONO_INST_SRC2];
11830 if (regtype == ' ')
11831 continue;
11832 vreg = ins->sreg2;
11833 } else if (regindex == 3) {
11834 regtype = spec [MONO_INST_SRC3];
11835 if (regtype == ' ')
11836 continue;
11837 vreg = ins->sreg3;
11838 }
11840 #if SIZEOF_REGISTER == 4
11841 /* In the LLVM case, the long opcodes are not decomposed */
11842 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11843 /*
11844 * Since some instructions reference the original long vreg,
11845 * and some reference the two component vregs, it is quite hard
11846 * to determine when it needs to be global. So be conservative.
11847 */
11848 if (!get_vreg_to_inst (cfg, vreg)) {
11849 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
11851 if (cfg->verbose_level > 2)
11852 printf ("LONG VREG R%d made global.\n", vreg);
11853 }
11855 /*
11856 * Make the component vregs volatile since the optimizations can
11857 * get confused otherwise.
11858 */
11859 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
11860 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
11861 }
11862 #endif
11864 g_assert (vreg != -1);
11866 prev_bb = vreg_to_bb [vreg];
11867 if (prev_bb == 0) {
11868 /* 0 is a valid block num */
11869 vreg_to_bb [vreg] = block_num + 1;
11870 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11871 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11872 continue;
11874 if (!get_vreg_to_inst (cfg, vreg)) {
11875 if (G_UNLIKELY (cfg->verbose_level > 2))
11876 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11878 switch (regtype) {
11879 case 'i':
11880 if (vreg_is_ref (cfg, vreg))
11881 mono_compile_create_var_for_vreg (cfg, mono_get_object_type (), OP_LOCAL, vreg);
11882 else
11883 mono_compile_create_var_for_vreg (cfg, mono_get_int_type (), OP_LOCAL, vreg);
11884 break;
11885 case 'l':
11886 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
11887 break;
11888 case 'f':
11889 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL, vreg);
11890 break;
11891 case 'v':
11892 case 'x':
11893 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL, vreg);
11894 break;
11895 default:
11896 g_assert_not_reached ();
11897 }
11898 }
11900 /* Flag as having been used in more than one bb */
11901 vreg_to_bb [vreg] = -1;
11902 }
11903 }
11904 }
11905 }
11907 /* If a variable is used in only one bblock, convert it into a local vreg */
11908 for (i = 0; i < cfg->num_varinfo; i++) {
11909 MonoInst *var = cfg->varinfo [i];
11910 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11912 switch (var->type) {
11913 case STACK_I4:
11914 case STACK_OBJ:
11915 case STACK_PTR:
11916 case STACK_MP:
11917 case STACK_VTYPE:
11918 #if SIZEOF_REGISTER == 8
11919 case STACK_I8:
11920 #endif
11921 #if !defined(TARGET_X86)
11922 /* Enabling this screws up the fp stack on x86 */
11923 case STACK_R8:
11924 #endif
11925 if (mono_arch_is_soft_float ())
11926 break;
11928 /*
11929 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
11930 break;
11931 */
11933 /* Arguments are implicitly global */
11934 /* Putting R4 vars into registers doesn't work currently */
11935 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
11936 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (m_class_get_byval_arg (var->klass)->type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
11937 /*
11938 * Make sure the variable's liveness interval doesn't contain a call, since
11939 * that would cause the lvreg to be spilled, making the whole optimization
11940 * useless.
11941 */
11942 /* This is too slow for JIT compilation */
11943 #if 0
11944 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11945 MonoInst *ins;
11946 int def_index, call_index, ins_index;
11947 gboolean spilled = FALSE;
11949 def_index = -1;
11950 call_index = -1;
11951 ins_index = 0;
11952 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11953 const char *spec = INS_INFO (ins->opcode);
11955 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11956 def_index = ins_index;
11958 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11959 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
11960 if (call_index > def_index) {
11961 spilled = TRUE;
11962 break;
11963 }
11964 }
11966 if (MONO_IS_CALL (ins))
11967 call_index = ins_index;
11969 ins_index ++;
11970 }
11972 if (spilled)
11973 break;
11974 }
11975 #endif
11977 if (G_UNLIKELY (cfg->verbose_level > 2))
11978 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11979 var->flags |= MONO_INST_IS_DEAD;
11980 cfg->vreg_to_inst [var->dreg] = NULL;
11981 }
11982 break;
11983 }
11984 }
11986 /*
11987 * Compress the varinfo and vars tables so the liveness computation is faster and
11988 * takes up less space.
11989 */
11990 pos = 0;
11991 for (i = 0; i < cfg->num_varinfo; ++i) {
11992 MonoInst *var = cfg->varinfo [i];
11993 if (pos < i && cfg->locals_start == i)
11994 cfg->locals_start = pos;
11995 if (!(var->flags & MONO_INST_IS_DEAD)) {
11996 if (pos < i) {
11997 cfg->varinfo [pos] = cfg->varinfo [i];
11998 cfg->varinfo [pos]->inst_c0 = pos;
11999 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12000 cfg->vars [pos].idx = pos;
12001 #if SIZEOF_REGISTER == 4
12002 if (cfg->varinfo [pos]->type == STACK_I8) {
12003 /* Modify the two component vars too */
12004 MonoInst *var1;
12006 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
12007 var1->inst_c0 = pos;
12008 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
12009 var1->inst_c0 = pos;
12011 #endif
12013 pos ++;
12016 cfg->num_varinfo = pos;
12017 if (cfg->locals_start > cfg->num_varinfo)
12018 cfg->locals_start = cfg->num_varinfo;
12021 /*
12022 * mono_allocate_gsharedvt_vars:
12023 *
12024 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
12025 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
12026 */
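/*
 * Editorial note, not part of the original source: the encoding used below is
 *   cfg->gsharedvt_vreg_to_idx [vreg] == 0        -> not a gsharedvt variable
 *   cfg->gsharedvt_vreg_to_idx [vreg] == idx + 1  -> local; entries [idx] holds its offset
 *   cfg->gsharedvt_vreg_to_idx [vreg] == -1       -> argument passed by reference
 * mono_spill_global_vars () relies on this encoding when decomposing OP_LDADDR.
 */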
12027 void
12028 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
12030 int i;
12032 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
12034 for (i = 0; i < cfg->num_varinfo; ++i) {
12035 MonoInst *ins = cfg->varinfo [i];
12036 int idx;
12038 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
12039 if (i >= cfg->locals_start) {
12040 /* Local */
12041 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12042 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12043 ins->opcode = OP_GSHAREDVT_LOCAL;
12044 ins->inst_imm = idx;
12045 } else {
12046 /* Arg */
12047 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
12048 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12054 /*
12055 * mono_spill_global_vars:
12056 *
12057 * Generate spill code for variables which are not allocated to registers,
12058 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12059 * code is generated which could be optimized by the local optimization passes.
12060 */
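/*
 * Editorial example, not part of the original source (opcodes, vregs and
 * offsets illustrative): for a stack-allocated variable V, a use such as
 *   int_add R10 <- V R6
 * becomes
 *   loadi4_membase R20 <- [fp + <offset of V>]
 *   int_add R10 <- R20 R6
 * and a definition of V gets a matching store_membase inserted after it,
 * unless the load/store can be fused into the instruction itself (see the
 * _membase cases below).
 */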
12061 void
12062 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12064 MonoBasicBlock *bb;
12065 char spec2 [16];
12066 int orig_next_vreg;
12067 guint32 *vreg_to_lvreg;
12068 guint32 *lvregs;
12069 guint32 i, lvregs_len, lvregs_size;
12070 gboolean dest_has_lvreg = FALSE;
12071 MonoStackType stacktypes [128];
12072 MonoInst **live_range_start, **live_range_end;
12073 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12075 *need_local_opts = FALSE;
12077 memset (spec2, 0, sizeof (spec2));
12079 /* FIXME: Move this function to mini.c */
12080 stacktypes [(int)'i'] = STACK_PTR;
12081 stacktypes [(int)'l'] = STACK_I8;
12082 stacktypes [(int)'f'] = STACK_R8;
12083 #ifdef MONO_ARCH_SIMD_INTRINSICS
12084 stacktypes [(int)'x'] = STACK_VTYPE;
12085 #endif
12087 #if SIZEOF_REGISTER == 4
12088 /* Create MonoInsts for longs */
12089 for (i = 0; i < cfg->num_varinfo; i++) {
12090 MonoInst *ins = cfg->varinfo [i];
12092 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12093 switch (ins->type) {
12094 case STACK_R8:
12095 case STACK_I8: {
12096 MonoInst *tree;
12098 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12099 break;
12101 g_assert (ins->opcode == OP_REGOFFSET);
12103 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
12104 g_assert (tree);
12105 tree->opcode = OP_REGOFFSET;
12106 tree->inst_basereg = ins->inst_basereg;
12107 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12109 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
12110 g_assert (tree);
12111 tree->opcode = OP_REGOFFSET;
12112 tree->inst_basereg = ins->inst_basereg;
12113 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12114 break;
12116 default:
12117 break;
12121 #endif
12123 if (cfg->compute_gc_maps) {
12124 /* registers need liveness info even for non-ref values */
12125 for (i = 0; i < cfg->num_varinfo; i++) {
12126 MonoInst *ins = cfg->varinfo [i];
12128 if (ins->opcode == OP_REGVAR)
12129 ins->flags |= MONO_INST_GC_TRACK;
12133 /* FIXME: widening and truncation */
12135 /*
12136 * As an optimization, when a variable allocated to the stack is first loaded into
12137 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12138 * the variable again.
12139 */
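/*
 * Editorial sketch, not part of the original source (vregs and offset
 * illustrative): for a stack variable V with vreg R5,
 *   int_add R10 <- R5 R6         loadi4_membase R20 <- [fp + 0x10]
 *   int_add R11 <- R5 R7    =>   int_add R10 <- R20 R6
 *                                int_add R11 <- R20 R7   (R20 reused, no reload)
 * The cache is flushed at bblock boundaries and at calls (see below).
 */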
12140 orig_next_vreg = cfg->next_vreg;
12141 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12142 lvregs_size = 1024;
12143 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
12144 lvregs_len = 0;
12146 /*
12147 * These arrays contain the first and last instructions accessing a given
12148 * variable.
12149 * Since we emit bblocks in the same order we process them here, and we
12150 * don't split live ranges, these will precisely describe the live range of
12151 * the variable, i.e. the instruction range where a valid value can be found
12152 * in the variable's location.
12153 * The live range is computed using the liveness info computed by the liveness pass.
12154 * We can't use vmv->range, since that is an abstract live range, and we need
12155 * one which is instruction precise.
12156 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12157 */
12158 /* FIXME: Only do this if debugging info is requested */
12159 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12160 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12161 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12162 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12164 /* Add spill loads/stores */
12165 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12166 MonoInst *ins;
12168 if (cfg->verbose_level > 2)
12169 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12171 /* Clear vreg_to_lvreg array */
12172 for (i = 0; i < lvregs_len; i++)
12173 vreg_to_lvreg [lvregs [i]] = 0;
12174 lvregs_len = 0;
12176 cfg->cbb = bb;
12177 MONO_BB_FOR_EACH_INS (bb, ins) {
12178 const char *spec = INS_INFO (ins->opcode);
12179 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12180 gboolean store, no_lvreg;
12181 int sregs [MONO_MAX_SRC_REGS];
12183 if (G_UNLIKELY (cfg->verbose_level > 2))
12184 mono_print_ins (ins);
12186 if (ins->opcode == OP_NOP)
12187 continue;
12189 /*
12190 * We handle LDADDR here as well, since it can only be decomposed
12191 * when variable addresses are known.
12192 */
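/*
 * Editorial example, not part of the original source (base register and
 * offset illustrative): for a plain stack variable V at [fp + 16],
 *   OP_LDADDR R10 <- V   becomes   OP_ADD_IMM R10 <- fp, 16
 * while the VTARG_ADDR and gsharedvt cases below need more involved sequences.
 */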
12193 if (ins->opcode == OP_LDADDR) {
12194 MonoInst *var = (MonoInst *)ins->inst_p0;
12196 if (var->opcode == OP_VTARG_ADDR) {
12197 /* Happens on SPARC/S390 where vtypes are passed by reference */
12198 MonoInst *vtaddr = var->inst_left;
12199 if (vtaddr->opcode == OP_REGVAR) {
12200 ins->opcode = OP_MOVE;
12201 ins->sreg1 = vtaddr->dreg;
12202 }
12203 else if (vtaddr->opcode == OP_REGOFFSET) {
12204 ins->opcode = OP_LOAD_MEMBASE;
12205 ins->inst_basereg = vtaddr->inst_basereg;
12206 ins->inst_offset = vtaddr->inst_offset;
12207 } else
12208 NOT_IMPLEMENTED;
12209 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
12210 /* gsharedvt arg passed by ref */
12211 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12213 ins->opcode = OP_LOAD_MEMBASE;
12214 ins->inst_basereg = var->inst_basereg;
12215 ins->inst_offset = var->inst_offset;
12216 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
12217 MonoInst *load, *load2, *load3;
12218 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
12219 int reg1, reg2, reg3;
12220 MonoInst *info_var = cfg->gsharedvt_info_var;
12221 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12223 /*
12224 * gsharedvt local.
12225 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12226 */
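/*
 * Editorial sketch, not part of the original source (registers illustrative),
 * of the sequence built below:
 *   reg1 = load    info_var                                   (runtime info ptr)
 *   reg2 = loadi4  [reg1 + entries + idx * sizeof (gpointer)] (offset of this local)
 *   reg3 = load    locals_var                                 (base of the locals area)
 *   ins  = OP_PADD reg3, reg2                                 (address of the local)
 * The loads of info_var/locals_var are skipped when those live in registers.
 */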
12228 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12230 g_assert (info_var);
12231 g_assert (locals_var);
12233 /* Mark the instruction used to compute the locals var as used, so it won't be nullified as unused at the end of this pass */
12234 cfg->gsharedvt_locals_var_ins = NULL;
12236 /* Load the offset */
12237 if (info_var->opcode == OP_REGOFFSET) {
12238 reg1 = alloc_ireg (cfg);
12239 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12240 } else if (info_var->opcode == OP_REGVAR) {
12241 load = NULL;
12242 reg1 = info_var->dreg;
12243 } else {
12244 g_assert_not_reached ();
12246 reg2 = alloc_ireg (cfg);
12247 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
12248 /* Load the locals area address */
12249 reg3 = alloc_ireg (cfg);
12250 if (locals_var->opcode == OP_REGOFFSET) {
12251 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12252 } else if (locals_var->opcode == OP_REGVAR) {
12253 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12254 } else {
12255 g_assert_not_reached ();
12257 /* Compute the address */
12258 ins->opcode = OP_PADD;
12259 ins->sreg1 = reg3;
12260 ins->sreg2 = reg2;
12262 mono_bblock_insert_before_ins (bb, ins, load3);
12263 mono_bblock_insert_before_ins (bb, load3, load2);
12264 if (load)
12265 mono_bblock_insert_before_ins (bb, load2, load);
12266 } else {
12267 g_assert (var->opcode == OP_REGOFFSET);
12269 ins->opcode = OP_ADD_IMM;
12270 ins->sreg1 = var->inst_basereg;
12271 ins->inst_imm = var->inst_offset;
12274 *need_local_opts = TRUE;
12275 spec = INS_INFO (ins->opcode);
12278 if (ins->opcode < MONO_CEE_LAST) {
12279 mono_print_ins (ins);
12280 g_assert_not_reached ();
12283 /*
12284 * Store opcodes have destbasereg in the dreg, but in reality it is a
12285 * source register.
12286 * FIXME:
12287 */
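/*
 * Editorial example, not part of the original source (opcode and registers
 * illustrative): for
 *   storei4_membase_reg [R3 + 8] <- R7
 * dreg holds the base register R3, so dreg and sreg2 are swapped below and
 * spec2 is built so that the following code treats R3 as a source; 'store'
 * records that the swap must be undone at the end of the iteration.
 */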
12288 if (MONO_IS_STORE_MEMBASE (ins)) {
12289 tmp_reg = ins->dreg;
12290 ins->dreg = ins->sreg2;
12291 ins->sreg2 = tmp_reg;
12292 store = TRUE;
12294 spec2 [MONO_INST_DEST] = ' ';
12295 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12296 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12297 spec2 [MONO_INST_SRC3] = ' ';
12298 spec = spec2;
12299 } else if (MONO_IS_STORE_MEMINDEX (ins))
12300 g_assert_not_reached ();
12301 else
12302 store = FALSE;
12303 no_lvreg = FALSE;
12305 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12306 printf ("\t %.3s %d", spec, ins->dreg);
12307 num_sregs = mono_inst_get_src_registers (ins, sregs);
12308 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12309 printf (" %d", sregs [srcindex]);
12310 printf ("\n");
12313 /***************/
12314 /* DREG */
12315 /***************/
12316 regtype = spec [MONO_INST_DEST];
12317 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12318 prev_dreg = -1;
12319 int dreg_using_dest_to_membase_op = -1;
12321 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12322 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12323 MonoInst *store_ins;
12324 int store_opcode;
12325 MonoInst *def_ins = ins;
12326 int dreg = ins->dreg; /* The original vreg */
12328 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12330 if (var->opcode == OP_REGVAR) {
12331 ins->dreg = var->dreg;
12332 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12333 /*
12334 * Instead of emitting a load+store, use a _membase opcode.
12335 */
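/*
 * Editorial example, not part of the original source (the exact opcode comes
 * from op_to_op_dest_membase ()): with V allocated at [fp + 16],
 *   int_add R5 <- R5 R6   becomes   add_membase [fp + 16] <- R6
 * avoiding a separate load and store of V.
 */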
12336 g_assert (var->opcode == OP_REGOFFSET);
12337 if (ins->opcode == OP_MOVE) {
12338 NULLIFY_INS (ins);
12339 def_ins = NULL;
12340 } else {
12341 dreg_using_dest_to_membase_op = ins->dreg;
12342 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12343 ins->inst_basereg = var->inst_basereg;
12344 ins->inst_offset = var->inst_offset;
12345 ins->dreg = -1;
12347 spec = INS_INFO (ins->opcode);
12348 } else {
12349 guint32 lvreg;
12351 g_assert (var->opcode == OP_REGOFFSET);
12353 prev_dreg = ins->dreg;
12355 /* Invalidate any previous lvreg for this vreg */
12356 vreg_to_lvreg [ins->dreg] = 0;
12358 lvreg = 0;
12360 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12361 regtype = 'l';
12362 store_opcode = OP_STOREI8_MEMBASE_REG;
12365 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12367 #if SIZEOF_REGISTER != 8
12368 if (regtype == 'l') {
12369 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
12370 mono_bblock_insert_after_ins (bb, ins, store_ins);
12371 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
12372 mono_bblock_insert_after_ins (bb, ins, store_ins);
12373 def_ins = store_ins;
12375 else
12376 #endif
12378 g_assert (store_opcode != OP_STOREV_MEMBASE);
12380 /* Try to fuse the store into the instruction itself */
12381 /* FIXME: Add more instructions */
12382 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12383 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12384 ins->inst_imm = ins->inst_c0;
12385 ins->inst_destbasereg = var->inst_basereg;
12386 ins->inst_offset = var->inst_offset;
12387 spec = INS_INFO (ins->opcode);
12388 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
12389 ins->opcode = store_opcode;
12390 ins->inst_destbasereg = var->inst_basereg;
12391 ins->inst_offset = var->inst_offset;
12393 no_lvreg = TRUE;
12395 tmp_reg = ins->dreg;
12396 ins->dreg = ins->sreg2;
12397 ins->sreg2 = tmp_reg;
12398 store = TRUE;
12400 spec2 [MONO_INST_DEST] = ' ';
12401 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12402 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12403 spec2 [MONO_INST_SRC3] = ' ';
12404 spec = spec2;
12405 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12406 // FIXME: The backends expect the base reg to be in inst_basereg
12407 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12408 ins->dreg = -1;
12409 ins->inst_basereg = var->inst_basereg;
12410 ins->inst_offset = var->inst_offset;
12411 spec = INS_INFO (ins->opcode);
12412 } else {
12413 /* printf ("INS: "); mono_print_ins (ins); */
12414 /* Create a store instruction */
12415 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12417 /* Insert it after the instruction */
12418 mono_bblock_insert_after_ins (bb, ins, store_ins);
12420 def_ins = store_ins;
12422 /*
12423 * We can't assign ins->dreg to var->dreg here, since the
12424 * sregs could use it. So set a flag, and do it after
12425 * the sregs are processed.
12426 */
12427 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
12428 dest_has_lvreg = TRUE;
12433 if (def_ins && !live_range_start [dreg]) {
12434 live_range_start [dreg] = def_ins;
12435 live_range_start_bb [dreg] = bb;
12438 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
12439 MonoInst *tmp;
12441 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
12442 tmp->inst_c1 = dreg;
12443 mono_bblock_insert_after_ins (bb, def_ins, tmp);
12447 /************/
12448 /* SREGS */
12449 /************/
12450 num_sregs = mono_inst_get_src_registers (ins, sregs);
12451 for (srcindex = 0; srcindex < 3; ++srcindex) {
12452 regtype = spec [MONO_INST_SRC1 + srcindex];
12453 sreg = sregs [srcindex];
12455 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
12456 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
12457 MonoInst *var = get_vreg_to_inst (cfg, sreg);
12458 MonoInst *use_ins = ins;
12459 MonoInst *load_ins;
12460 guint32 load_opcode;
12462 if (var->opcode == OP_REGVAR) {
12463 sregs [srcindex] = var->dreg;
12464 //mono_inst_set_src_registers (ins, sregs);
12465 live_range_end [sreg] = use_ins;
12466 live_range_end_bb [sreg] = bb;
12468 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12469 MonoInst *tmp;
12471 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12472 /* var->dreg is a hreg */
12473 tmp->inst_c1 = sreg;
12474 mono_bblock_insert_after_ins (bb, ins, tmp);
12477 continue;
12480 g_assert (var->opcode == OP_REGOFFSET);
12482 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
12484 g_assert (load_opcode != OP_LOADV_MEMBASE);
12486 if (vreg_to_lvreg [sreg]) {
12487 g_assert (vreg_to_lvreg [sreg] != -1);
12489 /* The variable is already loaded to an lvreg */
12490 if (G_UNLIKELY (cfg->verbose_level > 2))
12491 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
12492 sregs [srcindex] = vreg_to_lvreg [sreg];
12493 //mono_inst_set_src_registers (ins, sregs);
12494 continue;
12497 /* Try to fuse the load into the instruction */
12498 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
12499 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
12500 sregs [0] = var->inst_basereg;
12501 //mono_inst_set_src_registers (ins, sregs);
12502 ins->inst_offset = var->inst_offset;
12503 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
12504 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
12505 sregs [1] = var->inst_basereg;
12506 //mono_inst_set_src_registers (ins, sregs);
12507 ins->inst_offset = var->inst_offset;
12508 } else {
12509 if (MONO_IS_REAL_MOVE (ins)) {
12510 ins->opcode = OP_NOP;
12511 sreg = ins->dreg;
12512 } else {
12513 //printf ("%d ", srcindex); mono_print_ins (ins);
12515 sreg = alloc_dreg (cfg, stacktypes [regtype]);
12517 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12518 if (var->dreg == prev_dreg) {
12519 /*
12520 * sreg refers to the value loaded by the load
12521 * emitted below, but we need to use ins->dreg
12522 * since it refers to the store emitted earlier.
12523 */
12524 sreg = ins->dreg;
12526 g_assert (sreg != -1);
12527 if (var->dreg == dreg_using_dest_to_membase_op) {
12528 if (cfg->verbose_level > 2)
12529 printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg);
12530 } else {
12531 vreg_to_lvreg [var->dreg] = sreg;
12533 if (lvregs_len >= lvregs_size) {
12534 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
12535 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
12536 lvregs = new_lvregs;
12537 lvregs_size *= 2;
12539 lvregs [lvregs_len ++] = var->dreg;
12543 sregs [srcindex] = sreg;
12544 //mono_inst_set_src_registers (ins, sregs);
12546 #if SIZEOF_REGISTER != 8
12547 if (regtype == 'l') {
12548 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
12549 mono_bblock_insert_before_ins (bb, ins, load_ins);
12550 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
12551 mono_bblock_insert_before_ins (bb, ins, load_ins);
12552 use_ins = load_ins;
12554 else
12555 #endif
12557 #if SIZEOF_REGISTER == 4
12558 g_assert (load_opcode != OP_LOADI8_MEMBASE);
12559 #endif
12560 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
12561 mono_bblock_insert_before_ins (bb, ins, load_ins);
12562 use_ins = load_ins;
12564 if (cfg->verbose_level > 2)
12565 mono_print_ins_index (0, use_ins);
12568 if (var->dreg < orig_next_vreg) {
12569 live_range_end [var->dreg] = use_ins;
12570 live_range_end_bb [var->dreg] = bb;
12573 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12574 MonoInst *tmp;
12576 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12577 tmp->inst_c1 = var->dreg;
12578 mono_bblock_insert_after_ins (bb, ins, tmp);
12582 mono_inst_set_src_registers (ins, sregs);
12584 if (dest_has_lvreg) {
12585 g_assert (ins->dreg != -1);
12586 vreg_to_lvreg [prev_dreg] = ins->dreg;
12587 if (lvregs_len >= lvregs_size) {
12588 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
12589 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
12590 lvregs = new_lvregs;
12591 lvregs_size *= 2;
12593 lvregs [lvregs_len ++] = prev_dreg;
12594 dest_has_lvreg = FALSE;
12597 if (store) {
12598 tmp_reg = ins->dreg;
12599 ins->dreg = ins->sreg2;
12600 ins->sreg2 = tmp_reg;
12603 if (MONO_IS_CALL (ins)) {
12604 /* Clear vreg_to_lvreg array */
12605 for (i = 0; i < lvregs_len; i++)
12606 vreg_to_lvreg [lvregs [i]] = 0;
12607 lvregs_len = 0;
12608 } else if (ins->opcode == OP_NOP) {
12609 ins->dreg = -1;
12610 MONO_INST_NULLIFY_SREGS (ins);
12613 if (cfg->verbose_level > 2)
12614 mono_print_ins_index (1, ins);
12617 /* Extend the live range based on the liveness info */
12618 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
12619 for (i = 0; i < cfg->num_varinfo; i ++) {
12620 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
12622 if (vreg_is_volatile (cfg, vi->vreg))
12623 /* The liveness info is incomplete */
12624 continue;
12626 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
12627 /* Live from at least the first ins of this bb */
12628 live_range_start [vi->vreg] = bb->code;
12629 live_range_start_bb [vi->vreg] = bb;
12632 if (mono_bitset_test_fast (bb->live_out_set, i)) {
12633 /* Live at least until the last ins of this bb */
12634 live_range_end [vi->vreg] = bb->last_ins;
12635 live_range_end_bb [vi->vreg] = bb;
12641 /*
12642 * Emit LIVERANGE_START/LIVERANGE_END opcodes; the backend will implement them
12643 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12644 */
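/*
 * Editorial sketch, not part of the original source (index and vreg
 * illustrative): for varinfo entry i with vreg R5, the result is
 *   OP_LIVERANGE_START inst_c0=i inst_c1=R5
 *   ... instructions accessing R5 ...
 *   OP_LIVERANGE_END inst_c0=i inst_c1=R5
 * bracketing the instruction-precise range computed above.
 */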
12645 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
12646 for (i = 0; i < cfg->num_varinfo; ++i) {
12647 int vreg = MONO_VARINFO (cfg, i)->vreg;
12648 MonoInst *ins;
12650 if (live_range_start [vreg]) {
12651 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
12652 ins->inst_c0 = i;
12653 ins->inst_c1 = vreg;
12654 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
12656 if (live_range_end [vreg]) {
12657 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
12658 ins->inst_c0 = i;
12659 ins->inst_c1 = vreg;
12660 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
12661 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
12662 else
12663 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
12668 if (cfg->gsharedvt_locals_var_ins) {
12669 /* Nullify if unused */
12670 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
12671 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
12674 g_free (live_range_start);
12675 g_free (live_range_end);
12676 g_free (live_range_start_bb);
12677 g_free (live_range_end_bb);
12680 /*
12681 * FIXME:
12682 * - use 'iadd' instead of 'int_add'
12683 * - handling ovf opcodes: decompose in method_to_ir.
12684 * - unify iregs/fregs
12685 * -> partly done, the missing parts are:
12686 * - a more complete unification would involve unifying the hregs as well, so
12687 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12688 * would no longer map to the machine hregs, so the code generators would need to
12689 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12690 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12691 * fp/non-fp branches speeds it up by about 15%.
12692 * - use sext/zext opcodes instead of shifts
12693 * - add OP_ICALL
12694 * - get rid of TEMPLOADs if possible and use vregs instead
12695 * - clean up usage of OP_P/OP_ opcodes
12696 * - clean up usage of DUMMY_USE
12697 * - clean up the setting of ins->type for MonoInsts which are pushed on the
12698 * stack
12699 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12700 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12701 * - make sure handle_stack_args () is called before the branch is emitted
12702 * - when the new IR is done, get rid of all unused stuff
12703 * - COMPARE/BEQ as separate instructions or unify them?
12704 * - keeping them separate allows specialized compare instructions like
12705 * compare_imm, compare_membase
12706 * - most back ends unify fp compare+branch, fp compare+ceq
12707 * - integrate mono_save_args into inline_method
12708 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
12709 * - handle long shift opts on 32 bit platforms somehow: they require
12710 * 3 sregs (2 for arg1 and 1 for arg2)
12711 * - make byref a 'normal' type.
12712 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12713 * variable if needed.
12714 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12715 * like inline_method.
12716 * - remove inlining restrictions
12717 * - fix LNEG and enable cfold of INEG
12718 * - generalize x86 optimizations like ldelema as a peephole optimization
12719 * - add store_mem_imm for amd64
12720 * - optimize the loading of the interruption flag in the managed->native wrappers
12721 * - avoid special handling of OP_NOP in passes
12722 * - move code inserting instructions into one function/macro.
12723 * - try a coalescing phase after liveness analysis
12724 * - add float -> vreg conversion + local optimizations on !x86
12725 * - figure out how to handle decomposed branches during optimizations, ie.
12726 * compare+branch, op_jump_table+op_br etc.
12727 * - promote RuntimeXHandles to vregs
12728 * - vtype cleanups:
12729 * - add a NEW_VARLOADA_VREG macro
12730 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12731 * accessing vtype fields.
12732 * - get rid of I8CONST on 64 bit platforms
12733 * - dealing with the increase in code size due to branches created during opcode
12734 * decomposition:
12735 * - use extended basic blocks
12736 * - all parts of the JIT
12737 * - handle_global_vregs () && local regalloc
12738 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12739 * - sources of increase in code size:
12740 * - vtypes
12741 * - long compares
12742 * - isinst and castclass
12743 * - lvregs not allocated to global registers even if used multiple times
12744 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12745 * meaningful.
12746 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12747 * - add all micro optimizations from the old JIT
12748 * - put tree optimizations into the deadce pass
12749 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12750 * specific function.
12751 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12752 * fcompare + branchCC.
12753 * - create a helper function for allocating a stack slot, taking into account
12754 * MONO_CFG_HAS_SPILLUP.
12755 * - merge r68207.
12756 * - optimize mono_regstate2_alloc_int/float.
12757 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12758 * - need to write a tree optimization pass, but the creation of trees is difficult, since
12759 * parts of the tree could be separated by other instructions, killing the tree
12760 * arguments, or stores killing loads etc. Also, should we fold loads into other
12761 * instructions if the result of the load is used multiple times?
12762 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12763 * - LAST MERGE: 108395.
12764 * - when returning vtypes in registers, generate IR and append it to the end of the
12765 * last bb instead of doing it in the epilog.
12766 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12767 */
12770 /*
12771 NOTES
12772 -----
12774 - When to decompose opcodes:
12775 - earlier: this makes some optimizations hard to implement, since the low level IR
12776 no longer contains the necessary information. But it is easier to do.
12777 - later: harder to implement, enables more optimizations.
12778 - Branches inside bblocks:
12779 - created when decomposing complex opcodes.
12780 - branches to another bblock: harmless, but not tracked by the branch
12781 optimizations, so need to branch to a label at the start of the bblock.
12782 - branches to inside the same bblock: very problematic, trips up the local
12783 reg allocator. Can be fixed by splitting the current bblock, but that is a
12784 complex operation, since some local vregs can become global vregs etc.
12785 - Local/global vregs:
12786 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12787 local register allocator.
12788 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12789 structure, created by mono_create_var (). Assigned to hregs or the stack by
12790 the global register allocator.
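  Example (editorial, not part of the original source; vreg numbers
  illustrative): a temporary defined and consumed inside one bblock, say R10,
  stays a local vreg and is mapped straight to a hreg; a value defined in BB1
  and read in BB4, say R5, becomes a global vreg with a MonoMethodVar, handled
  by handle_global_vregs () and the global allocator.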
12791 - When to do optimizations like alu->alu_imm:
12792 - earlier -> saves work later on since the IR will be smaller/simpler
12793 - later -> can work on more instructions
12794 - Handling of valuetypes:
12795 - When a vtype is pushed on the stack, a new temporary is created, an
12796 instruction computing its address (LDADDR) is emitted and pushed on
12797 the stack. Need to optimize cases when the vtype is used immediately as in
12798 argument passing, stloc etc.
12799 - Instead of the to_end stuff in the old JIT, simply call the function handling
12800 the values on the stack before emitting the last instruction of the bb.
12801 */
12802 #else /* !DISABLE_JIT */
12804 MONO_EMPTY_SOURCE_FILE (method_to_ir);
12805 #endif /* !DISABLE_JIT */