1 /**
2 * \file
3 * Convert CIL to the JIT internal representation
5 * Author:
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
13 */
15 #include <config.h>
16 #include <glib.h>
17 #include <mono/utils/mono-compiler.h>
18 #include "mini.h"
20 #ifndef DISABLE_JIT
22 #include <signal.h>
24 #ifdef HAVE_UNISTD_H
25 #include <unistd.h>
26 #endif
28 #include <math.h>
29 #include <string.h>
30 #include <ctype.h>
32 #ifdef HAVE_SYS_TIME_H
33 #include <sys/time.h>
34 #endif
36 #ifdef HAVE_ALLOCA_H
37 #include <alloca.h>
38 #endif
40 #include <mono/utils/memcheck.h>
41 #include <mono/metadata/abi-details.h>
42 #include <mono/metadata/assembly.h>
43 #include <mono/metadata/attrdefs.h>
44 #include <mono/metadata/loader.h>
45 #include <mono/metadata/tabledefs.h>
46 #include <mono/metadata/class.h>
47 #include <mono/metadata/class-abi-details.h>
48 #include <mono/metadata/object.h>
49 #include <mono/metadata/exception.h>
50 #include <mono/metadata/exception-internals.h>
51 #include <mono/metadata/opcodes.h>
52 #include <mono/metadata/mono-endian.h>
53 #include <mono/metadata/tokentype.h>
54 #include <mono/metadata/tabledefs.h>
55 #include <mono/metadata/marshal.h>
56 #include <mono/metadata/debug-helpers.h>
57 #include <mono/metadata/debug-internals.h>
58 #include <mono/metadata/gc-internals.h>
59 #include <mono/metadata/security-manager.h>
60 #include <mono/metadata/threads-types.h>
61 #include <mono/metadata/security-core-clr.h>
62 #include <mono/metadata/profiler-private.h>
63 #include <mono/metadata/profiler.h>
64 #include <mono/metadata/monitor.h>
65 #include <mono/utils/mono-memory-model.h>
66 #include <mono/utils/mono-error-internals.h>
67 #include <mono/metadata/mono-basic-block.h>
68 #include <mono/metadata/reflection-internals.h>
69 #include <mono/utils/mono-threads-coop.h>
70 #include <mono/utils/mono-utils-debug.h>
71 #include <mono/utils/mono-logger-internals.h>
72 #include <mono/metadata/verify-internals.h>
73 #include <mono/metadata/icall-decl.h>
74 #include "mono/metadata/icall-signatures.h"
76 #include "trace.h"
78 #include "ir-emit.h"
80 #include "jit-icalls.h"
81 #include "jit.h"
82 #include "debugger-agent.h"
83 #include "seq-points.h"
84 #include "aot-compiler.h"
85 #include "mini-llvm.h"
86 #include "mini-runtime.h"
87 #include "llvmonly-runtime.h"
89 #define BRANCH_COST 10
90 #define CALL_COST 10
91 /* Used for the JIT */
92 #define INLINE_LENGTH_LIMIT 20
93 /* Used for the LLVM JIT */
94 #define LLVM_JIT_INLINE_LENGTH_LIMIT 100
96 static const gboolean debug_tailcall = FALSE; // logging
97 static const gboolean debug_tailcall_try_all = FALSE; // consider any call followed by ret
99 gboolean
100 mono_tailcall_print_enabled (void)
102 return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL);
105 void
106 mono_tailcall_print (const char *format, ...)
108 if (!mono_tailcall_print_enabled ())
109 return;
110 va_list args;
111 va_start (args, format);
112 g_printv (format, args);
113 va_end (args);
116 /* These have 'cfg' as an implicit argument */
117 #define INLINE_FAILURE(msg) do { \
118 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
119 inline_failure (cfg, msg); \
120 goto exception_exit; \
122 } while (0)
123 #define CHECK_CFG_EXCEPTION do {\
124 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
125 goto exception_exit; \
126 } while (0)
127 #define FIELD_ACCESS_FAILURE(method, field) do { \
128 field_access_failure ((cfg), (method), (field)); \
129 goto exception_exit; \
130 } while (0)
131 #define GENERIC_SHARING_FAILURE(opcode) do { \
132 if (cfg->gshared) { \
133 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
134 goto exception_exit; \
136 } while (0)
137 #define GSHAREDVT_FAILURE(opcode) do { \
138 if (cfg->gsharedvt) { \
139 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
140 goto exception_exit; \
142 } while (0)
143 #define OUT_OF_MEMORY_FAILURE do { \
144 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
145 mono_error_set_out_of_memory (&cfg->error, ""); \
146 goto exception_exit; \
147 } while (0)
148 #define DISABLE_AOT(cfg) do { \
149 if ((cfg)->verbose_level >= 2) \
150 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
151 (cfg)->disable_aot = TRUE; \
152 } while (0)
153 #define LOAD_ERROR do { \
154 break_on_unverified (); \
155 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
156 goto exception_exit; \
157 } while (0)
159 #define TYPE_LOAD_ERROR(klass) do { \
160 cfg->exception_ptr = klass; \
161 LOAD_ERROR; \
162 } while (0)
164 #define CHECK_CFG_ERROR do {\
165 if (!mono_error_ok (&cfg->error)) { \
166 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
167 goto mono_error_exit; \
169 } while (0)
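/*
 * These failure macros assume an enclosing function that declares 'cfg' and
 * provides the exception_exit/mono_error_exit labels; the main user is the
 * IL importer. A minimal usage sketch (the surrounding calls are
 * hypothetical, only the macro usage pattern is taken from this file):
 */
#if 0
	if (!inline_method (cfg, cmethod, fsig, sp, ip, real_offset, FALSE))
		INLINE_FAILURE ("inline failure");
	CHECK_CFG_EXCEPTION;
	/* ... */
exception_exit:
	return NULL;
#endif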
171 static int stind_to_store_membase (int opcode);
173 int mono_op_to_op_imm (int opcode);
174 int mono_op_to_op_imm_noemul (int opcode);
176 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
177 guchar *ip, guint real_offset, gboolean inline_always);
178 static MonoInst*
179 convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins);
181 /* helper methods signatures */
183 /* type loading helpers */
184 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
185 static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1")
186 static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1");
188 /*
189 * Instruction metadata
190 */
191 #ifdef MINI_OP
192 #undef MINI_OP
193 #endif
194 #ifdef MINI_OP3
195 #undef MINI_OP3
196 #endif
197 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
198 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
199 #define NONE ' '
200 #define IREG 'i'
201 #define FREG 'f'
202 #define VREG 'v'
203 #define XREG 'x'
204 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P
205 #define LREG IREG
206 #else
207 #define LREG 'l'
208 #endif
209 /* keep in sync with the enum in mini.h */
210 const char
211 mini_ins_info[] = {
212 #include "mini-ops.h"
214 #undef MINI_OP
215 #undef MINI_OP3
217 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
218 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
219 /*
220 * This should contain the index of the last sreg + 1. This is not the same
221 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
222 */
223 const gint8 mini_ins_sreg_counts[] = {
224 #include "mini-ops.h"
226 #undef MINI_OP
227 #undef MINI_OP3
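/*
 * A sketch of the X-macro expansion above, using one illustrative entry
 * (real entries live in mini-ops.h; this exact line is an assumption):
 *
 *   MINI_OP(OP_IADD, "int_add", IREG, IREG, IREG)
 *
 * The first inclusion, with MINI_OP defined as (dest, src1, src2, ' ',),
 * appends 'i', 'i', 'i', ' ' to mini_ins_info []; the second inclusion
 * appends 2 to mini_ins_sreg_counts [], since src2 != NONE. Each opcode
 * thus contributes four chars of register info and one sreg count,
 * which the rest of mini indexes per opcode.
 */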
229 guint32
230 mono_alloc_ireg (MonoCompile *cfg)
232 return alloc_ireg (cfg);
235 guint32
236 mono_alloc_lreg (MonoCompile *cfg)
238 return alloc_lreg (cfg);
241 guint32
242 mono_alloc_freg (MonoCompile *cfg)
244 return alloc_freg (cfg);
247 guint32
248 mono_alloc_preg (MonoCompile *cfg)
250 return alloc_preg (cfg);
253 guint32
254 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
256 return alloc_dreg (cfg, stack_type);
259 /**
260 * mono_alloc_ireg_ref:
261 *
262 * Allocate an IREG, and mark it as holding a GC ref.
263 */
264 guint32
265 mono_alloc_ireg_ref (MonoCompile *cfg)
267 return alloc_ireg_ref (cfg);
270 /**
271 * mono_alloc_ireg_mp:
272 *
273 * Allocate an IREG, and mark it as holding a managed pointer.
274 */
275 guint32
276 mono_alloc_ireg_mp (MonoCompile *cfg)
278 return alloc_ireg_mp (cfg);
281 /**
282 * mono_alloc_ireg_copy:
283 *
284 * Allocate an IREG with the same GC type as VREG.
285 */
286 guint32
287 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
289 if (vreg_is_ref (cfg, vreg))
290 return alloc_ireg_ref (cfg);
291 else if (vreg_is_mp (cfg, vreg))
292 return alloc_ireg_mp (cfg);
293 else
294 return alloc_ireg (cfg);
297 guint
298 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
300 if (type->byref)
301 return OP_MOVE;
303 type = mini_get_underlying_type (type);
304 handle_enum:
305 switch (type->type) {
306 case MONO_TYPE_I1:
307 case MONO_TYPE_U1:
308 return OP_MOVE;
309 case MONO_TYPE_I2:
310 case MONO_TYPE_U2:
311 return OP_MOVE;
312 case MONO_TYPE_I4:
313 case MONO_TYPE_U4:
314 return OP_MOVE;
315 case MONO_TYPE_I:
316 case MONO_TYPE_U:
317 case MONO_TYPE_PTR:
318 case MONO_TYPE_FNPTR:
319 return OP_MOVE;
320 case MONO_TYPE_CLASS:
321 case MONO_TYPE_STRING:
322 case MONO_TYPE_OBJECT:
323 case MONO_TYPE_SZARRAY:
324 case MONO_TYPE_ARRAY:
325 return OP_MOVE;
326 case MONO_TYPE_I8:
327 case MONO_TYPE_U8:
328 #if SIZEOF_REGISTER == 8
329 return OP_MOVE;
330 #else
331 return OP_LMOVE;
332 #endif
333 case MONO_TYPE_R4:
334 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
335 case MONO_TYPE_R8:
336 return OP_FMOVE;
337 case MONO_TYPE_VALUETYPE:
338 if (m_class_is_enumtype (type->data.klass)) {
339 type = mono_class_enum_basetype_internal (type->data.klass);
340 goto handle_enum;
342 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
343 return OP_XMOVE;
344 return OP_VMOVE;
345 case MONO_TYPE_TYPEDBYREF:
346 return OP_VMOVE;
347 case MONO_TYPE_GENERICINST:
348 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
349 return OP_XMOVE;
350 type = m_class_get_byval_arg (type->data.generic_class->container_class);
351 goto handle_enum;
352 case MONO_TYPE_VAR:
353 case MONO_TYPE_MVAR:
354 g_assert (cfg->gshared);
355 if (mini_type_var_is_vt (type))
356 return OP_VMOVE;
357 else
358 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
359 default:
360 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
362 return -1;
365 void
366 mono_print_bb (MonoBasicBlock *bb, const char *msg)
368 int i;
369 MonoInst *tree;
370 GString *str = g_string_new ("");
372 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
373 for (i = 0; i < bb->in_count; ++i)
374 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
375 g_string_append_printf (str, ", OUT: ");
376 for (i = 0; i < bb->out_count; ++i)
377 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
378 g_string_append_printf (str, " ]\n");
380 g_print ("%s", str->str);
381 g_string_free (str, TRUE);
383 for (tree = bb->code; tree; tree = tree->next)
384 mono_print_ins_index (-1, tree);
387 static MONO_NEVER_INLINE gboolean
388 break_on_unverified (void)
390 if (mini_debug_options.break_on_unverified) {
391 G_BREAKPOINT ();
392 return TRUE;
394 return FALSE;
397 static void
398 clear_cfg_error (MonoCompile *cfg)
400 mono_error_cleanup (&cfg->error);
401 error_init (&cfg->error);
404 static MONO_NEVER_INLINE void
405 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
407 char *method_fname = mono_method_full_name (method, TRUE);
408 char *field_fname = mono_field_full_name (field);
409 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
410 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
411 g_free (method_fname);
412 g_free (field_fname);
415 static MONO_NEVER_INLINE void
416 inline_failure (MonoCompile *cfg, const char *msg)
418 if (cfg->verbose_level >= 2)
419 printf ("inline failed: %s\n", msg);
420 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
423 static MONO_NEVER_INLINE void
424 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
426 if (cfg->verbose_level > 2)
427 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line);
428 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
431 static MONO_NEVER_INLINE void
432 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
434 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
435 if (cfg->verbose_level >= 2)
436 printf ("%s\n", cfg->exception_message);
437 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
440 void
441 mini_set_inline_failure (MonoCompile *cfg, const char *msg)
443 if (cfg->verbose_level >= 2)
444 printf ("inline failed: %s\n", msg);
445 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
448 /*
449 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
450 * foo<T> (int i) { ldarg.0; box T; }
451 */
452 #define UNVERIFIED do { \
453 if (cfg->gsharedvt) { \
454 if (cfg->verbose_level > 2) \
455 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
456 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
457 goto exception_exit; \
459 break_on_unverified (); \
460 goto unverified; \
461 } while (0)
463 #define GET_BBLOCK(cfg,tblock,ip) do { \
464 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
465 if (!(tblock)) { \
466 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
467 NEW_BBLOCK (cfg, (tblock)); \
468 (tblock)->cil_code = (ip); \
469 ADD_BBLOCK (cfg, (tblock)); \
471 } while (0)
473 /* Emit conversions so both operands of a binary opcode are of the same type */
474 static void
475 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
477 MonoInst *arg1 = *arg1_ref;
478 MonoInst *arg2 = *arg2_ref;
480 if (cfg->r4fp &&
481 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
482 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
483 MonoInst *conv;
485 /* Mixing r4/r8 is allowed by the spec */
486 if (arg1->type == STACK_R4) {
487 int dreg = alloc_freg (cfg);
489 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
490 conv->type = STACK_R8;
491 ins->sreg1 = dreg;
492 *arg1_ref = conv;
494 if (arg2->type == STACK_R4) {
495 int dreg = alloc_freg (cfg);
497 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
498 conv->type = STACK_R8;
499 ins->sreg2 = dreg;
500 *arg2_ref = conv;
504 #if SIZEOF_REGISTER == 8
505 /* FIXME: Need to add many more cases */
506 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
507 MonoInst *widen;
509 int dr = alloc_preg (cfg);
510 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
511 (ins)->sreg2 = widen->dreg;
513 #endif
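/*
 * Illustrative, hand-written IL that takes the r4/r8 path above (ECMA-335
 * treats floating-point values on the evaluation stack uniformly as type F,
 * so mixing the two widths is legal; Mono normalizes to R8 here):
 *
 *   ldc.r4 1.5    // stack: R4
 *   ldc.r8 2.5    // stack: R4, R8
 *   add           // the R4 operand gets an OP_RCONV_TO_R8 first,
 *                 // so the IR add only ever sees two R8 inputs
 */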
516 #define ADD_BINOP(op) do { \
517 MONO_INST_NEW (cfg, ins, (op)); \
518 sp -= 2; \
519 ins->sreg1 = sp [0]->dreg; \
520 ins->sreg2 = sp [1]->dreg; \
521 type_from_op (cfg, ins, sp [0], sp [1]); \
522 CHECK_TYPE (ins); \
523 /* Have to insert a widening op */ \
524 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
525 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
526 MONO_ADD_INS ((cfg)->cbb, (ins)); \
527 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
528 } while (0)
530 #define ADD_UNOP(op) do { \
531 MONO_INST_NEW (cfg, ins, (op)); \
532 sp--; \
533 ins->sreg1 = sp [0]->dreg; \
534 type_from_op (cfg, ins, sp [0], NULL); \
535 CHECK_TYPE (ins); \
536 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
537 MONO_ADD_INS ((cfg)->cbb, (ins)); \
538 *sp++ = mono_decompose_opcode (cfg, ins); \
539 } while (0)
541 #define ADD_BINCOND(next_block) do { \
542 MonoInst *cmp; \
543 sp -= 2; \
544 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
545 cmp->sreg1 = sp [0]->dreg; \
546 cmp->sreg2 = sp [1]->dreg; \
547 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
548 type_from_op (cfg, cmp, sp [0], sp [1]); \
549 CHECK_TYPE (cmp); \
550 type_from_op (cfg, ins, sp [0], sp [1]); \
551 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
552 GET_BBLOCK (cfg, tblock, target); \
553 link_bblock (cfg, cfg->cbb, tblock); \
554 ins->inst_true_bb = tblock; \
555 if ((next_block)) { \
556 link_bblock (cfg, cfg->cbb, (next_block)); \
557 ins->inst_false_bb = (next_block); \
558 start_new_bblock = 1; \
559 } else { \
560 GET_BBLOCK (cfg, tblock, next_ip); \
561 link_bblock (cfg, cfg->cbb, tblock); \
562 ins->inst_false_bb = tblock; \
563 start_new_bblock = 2; \
565 if (sp != stack_start) { \
566 handle_stack_args (cfg, stack_start, sp - stack_start); \
567 CHECK_UNVERIFIABLE (cfg); \
569 MONO_ADD_INS (cfg->cbb, cmp); \
570 MONO_ADD_INS (cfg->cbb, ins); \
571 } while (0)
573 /**
574 * link_bblock: Links two basic blocks
575 *
576 * Links two basic blocks in the control flow graph: the 'from'
577 * argument is the starting block and the 'to' argument is the block
578 * that control flow reaches after 'from'.
579 */
580 static void
581 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
583 MonoBasicBlock **newa;
584 int i, found;
586 #if 0
587 if (from->cil_code) {
588 if (to->cil_code)
589 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
590 else
591 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
592 } else {
593 if (to->cil_code)
594 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
595 else
596 printf ("edge from entry to exit\n");
598 #endif
600 found = FALSE;
601 for (i = 0; i < from->out_count; ++i) {
602 if (to == from->out_bb [i]) {
603 found = TRUE;
604 break;
607 if (!found) {
608 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
609 for (i = 0; i < from->out_count; ++i) {
610 newa [i] = from->out_bb [i];
612 newa [i] = to;
613 from->out_count++;
614 from->out_bb = newa;
617 found = FALSE;
618 for (i = 0; i < to->in_count; ++i) {
619 if (from == to->in_bb [i]) {
620 found = TRUE;
621 break;
624 if (!found) {
625 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
626 for (i = 0; i < to->in_count; ++i) {
627 newa [i] = to->in_bb [i];
629 newa [i] = from;
630 to->in_count++;
631 to->in_bb = newa;
635 void
636 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
638 link_bblock (cfg, from, to);
641 static void
642 mono_create_spvar_for_region (MonoCompile *cfg, int region);
644 static void
645 mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end)
647 MonoBasicBlock *bb = cfg->cil_offset_to_bb [start];
649 // start must exist in cil_offset_to_bb: these are IL offsets used by EH, for which GET_BBLOCK was already called.
650 g_assert (bb);
652 if (cfg->verbose_level > 1)
653 g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num);
654 for (; bb && bb->real_offset < end; bb = bb->next_bb) {
655 //no one claimed this bb, take it.
656 if (bb->region == -1) {
657 bb->region = region;
658 continue;
661 //current region is an early handler, bail
662 if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) {
663 continue;
666 //current region is a try, only overwrite if new region is a handler
667 if ((region & (0xf << 4)) != MONO_REGION_TRY) {
668 bb->region = region;
672 if (cfg->spvars)
673 mono_create_spvar_for_region (cfg, region);
676 static void
677 compute_bb_regions (MonoCompile *cfg)
679 MonoBasicBlock *bb;
680 MonoMethodHeader *header = cfg->header;
681 int i;
683 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
684 bb->region = -1;
686 for (i = 0; i < header->num_clauses; ++i) {
687 MonoExceptionClause *clause = &header->clauses [i];
689 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
690 mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset);
692 guint handler_region;
693 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
694 handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
695 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
696 handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
697 else
698 handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
700 mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len);
701 mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len);
704 if (cfg->verbose_level > 2) {
705 MonoBasicBlock *bb;
706 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
707 g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
713 static gboolean
714 ip_in_finally_clause (MonoCompile *cfg, int offset)
716 MonoMethodHeader *header = cfg->header;
717 MonoExceptionClause *clause;
718 int i;
720 for (i = 0; i < header->num_clauses; ++i) {
721 clause = &header->clauses [i];
722 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
723 continue;
725 if (MONO_OFFSET_IN_HANDLER (clause, offset))
726 return TRUE;
728 return FALSE;
731 /* Find clauses between ip and target, from inner to outer */
732 static GList*
733 mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target)
735 MonoMethodHeader *header = cfg->header;
736 MonoExceptionClause *clause;
737 int i;
738 GList *res = NULL;
740 for (i = 0; i < header->num_clauses; ++i) {
741 clause = &header->clauses [i];
742 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
743 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
744 MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause));
745 leave->index = i;
746 leave->clause = clause;
748 res = g_list_append_mempool (cfg->mempool, res, leave);
751 return res;
754 static void
755 mono_create_spvar_for_region (MonoCompile *cfg, int region)
757 MonoInst *var;
759 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
760 if (var)
761 return;
763 var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
764 /* prevent it from being register allocated */
765 var->flags |= MONO_INST_VOLATILE;
767 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
770 MonoInst *
771 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
773 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
776 static MonoInst*
777 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
779 MonoInst *var;
781 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
782 if (var)
783 return var;
785 var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL);
786 /* prevent it from being register allocated */
787 var->flags |= MONO_INST_VOLATILE;
789 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
791 return var;
794 /*
795 * Returns the type used in the eval stack when @type is loaded.
796 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
797 */
798 void
799 mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
801 MonoClass *klass;
803 type = mini_get_underlying_type (type);
804 inst->klass = klass = mono_class_from_mono_type_internal (type);
805 if (type->byref) {
806 inst->type = STACK_MP;
807 return;
810 handle_enum:
811 switch (type->type) {
812 case MONO_TYPE_VOID:
813 inst->type = STACK_INV;
814 return;
815 case MONO_TYPE_I1:
816 case MONO_TYPE_U1:
817 case MONO_TYPE_I2:
818 case MONO_TYPE_U2:
819 case MONO_TYPE_I4:
820 case MONO_TYPE_U4:
821 inst->type = STACK_I4;
822 return;
823 case MONO_TYPE_I:
824 case MONO_TYPE_U:
825 case MONO_TYPE_PTR:
826 case MONO_TYPE_FNPTR:
827 inst->type = STACK_PTR;
828 return;
829 case MONO_TYPE_CLASS:
830 case MONO_TYPE_STRING:
831 case MONO_TYPE_OBJECT:
832 case MONO_TYPE_SZARRAY:
833 case MONO_TYPE_ARRAY:
834 inst->type = STACK_OBJ;
835 return;
836 case MONO_TYPE_I8:
837 case MONO_TYPE_U8:
838 inst->type = STACK_I8;
839 return;
840 case MONO_TYPE_R4:
841 inst->type = cfg->r4_stack_type;
842 break;
843 case MONO_TYPE_R8:
844 inst->type = STACK_R8;
845 return;
846 case MONO_TYPE_VALUETYPE:
847 if (m_class_is_enumtype (type->data.klass)) {
848 type = mono_class_enum_basetype_internal (type->data.klass);
849 goto handle_enum;
850 } else {
851 inst->klass = klass;
852 inst->type = STACK_VTYPE;
853 return;
855 case MONO_TYPE_TYPEDBYREF:
856 inst->klass = mono_defaults.typed_reference_class;
857 inst->type = STACK_VTYPE;
858 return;
859 case MONO_TYPE_GENERICINST:
860 type = m_class_get_byval_arg (type->data.generic_class->container_class);
861 goto handle_enum;
862 case MONO_TYPE_VAR:
863 case MONO_TYPE_MVAR:
864 g_assert (cfg->gshared);
865 if (mini_is_gsharedvt_type (type)) {
866 g_assert (cfg->gsharedvt);
867 inst->type = STACK_VTYPE;
868 } else {
869 mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
871 return;
872 default:
873 g_error ("unknown type 0x%02x in eval stack type", type->type);
877 /*
878 * The following tables are used to quickly validate the IL code in type_from_op ().
879 */
880 static const char
881 bin_num_table [STACK_MAX] [STACK_MAX] = {
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
887 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
889 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
890 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
893 static const char
894 neg_table [] = {
895 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
898 /* reduce the size of this table */
899 static const char
900 bin_int_table [STACK_MAX] [STACK_MAX] = {
901 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
902 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
903 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
904 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
905 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
906 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
907 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
908 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
911 static const char
912 bin_comp_table [STACK_MAX] [STACK_MAX] = {
913 /* Inv i L p F & O vt r4 */
914 {0},
915 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
916 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
917 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
918 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
919 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
920 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
921 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
922 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
925 /* reduce the size of this table */
926 static const char
927 shift_table [STACK_MAX] [STACK_MAX] = {
928 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
929 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
930 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
931 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
932 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
933 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
934 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
935 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
938 /*
939 * Tables to map from the non-specific opcode to the matching
940 * type-specific opcode.
941 */
942 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
943 static const guint16
944 binops_op_map [STACK_MAX] = {
945 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
948 /* handles from CEE_NEG to CEE_CONV_U8 */
949 static const guint16
950 unops_op_map [STACK_MAX] = {
951 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
954 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
955 static const guint16
956 ovfops_op_map [STACK_MAX] = {
957 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
960 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
961 static const guint16
962 ovf2ops_op_map [STACK_MAX] = {
963 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
966 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
967 static const guint16
968 ovf3ops_op_map [STACK_MAX] = {
969 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
972 /* handles from CEE_BEQ to CEE_BLT_UN */
973 static const guint16
974 beqops_op_map [STACK_MAX] = {
975 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
978 /* handles from CEE_CEQ to CEE_CLT_UN */
979 static const guint16
980 ceqops_op_map [STACK_MAX] = {
981 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
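/*
 * A worked example of the delta encoding shared by all the *_op_map tables
 * above: type_from_op () specializes a generic opcode by adding the
 * per-stack-type delta to it (the identities below follow directly from
 * the table definitions):
 */
#if 0
	ins->opcode = MONO_CEE_ADD;
	ins->type = bin_num_table [STACK_I4] [STACK_I4];	/* -> STACK_I4 */
	ins->opcode += binops_op_map [ins->type];	/* CEE_ADD + (OP_IADD - CEE_ADD) == OP_IADD */
#endif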
984 /*
985 * Sets ins->type (the type on the eval stack) according to the
986 * type of the opcode and the arguments to it.
987 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
988 *
989 * FIXME: this function sets ins->type unconditionally in some cases, but
990 * it should set it to invalid for some types (a conv.x on an object)
991 */
992 static void
993 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
995 switch (ins->opcode) {
996 /* binops */
997 case MONO_CEE_ADD:
998 case MONO_CEE_SUB:
999 case MONO_CEE_MUL:
1000 case MONO_CEE_DIV:
1001 case MONO_CEE_REM:
1002 /* FIXME: check unverifiable args for STACK_MP */
1003 ins->type = bin_num_table [src1->type] [src2->type];
1004 ins->opcode += binops_op_map [ins->type];
1005 break;
1006 case MONO_CEE_DIV_UN:
1007 case MONO_CEE_REM_UN:
1008 case MONO_CEE_AND:
1009 case MONO_CEE_OR:
1010 case MONO_CEE_XOR:
1011 ins->type = bin_int_table [src1->type] [src2->type];
1012 ins->opcode += binops_op_map [ins->type];
1013 break;
1014 case MONO_CEE_SHL:
1015 case MONO_CEE_SHR:
1016 case MONO_CEE_SHR_UN:
1017 ins->type = shift_table [src1->type] [src2->type];
1018 ins->opcode += binops_op_map [ins->type];
1019 break;
1020 case OP_COMPARE:
1021 case OP_LCOMPARE:
1022 case OP_ICOMPARE:
1023 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1024 if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
1025 ins->opcode = OP_LCOMPARE;
1026 else if (src1->type == STACK_R4)
1027 ins->opcode = OP_RCOMPARE;
1028 else if (src1->type == STACK_R8)
1029 ins->opcode = OP_FCOMPARE;
1030 else
1031 ins->opcode = OP_ICOMPARE;
1032 break;
1033 case OP_ICOMPARE_IMM:
1034 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
1035 if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
1036 ins->opcode = OP_LCOMPARE_IMM;
1037 break;
1038 case MONO_CEE_BEQ:
1039 case MONO_CEE_BGE:
1040 case MONO_CEE_BGT:
1041 case MONO_CEE_BLE:
1042 case MONO_CEE_BLT:
1043 case MONO_CEE_BNE_UN:
1044 case MONO_CEE_BGE_UN:
1045 case MONO_CEE_BGT_UN:
1046 case MONO_CEE_BLE_UN:
1047 case MONO_CEE_BLT_UN:
1048 ins->opcode += beqops_op_map [src1->type];
1049 break;
1050 case OP_CEQ:
1051 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1052 ins->opcode += ceqops_op_map [src1->type];
1053 break;
1054 case OP_CGT:
1055 case OP_CGT_UN:
1056 case OP_CLT:
1057 case OP_CLT_UN:
1058 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1059 ins->opcode += ceqops_op_map [src1->type];
1060 break;
1061 /* unops */
1062 case MONO_CEE_NEG:
1063 ins->type = neg_table [src1->type];
1064 ins->opcode += unops_op_map [ins->type];
1065 break;
1066 case MONO_CEE_NOT:
1067 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1068 ins->type = src1->type;
1069 else
1070 ins->type = STACK_INV;
1071 ins->opcode += unops_op_map [ins->type];
1072 break;
1073 case MONO_CEE_CONV_I1:
1074 case MONO_CEE_CONV_I2:
1075 case MONO_CEE_CONV_I4:
1076 case MONO_CEE_CONV_U4:
1077 ins->type = STACK_I4;
1078 ins->opcode += unops_op_map [src1->type];
1079 break;
1080 case MONO_CEE_CONV_R_UN:
1081 ins->type = STACK_R8;
1082 switch (src1->type) {
1083 case STACK_I4:
1084 case STACK_PTR:
1085 ins->opcode = OP_ICONV_TO_R_UN;
1086 break;
1087 case STACK_I8:
1088 ins->opcode = OP_LCONV_TO_R_UN;
1089 break;
1090 case STACK_R8:
1091 ins->opcode = OP_FMOVE;
1092 break;
1094 break;
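/*
 * The STACK_R8 arm above covers conv.r.un applied to a value that is
 * already a float: compilers normally emit conv.r.un only on integer
 * operands, but hand-written IL like the following (illustrative) is
 * legal, and it lowers to a plain OP_FMOVE instead of having no mapping:
 *
 *   ldc.r8 2.0
 *   conv.r.un    // top of stack already R8 -> OP_FMOVE
 */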
1095 case MONO_CEE_CONV_OVF_I1:
1096 case MONO_CEE_CONV_OVF_U1:
1097 case MONO_CEE_CONV_OVF_I2:
1098 case MONO_CEE_CONV_OVF_U2:
1099 case MONO_CEE_CONV_OVF_I4:
1100 case MONO_CEE_CONV_OVF_U4:
1101 ins->type = STACK_I4;
1102 ins->opcode += ovf3ops_op_map [src1->type];
1103 break;
1104 case MONO_CEE_CONV_OVF_I_UN:
1105 case MONO_CEE_CONV_OVF_U_UN:
1106 ins->type = STACK_PTR;
1107 ins->opcode += ovf2ops_op_map [src1->type];
1108 break;
1109 case MONO_CEE_CONV_OVF_I1_UN:
1110 case MONO_CEE_CONV_OVF_I2_UN:
1111 case MONO_CEE_CONV_OVF_I4_UN:
1112 case MONO_CEE_CONV_OVF_U1_UN:
1113 case MONO_CEE_CONV_OVF_U2_UN:
1114 case MONO_CEE_CONV_OVF_U4_UN:
1115 ins->type = STACK_I4;
1116 ins->opcode += ovf2ops_op_map [src1->type];
1117 break;
1118 case MONO_CEE_CONV_U:
1119 ins->type = STACK_PTR;
1120 switch (src1->type) {
1121 case STACK_I4:
1122 ins->opcode = OP_ICONV_TO_U;
1123 break;
1124 case STACK_PTR:
1125 case STACK_MP:
1126 case STACK_OBJ:
1127 #if TARGET_SIZEOF_VOID_P == 8
1128 ins->opcode = OP_LCONV_TO_U;
1129 #else
1130 ins->opcode = OP_MOVE;
1131 #endif
1132 break;
1133 case STACK_I8:
1134 ins->opcode = OP_LCONV_TO_U;
1135 break;
1136 case STACK_R8:
1137 ins->opcode = OP_FCONV_TO_U;
1138 break;
1139 case STACK_R4:
1140 if (TARGET_SIZEOF_VOID_P == 8)
1141 ins->opcode = OP_RCONV_TO_U8;
1142 else
1143 ins->opcode = OP_RCONV_TO_U4;
1144 break;
1146 break;
1147 case MONO_CEE_CONV_I8:
1148 case MONO_CEE_CONV_U8:
1149 ins->type = STACK_I8;
1150 ins->opcode += unops_op_map [src1->type];
1151 break;
1152 case MONO_CEE_CONV_OVF_I8:
1153 case MONO_CEE_CONV_OVF_U8:
1154 ins->type = STACK_I8;
1155 ins->opcode += ovf3ops_op_map [src1->type];
1156 break;
1157 case MONO_CEE_CONV_OVF_U8_UN:
1158 case MONO_CEE_CONV_OVF_I8_UN:
1159 ins->type = STACK_I8;
1160 ins->opcode += ovf2ops_op_map [src1->type];
1161 break;
1162 case MONO_CEE_CONV_R4:
1163 ins->type = cfg->r4_stack_type;
1164 ins->opcode += unops_op_map [src1->type];
1165 break;
1166 case MONO_CEE_CONV_R8:
1167 ins->type = STACK_R8;
1168 ins->opcode += unops_op_map [src1->type];
1169 break;
1170 case OP_CKFINITE:
1171 ins->type = STACK_R8;
1172 break;
1173 case MONO_CEE_CONV_U2:
1174 case MONO_CEE_CONV_U1:
1175 ins->type = STACK_I4;
1176 ins->opcode += ovfops_op_map [src1->type];
1177 break;
1178 case MONO_CEE_CONV_I:
1179 case MONO_CEE_CONV_OVF_I:
1180 case MONO_CEE_CONV_OVF_U:
1181 ins->type = STACK_PTR;
1182 ins->opcode += ovfops_op_map [src1->type];
1183 break;
1184 case MONO_CEE_ADD_OVF:
1185 case MONO_CEE_ADD_OVF_UN:
1186 case MONO_CEE_MUL_OVF:
1187 case MONO_CEE_MUL_OVF_UN:
1188 case MONO_CEE_SUB_OVF:
1189 case MONO_CEE_SUB_OVF_UN:
1190 ins->type = bin_num_table [src1->type] [src2->type];
1191 ins->opcode += ovfops_op_map [src1->type];
1192 if (ins->type == STACK_R8)
1193 ins->type = STACK_INV;
1194 break;
1195 case OP_LOAD_MEMBASE:
1196 ins->type = STACK_PTR;
1197 break;
1198 case OP_LOADI1_MEMBASE:
1199 case OP_LOADU1_MEMBASE:
1200 case OP_LOADI2_MEMBASE:
1201 case OP_LOADU2_MEMBASE:
1202 case OP_LOADI4_MEMBASE:
1203 case OP_LOADU4_MEMBASE:
1204 ins->type = STACK_PTR;
1205 break;
1206 case OP_LOADI8_MEMBASE:
1207 ins->type = STACK_I8;
1208 break;
1209 case OP_LOADR4_MEMBASE:
1210 ins->type = cfg->r4_stack_type;
1211 break;
1212 case OP_LOADR8_MEMBASE:
1213 ins->type = STACK_R8;
1214 break;
1215 default:
1216 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1217 break;
1220 if (ins->type == STACK_MP) {
1221 if (src1->type == STACK_MP)
1222 ins->klass = src1->klass;
1223 else
1224 ins->klass = mono_defaults.object_class;
1228 void
1229 mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
1231 type_from_op (cfg, ins, src1, src2);
1234 static MonoClass*
1235 ldind_to_type (int op)
1237 switch (op) {
1238 case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class;
1239 case MONO_CEE_LDIND_U1: return mono_defaults.byte_class;
1240 case MONO_CEE_LDIND_I2: return mono_defaults.int16_class;
1241 case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class;
1242 case MONO_CEE_LDIND_I4: return mono_defaults.int32_class;
1243 case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class;
1244 case MONO_CEE_LDIND_I8: return mono_defaults.int64_class;
1245 case MONO_CEE_LDIND_I: return mono_defaults.int_class;
1246 case MONO_CEE_LDIND_R4: return mono_defaults.single_class;
1247 case MONO_CEE_LDIND_R8: return mono_defaults.double_class;
1248 case MONO_CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
1249 default: g_error ("Unknown ldind type %d", op);
1253 #if 0
1255 static const char
1256 param_table [STACK_MAX] [STACK_MAX] = {
1257 {0},
1260 static int
1261 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1263 int i;
1265 if (sig->hasthis) {
1266 switch (args->type) {
1267 case STACK_I4:
1268 case STACK_I8:
1269 case STACK_R8:
1270 case STACK_VTYPE:
1271 case STACK_INV:
1272 return 0;
1274 args++;
1276 for (i = 0; i < sig->param_count; ++i) {
1277 switch (args [i].type) {
1278 case STACK_INV:
1279 return 0;
1280 case STACK_MP:
1281 if (!sig->params [i]->byref)
1282 return 0;
1283 continue;
1284 case STACK_OBJ:
1285 if (sig->params [i]->byref)
1286 return 0;
1287 switch (sig->params [i]->type) {
1288 case MONO_TYPE_CLASS:
1289 case MONO_TYPE_STRING:
1290 case MONO_TYPE_OBJECT:
1291 case MONO_TYPE_SZARRAY:
1292 case MONO_TYPE_ARRAY:
1293 break;
1294 default:
1295 return 0;
1297 continue;
1298 case STACK_R8:
1299 if (sig->params [i]->byref)
1300 return 0;
1301 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1302 return 0;
1303 continue;
1304 case STACK_PTR:
1305 case STACK_I4:
1306 case STACK_I8:
1307 case STACK_VTYPE:
1308 break;
1310 /*if (!param_table [args [i].type] [sig->params [i]->type])
1311 return 0;*/
1313 return 1;
1315 #endif
1317 /*
1318 * When we need a pointer to the current domain many times in a method, we
1319 * call mono_domain_get() once and we store the result in a local variable.
1320 * This function returns the variable that represents the MonoDomain*.
1321 */
1322 inline static MonoInst *
1323 mono_get_domainvar (MonoCompile *cfg)
1325 if (!cfg->domainvar) {
1326 /* Make sure we don't generate references after checking whether to init this */
1327 g_assert (!cfg->domainvar_inited);
1328 cfg->domainvar = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1329 /* Avoid optimizing it away */
1330 cfg->domainvar->flags |= MONO_INST_VOLATILE;
1332 return cfg->domainvar;
1335 /*
1336 * The got_var contains the address of the Global Offset Table when AOT
1337 * compiling.
1338 */
1339 MonoInst *
1340 mono_get_got_var (MonoCompile *cfg)
1342 if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
1343 return NULL;
1344 if (!cfg->got_var) {
1345 cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1347 return cfg->got_var;
1350 static void
1351 mono_create_rgctx_var (MonoCompile *cfg)
1353 if (!cfg->rgctx_var) {
1354 cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1355 /* force the var to be stack allocated */
1356 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1360 static MonoInst *
1361 mono_get_vtable_var (MonoCompile *cfg)
1363 g_assert (cfg->gshared);
1365 mono_create_rgctx_var (cfg);
1367 return cfg->rgctx_var;
1370 static MonoType*
1371 type_from_stack_type (MonoInst *ins) {
1372 switch (ins->type) {
1373 case STACK_I4: return mono_get_int32_type ();
1374 case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
1375 case STACK_PTR: return mono_get_int_type ();
1376 case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class);
1377 case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
1378 case STACK_MP:
1379 return m_class_get_this_arg (ins->klass);
1380 case STACK_OBJ: return mono_get_object_type ();
1381 case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
1382 default:
1383 g_error ("stack type %d to monotype not handled\n", ins->type);
1385 return NULL;
1388 static G_GNUC_UNUSED int
1389 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1391 t = mono_type_get_underlying_type (t);
1392 switch (t->type) {
1393 case MONO_TYPE_I1:
1394 case MONO_TYPE_U1:
1395 case MONO_TYPE_I2:
1396 case MONO_TYPE_U2:
1397 case MONO_TYPE_I4:
1398 case MONO_TYPE_U4:
1399 return STACK_I4;
1400 case MONO_TYPE_I:
1401 case MONO_TYPE_U:
1402 case MONO_TYPE_PTR:
1403 case MONO_TYPE_FNPTR:
1404 return STACK_PTR;
1405 case MONO_TYPE_CLASS:
1406 case MONO_TYPE_STRING:
1407 case MONO_TYPE_OBJECT:
1408 case MONO_TYPE_SZARRAY:
1409 case MONO_TYPE_ARRAY:
1410 return STACK_OBJ;
1411 case MONO_TYPE_I8:
1412 case MONO_TYPE_U8:
1413 return STACK_I8;
1414 case MONO_TYPE_R4:
1415 return cfg->r4_stack_type;
1416 case MONO_TYPE_R8:
1417 return STACK_R8;
1418 case MONO_TYPE_VALUETYPE:
1419 case MONO_TYPE_TYPEDBYREF:
1420 return STACK_VTYPE;
1421 case MONO_TYPE_GENERICINST:
1422 if (mono_type_generic_inst_is_valuetype (t))
1423 return STACK_VTYPE;
1424 else
1425 return STACK_OBJ;
1426 break;
1427 default:
1428 g_assert_not_reached ();
1431 return -1;
1434 static MonoClass*
1435 array_access_to_klass (int opcode)
1437 switch (opcode) {
1438 case MONO_CEE_LDELEM_U1:
1439 return mono_defaults.byte_class;
1440 case MONO_CEE_LDELEM_U2:
1441 return mono_defaults.uint16_class;
1442 case MONO_CEE_LDELEM_I:
1443 case MONO_CEE_STELEM_I:
1444 return mono_defaults.int_class;
1445 case MONO_CEE_LDELEM_I1:
1446 case MONO_CEE_STELEM_I1:
1447 return mono_defaults.sbyte_class;
1448 case MONO_CEE_LDELEM_I2:
1449 case MONO_CEE_STELEM_I2:
1450 return mono_defaults.int16_class;
1451 case MONO_CEE_LDELEM_I4:
1452 case MONO_CEE_STELEM_I4:
1453 return mono_defaults.int32_class;
1454 case MONO_CEE_LDELEM_U4:
1455 return mono_defaults.uint32_class;
1456 case MONO_CEE_LDELEM_I8:
1457 case MONO_CEE_STELEM_I8:
1458 return mono_defaults.int64_class;
1459 case MONO_CEE_LDELEM_R4:
1460 case MONO_CEE_STELEM_R4:
1461 return mono_defaults.single_class;
1462 case MONO_CEE_LDELEM_R8:
1463 case MONO_CEE_STELEM_R8:
1464 return mono_defaults.double_class;
1465 case MONO_CEE_LDELEM_REF:
1466 case MONO_CEE_STELEM_REF:
1467 return mono_defaults.object_class;
1468 default:
1469 g_assert_not_reached ();
1471 return NULL;
1474 /*
1475 * We try to share variables when possible.
1476 */
1477 static MonoInst *
1478 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1480 MonoInst *res;
1481 int pos, vnum;
1482 MonoType *type;
1484 type = type_from_stack_type (ins);
1486 /* inlining can result in deeper stacks */
1487 if (cfg->inline_depth || slot >= cfg->header->max_stack)
1488 return mono_compile_create_var (cfg, type, OP_LOCAL);
1490 pos = ins->type - 1 + slot * STACK_MAX;
1492 switch (ins->type) {
1493 case STACK_I4:
1494 case STACK_I8:
1495 case STACK_R8:
1496 case STACK_PTR:
1497 case STACK_MP:
1498 case STACK_OBJ:
1499 if ((vnum = cfg->intvars [pos]))
1500 return cfg->varinfo [vnum];
1501 res = mono_compile_create_var (cfg, type, OP_LOCAL);
1502 cfg->intvars [pos] = res->inst_c0;
1503 break;
1504 default:
1505 res = mono_compile_create_var (cfg, type, OP_LOCAL);
1507 return res;
1510 static void
1511 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1512 {
1513 /*
1514 * Don't use this if a generic_context is set, since that means AOT can't
1515 * look up the method using just the image+token.
1516 * table == 0 means this is a reference made from a wrapper.
1517 */
1518 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1519 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1520 jump_info_token->image = image;
1521 jump_info_token->token = token;
1522 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1526 /*
1527 * This function is called to handle items that are left on the evaluation stack
1528 * at basic block boundaries. What happens is that we save the values to local variables
1529 * and we reload them later when first entering the target basic block (with the
1530 * handle_loaded_temps () function).
1531 * A single join point will use the same variables (stored in the array bb->out_stack or
1532 * bb->in_stack, if the basic block is before or after the join point).
1533 *
1534 * This function needs to be called _before_ emitting the last instruction of
1535 * the bb (i.e. before emitting a branch).
1536 * If the stack merge fails at a join point, cfg->unverifiable is set.
1537 */
1538 static void
1539 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1541 int i, bindex;
1542 MonoBasicBlock *bb = cfg->cbb;
1543 MonoBasicBlock *outb;
1544 MonoInst *inst, **locals;
1545 gboolean found;
1547 if (!count)
1548 return;
1549 if (cfg->verbose_level > 3)
1550 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1551 if (!bb->out_scount) {
1552 bb->out_scount = count;
1553 //printf ("bblock %d has out:", bb->block_num);
1554 found = FALSE;
1555 for (i = 0; i < bb->out_count; ++i) {
1556 outb = bb->out_bb [i];
1557 /* exception handlers are linked, but they should not be considered for stack args */
1558 if (outb->flags & BB_EXCEPTION_HANDLER)
1559 continue;
1560 //printf (" %d", outb->block_num);
1561 if (outb->in_stack) {
1562 found = TRUE;
1563 bb->out_stack = outb->in_stack;
1564 break;
1567 //printf ("\n");
1568 if (!found) {
1569 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1570 for (i = 0; i < count; ++i) {
1571 /*
1572 * try to reuse temps already allocated for this purpose, if they occupy the same
1573 * stack slot and if they are of the same type.
1574 * This won't cause conflicts since if 'local' is used to
1575 * store one of the values in the in_stack of a bblock, then
1576 * the same variable will be used for the same outgoing stack
1577 * slot as well.
1578 * This doesn't work when inlining methods, since the bblocks
1579 * in the inlined methods do not inherit their in_stack from
1580 * the bblock they are inlined to. See bug #58863 for an
1581 * example.
1582 */
1583 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1588 for (i = 0; i < bb->out_count; ++i) {
1589 outb = bb->out_bb [i];
1590 /* exception handlers are linked, but they should not be considered for stack args */
1591 if (outb->flags & BB_EXCEPTION_HANDLER)
1592 continue;
1593 if (outb->in_scount) {
1594 if (outb->in_scount != bb->out_scount) {
1595 cfg->unverifiable = TRUE;
1596 return;
1598 continue; /* check they are the same locals */
1600 outb->in_scount = count;
1601 outb->in_stack = bb->out_stack;
1604 locals = bb->out_stack;
1605 cfg->cbb = bb;
1606 for (i = 0; i < count; ++i) {
1607 sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]);
1608 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1609 inst->cil_code = sp [i]->cil_code;
1610 sp [i] = locals [i];
1611 if (cfg->verbose_level > 3)
1612 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1615 /*
1616 * It is possible that the out bblocks already have in_stack assigned, and
1617 * the in_stacks differ. In this case, we will store to all the different
1618 * in_stacks.
1619 */
1621 found = TRUE;
1622 bindex = 0;
1623 while (found) {
1624 /* Find a bblock which has a different in_stack */
1625 found = FALSE;
1626 while (bindex < bb->out_count) {
1627 outb = bb->out_bb [bindex];
1628 /* exception handlers are linked, but they should not be considered for stack args */
1629 if (outb->flags & BB_EXCEPTION_HANDLER) {
1630 bindex++;
1631 continue;
1633 if (outb->in_stack != locals) {
1634 for (i = 0; i < count; ++i) {
1635 sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]);
1636 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1637 inst->cil_code = sp [i]->cil_code;
1638 sp [i] = locals [i];
1639 if (cfg->verbose_level > 3)
1640 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1642 locals = outb->in_stack;
1643 found = TRUE;
1644 break;
1646 bindex ++;
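/*
 * Hand-written, illustrative IL that leaves a value on the eval stack at a
 * join point, which is the situation handle_stack_args () exists for:
 *
 *      ldarg.0
 *      brtrue.s T
 *      ldc.i4.1     // reaches M with one I4 on the stack
 *      br.s M
 *   T: ldc.i4.2     // also reaches M with one I4 on the stack
 *   M: ret
 *
 * Both predecessors of M store their value into the same interface
 * variable (bb->out_stack == M's in_stack), and M reloads it on entry.
 */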
1651 MonoInst*
1652 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1654 MonoInst *ins;
1656 if (cfg->compile_aot) {
1657 MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size
1658 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1659 MONO_RESTORE_WARNING
1660 } else {
1661 MonoJumpInfo ji;
1662 gpointer target;
1663 ERROR_DECL (error);
1665 ji.type = patch_type;
1666 ji.data.target = data;
1667 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, error);
1668 mono_error_assert_ok (error);
1670 EMIT_NEW_PCONST (cfg, ins, target);
1672 return ins;
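/*
 * Usage sketch (hypothetical call site): under AOT the constant becomes a
 * patch resolved at load time, while under the JIT it is resolved right
 * here and emitted as an immediate pointer:
 */
#if 0
	MonoInst *img = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_IMAGE, image);
#endif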
1675 static MonoInst*
1676 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1678 int tls_offset = mono_tls_get_tls_offset (key);
1680 if (cfg->compile_aot)
1681 return NULL;
1683 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1684 MonoInst *ins;
1685 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1686 ins->dreg = mono_alloc_preg (cfg);
1687 ins->inst_offset = tls_offset;
1688 return ins;
1690 return NULL;
1693 static MonoInst*
1694 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1696 MonoInst *fast_tls = NULL;
1698 if (!mini_debug_options.use_fallback_tls)
1699 fast_tls = mono_create_fast_tls_getter (cfg, key);
1701 if (fast_tls) {
1702 MONO_ADD_INS (cfg->cbb, fast_tls);
1703 return fast_tls;
1706 const MonoJitICallId jit_icall_id = mono_get_tls_key_to_jit_icall_id (key);
1708 if (cfg->compile_aot) {
1709 MonoInst *addr;
1710 /*
1711 * tls getters are critical pieces of code and we don't want to resolve them
1712 * through the standard plt/tramp mechanism since we might expose ourselves
1713 * to crashes and infinite recursions.
1714 * Therefore the NOCALL part of MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, FALSE in is_plt_patch.
1715 */
1716 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
1717 return mini_emit_calli (cfg, mono_icall_sig_ptr, NULL, addr, NULL, NULL);
1718 } else {
1719 return mono_emit_jit_icall_id (cfg, jit_icall_id, NULL);
1723 /*
1724 * emit_push_lmf:
1725 *
1726 * Emit IR to push the current LMF onto the LMF stack.
1727 */
1728 static void
1729 emit_push_lmf (MonoCompile *cfg)
1730 {
1731 /*
1732 * Emit IR to push the LMF:
1733 * lmf_addr = <lmf_addr from tls>
1734 * lmf->lmf_addr = lmf_addr
1735 * lmf->prev_lmf = *lmf_addr
1736 * *lmf_addr = lmf
1737 */
1738 MonoInst *ins, *lmf_ins;
1740 if (!cfg->lmf_ir)
1741 return;
1743 int lmf_reg, prev_lmf_reg;
1745 * Store lmf_addr in a variable, so it can be allocated to a global register.
1747 if (!cfg->lmf_addr_var)
1748 cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1750 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1751 g_assert (lmf_ins);
1753 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1755 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1756 lmf_reg = ins->dreg;
1758 prev_lmf_reg = alloc_preg (cfg);
1759 /* Save previous_lmf */
1760 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1761 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
1762 /* Set new lmf */
1763 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1766 /*
1767 * emit_pop_lmf:
1768 *
1769 * Emit IR to pop the current LMF from the LMF stack.
1770 */
1771 static void
1772 emit_pop_lmf (MonoCompile *cfg)
1774 int lmf_reg, lmf_addr_reg;
1775 MonoInst *ins;
1777 if (!cfg->lmf_ir)
1778 return;
1780 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1781 lmf_reg = ins->dreg;
1783 int prev_lmf_reg;
1784 /*
1785 * Emit IR to pop the LMF:
1786 * *(lmf->lmf_addr) = lmf->prev_lmf
1787 */
1788 /* This could be called before emit_push_lmf () */
1789 if (!cfg->lmf_addr_var)
1790 cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
1791 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1793 prev_lmf_reg = alloc_preg (cfg);
1794 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1795 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
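/*
 * The IR emitted by emit_push_lmf ()/emit_pop_lmf () implements the usual
 * intrusive-list discipline; a C sketch under the assumption that lmf_addr
 * points at the thread's current-LMF slot (the field name matches the
 * MonoLMF offsets used above):
 */
#if 0
static void
push_lmf_sketch (MonoLMF **lmf_addr, MonoLMF *lmf)
{
	lmf->previous_lmf = *lmf_addr;	/* save the old head */
	*lmf_addr = lmf;	/* make this frame the new head */
}

static void
pop_lmf_sketch (MonoLMF **lmf_addr, MonoLMF *lmf)
{
	*lmf_addr = (MonoLMF *)lmf->previous_lmf;	/* unlink this frame */
}
#endif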
1798 /*
1799 * target_type_is_incompatible:
1800 * @cfg: MonoCompile context
1801 *
1802 * Check that the item @arg on the evaluation stack can be stored
1803 * in the target type (can be a local, or field, etc).
1804 * The cfg arg can be used to check if we need verification or just
1805 * validity checks.
1806 *
1807 * Returns: non-0 value if arg can't be stored on a target.
1808 */
1809 static int
1810 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1812 MonoType *simple_type;
1813 MonoClass *klass;
1815 if (target->byref) {
1816 /* FIXME: check that the pointed to types match */
1817 if (arg->type == STACK_MP) {
1818 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1819 MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target))));
1820 MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)));
1822 /* if the target is native int& or X* or same type */
1823 if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered)
1824 return 0;
1826 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1827 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) &&
1828 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1829 return 0;
1830 return 1;
1832 if (arg->type == STACK_PTR)
1833 return 0;
1834 return 1;
1837 simple_type = mini_get_underlying_type (target);
1838 switch (simple_type->type) {
1839 case MONO_TYPE_VOID:
1840 return 1;
1841 case MONO_TYPE_I1:
1842 case MONO_TYPE_U1:
1843 case MONO_TYPE_I2:
1844 case MONO_TYPE_U2:
1845 case MONO_TYPE_I4:
1846 case MONO_TYPE_U4:
1847 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1848 return 1;
1849 return 0;
1850 case MONO_TYPE_PTR:
1851 /* STACK_MP is needed when setting pinned locals */
1852 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1853 return 1;
1854 return 0;
1855 case MONO_TYPE_I:
1856 case MONO_TYPE_U:
1857 case MONO_TYPE_FNPTR:
1858 /*
1859 * Some opcodes like ldloca return 'transient pointers' which can be stored
1860 * in native int. (#688008).
1861 */
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1863 return 1;
1864 return 0;
1865 case MONO_TYPE_CLASS:
1866 case MONO_TYPE_STRING:
1867 case MONO_TYPE_OBJECT:
1868 case MONO_TYPE_SZARRAY:
1869 case MONO_TYPE_ARRAY:
1870 if (arg->type != STACK_OBJ)
1871 return 1;
1872 /* FIXME: check type compatibility */
1873 return 0;
1874 case MONO_TYPE_I8:
1875 case MONO_TYPE_U8:
1876 if (arg->type != STACK_I8)
1877 return 1;
1878 return 0;
1879 case MONO_TYPE_R4:
1880 if (arg->type != cfg->r4_stack_type)
1881 return 1;
1882 return 0;
1883 case MONO_TYPE_R8:
1884 if (arg->type != STACK_R8)
1885 return 1;
1886 return 0;
1887 case MONO_TYPE_VALUETYPE:
1888 if (arg->type != STACK_VTYPE)
1889 return 1;
1890 klass = mono_class_from_mono_type_internal (simple_type);
1891 if (klass != arg->klass)
1892 return 1;
1893 return 0;
1894 case MONO_TYPE_TYPEDBYREF:
1895 if (arg->type != STACK_VTYPE)
1896 return 1;
1897 klass = mono_class_from_mono_type_internal (simple_type);
1898 if (klass != arg->klass)
1899 return 1;
1900 return 0;
1901 case MONO_TYPE_GENERICINST:
1902 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1903 MonoClass *target_class;
1904 if (arg->type != STACK_VTYPE)
1905 return 1;
1906 klass = mono_class_from_mono_type_internal (simple_type);
1907 target_class = mono_class_from_mono_type_internal (target);
1908 /* The second case is needed when doing partial sharing */
1909 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))))
1910 return 1;
1911 return 0;
1912 } else {
1913 if (arg->type != STACK_OBJ)
1914 return 1;
1915 /* FIXME: check type compatibility */
1916 return 0;
1918 case MONO_TYPE_VAR:
1919 case MONO_TYPE_MVAR:
1920 g_assert (cfg->gshared);
1921 if (mini_type_var_is_vt (simple_type)) {
1922 if (arg->type != STACK_VTYPE)
1923 return 1;
1924 } else {
1925 if (arg->type != STACK_OBJ)
1926 return 1;
1928 return 0;
1929 default:
1930 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1932 return 1;
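/*
 * Illustrative usage (a sketch, not a verbatim call site): a stloc
 * implementation would reject a mismatched store along these lines:
 *
 *     if (target_type_is_incompatible (cfg, header->locals [n], *sp))
 *             UNVERIFIED;
 */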
1936 * convert_value:
1938 * Emit some implicit conversions which are not part of the .NET spec, but are allowed by MS.NET.
1940 static MonoInst*
1941 convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins)
1943 if (!cfg->r4fp)
1944 return ins;
1945 type = mini_get_underlying_type (type);
1946 switch (type->type) {
1947 case MONO_TYPE_R4:
1948 if (ins->type == STACK_R8) {
1949 int dreg = alloc_freg (cfg);
1950 MonoInst *conv;
1951 EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg);
1952 conv->type = STACK_R4;
1953 return conv;
1955 break;
1956 case MONO_TYPE_R8:
1957 if (ins->type == STACK_R4) {
1958 int dreg = alloc_freg (cfg);
1959 MonoInst *conv;
1960 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg);
1961 conv->type = STACK_R8;
1962 return conv;
1964 break;
1965 default:
1966 break;
1968 return ins;
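/*
 * Example (a sketch; 'r4_type' stands for any MONO_TYPE_R4 target type):
 * with cfg->r4fp set, storing an R8 stack entry into an R4 location goes
 * through convert_value (), which emits an OP_FCONV_TO_R4:
 *
 *     MonoInst *val = ...;                      // val->type == STACK_R8
 *     val = convert_value (cfg, r4_type, val);  // now val->type == STACK_R4
 */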
1972 * Prepare arguments for passing to a function call.
1973 * Return a non-zero value if the arguments can't be passed to the given
1974 * signature.
1975 * The type checks are not yet complete and some conversions may need
1976 * casts on 32 or 64 bit architectures.
1978 * FIXME: implement this using target_type_is_incompatible ()
1980 static gboolean
1981 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1983 MonoType *simple_type;
1984 int i;
1986 if (sig->hasthis) {
1987 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1988 return TRUE;
1989 args++;
1991 for (i = 0; i < sig->param_count; ++i) {
1992 if (sig->params [i]->byref) {
1993 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1994 return TRUE;
1995 continue;
1997 simple_type = mini_get_underlying_type (sig->params [i]);
1998 handle_enum:
1999 switch (simple_type->type) {
2000 case MONO_TYPE_VOID:
2001 return TRUE;
2002 case MONO_TYPE_I1:
2003 case MONO_TYPE_U1:
2004 case MONO_TYPE_I2:
2005 case MONO_TYPE_U2:
2006 case MONO_TYPE_I4:
2007 case MONO_TYPE_U4:
2008 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2009 return TRUE;
2010 continue;
2011 case MONO_TYPE_I:
2012 case MONO_TYPE_U:
2013 case MONO_TYPE_PTR:
2014 case MONO_TYPE_FNPTR:
2015 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2016 return TRUE;
2017 continue;
2018 case MONO_TYPE_CLASS:
2019 case MONO_TYPE_STRING:
2020 case MONO_TYPE_OBJECT:
2021 case MONO_TYPE_SZARRAY:
2022 case MONO_TYPE_ARRAY:
2023 if (args [i]->type != STACK_OBJ)
2024 return TRUE;
2025 continue;
2026 case MONO_TYPE_I8:
2027 case MONO_TYPE_U8:
2028 if (args [i]->type != STACK_I8)
2029 return TRUE;
2030 continue;
2031 case MONO_TYPE_R4:
2032 if (args [i]->type != cfg->r4_stack_type)
2033 return TRUE;
2034 continue;
2035 case MONO_TYPE_R8:
2036 if (args [i]->type != STACK_R8)
2037 return TRUE;
2038 continue;
2039 case MONO_TYPE_VALUETYPE:
2040 if (m_class_is_enumtype (simple_type->data.klass)) {
2041 simple_type = mono_class_enum_basetype_internal (simple_type->data.klass);
2042 goto handle_enum;
2044 if (args [i]->type != STACK_VTYPE)
2045 return TRUE;
2046 continue;
2047 case MONO_TYPE_TYPEDBYREF:
2048 if (args [i]->type != STACK_VTYPE)
2049 return TRUE;
2050 continue;
2051 case MONO_TYPE_GENERICINST:
2052 simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class);
2053 goto handle_enum;
2054 case MONO_TYPE_VAR:
2055 case MONO_TYPE_MVAR:
2056 /* gsharedvt */
2057 if (args [i]->type != STACK_VTYPE)
2058 return TRUE;
2059 continue;
2060 default:
2061 g_error ("unknown type 0x%02x in check_call_signature",
2062 simple_type->type);
2065 return FALSE;
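/*
 * Typical usage (hypothetical sketch): bail out before emitting a call whose
 * stack contents don't match the callee signature:
 *
 *     if (check_call_signature (cfg, fsig, sp))
 *             UNVERIFIED;
 */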
2068 MonoJumpInfo *
2069 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2071 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2073 ji->ip.i = ip;
2074 ji->type = type;
2075 ji->data.target = target;
2077 return ji;
2081 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2083 if (cfg->gshared)
2084 return mono_class_check_context_used (klass);
2085 else
2086 return 0;
2090 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2092 if (cfg->gshared)
2093 return mono_method_check_context_used (method);
2094 else
2095 return 0;
2099 * check_method_sharing:
2101 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2103 static void
2104 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2106 gboolean pass_vtable = FALSE;
2107 gboolean pass_mrgctx = FALSE;
2109 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) &&
2110 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2111 gboolean sharable = FALSE;
2113 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2114 sharable = TRUE;
2117 * Pass vtable iff target method might
2118 * be shared, which means that sharing
2119 * is enabled for its class and its
2120 * context is sharable (and it's not a
2121 * generic method).
2123 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2124 pass_vtable = TRUE;
2127 if (mini_method_needs_mrgctx (cmethod)) {
2128 if (mini_method_is_default_method (cmethod))
2129 pass_vtable = FALSE;
2130 else
2131 g_assert (!pass_vtable);
2133 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2134 pass_mrgctx = TRUE;
2135 } else {
2136 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod)))
2137 pass_mrgctx = TRUE;
2141 if (out_pass_vtable)
2142 *out_pass_vtable = pass_vtable;
2143 if (out_pass_mrgctx)
2144 *out_pass_mrgctx = pass_mrgctx;
2147 static gboolean
2148 direct_icalls_enabled (MonoCompile *cfg, MonoMethod *method)
2150 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2151 return FALSE;
2153 if (method && mono_aot_direct_icalls_enabled_for_method (cfg, method))
2154 return TRUE;
2156 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2157 #ifdef TARGET_AMD64
2158 if (cfg->compile_llvm && !cfg->llvm_only)
2159 return FALSE;
2160 #endif
2162 return FALSE;
2165 MonoInst*
2166 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2169 * Call the jit icall without a wrapper if possible.
2170 * The wrapper is needed to be able to do stack walks for asynchronously suspended
2171 * threads when debugging.
2173 if (direct_icalls_enabled (cfg, NULL)) {
2174 int costs;
2176 if (!info->wrapper_method) {
2177 info->wrapper_method = mono_marshal_get_icall_wrapper (info, TRUE);
2178 mono_memory_barrier ();
2182 * Inline the wrapper method, which is basically a call to the C icall, and
2183 * an exception check.
2185 costs = inline_method (cfg, info->wrapper_method, NULL,
2186 args, NULL, il_offset, TRUE);
2187 g_assert (costs > 0);
2188 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2190 return args [0];
2192 return mono_emit_jit_icall_id (cfg, mono_jit_icall_info_id (info), args);
2195 static MonoInst*
2196 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2198 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2199 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2200 int widen_op = -1;
2203 * Native code might return non-register-sized integers
2204 * without initializing the upper bits.
2206 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2207 case OP_LOADI1_MEMBASE:
2208 widen_op = OP_ICONV_TO_I1;
2209 break;
2210 case OP_LOADU1_MEMBASE:
2211 widen_op = OP_ICONV_TO_U1;
2212 break;
2213 case OP_LOADI2_MEMBASE:
2214 widen_op = OP_ICONV_TO_I2;
2215 break;
2216 case OP_LOADU2_MEMBASE:
2217 widen_op = OP_ICONV_TO_U2;
2218 break;
2219 default:
2220 break;
2223 if (widen_op != -1) {
2224 int dreg = alloc_preg (cfg);
2225 MonoInst *widen;
2227 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2228 widen->type = ins->type;
2229 ins = widen;
2234 return ins;
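/*
 * Example: for a pinvoke whose signature returns 'short',
 * mono_type_to_load_membase () yields OP_LOADI2_MEMBASE, so the raw call
 * result gets explicitly sign-extended (sketch):
 *
 *     ins = mono_emit_widen_call_res (cfg, ins, fsig); // emits OP_ICONV_TO_I2
 */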
2237 static MonoInst*
2238 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2239 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2241 static void
2242 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2244 MonoInst *args [2];
2245 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2246 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2247 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2250 static void
2251 emit_bad_image_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2253 mono_emit_jit_icall (cfg, mono_throw_bad_image, NULL);
2256 static MonoMethod*
2257 get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
2259 MonoMethod *method;
2260 ERROR_DECL (error);
2261 method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
2262 mono_error_assert_ok (error);
2263 g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
2264 return method;
2267 MonoMethod*
2268 mini_get_memcpy_method (void)
2270 static MonoMethod *memcpy_method = NULL;
2271 if (!memcpy_method) {
2272 memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0);
2273 if (!memcpy_method)
2274 g_error ("Old corlib found. Install a new one");
2276 return memcpy_method;
2279 MonoInst*
2280 mini_emit_storing_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2282 MonoInst *store;
2285 * Add a release memory barrier so the object contents are flushed
2286 * to memory before storing the reference into another object.
2288 if (mini_debug_options.clr_memory_model)
2289 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
2291 EMIT_NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, ptr->dreg, 0, value->dreg);
2293 mini_emit_write_barrier (cfg, ptr, value);
2294 return store;
2297 void
2298 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2300 int card_table_shift_bits;
2301 target_mgreg_t card_table_mask;
2302 guint8 *card_table;
2303 MonoInst *dummy_use;
2304 int nursery_shift_bits;
2305 size_t nursery_size;
2307 if (!cfg->gen_write_barriers)
2308 return;
2310 //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
2312 card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask);
2314 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2316 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2317 MonoInst *wbarrier;
2319 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2320 wbarrier->sreg1 = ptr->dreg;
2321 wbarrier->sreg2 = value->dreg;
2322 MONO_ADD_INS (cfg->cbb, wbarrier);
2323 } else if (card_table) {
2324 int offset_reg = alloc_preg (cfg);
2325 int card_reg;
2326 MonoInst *ins;
2329 * We emit a fast, lightweight write barrier. This always marks cards, as in the concurrent
2330 * collector case, so for the serial collector it might slightly slow down nursery
2331 * collections. We also expect the host system and the target system to have the same card
2332 * table configuration, which is the case if they have the same pointer size.
2335 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2336 if (card_table_mask)
2337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2339 /* We can't use PADD_IMM since the card table might end up at a high address and amd64 doesn't
2340 * support immediates larger than 32 bits.
2342 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2343 card_reg = ins->dreg;
2345 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2346 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2347 } else {
2348 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2349 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2352 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
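/*
 * In pointer-arithmetic terms, the card table fast path above computes
 * (an illustrative reconstruction of the emitted IR, not literal code):
 *
 *     guint8 *card = card_table + ((ptr >> card_table_shift_bits) & card_table_mask);
 *     *card = 1;
 */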
2355 MonoMethod*
2356 mini_get_memset_method (void)
2358 static MonoMethod *memset_method = NULL;
2359 if (!memset_method) {
2360 memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0);
2361 if (!memset_method)
2362 g_error ("Old corlib found. Install a new one");
2364 return memset_method;
2367 void
2368 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2370 MonoInst *iargs [3];
2371 int n;
2372 guint32 align;
2373 MonoMethod *memset_method;
2374 MonoInst *size_ins = NULL;
2375 MonoInst *bzero_ins = NULL;
2376 static MonoMethod *bzero_method;
2378 /* FIXME: Optimize this for the case when dest is an LDADDR */
2379 mono_class_init_internal (klass);
2380 if (mini_is_gsharedvt_klass (klass)) {
2381 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2382 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
2383 if (!bzero_method)
2384 bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0);
2385 g_assert (bzero_method);
2386 iargs [0] = dest;
2387 iargs [1] = size_ins;
2388 mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL);
2389 return;
2392 klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));
2394 n = mono_class_value_size (klass, &align);
2396 if (n <= TARGET_SIZEOF_VOID_P * 8) {
2397 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2399 else {
2400 memset_method = mini_get_memset_method ();
2401 iargs [0] = dest;
2402 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2403 EMIT_NEW_ICONST (cfg, iargs [2], n);
2404 mono_emit_method_call (cfg, memset_method, iargs, NULL);
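/*
 * E.g. on a 64-bit target the threshold above is 8 * 8 = 64 bytes: a 12-byte
 * struct is zeroed inline via mini_emit_memset (), while a 1 KB struct falls
 * back to a call to the corlib memset helper. (Illustrative sizes.)
 */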
2408 static gboolean
2409 context_used_is_mrgctx (MonoCompile *cfg, int context_used)
2411 /* gshared DIM (default interface method) methods use an mrgctx */
2412 if (mini_method_is_default_method (cfg->method))
2413 return context_used != 0;
2414 return context_used & MONO_GENERIC_CONTEXT_USED_METHOD;
2418 * emit_get_rgctx:
2420 * Emit IR to return either the this pointer for instance methods,
2421 * or the mrgctx for static methods.
2423 static MonoInst*
2424 emit_get_rgctx (MonoCompile *cfg, int context_used)
2426 MonoInst *this_ins = NULL;
2427 MonoMethod *method = cfg->method;
2429 g_assert (cfg->gshared);
2431 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2432 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2433 !m_class_is_valuetype (method->klass))
2434 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ());
2436 if (context_used_is_mrgctx (cfg, context_used)) {
2437 MonoInst *mrgctx_loc, *mrgctx_var;
2439 if (!mini_method_is_default_method (method)) {
2440 g_assert (!this_ins);
2441 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2444 mrgctx_loc = mono_get_vtable_var (cfg);
2445 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2447 return mrgctx_var;
2448 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || m_class_is_valuetype (method->klass)) {
2449 MonoInst *vtable_loc, *vtable_var;
2451 g_assert (!this_ins);
2453 vtable_loc = mono_get_vtable_var (cfg);
2454 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2456 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2457 MonoInst *mrgctx_var = vtable_var;
2458 int vtable_reg;
2460 vtable_reg = alloc_preg (cfg);
2461 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2462 vtable_var->type = STACK_PTR;
2465 return vtable_var;
2466 } else {
2467 MonoInst *ins;
2468 int vtable_reg;
2470 vtable_reg = alloc_preg (cfg);
2471 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2472 return ins;
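/*
 * To summarize the three cases above: inflated generic (and default
 * interface) methods load the mrgctx from the vtable var, static or
 * valuetype methods load the vtable var (indirecting through the mrgctx for
 * inflated methods), and other instance methods recover the vtable from
 * 'this'.
 */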
2476 static MonoJumpInfoRgctxEntry *
2477 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2479 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2480 if (in_mrgctx)
2481 res->d.method = method;
2482 else
2483 res->d.klass = method->klass;
2484 res->in_mrgctx = in_mrgctx;
2485 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2486 res->data->type = patch_type;
2487 res->data->data.target = patch_data;
2488 res->info_type = info_type;
2490 return res;
2493 static inline MonoInst*
2494 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2496 MonoInst *args [16];
2497 MonoInst *call;
2499 // FIXME: No fastpath since the slot is not a compile time constant
2500 args [0] = rgctx;
2501 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
2502 if (entry->in_mrgctx)
2503 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
2504 else
2505 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
2506 return call;
2507 #if 0
2509 * FIXME: This can be called during decompose, which is a problem since it creates
2510 * new bblocks.
2511 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
2513 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
2514 gboolean mrgctx;
2515 MonoBasicBlock *is_null_bb, *end_bb;
2516 MonoInst *res, *ins, *call;
2517 MonoInst *args[16];
2519 slot = mini_get_rgctx_entry_slot (entry);
2521 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
2522 index = MONO_RGCTX_SLOT_INDEX (slot);
2523 if (mrgctx)
2524 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P;
2525 for (depth = 0; ; ++depth) {
2526 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
2528 if (index < size - 1)
2529 break;
2530 index -= size - 1;
2533 NEW_BBLOCK (cfg, end_bb);
2534 NEW_BBLOCK (cfg, is_null_bb);
2536 if (mrgctx) {
2537 rgctx_reg = rgctx->dreg;
2538 } else {
2539 rgctx_reg = alloc_preg (cfg);
2541 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
2542 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
2543 NEW_BBLOCK (cfg, is_null_bb);
2545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
2546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2549 for (i = 0; i < depth; ++i) {
2550 int array_reg = alloc_preg (cfg);
2552 /* load ptr to next array */
2553 if (mrgctx && i == 0)
2554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
2555 else
2556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
2557 rgctx_reg = array_reg;
2558 /* is the ptr null? */
2559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
2560 /* if yes, jump to actual trampoline */
2561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2564 /* fetch slot */
2565 val_reg = alloc_preg (cfg);
2566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * TARGET_SIZEOF_VOID_P);
2567 /* is the slot null? */
2568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
2569 /* if yes, jump to actual trampoline */
2570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2572 /* Fastpath */
2573 res_reg = alloc_preg (cfg);
2574 MONO_INST_NEW (cfg, ins, OP_MOVE);
2575 ins->dreg = res_reg;
2576 ins->sreg1 = val_reg;
2577 MONO_ADD_INS (cfg->cbb, ins);
2578 res = ins;
2579 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2581 /* Slowpath */
2582 MONO_START_BB (cfg, is_null_bb);
2583 args [0] = rgctx;
2584 EMIT_NEW_ICONST (cfg, args [1], index);
2585 if (mrgctx)
2586 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
2587 else
2588 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
2589 MONO_INST_NEW (cfg, ins, OP_MOVE);
2590 ins->dreg = res_reg;
2591 ins->sreg1 = call->dreg;
2592 MONO_ADD_INS (cfg->cbb, ins);
2593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2595 MONO_START_BB (cfg, end_bb);
2597 return res;
2598 #endif
2602 * emit_rgctx_fetch:
2604 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
2605 * given by RGCTX.
2607 static MonoInst*
2608 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2610 if (cfg->llvm_only)
2611 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
2612 else
2613 return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, mono_icall_sig_ptr_ptr, &rgctx);
2617 * mini_emit_get_rgctx_klass:
2619 * Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit
2620 * normal constants, else emit a load from the rgctx.
2622 MonoInst*
2623 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2624 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2626 if (!context_used) {
2627 MonoInst *ins;
2629 switch (rgctx_type) {
2630 case MONO_RGCTX_INFO_KLASS:
2631 EMIT_NEW_CLASSCONST (cfg, ins, klass);
2632 return ins;
2633 default:
2634 g_assert_not_reached ();
2638 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2639 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2641 return emit_rgctx_fetch (cfg, rgctx, entry);
2644 static MonoInst*
2645 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
2646 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
2648 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
2649 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2651 return emit_rgctx_fetch (cfg, rgctx, entry);
2654 static MonoInst*
2655 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
2656 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2658 MonoJumpInfoGSharedVtCall *call_info;
2659 MonoJumpInfoRgctxEntry *entry;
2660 MonoInst *rgctx;
2662 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
2663 call_info->sig = sig;
2664 call_info->method = cmethod;
2666 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
2667 rgctx = emit_get_rgctx (cfg, context_used);
2669 return emit_rgctx_fetch (cfg, rgctx, entry);
2673 * emit_get_rgctx_virt_method:
2675 * Return data for method VIRT_METHOD for a receiver of type KLASS.
2677 static MonoInst*
2678 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
2679 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
2681 MonoJumpInfoVirtMethod *info;
2682 MonoJumpInfoRgctxEntry *entry;
2683 MonoInst *rgctx;
2685 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
2686 info->klass = klass;
2687 info->method = virt_method;
2689 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
2690 rgctx = emit_get_rgctx (cfg, context_used);
2692 return emit_rgctx_fetch (cfg, rgctx, entry);
2695 static MonoInst*
2696 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
2697 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
2699 MonoJumpInfoRgctxEntry *entry;
2700 MonoInst *rgctx;
2702 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
2703 rgctx = emit_get_rgctx (cfg, context_used);
2705 return emit_rgctx_fetch (cfg, rgctx, entry);
2709 * emit_get_rgctx_method:
2711 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2712 * normal constants, else emit a load from the rgctx.
2714 static MonoInst*
2715 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2716 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2718 if (context_used == -1)
2719 context_used = mono_method_check_context_used (cmethod);
2721 if (!context_used) {
2722 MonoInst *ins;
2724 switch (rgctx_type) {
2725 case MONO_RGCTX_INFO_METHOD:
2726 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2727 return ins;
2728 case MONO_RGCTX_INFO_METHOD_RGCTX:
2729 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2730 return ins;
2731 case MONO_RGCTX_INFO_METHOD_FTNDESC:
2732 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHOD_FTNDESC, cmethod);
2733 return ins;
2734 default:
2735 g_assert_not_reached ();
2737 } else {
2738 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2739 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2741 return emit_rgctx_fetch (cfg, rgctx, entry);
2745 static MonoInst*
2746 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2747 MonoClassField *field, MonoRgctxInfoType rgctx_type)
2749 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type);
2750 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2752 return emit_rgctx_fetch (cfg, rgctx, entry);
2755 MonoInst*
2756 mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2757 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2759 return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type);
2762 static int
2763 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
2765 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
2766 MonoRuntimeGenericContextInfoTemplate *template_;
2767 int i, idx;
2769 g_assert (info);
2771 for (i = 0; i < info->num_entries; ++i) {
2772 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
2774 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
2775 return i;
2778 if (info->num_entries == info->count_entries) {
2779 MonoRuntimeGenericContextInfoTemplate *new_entries;
2780 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
2782 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
2784 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
2785 info->entries = new_entries;
2786 info->count_entries = new_count_entries;
2789 idx = info->num_entries;
2790 template_ = &info->entries [idx];
2791 template_->info_type = rgctx_type;
2792 template_->data = data;
2794 info->num_entries ++;
2796 return idx;
2800 * emit_get_gsharedvt_info:
2802 * This is similar to the emit_get_rgctx_* () functions, but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
2804 static MonoInst*
2805 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
2807 MonoInst *ins;
2808 int idx, dreg;
2810 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
2811 /* Load info->entries [idx] */
2812 dreg = alloc_preg (cfg);
2813 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
2815 return ins;
2818 MonoInst*
2819 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
2821 return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type);
2825 * On return the caller must check @klass for load errors.
2827 static void
2828 emit_class_init (MonoCompile *cfg, MonoClass *klass)
2830 MonoInst *vtable_arg;
2831 int context_used;
2833 context_used = mini_class_check_context_used (cfg, klass);
2835 if (context_used) {
2836 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
2837 klass, MONO_RGCTX_INFO_VTABLE);
2838 } else {
2839 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
2840 if (!is_ok (&cfg->error)) {
2841 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2842 return;
2845 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2848 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
2849 MonoInst *ins;
2852 * Using an opcode instead of emitting IR here allows hiding the call inside the opcode,
2853 * so it doesn't have to clobber any regs and it doesn't break basic blocks.
2855 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
2856 ins->sreg1 = vtable_arg->dreg;
2857 MONO_ADD_INS (cfg->cbb, ins);
2858 } else {
2859 int inited_reg;
2860 MonoBasicBlock *inited_bb;
2862 inited_reg = alloc_ireg (cfg);
2864 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
2866 NEW_BBLOCK (cfg, inited_bb);
2868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
2869 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
2871 mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg);
2873 MONO_START_BB (cfg, inited_bb);
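/*
 * The fallback path above emits the moral equivalent of (sketch):
 *
 *     if (!vtable->initialized)
 *             mono_generic_class_init (vtable);
 */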
2877 static void
2878 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
2880 MonoInst *ins;
2882 if (cfg->gen_seq_points && cfg->method == method) {
2883 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
2884 if (nonempty_stack)
2885 ins->flags |= MONO_INST_NONEMPTY_STACK;
2886 MONO_ADD_INS (cfg->cbb, ins);
2887 cfg->last_seq_point = ins;
2891 void
2892 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
2894 if (mini_debug_options.better_cast_details) {
2895 int vtable_reg = alloc_preg (cfg);
2896 int klass_reg = alloc_preg (cfg);
2897 MonoBasicBlock *is_null_bb = NULL;
2898 MonoInst *tls_get;
2900 if (null_check) {
2901 NEW_BBLOCK (cfg, is_null_bb);
2903 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2904 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2907 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
2908 if (!tls_get) {
2909 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
2910 exit (1);
2913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2914 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
2916 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2918 MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS);
2919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg);
2921 if (null_check)
2922 MONO_START_BB (cfg, is_null_bb);
2926 void
2927 mini_reset_cast_details (MonoCompile *cfg)
2929 /* Reset the variables holding the cast details */
2930 if (mini_debug_options.better_cast_details) {
2931 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
2932 /* It is enough to reset the from field */
2933 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2938 * On return the caller must check @array_class for load errors
2940 static void
2941 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2943 int vtable_reg = alloc_preg (cfg);
2944 int context_used;
2946 context_used = mini_class_check_context_used (cfg, array_class);
2948 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
2950 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2952 if (cfg->opt & MONO_OPT_SHARED) {
2953 int class_reg = alloc_preg (cfg);
2954 MonoInst *ins;
2956 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
2957 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
2958 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
2959 } else if (context_used) {
2960 MonoInst *vtable_ins;
2962 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2963 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2964 } else {
2965 if (cfg->compile_aot) {
2966 int vt_reg;
2967 MonoVTable *vtable;
2969 if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, &cfg->error))) {
2970 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2971 return;
2973 vt_reg = alloc_preg (cfg);
2974 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2975 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2976 } else {
2977 MonoVTable *vtable;
2978 if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, &cfg->error))) {
2979 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2980 return;
2982 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable);
2986 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
2988 mini_reset_cast_details (cfg);
2992 * Handles unbox of a Nullable<T>. If context_used is non-zero, then shared
2993 * generic code is generated.
2995 static MonoInst*
2996 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2998 MonoMethod* method;
3000 if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass)))
3001 method = get_method_nofail (klass, "UnboxExact", 1, 0);
3002 else
3003 method = get_method_nofail (klass, "Unbox", 1, 0);
3004 g_assert (method);
3006 if (context_used) {
3007 MonoInst *rgctx, *addr;
3009 /* FIXME: What if the class is shared? We might not
3010 have to get the address of the method from the
3011 RGCTX. */
3012 if (cfg->llvm_only) {
3013 addr = emit_get_rgctx_method (cfg, context_used, method,
3014 MONO_RGCTX_INFO_METHOD_FTNDESC);
3015 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method));
3016 return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
3017 } else {
3018 addr = emit_get_rgctx_method (cfg, context_used, method,
3019 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3020 rgctx = emit_get_rgctx (cfg, context_used);
3022 return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
3024 } else {
3025 gboolean pass_vtable, pass_mrgctx;
3026 MonoInst *rgctx_arg = NULL;
3028 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3029 g_assert (!pass_mrgctx);
3031 if (pass_vtable) {
3032 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, &cfg->error);
3034 mono_error_assert_ok (&cfg->error);
3035 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3038 return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3042 static MonoInst*
3043 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3045 MonoInst *add;
3046 int obj_reg;
3047 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3048 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3049 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3050 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3052 obj_reg = sp [0]->dreg;
3053 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3054 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3056 /* FIXME: generics */
3057 g_assert (m_class_get_rank (klass) == 0);
3059 // Check rank == 0
3060 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3061 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ());
3066 if (context_used) {
3067 MonoInst *element_class;
3069 /* This assertion is from the unboxcast insn */
3070 g_assert (m_class_get_rank (klass) == 0);
3072 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3073 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3075 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3076 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3077 } else {
3078 mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE);
3079 mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass));
3080 mini_reset_cast_details (cfg);
3083 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject));
3084 MONO_ADD_INS (cfg->cbb, add);
3085 add->type = STACK_MP;
3086 add->klass = klass;
3088 return add;
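/*
 * I.e. once the rank and element-class checks succeed, the unboxed address
 * is simply obj + MONO_ABI_SIZEOF (MonoObject): the value payload starts
 * right after the object header.
 */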
3091 static MonoInst*
3092 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3094 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3095 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3096 MonoInst *ins;
3097 int dreg, addr_reg;
3099 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3101 /* obj */
3102 args [0] = obj;
3104 /* klass */
3105 args [1] = klass_inst;
3107 /* CASTCLASS */
3108 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3110 NEW_BBLOCK (cfg, is_ref_bb);
3111 NEW_BBLOCK (cfg, is_nullable_bb);
3112 NEW_BBLOCK (cfg, end_bb);
3113 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3114 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3115 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3120 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3121 addr_reg = alloc_dreg (cfg, STACK_MP);
3123 /* Non-ref case */
3124 /* UNBOX */
3125 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject));
3126 MONO_ADD_INS (cfg->cbb, addr);
3128 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3130 /* Ref case */
3131 MONO_START_BB (cfg, is_ref_bb);
3133 /* Save the ref to a temporary */
3134 dreg = alloc_ireg (cfg);
3135 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass));
3136 addr->dreg = addr_reg;
3137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3140 /* Nullable case */
3141 MONO_START_BB (cfg, is_nullable_bb);
3144 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3145 MonoInst *unbox_call;
3146 MonoMethodSignature *unbox_sig;
3148 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3149 unbox_sig->ret = m_class_get_byval_arg (klass);
3150 unbox_sig->param_count = 1;
3151 unbox_sig->params [0] = mono_get_object_type ();
3153 if (cfg->llvm_only)
3154 unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3155 else
3156 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3158 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass));
3159 addr->dreg = addr_reg;
3162 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3164 /* End */
3165 MONO_START_BB (cfg, end_bb);
3167 /* LDOBJ */
3168 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0);
3170 return ins;
3174 * Returns NULL and sets the cfg exception on error.
3176 static MonoInst*
3177 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3179 MonoInst *iargs [2];
3180 MonoJitICallId alloc_ftn;
3182 if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) {
3183 char* full_name = mono_type_get_full_name (klass);
3184 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3185 mono_error_set_member_access (&cfg->error, "Cannot create an abstract class: %s", full_name);
3186 g_free (full_name);
3187 return NULL;
3190 if (context_used) {
3191 MonoInst *data;
3192 MonoRgctxInfoType rgctx_info;
3193 MonoInst *iargs [2];
3194 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3196 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3198 if (cfg->opt & MONO_OPT_SHARED)
3199 rgctx_info = MONO_RGCTX_INFO_KLASS;
3200 else
3201 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3202 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3204 if (cfg->opt & MONO_OPT_SHARED) {
3205 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3206 iargs [1] = data;
3207 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new;
3208 } else {
3209 iargs [0] = data;
3210 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
3213 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3214 if (known_instance_size) {
3215 int size = mono_class_instance_size (klass);
3216 if (size < MONO_ABI_SIZEOF (MonoObject))
3217 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3219 EMIT_NEW_ICONST (cfg, iargs [1], size);
3221 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3224 return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
3227 if (cfg->opt & MONO_OPT_SHARED) {
3228 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3229 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3231 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new;
3232 } else if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3233 /* This happens often in argument-checking code, e.g. throw new FooException... */
3234 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3235 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass)));
3236 alloc_ftn = MONO_JIT_ICALL_mono_helper_newobj_mscorlib;
3237 } else {
3238 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
3240 if (!is_ok (&cfg->error)) {
3241 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3242 return NULL;
3245 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3247 if (managed_alloc) {
3248 int size = mono_class_instance_size (klass);
3249 if (size < MONO_ABI_SIZEOF (MonoObject))
3250 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3252 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3253 EMIT_NEW_ICONST (cfg, iargs [1], size);
3254 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3256 alloc_ftn = MONO_JIT_ICALL_ves_icall_object_new_specific;
3257 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3260 return mono_emit_jit_icall_id (cfg, alloc_ftn, iargs);
3264 * Returns NULL and sets the cfg exception on error.
3266 MonoInst*
3267 mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3269 MonoInst *alloc, *ins;
3271 if (G_UNLIKELY (m_class_is_byreflike (klass))) {
3272 mono_error_set_bad_image (&cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass));
3273 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3274 return NULL;
3277 if (mono_class_is_nullable (klass)) {
3278 MonoMethod* method = get_method_nofail (klass, "Box", 1, 0);
3280 if (context_used) {
3281 if (cfg->llvm_only && cfg->gsharedvt) {
3282 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3283 MONO_RGCTX_INFO_METHOD_FTNDESC);
3284 return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
3285 } else {
3286 /* FIXME: What if the class is shared? We might not
3287 have to get the method address from the RGCTX. */
3288 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3289 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3290 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
3292 return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
3294 } else {
3295 gboolean pass_vtable, pass_mrgctx;
3296 MonoInst *rgctx_arg = NULL;
3298 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3299 g_assert (!pass_mrgctx);
3301 if (pass_vtable) {
3302 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, &cfg->error);
3304 mono_error_assert_ok (&cfg->error);
3305 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3308 return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3312 if (mini_is_gsharedvt_klass (klass)) {
3313 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3314 MonoInst *res, *is_ref, *src_var, *addr;
3315 int dreg;
3317 dreg = alloc_ireg (cfg);
3319 NEW_BBLOCK (cfg, is_ref_bb);
3320 NEW_BBLOCK (cfg, is_nullable_bb);
3321 NEW_BBLOCK (cfg, end_bb);
3322 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3323 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3324 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3327 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3329 /* Non-ref case */
3330 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3331 if (!alloc)
3332 return NULL;
3333 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
3334 ins->opcode = OP_STOREV_MEMBASE;
3336 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3337 res->type = STACK_OBJ;
3338 res->klass = klass;
3339 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3341 /* Ref case */
3342 MONO_START_BB (cfg, is_ref_bb);
3344 /* val is a vtype, so we have to load the value manually */
3345 src_var = get_vreg_to_inst (cfg, val->dreg);
3346 if (!src_var)
3347 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg);
3348 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3352 /* Nullable case */
3353 MONO_START_BB (cfg, is_nullable_bb);
3356 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
3357 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3358 MonoInst *box_call;
3359 MonoMethodSignature *box_sig;
3362 * klass is Nullable<T>; we need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3363 * construct that method at JIT time, so we have to do things by hand.
3365 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3366 box_sig->ret = mono_get_object_type ();
3367 box_sig->param_count = 1;
3368 box_sig->params [0] = m_class_get_byval_arg (klass);
3370 if (cfg->llvm_only)
3371 box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr);
3372 else
3373 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3374 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3375 res->type = STACK_OBJ;
3376 res->klass = klass;
3379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3381 MONO_START_BB (cfg, end_bb);
3383 return res;
3386 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3387 if (!alloc)
3388 return NULL;
3390 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
3391 return alloc;
3394 static gboolean
3395 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3397 if (cmethod->klass == mono_defaults.systemtype_class) {
3398 if (!strcmp (cmethod->name, "GetType"))
3399 return TRUE;
3401 return FALSE;
3404 G_GNUC_UNUSED MonoInst*
3405 mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag)
3407 MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass));
3408 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
3409 gboolean is_i4;
3411 switch (enum_type->type) {
3412 case MONO_TYPE_I8:
3413 case MONO_TYPE_U8:
3414 #if SIZEOF_REGISTER == 8
3415 case MONO_TYPE_I:
3416 case MONO_TYPE_U:
3417 #endif
3418 is_i4 = FALSE;
3419 break;
3420 default:
3421 is_i4 = TRUE;
3422 break;
3426 MonoInst *load = NULL, *and_, *cmp, *ceq;
3427 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3428 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3429 int dest_reg = alloc_ireg (cfg);
3431 if (enum_this) {
3432 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
3433 } else {
3434 g_assert (enum_val_reg != -1);
3435 enum_reg = enum_val_reg;
3437 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
3438 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
3439 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
3441 ceq->type = STACK_I4;
3443 if (!is_i4) {
3444 load = load ? mono_decompose_opcode (cfg, load) : NULL;
3445 and_ = mono_decompose_opcode (cfg, and_);
3446 cmp = mono_decompose_opcode (cfg, cmp);
3447 ceq = mono_decompose_opcode (cfg, ceq);
3450 return ceq;
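/*
 * This is the Enum.HasFlag fast path: for C# 'e.HasFlag (f)' the IR above
 * computes the equivalent of '(e & f) == f', using 32-bit or 64-bit ops
 * depending on the enum's underlying type.
 */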
3454 static MonoInst*
3455 emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used,
3456 MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type)
3458 MonoDelegateClassMethodPair *info;
3459 MonoJumpInfoRgctxEntry *entry;
3460 MonoInst *rgctx;
3462 info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
3463 info->klass = klass;
3464 info->method = virt_method;
3465 info->is_virtual = _virtual;
3467 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type);
3468 rgctx = emit_get_rgctx (cfg, context_used);
3470 return emit_rgctx_fetch (cfg, rgctx, entry);
3475 * Returns NULL and sets the cfg exception on error.
3477 static G_GNUC_UNUSED MonoInst*
3478 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_)
3480 MonoInst *ptr;
3481 int dreg;
3482 gpointer trampoline;
3483 MonoInst *obj, *tramp_ins;
3484 MonoDomain *domain;
3485 guint8 **code_slot;
3487 if (virtual_ && !cfg->llvm_only) {
3488 MonoMethod *invoke = mono_get_delegate_invoke_internal (klass);
3489 g_assert (invoke);
3491 // FIXME: verify & fix any issues with removing the invoke_context_used restriction
3492 if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? NULL : method))
3493 return NULL;
3496 obj = handle_alloc (cfg, klass, FALSE, invoke_context_used);
3497 if (!obj)
3498 return NULL;
3500 /* Inline the contents of mono_delegate_ctor */
3502 /* Set target field */
3503 /* Optimize away setting of NULL target */
3504 if (!MONO_INS_IS_PCONST_NULL (target)) {
3505 if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
3506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
3507 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
3509 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3510 if (cfg->gen_write_barriers) {
3511 dreg = alloc_preg (cfg);
3512 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
3513 mini_emit_write_barrier (cfg, ptr, target);
3517 /* Set method field */
3518 if (!(target_method_context_used || invoke_context_used) || cfg->llvm_only) {
3519 // If compiling with gsharing enabled, it's faster to load the method from the delegate trampoline info than to use an rgctx slot
3520 MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD);
3521 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3525 * To avoid looking up the compiled code belonging to the target method
3526 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3527 * store it, and we fill it after the method has been compiled.
3529 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3530 MonoInst *code_slot_ins;
3532 if (target_method_context_used) {
3533 code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3534 } else {
3535 domain = mono_domain_get ();
3536 mono_domain_lock (domain);
3537 if (!domain_jit_info (domain)->method_code_hash)
3538 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3539 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3540 if (!code_slot) {
3541 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
3542 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3544 mono_domain_unlock (domain);
3546 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
3548 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3551 if (cfg->llvm_only) {
3552 if (virtual_) {
3553 MonoInst *args [ ] = {
3554 obj,
3555 target,
3556 emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD)
3558 mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate_virtual, args);
3559 } else {
3560 mono_emit_jit_icall (cfg, mini_llvmonly_init_delegate, &obj);
3563 return obj;
3565 if (target_method_context_used || invoke_context_used) {
3566 tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);
3568 // This is emitted as a constant store for the non-shared case.
3569 // We copy from the delegate trampoline info as it's faster than an rgctx fetch
3570 dreg = alloc_preg (cfg);
3571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method));
3572 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg);
3573 } else if (cfg->compile_aot) {
3574 MonoDelegateClassMethodPair *del_tramp;
3576 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
3577 del_tramp->klass = klass;
3578 del_tramp->method = method;
3579 del_tramp->is_virtual = virtual_;
3580 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
3581 } else {
3582 if (virtual_)
3583 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, method);
3584 else
3585 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, method);
3586 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3589 /* Set invoke_impl field */
3590 if (virtual_) {
3591 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3592 } else {
3593 dreg = alloc_preg (cfg);
3594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
3595 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
3597 dreg = alloc_preg (cfg);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
3599 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
3602 dreg = alloc_preg (cfg);
3603 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
3604 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
3606 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3608 return obj;
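/*
 * Illustrative note (added; not in the original source): this path is reached
 * for delegate-creation IL sequences like
 *     ldftn instance void Foo::Bar ()
 *     newobj instance void SomeDelegate::.ctor (object, native int)
 * where the JIT can see both the delegate type and the target method.
 */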
3612 * handle_constrained_gsharedvt_call:
3614 * Handle constrained calls where the receiver is a gsharedvt type.
3615 * Return the instruction representing the call. Set the cfg exception on failure.
3617 static MonoInst*
3618 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
3619 gboolean *ref_emit_widen)
3621 MonoInst *ins = NULL;
3622 gboolean emit_widen = *ref_emit_widen;
3623 gboolean supported;
3625 /*
3626 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
3627 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
3628 * pack the arguments into an array, and do the rest of the work in an icall.
3629 */
3630 supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib));
3631 if (supported)
3632 supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret));
3633 if (supported) {
3634 if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) {
3635 supported = TRUE;
3636 } else {
3637 /* Allow scalar parameters and a gsharedvt first parameter */
3638 supported = MONO_TYPE_IS_PRIMITIVE (fsig->params [0]) || MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]);
3639 if (supported) {
3640 for (int i = 1; i < fsig->param_count; ++i) {
3641 if (!(fsig->params [i]->byref || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])))
3642 supported = FALSE;
3647 if (supported) {
3648 MonoInst *args [16];
3650 /*
3651 * This case handles calls to
3652 * - object:ToString()/Equals()/GetHashCode(),
3653 * - System.IComparable<T>:CompareTo()
3654 * - System.IEquatable<T>:Equals ()
3655 * plus enough simple interface calls to support AsyncTaskMethodBuilder.
3656 */
3658 args [0] = sp [0];
3659 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
3660 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
3662 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
3663 if (fsig->hasthis && fsig->param_count) {
3664 /* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean deref_arg, gpointer *args) */
3665 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
3666 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
3667 ins->dreg = alloc_preg (cfg);
3668 ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t);
3669 MONO_ADD_INS (cfg->cbb, ins);
3670 args [4] = ins;
3672 /* Only the first argument is allowed to be gsharedvt */
3673 /* args [3] = deref_arg */
3674 if (mini_is_gsharedvt_type (fsig->params [0])) {
3675 int deref_arg_reg;
3676 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3677 deref_arg_reg = alloc_preg (cfg);
3678 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
3679 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
3680 } else {
3681 EMIT_NEW_ICONST (cfg, args [3], 0);
3684 for (int i = 0; i < fsig->param_count; ++i) {
3685 int addr_reg;
3687 if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_ISSTRUCT (fsig->params [i])) {
3688 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [i + 1]->dreg, fsig->params [i]);
3689 addr_reg = ins->dreg;
3690 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg);
3691 } else {
3692 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
3695 } else {
3696 EMIT_NEW_ICONST (cfg, args [3], 0);
3697 EMIT_NEW_ICONST (cfg, args [4], 0);
3699 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
3700 emit_widen = FALSE;
3702 if (mini_is_gsharedvt_type (fsig->ret)) {
3703 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins);
3704 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) {
3705 MonoInst *add;
3707 /* Unbox */
3708 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject));
3709 MONO_ADD_INS (cfg->cbb, add);
3710 /* Load value */
3711 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
3712 MONO_ADD_INS (cfg->cbb, ins);
3713 /* ins represents the call result */
3715 } else {
3716 GSHAREDVT_FAILURE (CEE_CALLVIRT);
3719 *ref_emit_widen = emit_widen;
3721 return ins;
3723 exception_exit:
3724 return NULL;
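/*
 * mono_emit_load_got_addr:
 *
 *   Summary added for clarity (inferred from the code below): materialize the
 * GOT address into cfg->got_var at the start of the entry bblock, and keep the
 * variable alive across the whole method with a dummy use in the exit bblock.
 */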
3727 static void
3728 mono_emit_load_got_addr (MonoCompile *cfg)
3730 MonoInst *getaddr, *dummy_use;
3732 if (!cfg->got_var || cfg->got_var_allocated)
3733 return;
3735 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3736 getaddr->cil_code = cfg->header->code;
3737 getaddr->dreg = cfg->got_var->dreg;
3739 /* Add it to the start of the first bblock */
3740 if (cfg->bb_entry->code) {
3741 getaddr->next = cfg->bb_entry->code;
3742 cfg->bb_entry->code = getaddr;
3744 else
3745 MONO_ADD_INS (cfg->bb_entry, getaddr);
3747 cfg->got_var_allocated = TRUE;
3749 /*
3750 * Add a dummy use to keep the got_var alive, since real uses might
3751 * only be generated by the back ends.
3752 * Add it to end_bblock, so the variable's lifetime covers the whole
3753 * method.
3754 * It would be better to make the usage of the got var explicit in all
3755 * cases when the backend needs it (i.e. calls, throw etc.), so this
3756 * wouldn't be needed.
3757 */
3758 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3759 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3762 static int inline_limit, llvm_jit_inline_limit;
3763 static gboolean inline_limit_inited;
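/*
 * mono_method_check_inlining:
 *
 *   Summary added for clarity (inferred from the checks below): return TRUE if
 * METHOD is considered safe and small enough to inline into CFG. The size limit
 * defaults to INLINE_LENGTH_LIMIT (LLVM_JIT_INLINE_LENGTH_LIMIT for the LLVM
 * JIT) and can be overridden with the MONO_INLINELIMIT environment variable,
 * e.g. MONO_INLINELIMIT=40 mono app.exe (illustrative invocation).
 */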
3765 static gboolean
3766 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3768 MonoMethodHeaderSummary header;
3769 MonoVTable *vtable;
3770 int limit;
3771 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3772 MonoMethodSignature *sig = mono_method_signature_internal (method);
3773 int i;
3774 #endif
3776 if (cfg->disable_inline)
3777 return FALSE;
3778 if (cfg->gsharedvt)
3779 return FALSE;
3781 if (cfg->inline_depth > 10)
3782 return FALSE;
3784 if (!mono_method_get_header_summary (method, &header))
3785 return FALSE;
3787 /* runtime, icall and pinvoke are checked by the summary call */
3788 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3789 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3790 (mono_class_is_marshalbyref (method->klass)) ||
3791 header.has_clauses)
3792 return FALSE;
3794 if (method->flags & METHOD_ATTRIBUTE_REQSECOBJ)
3795 /* Used to mark methods containing StackCrawlMark locals */
3796 return FALSE;
3798 /* also consider num_locals? */
3799 /* Do the size check early to avoid creating vtables */
3800 if (!inline_limit_inited) {
3801 char *inlinelimit;
3802 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
3803 inline_limit = atoi (inlinelimit);
3804 llvm_jit_inline_limit = inline_limit;
3805 g_free (inlinelimit);
3806 } else {
3807 inline_limit = INLINE_LENGTH_LIMIT;
3808 llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT;
3810 inline_limit_inited = TRUE;
3813 if (COMPILE_LLVM (cfg) && !cfg->compile_aot)
3814 limit = llvm_jit_inline_limit;
3815 else
3816 limit = inline_limit;
3817 if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
3818 return FALSE;
3820 /*
3821 * if we can initialize the class of the method right away, we do;
3822 * otherwise we don't allow inlining if the class needs initialization,
3823 * since it would mean inserting a call to mono_runtime_class_init()
3824 * inside the inlined code
3825 */
3826 if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass))
3827 return FALSE;
3829 if (!(cfg->opt & MONO_OPT_SHARED)) {
3830 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
3831 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
3832 if (m_class_has_cctor (method->klass)) {
3833 ERROR_DECL (error);
3834 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3835 if (!is_ok (error)) {
3836 mono_error_cleanup (error);
3837 return FALSE;
3839 if (!cfg->compile_aot) {
3840 if (!mono_runtime_class_init_full (vtable, error)) {
3841 mono_error_cleanup (error);
3842 return FALSE;
3846 } else if (mono_class_is_before_field_init (method->klass)) {
3847 if (cfg->run_cctors && m_class_has_cctor (method->klass)) {
3848 ERROR_DECL (error);
3849 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3850 if (!m_class_get_runtime_info (method->klass))
3851 /* No vtable created yet */
3852 return FALSE;
3853 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3854 if (!is_ok (error)) {
3855 mono_error_cleanup (error);
3856 return FALSE;
3858 /* This makes it so that inlining cannot trigger */
3859 /* .cctors: too many apps depend on them */
3860 /* running in a specific order... */
3861 if (! vtable->initialized)
3862 return FALSE;
3863 if (!mono_runtime_class_init_full (vtable, error)) {
3864 mono_error_cleanup (error);
3865 return FALSE;
3868 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3869 ERROR_DECL (error);
3870 if (!m_class_get_runtime_info (method->klass))
3871 /* No vtable created yet */
3872 return FALSE;
3873 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3874 if (!is_ok (error)) {
3875 mono_error_cleanup (error);
3876 return FALSE;
3878 if (!vtable->initialized)
3879 return FALSE;
3881 } else {
3882 /*
3883 * If we're compiling shared code,
3884 * the cctor will need to be run at AOT method load time, for example,
3885 * or at the end of the compilation of the inlining method.
3886 */
3887 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
3888 return FALSE;
3891 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3892 if (mono_arch_is_soft_float ()) {
3893 /* FIXME: */
3894 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3895 return FALSE;
3896 for (i = 0; i < sig->param_count; ++i)
3897 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3898 return FALSE;
3900 #endif
3902 if (g_list_find (cfg->dont_inline, method))
3903 return FALSE;
3905 if (mono_profiler_get_call_instrumentation_flags (method))
3906 return FALSE;
3908 if (mono_profiler_coverage_instrumentation_enabled (method))
3909 return FALSE;
3911 #if ENABLE_NETCORE
3912 if (!cfg->ret_var_set)
3913 return FALSE;
3914 #endif
3916 return TRUE;
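/*
 * mini_field_access_needs_cctor_run:
 *
 *   Summary added for clarity (inferred from the code below): return TRUE if a
 * static field access in METHOD must be preceded by a class-init check for
 * KLASS, i.e. if the cctor might not have run yet at that point.
 */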
3919 static gboolean
3920 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
3922 if (!cfg->compile_aot) {
3923 g_assert (vtable);
3924 if (vtable->initialized)
3925 return FALSE;
3928 if (mono_class_is_before_field_init (klass)) {
3929 if (cfg->method == method)
3930 return FALSE;
3933 if (!mono_class_needs_cctor_run (klass, method))
3934 return FALSE;
3936 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
3937 /* The initialization is already done before the method is called */
3938 return FALSE;
3940 return TRUE;
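/*
 * mini_emit_sext_index_reg:
 *
 *   Summary added for clarity (inferred from the code below): adjust an array
 * index vreg to pointer width, sign-extending I4 indexes on 64 bit targets and
 * narrowing I8 indexes on 32 bit targets; returns the vreg holding the result.
 */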
3943 static int
3944 mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index)
3946 int index_reg = index->dreg;
3947 int index2_reg;
3949 #if SIZEOF_REGISTER == 8
3950 /* The array reg is 64 bits but the index reg is only 32 */
3951 if (COMPILE_LLVM (cfg)) {
3953 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
3954 * during OP_BOUNDS_CHECK decomposition, and in the implementation
3955 * of OP_X86_LEA for llvm.
3957 index2_reg = index_reg;
3958 } else {
3959 index2_reg = alloc_preg (cfg);
3960 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3962 #else
3963 if (index->type == STACK_I8) {
3964 index2_reg = alloc_preg (cfg);
3965 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3966 } else {
3967 index2_reg = index_reg;
3969 #endif
3971 return index2_reg;
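/*
 * mini_emit_ldelema_1_ins:
 *
 *   Note added for clarity (inferred from the code below): compute the address
 * of ARR [INDEX] for a rank 1 array, conceptually
 *     &arr->vector + index * element_size
 * with an optional bounds check against arr->max_length; on x86/amd64 a single
 * LEA is used when element_size is 1, 2, 4 or 8.
 */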
3974 MonoInst*
3975 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3977 MonoInst *ins;
3978 guint32 size;
3979 int mult_reg, add_reg, array_reg, index2_reg;
3980 int context_used;
3982 if (mini_is_gsharedvt_variable_klass (klass)) {
3983 size = -1;
3984 } else {
3985 mono_class_init_internal (klass);
3986 size = mono_class_array_element_size (klass);
3989 mult_reg = alloc_preg (cfg);
3990 array_reg = arr->dreg;
3992 index2_reg = mini_emit_sext_index_reg (cfg, index);
3994 if (bcheck)
3995 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3997 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3998 if (size == 1 || size == 2 || size == 4 || size == 8) {
3999 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4001 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4002 ins->klass = klass;
4003 ins->type = STACK_MP;
4005 return ins;
4007 #endif
4009 add_reg = alloc_ireg_mp (cfg);
4011 if (size == -1) {
4012 MonoInst *rgctx_ins;
4014 /* gsharedvt */
4015 g_assert (cfg->gshared);
4016 context_used = mini_class_check_context_used (cfg, klass);
4017 g_assert (context_used);
4018 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4019 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4020 } else {
4021 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4023 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4024 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4025 ins->klass = klass;
4026 ins->type = STACK_MP;
4027 MONO_ADD_INS (cfg->cbb, ins);
4029 return ins;
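/*
 * mini_emit_ldelema_2_ins:
 *
 *   Note added for clarity (inferred from the code below): compute the address
 * of ARR [I, J] for a rank 2 array with bounds info, conceptually
 *     &arr->vector + (((i - lo1) * len2) + (j - lo2)) * element_size
 * raising IndexOutOfRangeException when an adjusted index is out of range.
 */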
4032 static MonoInst*
4033 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4035 int bounds_reg = alloc_preg (cfg);
4036 int add_reg = alloc_ireg_mp (cfg);
4037 int mult_reg = alloc_preg (cfg);
4038 int mult2_reg = alloc_preg (cfg);
4039 int low1_reg = alloc_preg (cfg);
4040 int low2_reg = alloc_preg (cfg);
4041 int high1_reg = alloc_preg (cfg);
4042 int high2_reg = alloc_preg (cfg);
4043 int realidx1_reg = alloc_preg (cfg);
4044 int realidx2_reg = alloc_preg (cfg);
4045 int sum_reg = alloc_preg (cfg);
4046 int index1, index2;
4047 MonoInst *ins;
4048 guint32 size;
4050 mono_class_init_internal (klass);
4051 size = mono_class_array_element_size (klass);
4053 index1 = index_ins1->dreg;
4054 index2 = index_ins2->dreg;
4056 #if SIZEOF_REGISTER == 8
4057 /* The array reg is 64 bits but the index reg is only 32 */
4058 if (COMPILE_LLVM (cfg)) {
4059 /* Not needed */
4060 } else {
4061 int tmpreg = alloc_preg (cfg);
4062 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4063 index1 = tmpreg;
4064 tmpreg = alloc_preg (cfg);
4065 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4066 index2 = tmpreg;
4068 #else
4069 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4070 #endif
4072 /* range checking */
4073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4074 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4076 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4077 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4078 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4079 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4080 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4081 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4082 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4084 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4085 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4086 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4087 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4088 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4089 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4090 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4092 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4093 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4094 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4095 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4096 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4098 ins->type = STACK_MP;
4099 ins->klass = klass;
4100 MONO_ADD_INS (cfg->cbb, ins);
4102 return ins;
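/*
 * mini_emit_ldelema_ins:
 *
 *   Summary added for clarity (inferred from the code below): compute the
 * element address for an array Get/Set/Address call; rank 1 and (with the
 * INTRINS optimization) rank 2 are emitted inline, higher ranks go through a
 * marshalling helper returned by mono_marshal_get_array_address ().
 */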
4105 static MonoInst*
4106 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, guchar *ip, gboolean is_set)
4108 int rank;
4109 MonoInst *addr;
4110 MonoMethod *addr_method;
4111 int element_size;
4112 MonoClass *eclass = m_class_get_element_class (cmethod->klass);
4114 rank = mono_method_signature_internal (cmethod)->param_count - (is_set? 1: 0);
4116 if (rank == 1)
4117 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4119 /* emit_ldelema_2 depends on OP_LMUL */
4120 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4121 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4124 if (mini_is_gsharedvt_variable_klass (eclass))
4125 element_size = 0;
4126 else
4127 element_size = mono_class_array_element_size (eclass);
4128 addr_method = mono_marshal_get_array_address (rank, element_size);
4129 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4131 return addr;
4134 static gboolean
4135 mini_class_is_reference (MonoClass *klass)
4137 return mini_type_is_reference (m_class_get_byval_arg (klass));
4140 MonoInst*
4141 mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4143 if (safety_checks && mini_class_is_reference (klass) &&
4144 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4145 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4146 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4147 MonoInst *iargs [3];
4149 if (!helper->slot)
4150 mono_class_setup_vtable (obj_array);
4151 g_assert (helper->slot);
4153 if (sp [0]->type != STACK_OBJ)
4154 return NULL;
4155 if (sp [2]->type != STACK_OBJ)
4156 return NULL;
4158 iargs [2] = sp [2];
4159 iargs [1] = sp [1];
4160 iargs [0] = sp [0];
4162 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4163 } else {
4164 MonoInst *ins;
4166 if (mini_is_gsharedvt_variable_klass (klass)) {
4167 MonoInst *addr;
4169 // FIXME-VT: OP_ICONST optimization
4170 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4171 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
4172 ins->opcode = OP_STOREV_MEMBASE;
4173 } else if (sp [1]->opcode == OP_ICONST) {
4174 int array_reg = sp [0]->dreg;
4175 int index_reg = sp [1]->dreg;
4176 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4178 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg) && sp [1]->inst_c0 < 0)
4179 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4181 if (safety_checks)
4182 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4183 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset, sp [2]->dreg);
4184 } else {
4185 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4186 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
4187 if (mini_class_is_reference (klass))
4188 mini_emit_write_barrier (cfg, addr, sp [2]);
4190 return ins;
4194 MonoInst*
4195 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4197 MonoInst *ins = NULL;
4198 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4199 MONO_ADD_INS (cfg->cbb, ins);
4200 ins->backend.memory_barrier_kind = kind;
4202 return ins;
4205 /*
4206 * This entry point could be used later for arbitrary method
4207 * redirection.
4208 */
4209 inline static MonoInst*
4210 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4211 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
4213 if (method->klass == mono_defaults.string_class) {
4214 /* managed string allocation support */
4215 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
4216 MonoInst *iargs [2];
4217 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, &cfg->error);
4218 MonoMethod *managed_alloc = NULL;
4220 mono_error_assert_ok (&cfg->error); /* Should not fail since it is System.String */
4221 #ifndef MONO_CROSS_COMPILE
4222 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
4223 #endif
4224 if (!managed_alloc)
4225 return NULL;
4226 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4227 iargs [1] = args [0];
4228 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
4231 return NULL;
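/*
 * mono_save_args:
 *
 *   Summary added for clarity (inferred from the code below): during inlining,
 * copy the caller-provided stack values in SP into fresh OP_LOCAL variables
 * which then serve as the inlined method's arguments (cfg->args).
 */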
4234 static void
4235 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4237 MonoInst *store, *temp;
4238 int i;
4240 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4241 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4243 /*
4244 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4245 * would be different from the MonoInsts used to represent arguments, and
4246 * the ldelema implementation can't deal with that.
4247 * Solution: When ldelema is used on an inline argument, create a var for
4248 * it, emit ldelema on that var, and emit the saving code below in
4249 * inline_method () if needed.
4250 */
4251 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4252 cfg->args [i] = temp;
4253 /* This uses cfg->args [i] which is set by the preceding line */
4254 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4255 store->cil_code = sp [0]->cil_code;
4256 sp++;
4260 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4261 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4263 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4264 static gboolean
4265 check_inline_called_method_name_limit (MonoMethod *called_method)
4267 int strncmp_result;
4268 static const char *limit = NULL;
4270 if (limit == NULL) {
4271 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4273 if (limit_string != NULL)
4274 limit = limit_string;
4275 else
4276 limit = "";
4279 if (limit [0] != '\0') {
4280 char *called_method_name = mono_method_full_name (called_method, TRUE);
4282 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4283 g_free (called_method_name);
4285 //return (strncmp_result <= 0);
4286 return (strncmp_result == 0);
4287 } else {
4288 return TRUE;
4291 #endif
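/*
 * Illustrative usage (added; not in the original source): both name limits
 * match on a prefix of mono_method_full_name (), e.g.
 *     MONO_INLINE_CALLED_METHOD_NAME_LIMIT="System.String" mono app.exe
 * restricts inlining to callees whose full name starts with that prefix.
 */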
4293 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4294 static gboolean
4295 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4297 int strncmp_result;
4298 static const char *limit = NULL;
4300 if (limit == NULL) {
4301 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4302 if (limit_string != NULL) {
4303 limit = limit_string;
4304 } else {
4305 limit = "";
4309 if (limit [0] != '\0') {
4310 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4312 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4313 g_free (caller_method_name);
4315 //return (strncmp_result <= 0);
4316 return (strncmp_result == 0);
4317 } else {
4318 return TRUE;
4321 #endif
4323 static void
4324 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
4326 static double r8_0 = 0.0;
4327 static float r4_0 = 0.0;
4328 MonoInst *ins;
4329 int t;
4331 rtype = mini_get_underlying_type (rtype);
4332 t = rtype->type;
4334 if (rtype->byref) {
4335 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
4336 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
4337 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4338 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
4339 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
4340 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
4341 MONO_INST_NEW (cfg, ins, OP_R4CONST);
4342 ins->type = STACK_R4;
4343 ins->inst_p0 = (void*)&r4_0;
4344 ins->dreg = dreg;
4345 MONO_ADD_INS (cfg->cbb, ins);
4346 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
4347 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4348 ins->type = STACK_R8;
4349 ins->inst_p0 = (void*)&r8_0;
4350 ins->dreg = dreg;
4351 MONO_ADD_INS (cfg->cbb, ins);
4352 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
4353 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
4354 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
4355 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
4356 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
4357 } else {
4358 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
4362 static void
4363 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
4365 int t;
4367 rtype = mini_get_underlying_type (rtype);
4368 t = rtype->type;
4370 if (rtype->byref) {
4371 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
4372 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
4373 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
4374 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
4375 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
4376 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
4377 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
4378 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
4379 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
4380 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
4381 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
4382 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
4383 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
4384 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
4385 } else {
4386 emit_init_rvar (cfg, dreg, rtype);
4390 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
4391 static void
4392 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
4394 MonoInst *var = cfg->locals [local];
4395 if (COMPILE_SOFT_FLOAT (cfg)) {
4396 MonoInst *store;
4397 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
4398 emit_init_rvar (cfg, reg, type);
4399 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
4400 } else {
4401 if (init)
4402 emit_init_rvar (cfg, var->dreg, type);
4403 else
4404 emit_dummy_init_rvar (cfg, var->dreg, type);
4408 int
4409 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
4411 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
4414 /*
4415 * inline_method:
4417 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
4418 */
4419 static int
4420 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4421 guchar *ip, guint real_offset, gboolean inline_always)
4423 ERROR_DECL (error);
4424 MonoInst *ins, *rvar = NULL;
4425 MonoMethodHeader *cheader;
4426 MonoBasicBlock *ebblock, *sbblock;
4427 int i, costs;
4428 MonoInst **prev_locals, **prev_args;
4429 MonoType **prev_arg_types;
4430 guint prev_real_offset;
4431 GHashTable *prev_cbb_hash;
4432 MonoBasicBlock **prev_cil_offset_to_bb;
4433 MonoBasicBlock *prev_cbb;
4434 const guchar *prev_ip;
4435 guchar *prev_cil_start;
4436 guint32 prev_cil_offset_to_bb_len;
4437 MonoMethod *prev_current_method;
4438 MonoGenericContext *prev_generic_context;
4439 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
4441 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4443 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4444 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4445 return 0;
4446 #endif
4447 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4448 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4449 return 0;
4450 #endif
4452 if (!fsig)
4453 fsig = mono_method_signature_internal (cmethod);
4455 if (cfg->verbose_level > 2)
4456 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4458 if (!cmethod->inline_info) {
4459 cfg->stat_inlineable_methods++;
4460 cmethod->inline_info = 1;
4463 /* allocate local variables */
4464 cheader = mono_method_get_header_checked (cmethod, error);
4465 if (!cheader) {
4466 if (inline_always) {
4467 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
4468 mono_error_move (&cfg->error, error);
4469 } else {
4470 mono_error_cleanup (error);
4472 return 0;
4475 /*Must verify before creating locals, as invalid IL can cause the JIT to assert.*/
4476 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4477 mono_metadata_free_mh (cheader);
4478 return 0;
4481 /* allocate space to store the return value */
4482 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4483 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4486 prev_locals = cfg->locals;
4487 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4488 for (i = 0; i < cheader->num_locals; ++i)
4489 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4491 /* allocate start and end blocks */
4492 /* This is needed so if the inline is aborted, we can clean up */
4493 NEW_BBLOCK (cfg, sbblock);
4494 sbblock->real_offset = real_offset;
4496 NEW_BBLOCK (cfg, ebblock);
4497 ebblock->block_num = cfg->num_bblocks++;
4498 ebblock->real_offset = real_offset;
4500 prev_args = cfg->args;
4501 prev_arg_types = cfg->arg_types;
4502 prev_ret_var_set = cfg->ret_var_set;
4503 prev_real_offset = cfg->real_offset;
4504 prev_cbb_hash = cfg->cbb_hash;
4505 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4506 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4507 prev_cil_start = cfg->cil_start;
4508 prev_ip = cfg->ip;
4509 prev_cbb = cfg->cbb;
4510 prev_current_method = cfg->current_method;
4511 prev_generic_context = cfg->generic_context;
4512 prev_disable_inline = cfg->disable_inline;
4514 cfg->ret_var_set = FALSE;
4515 cfg->inline_depth ++;
4517 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
4518 virtual_ = TRUE;
4520 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
4522 ret_var_set = cfg->ret_var_set;
4524 cfg->real_offset = prev_real_offset;
4525 cfg->cbb_hash = prev_cbb_hash;
4526 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4527 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4528 cfg->cil_start = prev_cil_start;
4529 cfg->ip = prev_ip;
4530 cfg->locals = prev_locals;
4531 cfg->args = prev_args;
4532 cfg->arg_types = prev_arg_types;
4533 cfg->current_method = prev_current_method;
4534 cfg->generic_context = prev_generic_context;
4535 cfg->ret_var_set = prev_ret_var_set;
4536 cfg->disable_inline = prev_disable_inline;
4537 cfg->inline_depth --;
4539 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
4540 if (cfg->verbose_level > 2)
4541 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4543 mono_error_assert_ok (&cfg->error);
4545 cfg->stat_inlined_methods++;
4547 /* always add some code to avoid block split failures */
4548 MONO_INST_NEW (cfg, ins, OP_NOP);
4549 MONO_ADD_INS (prev_cbb, ins);
4551 prev_cbb->next_bb = sbblock;
4552 link_bblock (cfg, prev_cbb, sbblock);
4555 * Get rid of the begin and end bblocks if possible to aid local
4556 * optimizations.
4558 if (prev_cbb->out_count == 1)
4559 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4561 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4562 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4564 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4565 MonoBasicBlock *prev = ebblock->in_bb [0];
4567 if (prev->next_bb == ebblock) {
4568 mono_merge_basic_blocks (cfg, prev, ebblock);
4569 cfg->cbb = prev;
4570 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4571 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4572 cfg->cbb = prev_cbb;
4574 } else {
4575 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
4576 cfg->cbb = ebblock;
4578 } else {
4579 /*
4580 * It's possible that the rvar is set in some prev bblocks, but not in others.
4581 * (#1835).
4582 */
4583 if (rvar) {
4584 MonoBasicBlock *bb;
4586 for (i = 0; i < ebblock->in_count; ++i) {
4587 bb = ebblock->in_bb [i];
4589 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
4590 cfg->cbb = bb;
4592 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
4597 cfg->cbb = ebblock;
4600 if (rvar) {
4601 /*
4602 * If the inlined method contains only a throw, then the ret var is not
4603 * set, so set it to a dummy value.
4604 */
4605 if (!ret_var_set)
4606 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
4608 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4609 *sp++ = ins;
4611 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4612 return costs + 1;
4613 } else {
4614 if (cfg->verbose_level > 2) {
4615 const char *msg = mono_error_get_message (&cfg->error);
4616 printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod, TRUE), costs, msg ? msg : "");
4618 cfg->exception_type = MONO_EXCEPTION_NONE;
4620 clear_cfg_error (cfg);
4622 /* This gets rid of the newly added bblocks */
4623 cfg->cbb = prev_cbb;
4625 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4626 return 0;
4627 }
4629 /*
4630 * Some of these comments may well be out-of-date.
4631 * Design decisions: we do a single pass over the IL code (and we do bblock
4632 * splitting/merging in the few cases when it's required: a back jump to an IL
4633 * address that was not already seen as bblock starting point).
4634 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4635 * Complex operations are decomposed into simpler ones right away. We need to let the
4636 * arch-specific code peek and poke inside this process somehow (except when the
4637 * optimizations can take advantage of the full semantic info of coarse opcodes).
4638 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4639 * MonoInst->opcode initially is the IL opcode or some simplification of that
4640 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4641 * opcode with value bigger than OP_LAST.
4642 * At this point the IR can be handed over to an interpreter, a dumb code generator
4643 * or to the optimizing code generator that will translate it to SSA form.
4645 * Profile-directed optimizations.
4646 * We may compile by default with few or no optimizations and instrument the code,
4647 * or the user may indicate which methods to optimize the most, either in a config file
4648 * or through repeated runs where the compiler applies the optimizations offline to
4649 * each method and then decides if it was worth it.
4650 */
4652 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4653 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4654 #define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED
4655 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4656 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4657 #define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED
4658 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4659 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
4661 /* opcode-value offset from the short br.s-style branches to the corresponding long br forms */
4662 #define BIG_BRANCH_OFFSET 13
4664 static gboolean
4665 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4667 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4669 return b == NULL || b == bb;
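/*
 * get_basic_blocks:
 *
 *   Summary added for clarity (inferred from the code below): a single forward
 * scan over the IL that creates a bblock for every branch/switch target and
 * fall-through point, and marks bblocks ending in CEE_THROW as out-of-line.
 * Returns nonzero and sets *pos on invalid IL.
 */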
4672 static int
4673 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, guchar *start, guchar *end, guchar **pos)
4675 guchar *ip = start;
4676 guchar *target;
4677 int i;
4678 guint cli_addr;
4679 MonoBasicBlock *bblock;
4680 const MonoOpcode *opcode;
4682 while (ip < end) {
4683 cli_addr = ip - start;
4684 i = mono_opcode_value ((const guint8 **)&ip, end);
4685 if (i < 0)
4686 UNVERIFIED;
4687 opcode = &mono_opcodes [i];
4688 switch (opcode->argument) {
4689 case MonoInlineNone:
4690 ip++;
4691 break;
4692 case MonoInlineString:
4693 case MonoInlineType:
4694 case MonoInlineField:
4695 case MonoInlineMethod:
4696 case MonoInlineTok:
4697 case MonoInlineSig:
4698 case MonoShortInlineR:
4699 case MonoInlineI:
4700 ip += 5;
4701 break;
4702 case MonoInlineVar:
4703 ip += 3;
4704 break;
4705 case MonoShortInlineVar:
4706 case MonoShortInlineI:
4707 ip += 2;
4708 break;
4709 case MonoShortInlineBrTarget:
4710 target = start + cli_addr + 2 + (signed char)ip [1];
4711 GET_BBLOCK (cfg, bblock, target);
4712 ip += 2;
4713 if (ip < end)
4714 GET_BBLOCK (cfg, bblock, ip);
4715 break;
4716 case MonoInlineBrTarget:
4717 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4718 GET_BBLOCK (cfg, bblock, target);
4719 ip += 5;
4720 if (ip < end)
4721 GET_BBLOCK (cfg, bblock, ip);
4722 break;
4723 case MonoInlineSwitch: {
4724 guint32 n = read32 (ip + 1);
4725 guint32 j;
4726 ip += 5;
4727 cli_addr += 5 + 4 * n;
4728 target = start + cli_addr;
4729 GET_BBLOCK (cfg, bblock, target);
4731 for (j = 0; j < n; ++j) {
4732 target = start + cli_addr + (gint32)read32 (ip);
4733 GET_BBLOCK (cfg, bblock, target);
4734 ip += 4;
4736 break;
4738 case MonoInlineR:
4739 case MonoInlineI8:
4740 ip += 9;
4741 break;
4742 default:
4743 g_assert_not_reached ();
4746 if (i == CEE_THROW) {
4747 guchar *bb_start = ip - 1;
4749 /* Find the start of the bblock containing the throw */
4750 bblock = NULL;
4751 while ((bb_start >= start) && !bblock) {
4752 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4753 bb_start --;
4755 if (bblock)
4756 bblock->out_of_line = 1;
4759 return 0;
4760 unverified:
4761 exception_exit:
4762 *pos = ip;
4763 return 1;
4766 static inline MonoMethod *
4767 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
4769 MonoMethod *method;
4771 error_init (error);
4773 if (m->wrapper_type != MONO_WRAPPER_NONE) {
4774 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
4775 if (context) {
4776 method = mono_class_inflate_generic_method_checked (method, context, error);
4778 } else {
4779 method = mono_get_method_checked (m_class_get_image (m->klass), token, klass, context, error);
4782 return method;
4785 static inline MonoMethod *
4786 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4788 ERROR_DECL (error);
4789 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : error);
4791 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) {
4792 mono_error_set_bad_image (&cfg->error, m_class_get_image (cfg->method->klass), "Method with open type while not compiling gshared");
4793 method = NULL;
4796 if (!method && !cfg)
4797 mono_error_cleanup (error); /* FIXME don't swallow the error */
4799 return method;
4802 static inline MonoMethodSignature*
4803 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
4805 MonoMethodSignature *fsig;
4807 error_init (error);
4808 if (method->wrapper_type != MONO_WRAPPER_NONE) {
4809 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
4810 } else {
4811 fsig = mono_metadata_parse_signature_checked (m_class_get_image (method->klass), token, error);
4812 return_val_if_nok (error, NULL);
4814 if (context) {
4815 fsig = mono_inflate_generic_signature(fsig, context, error);
4817 return fsig;
4820 static MonoMethod*
4821 throw_exception (void)
4823 static MonoMethod *method = NULL;
4825 if (!method) {
4826 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4827 method = get_method_nofail (secman->securitymanager, "ThrowException", 1, 0);
4829 g_assert (method);
4830 return method;
4833 static void
4834 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4836 MonoMethod *thrower = throw_exception ();
4837 MonoInst *args [1];
4839 EMIT_NEW_PCONST (cfg, args [0], ex);
4840 mono_emit_method_call (cfg, thrower, args, NULL);
4843 /*
4844 * Return the original method if a wrapper is specified. We can only access
4845 * the custom attributes from the original method.
4846 */
4847 static MonoMethod*
4848 get_original_method (MonoMethod *method)
4850 if (method->wrapper_type == MONO_WRAPPER_NONE)
4851 return method;
4853 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4854 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4855 return NULL;
4857 /* in other cases we need to find the original method */
4858 return mono_marshal_method_from_wrapper (method);
4861 static void
4862 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
4864 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4865 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4866 if (ex)
4867 emit_throw_exception (cfg, ex);
4870 static void
4871 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4873 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4874 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4875 if (ex)
4876 emit_throw_exception (cfg, ex);
4879 static guchar*
4880 il_read_op (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op)
4881 // If the opcode at ip is desired_il_op, return the ip past it, else NULL.
4883 if (G_LIKELY (ip < end) && G_UNLIKELY (*ip == first_byte)) {
4884 MonoOpcodeEnum il_op = MonoOpcodeEnum_Invalid;
4885 // mono_opcode_value_and_size updates ip, but not in the expected way.
4886 const guchar *temp_ip = ip;
4887 const int size = mono_opcode_value_and_size (&temp_ip, end, &il_op);
4888 return (G_LIKELY (size > 0) && G_UNLIKELY (il_op == desired_il_op)) ? (ip + size) : NULL;
4890 return NULL;
4893 static guchar*
4894 il_read_op_and_token (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, guint32 *token)
4896 ip = il_read_op (ip, end, first_byte, desired_il_op);
4897 if (ip)
4898 *token = read32 (ip - 4); // could be +1 or +2 from start
4899 return ip;
4902 static guchar*
4903 il_read_branch_and_target (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, int size, guchar **target)
4905 ip = il_read_op (ip, end, first_byte, desired_il_op);
4906 if (ip) {
4907 gint32 delta = 0;
4908 switch (size) {
4909 case 1:
4910 delta = (signed char)ip [-1];
4911 break;
4912 case 4:
4913 delta = (gint32)read32 (ip - 4);
4914 break;
4916 // FIXME verify it is within the function and start of an instruction.
4917 *target = ip + delta;
4918 return ip;
4920 return NULL;
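/*
 * Note added for clarity: the il_read_* macros below let callers pattern-match
 * short IL sequences, e.g. (illustrative)
 *     if ((ip = il_read_dup (ip, end)) && (ip = il_read_ldtoken (ip, end, &token)))
 *         ... ip is now past a "dup; ldtoken" pair ...
 */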
4923 #define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target))
4924 #define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target))
4925 #define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target))
4926 #define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target))
4927 #define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP))
4928 #define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token))
4929 #define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token))
4930 #define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token))
4931 #define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token))
4932 #define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token))
4933 #define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token))
4935 /*
4936 * Check that the IL instructions at ip are the array initialization
4937 * sequence and return the pointer to the data and the size.
4938 */
4939 static const char*
4940 initialize_array_data (MonoCompile *cfg, MonoMethod *method, gboolean aot, guchar *ip,
4941 guchar *end, MonoClass *klass, guint32 len, int *out_size,
4942 guint32 *out_field_token, MonoOpcodeEnum *il_op, guchar **next_ip)
4943 {
4944 /*
4945 * newarr[System.Int32]
4946 * dup
4947 * ldtoken field valuetype ...
4948 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4949 */
4951 guint32 token;
4952 guint32 field_token;
4954 if ((ip = il_read_dup (ip, end))
4955 && ip_in_bb (cfg, cfg->cbb, ip)
4956 && (ip = il_read_ldtoken (ip, end, &field_token))
4957 && IS_FIELD_DEF (field_token)
4958 && ip_in_bb (cfg, cfg->cbb, ip)
4959 && (ip = il_read_call (ip, end, &token))) {
4960 ERROR_DECL (error);
4961 guint32 rva;
4962 const char *data_ptr;
4963 int size = 0;
4964 MonoMethod *cmethod;
4965 MonoClass *dummy_class;
4966 MonoClassField *field = mono_field_from_token_checked (m_class_get_image (method->klass), field_token, &dummy_class, NULL, error);
4967 int dummy_align;
4969 if (!field) {
4970 mono_error_cleanup (error); /* FIXME don't swallow the error */
4971 return NULL;
4974 *out_field_token = field_token;
4976 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4977 if (!cmethod)
4978 return NULL;
4979 if (strcmp (cmethod->name, "InitializeArray") || strcmp (m_class_get_name (cmethod->klass), "RuntimeHelpers") || m_class_get_image (cmethod->klass) != mono_defaults.corlib)
4980 return NULL;
4981 switch (mini_get_underlying_type (m_class_get_byval_arg (klass))->type) {
4982 case MONO_TYPE_I1:
4983 case MONO_TYPE_U1:
4984 size = 1; break;
4985 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4986 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4987 case MONO_TYPE_I2:
4988 case MONO_TYPE_U2:
4989 size = 2; break;
4990 case MONO_TYPE_I4:
4991 case MONO_TYPE_U4:
4992 case MONO_TYPE_R4:
4993 size = 4; break;
4994 case MONO_TYPE_R8:
4995 case MONO_TYPE_I8:
4996 case MONO_TYPE_U8:
4997 size = 8; break;
4998 #endif
4999 default:
5000 return NULL;
5002 size *= len;
5003 if (size > mono_type_size (field->type, &dummy_align))
5004 return NULL;
5005 *out_size = size;
5006 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5007 MonoImage *method_klass_image = m_class_get_image (method->klass);
5008 if (!image_is_dynamic (method_klass_image)) {
5009 guint32 field_index = mono_metadata_token_index (field_token);
5010 mono_metadata_field_info (method_klass_image, field_index - 1, NULL, &rva, NULL);
5011 data_ptr = mono_image_rva_map (method_klass_image, rva);
5012 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5013 /* for aot code we do the lookup on load */
5014 if (aot && data_ptr)
5015 data_ptr = (const char *)GUINT_TO_POINTER (rva);
5016 } else {
5017 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5018 g_assert (!aot);
5019 data_ptr = mono_field_get_data (field);
5021 if (!data_ptr)
5022 return NULL;
5023 *il_op = MONO_CEE_CALL;
5024 *next_ip = ip;
5025 return data_ptr;
5027 return NULL;
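/*
 * Illustrative source for the pattern matched above (added; not in the
 * original): an initializer like
 *     static readonly int[] data = new int[] { 1, 2, 3, 4 };
 * is compiled to newarr; dup; ldtoken <PrivateImplementationDetails field>;
 * call RuntimeHelpers::InitializeArray (...), which lets the JIT replace the
 * call with a direct copy from the field's RVA data.
 */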
5030 static void
5031 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, guchar *ip)
5033 ERROR_DECL (error);
5034 char *method_fname = mono_method_full_name (method, TRUE);
5035 char *method_code;
5036 MonoMethodHeader *header = mono_method_get_header_checked (method, error);
5038 if (!header) {
5039 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error));
5040 mono_error_cleanup (error);
5041 } else if (header->code_size == 0)
5042 method_code = g_strdup ("method body is empty.");
5043 else
5044 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5045 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
5046 g_free (method_fname);
5047 g_free (method_code);
5048 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
5051 guint32
5052 mono_type_to_stloc_coerce (MonoType *type)
5054 if (type->byref)
5055 return 0;
5057 type = mini_get_underlying_type (type);
5058 handle_enum:
5059 switch (type->type) {
5060 case MONO_TYPE_I1:
5061 return OP_ICONV_TO_I1;
5062 case MONO_TYPE_U1:
5063 return OP_ICONV_TO_U1;
5064 case MONO_TYPE_I2:
5065 return OP_ICONV_TO_I2;
5066 case MONO_TYPE_U2:
5067 return OP_ICONV_TO_U2;
5068 case MONO_TYPE_I4:
5069 case MONO_TYPE_U4:
5070 case MONO_TYPE_I:
5071 case MONO_TYPE_U:
5072 case MONO_TYPE_PTR:
5073 case MONO_TYPE_FNPTR:
5074 case MONO_TYPE_CLASS:
5075 case MONO_TYPE_STRING:
5076 case MONO_TYPE_OBJECT:
5077 case MONO_TYPE_SZARRAY:
5078 case MONO_TYPE_ARRAY:
5079 case MONO_TYPE_I8:
5080 case MONO_TYPE_U8:
5081 case MONO_TYPE_R4:
5082 case MONO_TYPE_R8:
5083 case MONO_TYPE_TYPEDBYREF:
5084 case MONO_TYPE_GENERICINST:
5085 return 0;
5086 case MONO_TYPE_VALUETYPE:
5087 if (m_class_is_enumtype (type->data.klass)) {
5088 type = mono_class_enum_basetype_internal (type->data.klass);
5089 goto handle_enum;
5091 return 0;
5092 case MONO_TYPE_VAR:
5093 case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be a match and, for example, u1 is not covariant to u32
5094 return 0;
5095 default:
5096 g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
5098 return -1;
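/*
 * Note added for clarity (inferred from the code above and below): the
 * evaluation stack only holds I4-widened integers, so storing into a small
 * integer local/argument needs an explicit truncation; emit_stloc_ir () and
 * emit_starg_ir () insert the conversion opcode returned above when nonzero.
 */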
5101 static void
5102 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5104 MonoInst *ins;
5105 guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
5107 if (coerce_op) {
5108 if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
5109 if (cfg->verbose_level > 2)
5110 printf ("Found existing coercing is enough for stloc\n");
5111 } else {
5112 MONO_INST_NEW (cfg, ins, coerce_op);
5113 ins->dreg = alloc_ireg (cfg);
5114 ins->sreg1 = sp [0]->dreg;
5115 ins->type = STACK_I4;
5116 ins->klass = mono_class_from_mono_type_internal (header->locals [n]);
5117 MONO_ADD_INS (cfg->cbb, ins);
5118 *sp = mono_decompose_opcode (cfg, ins);
5123 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5124 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5125 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5126 /* Optimize reg-reg moves away */
5128 * Can't optimize other opcodes, since sp[0] might point to
5129 * the last ins of a decomposed opcode.
5131 sp [0]->dreg = (cfg)->locals [n]->dreg;
5132 } else {
5133 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5137 static void
5138 emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
5140 MonoInst *ins;
5141 guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
5143 if (coerce_op) {
5144 if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
5145 if (cfg->verbose_level > 2)
5146 printf ("Found existing coercing is enough for starg\n");
5147 } else {
5148 MONO_INST_NEW (cfg, ins, coerce_op);
5149 ins->dreg = alloc_ireg (cfg);
5150 ins->sreg1 = sp [0]->dreg;
5151 ins->type = STACK_I4;
5152 ins->klass = mono_class_from_mono_type_internal (cfg->arg_types [n]);
5153 MONO_ADD_INS (cfg->cbb, ins);
5154 *sp = mono_decompose_opcode (cfg, ins);
5158 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5161 /*
5162 * ldloca inhibits many optimizations so try to get rid of it in common
5163 * cases.
5164 */
5165 static guchar *
5166 emit_optimized_ldloca_ir (MonoCompile *cfg, guchar *ip, guchar *end, int local)
5168 guint32 token;
5169 MonoClass *klass;
5170 MonoType *type;
5172 guchar *start = ip;
5174 if ((ip = il_read_initobj (ip, end, &token)) && ip_in_bb (cfg, cfg->cbb, start + 1)) {
5175 /* From the INITOBJ case */
5176 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5177 CHECK_TYPELOAD (klass);
5178 type = mini_get_underlying_type (m_class_get_byval_arg (klass));
5179 emit_init_local (cfg, local, type, TRUE);
5180 return ip;
5182 exception_exit:
5183 return NULL;
5186 static MonoInst*
5187 handle_call_res_devirt (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *call_res)
5190 * Devirt EqualityComparer.Default.Equals () calls for some types.
5191 * The corefx code expects these calls to be devirtualized.
5192 * This depends on the implementation of EqualityComparer.Default, which is
5193 * in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs
5195 if (m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
5196 !strcmp (m_class_get_name (cmethod->klass), "EqualityComparer`1") &&
5197 !strcmp (cmethod->name, "get_Default")) {
5198 MonoType *param_type = mono_class_get_generic_class (cmethod->klass)->context.class_inst->type_argv [0];
5199 MonoClass *inst;
5200 MonoGenericContext ctx;
5201 MonoType *args [16];
5202 ERROR_DECL (error);
5204 memset (&ctx, 0, sizeof (ctx));
5206 args [0] = param_type;
5207 ctx.class_inst = mono_metadata_get_generic_inst (1, args);
5209 inst = mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx, error);
5210 mono_error_assert_ok (error);
5212 /* EqualityComparer<T>.Default returns specific types depending on T */
5213 // FIXME: Add more
5214 /* 1. Implements IEquatable<T> */
5215 /*
5216 * Can't use this for string/byte as it might use a different comparer:
5217 *
5218 * // Specialize type byte for performance reasons
5219 * if (t == typeof(byte)) {
5220 *     return (EqualityComparer<T>)(object)(new ByteEqualityComparer());
5221 * }
5222 * #if MOBILE
5223 * // Breaks .net serialization compatibility
5224 * if (t == typeof (string))
5225 *     return (EqualityComparer<T>)(object)new InternalStringComparer ();
5226 * #endif
5227 */
5228 if (mono_class_is_assignable_from_internal (inst, mono_class_from_mono_type_internal (param_type)) && param_type->type != MONO_TYPE_U1 && param_type->type != MONO_TYPE_STRING) {
5229 MonoInst *typed_objref;
5230 MonoClass *gcomparer_inst;
5232 memset (&ctx, 0, sizeof (ctx));
5234 args [0] = param_type;
5235 ctx.class_inst = mono_metadata_get_generic_inst (1, args);
5237 MonoClass *gcomparer = mono_class_get_geqcomparer_class ();
5238 g_assert (gcomparer);
5239 gcomparer_inst = mono_class_inflate_generic_class_checked (gcomparer, &ctx, error);
5240 mono_error_assert_ok (error);
5242 MONO_INST_NEW (cfg, typed_objref, OP_TYPED_OBJREF);
5243 typed_objref->type = STACK_OBJ;
5244 typed_objref->dreg = alloc_ireg_ref (cfg);
5245 typed_objref->sreg1 = call_res->dreg;
5246 typed_objref->klass = gcomparer_inst;
5247 MONO_ADD_INS (cfg->cbb, typed_objref);
5249 call_res = typed_objref;
5251 /* Force decompose */
5252 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
5253 cfg->cbb->needs_decompose = TRUE;
5257 return call_res;
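/*
 * Example (a sketch): for EqualityComparer<int>.Default, int implements
 * IEquatable<int> and is neither byte nor string, so the call result is
 * wrapped in OP_TYPED_OBJREF with klass = GenericEqualityComparer<int>;
 * a following callvirt to Equals () can then be devirtualized against
 * that concrete class by later passes.
 */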
5260 static gboolean
5261 is_exception_class (MonoClass *klass)
5263 if (G_LIKELY (m_class_get_supertypes (klass)))
5264 return mono_class_has_parent_fast (klass, mono_defaults.exception_class);
5265 while (klass) {
5266 if (klass == mono_defaults.exception_class)
5267 return TRUE;
5268 klass = m_class_get_parent (klass);
5270 return FALSE;
5273 /*
5274 * is_jit_optimizer_disabled:
5275 *
5276 * Determine whether M's assembly has a DebuggableAttribute with the
5277 * IsJITOptimizerDisabled flag set.
5278 */
5279 static gboolean
5280 is_jit_optimizer_disabled (MonoMethod *m)
5282 ERROR_DECL (error);
5283 MonoAssembly *ass = m_class_get_image (m->klass)->assembly;
5284 MonoCustomAttrInfo* attrs;
5285 MonoClass *klass;
5286 int i;
5287 gboolean val = FALSE;
5289 g_assert (ass);
5290 if (ass->jit_optimizer_disabled_inited)
5291 return ass->jit_optimizer_disabled;
5293 klass = mono_class_try_get_debuggable_attribute_class ();
5295 if (!klass) {
5296 /* Linked away */
5297 ass->jit_optimizer_disabled = FALSE;
5298 mono_memory_barrier ();
5299 ass->jit_optimizer_disabled_inited = TRUE;
5300 return FALSE;
5303 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, error);
5304 mono_error_cleanup (error); /* FIXME don't swallow the error */
5305 if (attrs) {
5306 for (i = 0; i < attrs->num_attrs; ++i) {
5307 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5308 const gchar *p;
5309 MonoMethodSignature *sig;
5311 if (!attr->ctor || attr->ctor->klass != klass)
5312 continue;
5313 /* Decode the attribute. See reflection.c */
5314 p = (const char*)attr->data;
5315 g_assert (read16 (p) == 0x0001);
5316 p += 2;
5318 // FIXME: Support named parameters
5319 sig = mono_method_signature_internal (attr->ctor);
5320 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5321 continue;
5322 /* Two boolean arguments */
5323 p ++;
5324 val = *p;
5326 mono_custom_attrs_free (attrs);
5329 ass->jit_optimizer_disabled = val;
5330 mono_memory_barrier ();
5331 ass->jit_optimizer_disabled_inited = TRUE;
5333 return val;
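/*
 * A sketch of the attribute blob decoded above, e.g. for
 * [assembly: Debuggable (true, true)]:
 *
 *     01 00    prolog (0x0001)
 *     01       ctor arg 1: isJITTrackingEnabled
 *     01       ctor arg 2: isJITOptimizerDisabled  <- read into 'val'
 *
 * Any named-arguments section that follows is ignored (see the FIXME above).
 */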
5336 gboolean
5337 mono_is_supported_tailcall_helper (gboolean value, const char *svalue)
5339 if (!value)
5340 mono_tailcall_print ("%s %s\n", __func__, svalue);
5341 return value;
5344 static gboolean
5345 mono_is_not_supported_tailcall_helper (gboolean value, const char *svalue, MonoMethod *method, MonoMethod *cmethod)
5347 // Return value, printing if it inhibits tailcall.
5349 if (value && mono_tailcall_print_enabled ()) {
5350 const char *lparen = strchr (svalue, ' ') ? "(" : "";
5351 const char *rparen = *lparen ? ")" : "";
5352 mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__, method->name, cmethod->name, lparen, svalue, rparen, value);
5354 return value;
5357 #define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod))
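/*
 * Usage sketch: IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) returns the
 * value of the expression and, when it is nonzero and tailcall printing
 * is enabled, logs a line like
 *
 *     mono_is_not_supported_tailcall_helper Caller -> Callee fsig->pinvoke:1
 *
 * parenthesizing the expression text when it contains spaces.
 */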
5359 static gboolean
5360 is_supported_tailcall (MonoCompile *cfg, const guint8 *ip, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig,
5361 gboolean virtual_, gboolean extra_arg, gboolean *ptailcall_calli)
5363 // Some checks apply to "regular", some to "calli", some to both.
5364 // To ease burden on caller, always compute regular and calli.
5366 gboolean tailcall = TRUE;
5367 gboolean tailcall_calli = TRUE;
5369 if (IS_NOT_SUPPORTED_TAILCALL (virtual_ && !cfg->backend->have_op_tailcall_membase))
5370 tailcall = FALSE;
5372 if (IS_NOT_SUPPORTED_TAILCALL (!cfg->backend->have_op_tailcall_reg))
5373 tailcall_calli = FALSE;
5375 if (!tailcall && !tailcall_calli)
5376 goto exit;
5378 // FIXME in calli, there is no type for the this parameter,
5379 // so we assume it might be a valuetype; in the future we should issue a range
5380 // check to rule out pointers into the frame (for other reference parameters also).
5382 if ( IS_NOT_SUPPORTED_TAILCALL (cmethod && fsig->hasthis && m_class_is_valuetype (cmethod->klass)) // This might point to the current method's stack. Emit range check?
5383 || IS_NOT_SUPPORTED_TAILCALL (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
5384 || IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) // i.e. if !cmethod (calli)
5385 || IS_NOT_SUPPORTED_TAILCALL (cfg->method->save_lmf)
5386 || IS_NOT_SUPPORTED_TAILCALL (!cmethod && fsig->hasthis) // FIXME could be valuetype to current frame; range check
5387 || IS_NOT_SUPPORTED_TAILCALL (cmethod && cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5389 // http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/
5391 // 1. Non-generic non-static methods of reference types have access to the
5392 // RGCTX via the “this” argument (this->vtable->rgctx).
5393 // 2a. Non-generic static methods of reference types, and 2b. non-generic methods
5394 //     of value types, need to be passed a pointer to the caller’s class’s VTable in the MONO_ARCH_RGCTX_REG register.
5395 // 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register
5397 // That is what vtable_arg is here (always?).
5399 // Passing vtable_arg uses (requires?) a volatile non-parameter register,
5400 // such as AMD64 rax, r10, r11, or the return register on many architectures.
5401 // ARM32 does not always clearly have such a register. ARM32's return register
5402 // is a parameter register.
5403 // iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly
5404 // important. Linux/arm32 is less clear.
5405 // ARM32's scratch r12 might work but only with much collateral change.
5407 // Imagine F1 calls F2, and F2 tailcalls F3.
5408 // F2 and F3 are managed. F1 is native.
5409 // Without a tailcall, F2 can save and restore everything needed for F1.
5410 // However if the extra parameter were in a non-volatile, such as ARM32 V5/R8,
5411 // F3 cannot easily restore it for F1 in the current scheme, where the
5412 // extra parameter is not merely an extra parameter but is passed
5413 // "outside of the ABI".
5415 // If all native to managed transitions are intercepted and wrapped (w/o tailcall),
5416 // then they can preserve this register and the rest of the managed callgraph
5417 // can treat it as volatile.
5419 // Interface method dispatch has the same problem (imt_arg).
5421 || IS_NOT_SUPPORTED_TAILCALL (extra_arg && !cfg->backend->have_volatile_non_param_register)
5422 || IS_NOT_SUPPORTED_TAILCALL (cfg->gsharedvt)
5424 tailcall_calli = FALSE;
5425 tailcall = FALSE;
5426 goto exit;
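/*
 * Examples for the three RGCTX cases above (a sketch):
 *   1.  an instance method of a shared reference type reaches the rgctx
 *       through this->vtable, so no extra argument is needed;
 *   2.  a static method of a shared reference type, or any method of a
 *       shared valuetype, gets the VTable pointer in MONO_ARCH_RGCTX_REG;
 *   3.  a shared generic method gets the MRGCTX in MONO_ARCH_RGCTX_REG.
 * Cases 2 and 3 are what 'extra_arg' covers, hence the register check above.
 */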
5429 for (int i = 0; i < fsig->param_count; ++i) {
5430 if (IS_NOT_SUPPORTED_TAILCALL (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)) {
5431 tailcall_calli = FALSE;
5432 tailcall = FALSE; // These can point to the current method's stack. Emit range check?
5433 goto exit;
5437 MonoMethodSignature *caller_signature;
5438 MonoMethodSignature *callee_signature;
5439 caller_signature = mono_method_signature_internal (method);
5440 callee_signature = cmethod ? mono_method_signature_internal (cmethod) : fsig;
5442 g_assert (caller_signature);
5443 g_assert (callee_signature);
5445 // Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped.
5446 // The main troublesome conversions are double <=> float.
5447 // CoreCLR allows some conversions here, such as integer truncation.
5448 // Likewise I <=> I4/I8 and U <=> U4/U8 would be ok, for matching sizes.
5449 if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature->ret)->type != mini_get_underlying_type (callee_signature->ret)->type)
5450 || IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg, caller_signature, callee_signature, virtual_))) {
5451 tailcall_calli = FALSE;
5452 tailcall = FALSE;
5453 goto exit;
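/*
 * Example (a sketch): a tail call from "double Caller ()" to
 * "float Callee ()" is rejected here, because emit_move_return_value
 * would otherwise have to perform the r4 -> r8 widening that a tailcall
 * skips.
 */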
5456 /* Debugging support */
5457 #if 0
5458 if (!mono_debug_count ()) {
5459 tailcall_calli = FALSE;
5460 tailcall = FALSE;
5461 goto exit;
5463 #endif
5464 // See check_sp in mini_emit_calli_full.
5465 if (tailcall_calli && IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg)))
5466 tailcall_calli = FALSE;
5467 exit:
5468 mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n",
5469 mono_opcode_name (*ip), method->name, cmethod ? cmethod->name : "calli", tailcall, tailcall_calli,
5470 cfg->gshared, extra_arg, virtual_);
5472 *ptailcall_calli = tailcall_calli;
5473 return tailcall;
5476 /*
5477 * is_addressable_valuetype_load
5478 *
5479 * Returns TRUE if a previous load can be done without an extra copy, given the next instruction 'ip' and the type of the value being loaded, 'ldtype'.
5480 */
5481 static gboolean
5482 is_addressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype)
5484 /* Avoid loading a struct just to load one of its fields */
5485 gboolean is_load_instruction = (*ip == CEE_LDFLD);
5486 gboolean is_in_previous_bb = ip_in_bb(cfg, cfg->cbb, ip);
5487 gboolean is_struct = MONO_TYPE_ISSTRUCT(ldtype);
5488 return is_load_instruction && is_in_previous_bb && is_struct;
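/*
 * Example (a sketch): for the sequence
 *
 *     ldloc.0                  // a struct local
 *     ldfld int32 Big::x
 *
 * the caller emits the address of the local (a loada) instead of copying
 * the whole struct onto the stack just to read one field.
 */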
5491 /*
5492 * handle_ctor_call:
5493 *
5494 * Handle calls made to ctors from NEWOBJ opcodes.
5495 */
5496 static void
5497 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
5498 MonoInst **sp, guint8 *ip, int *inline_costs)
5500 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
5502 if (m_class_is_valuetype (cmethod->klass) && mono_class_generic_sharing_enabled (cmethod->klass) &&
5503 mono_method_is_generic_sharable (cmethod, TRUE)) {
5504 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
5505 mono_class_vtable_checked (cfg->domain, cmethod->klass, &cfg->error);
5506 CHECK_CFG_ERROR;
5507 CHECK_TYPELOAD (cmethod->klass);
5509 vtable_arg = emit_get_rgctx_method (cfg, context_used,
5510 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5511 } else {
5512 if (context_used) {
5513 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
5514 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5515 } else {
5516 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, &cfg->error);
5517 CHECK_CFG_ERROR;
5518 CHECK_TYPELOAD (cmethod->klass);
5519 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5524 /* Avoid virtual calls to ctors if possible */
5525 if (mono_class_is_marshalbyref (cmethod->klass))
5526 callvirt_this_arg = sp [0];
5528 if (cmethod && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
5529 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
5530 CHECK_CFG_EXCEPTION;
5531 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
5532 mono_method_check_inlining (cfg, cmethod) &&
5533 !mono_class_is_subclass_of_internal (cmethod->klass, mono_defaults.exception_class, FALSE)) {
5534 int costs;
5536 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
5537 cfg->real_offset += 5;
5539 *inline_costs += costs - 5;
5540 } else {
5541 INLINE_FAILURE ("inline failure");
5542 // FIXME-VT: Clean this up
5543 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
5544 GSHAREDVT_FAILURE(*ip);
5545 mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
5547 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
5548 MonoInst *addr;
5550 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
5552 if (cfg->llvm_only) {
5553 // FIXME: Avoid initializing vtable_arg
5554 mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5555 } else {
5556 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
5558 } else if (context_used &&
5559 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
5560 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
5561 MonoInst *cmethod_addr;
5563 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
5565 if (cfg->llvm_only) {
5566 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
5567 MONO_RGCTX_INFO_METHOD_FTNDESC);
5568 mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5569 } else {
5570 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
5571 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
5573 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
5575 } else {
5576 INLINE_FAILURE ("ctor call");
5577 ins = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
5578 callvirt_this_arg, NULL, vtable_arg);
5580 exception_exit:
5581 mono_error_exit:
5582 return;
5585 typedef struct {
5586 MonoMethod *method;
5587 gboolean inst_tailcall;
5588 } HandleCallData;
5590 /*
5591 * handle_constrained_call:
5592 *
5593 * Handle constrained calls. Return a MonoInst* representing the call or NULL.
5594 * May overwrite sp [0] and modify the ref_... parameters.
5595 */
5596 static MonoInst*
5597 handle_constrained_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoClass *constrained_class, MonoInst **sp,
5598 HandleCallData *cdata, MonoMethod **ref_cmethod, gboolean *ref_virtual, gboolean *ref_emit_widen)
5600 MonoInst *ins, *addr;
5601 MonoMethod *method = cdata->method;
5602 gboolean constrained_partial_call = FALSE;
5603 gboolean constrained_is_generic_param =
5604 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
5605 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
5607 if (constrained_is_generic_param && cfg->gshared) {
5608 if (!mini_is_gsharedvt_klass (constrained_class)) {
5609 g_assert (!m_class_is_valuetype (cmethod->klass));
5610 if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class)))
5611 constrained_partial_call = TRUE;
5615 if (mini_is_gsharedvt_klass (constrained_class)) {
5616 if ((cmethod->klass != mono_defaults.object_class) && m_class_is_valuetype (constrained_class) && m_class_is_valuetype (cmethod->klass)) {
5617 /* The 'Own method' case below */
5618 } else if (m_class_get_image (cmethod->klass) != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !m_class_is_valuetype (cmethod->klass)) {
5619 /* 'The type parameter is instantiated as a reference type' case below. */
5620 } else {
5621 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, ref_emit_widen);
5622 CHECK_CFG_EXCEPTION;
5623 g_assert (ins);
5624 if (cdata->inst_tailcall) // FIXME
5625 mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method->name, cmethod->name);
5626 return ins;
5630 if (constrained_partial_call) {
5631 gboolean need_box = TRUE;
5633 /*
5634 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
5635 * called method is not known at compile time either. The called method could end up being
5636 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
5637 * to box the receiver.
5638 * A simple solution would be to box always and make a normal virtual call, but that would
5639 * be bad performance-wise.
5640 */
5641 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass) &&
5642 (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) {
5643 /*
5644 * The parent classes implement no generic interfaces, so the called method will be a vtype method, and no boxing is necessary.
5645 */
5646 /* If the method is not abstract, it's a default interface method, and we need to box */
5647 need_box = FALSE;
5650 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) {
5651 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
5652 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5653 ins->klass = constrained_class;
5654 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5655 CHECK_CFG_EXCEPTION;
5656 } else if (need_box) {
5657 MonoInst *box_type;
5658 MonoBasicBlock *is_ref_bb, *end_bb;
5659 MonoInst *nonbox_call, *addr;
5661 /*
5662 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
5663 * if needed.
5664 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
5665 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
5666 */
5667 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
5669 NEW_BBLOCK (cfg, is_ref_bb);
5670 NEW_BBLOCK (cfg, end_bb);
5672 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
5673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
5674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
5676 /* Non-ref case */
5677 if (cfg->llvm_only)
5678 /* addr is an ftndesc in this case */
5679 nonbox_call = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5680 else
5681 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5685 /* Ref case */
5686 MONO_START_BB (cfg, is_ref_bb);
5687 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5688 ins->klass = constrained_class;
5689 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5690 CHECK_CFG_EXCEPTION;
5691 if (cfg->llvm_only)
5692 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5693 else
5694 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5698 MONO_START_BB (cfg, end_bb);
5699 cfg->cbb = end_bb;
5701 nonbox_call->dreg = ins->dreg;
5702 if (cdata->inst_tailcall) // FIXME
5703 mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method->name, cmethod->name);
5704 return ins;
5705 } else {
5706 g_assert (mono_class_is_interface (cmethod->klass));
5707 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
5708 if (cfg->llvm_only)
5709 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5710 else
5711 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5712 if (cdata->inst_tailcall) // FIXME
5713 mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method->name, cmethod->name);
5714 return ins;
5716 } else if (!m_class_is_valuetype (constrained_class)) {
5717 int dreg = alloc_ireg_ref (cfg);
5719 /*
5720 * The type parameter is instantiated as a reference
5721 * type. We have a managed pointer on the stack, so
5722 * we need to dereference it here.
5723 */
5724 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5725 ins->type = STACK_OBJ;
5726 sp [0] = ins;
5727 } else if (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class) {
5728 /*
5729 * The type parameter is instantiated as a valuetype,
5730 * but that type doesn't override the method we're
5731 * calling, so we need to box `this'.
5732 */
5733 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5734 ins->klass = constrained_class;
5735 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5736 CHECK_CFG_EXCEPTION;
5737 } else {
5738 if (cmethod->klass != constrained_class) {
5739 /* Enums/default interface methods */
5740 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5741 ins->klass = constrained_class;
5742 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5743 CHECK_CFG_EXCEPTION;
5745 *ref_virtual = FALSE;
5748 exception_exit:
5749 return NULL;
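/*
 * Example (a sketch): "constrained. !!T callvirt string object::ToString ()"
 * in a partially shared method, where T is a valuetype that may not
 * override ToString (), takes the constrained_partial_call path above: a
 * runtime box-type check selects between the direct (unboxed) call and
 * the boxing call.
 */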
5752 static void
5753 emit_setret (MonoCompile *cfg, MonoInst *val)
5755 MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
5756 MonoInst *ins;
5758 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
5759 MonoInst *ret_addr;
5761 if (!cfg->vret_addr) {
5762 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
5763 } else {
5764 EMIT_NEW_RETLOADA (cfg, ret_addr);
5766 MonoClass *ret_class = mono_class_from_mono_type_internal (ret_type);
5767 if (MONO_CLASS_IS_SIMD (cfg, ret_class))
5768 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ret_addr->dreg, 0, val->dreg);
5769 else
5770 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
5771 ins->klass = ret_class;
5773 } else {
5774 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5775 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
5776 MonoInst *iargs [1];
5777 MonoInst *conv;
5779 iargs [0] = val;
5780 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
5781 mono_arch_emit_setret (cfg, cfg->method, conv);
5782 } else {
5783 mono_arch_emit_setret (cfg, cfg->method, val);
5785 #else
5786 mono_arch_emit_setret (cfg, cfg->method, val);
5787 #endif
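/*
 * Sketch of the paths above: a struct return such as "static Big M ()"
 * is written through the hidden return-address variable with
 * OP_STOREV_MEMBASE (OP_STOREX_MEMBASE for SIMD types like Vector4),
 * while scalar returns go straight to mono_arch_emit_setret, with a
 * soft-float conversion inserted for R4 returns on FPU-less targets.
 */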
5791 typedef union _MonoOpcodeParameter {
5792 gint32 i32;
5793 gint64 i64;
5794 float f;
5795 double d;
5796 guchar *branch_target;
5797 } MonoOpcodeParameter;
5799 typedef struct _MonoOpcodeInfo {
5800 guint constant : 4; // private
5801 gint pops : 3; // public -1 means variable
5802 gint pushes : 3; // public -1 means variable
5803 } MonoOpcodeInfo;
5805 static inline const MonoOpcodeInfo*
5806 mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter)
5808 #define Push0 (0)
5809 #define Pop0 (0)
5810 #define Push1 (1)
5811 #define Pop1 (1)
5812 #define PushI (1)
5813 #define PopI (1)
5814 #define PushI8 (1)
5815 #define PopI8 (1)
5816 #define PushRef (1)
5817 #define PopRef (1)
5818 #define PushR4 (1)
5819 #define PopR4 (1)
5820 #define PushR8 (1)
5821 #define PopR8 (1)
5822 #define VarPush (-1)
5823 #define VarPop (-1)
5825 static const MonoOpcodeInfo mono_opcode_info [ ] = {
5826 #define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes },
5827 #include "mono/cil/opcode.def"
5828 #undef OPDEF
5831 #undef Push0
5832 #undef Pop0
5833 #undef Push1
5834 #undef Pop1
5835 #undef PushI
5836 #undef PopI
5837 #undef PushI8
5838 #undef PopI8
5839 #undef PushRef
5840 #undef PopRef
5841 #undef PushR4
5842 #undef PopR4
5843 #undef PushR8
5844 #undef PopR8
5845 #undef VarPush
5846 #undef VarPop
5848 gint32 delta;
5849 guchar *next_ip = ip + op_size;
5851 const MonoOpcodeInfo *info = &mono_opcode_info [il_op];
5853 switch (mono_opcodes [il_op].argument) {
5854 case MonoInlineNone:
5855 parameter->i32 = (int)info->constant - 1;
5856 break;
5857 case MonoInlineString:
5858 case MonoInlineType:
5859 case MonoInlineField:
5860 case MonoInlineMethod:
5861 case MonoInlineTok:
5862 case MonoInlineSig:
5863 case MonoShortInlineR:
5864 case MonoInlineI:
5865 parameter->i32 = read32 (next_ip - 4);
5866 // FIXME check token type?
5867 break;
5868 case MonoShortInlineI:
5869 parameter->i32 = (signed char)next_ip [-1];
5870 break;
5871 case MonoInlineVar:
5872 parameter->i32 = read16 (next_ip - 2);
5873 break;
5874 case MonoShortInlineVar:
5875 parameter->i32 = next_ip [-1];
5876 break;
5877 case MonoInlineR:
5878 case MonoInlineI8:
5879 parameter->i64 = read64 (next_ip - 8);
5880 break;
5881 case MonoShortInlineBrTarget:
5882 delta = (signed char)next_ip [-1];
5883 goto branch_target;
5884 case MonoInlineBrTarget:
5885 delta = (gint32)read32 (next_ip - 4);
5886 branch_target:
5887 parameter->branch_target = delta + next_ip;
5888 break;
5889 case MonoInlineSwitch: // complicated
5890 break;
5891 default:
5892 g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument);
5894 return info;
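/*
 * Usage sketch (hypothetical values): decoding "br.s -2", i.e. the bytes
 * 0x2B 0xFE, with op_size == 2:
 *
 *     MonoOpcodeParameter param;
 *     const MonoOpcodeInfo *info = mono_opcode_decode (ip, 2, MONO_CEE_BR_S, &param);
 *     // param.branch_target == ip + 2 - 2 == ip, a branch to itself
 *     // info->pops == 0 && info->pushes == 0
 */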
5897 /*
5898 * mono_method_to_ir:
5899 *
5900 * Translate the .NET IL into linear IR.
5901 *
5902 * @start_bblock: if not NULL, the starting basic block, used during inlining.
5903 * @end_bblock: if not NULL, the ending basic block, used during inlining.
5904 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
5905 * @inline_args: if not NULL, contains the arguments to the inline call.
5906 * @inline_offset: if not zero, the real offset of the inlined call site; zero when not inlining.
5907 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
5908 *
5909 * This method is used to turn ECMA IL into Mono's internal linear IR
5910 * representation. It is used both for entire methods, as well as for
5911 * inlining existing methods. In the former case, @start_bblock,
5912 * @end_bblock, @return_var and @inline_args are all set to NULL, and the
5913 * inline_offset is set to zero.
5914 *
5915 * Returns: the inline cost, or -1 if there was an error processing this method.
5916 */
5917 int
5918 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5919 MonoInst *return_var, MonoInst **inline_args,
5920 guint inline_offset, gboolean is_virtual_call)
5922 ERROR_DECL (error);
5923 // Buffer to hold parameters to mono_new_array, instead of varargs.
5924 MonoInst *array_new_localalloc_ins = NULL;
5925 MonoInst *ins, **sp, **stack_start;
5926 MonoBasicBlock *tblock = NULL;
5927 MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL;
5928 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5929 MonoMethod *method_definition;
5930 MonoInst **arg_array;
5931 MonoMethodHeader *header;
5932 MonoImage *image;
5933 guint32 token, ins_flag;
5934 MonoClass *klass;
5935 MonoClass *constrained_class = NULL;
5936 gboolean save_last_error = FALSE;
5937 guchar *ip, *end, *target, *err_pos;
5938 MonoMethodSignature *sig;
5939 MonoGenericContext *generic_context = NULL;
5940 MonoGenericContainer *generic_container = NULL;
5941 MonoType **param_types;
5942 int i, n, start_new_bblock, dreg;
5943 int num_calls = 0, inline_costs = 0;
5944 int breakpoint_id = 0;
5945 guint num_args;
5946 GSList *class_inits = NULL;
5947 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5948 int context_used;
5949 gboolean init_locals, seq_points, skip_dead_blocks;
5950 gboolean sym_seq_points = FALSE;
5951 MonoDebugMethodInfo *minfo;
5952 MonoBitSet *seq_point_locs = NULL;
5953 MonoBitSet *seq_point_set_locs = NULL;
5954 gboolean emitted_funccall_seq_point = FALSE;
5956 cfg->disable_inline = is_jit_optimizer_disabled (method);
5958 image = m_class_get_image (method->klass);
5960 /* serialization and xdomain stuff may need access to private fields and methods */
5961 dont_verify = image->assembly->corlib_internal? TRUE: FALSE;
5962 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5963 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5964 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5965 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5966 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5968 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5969 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5970 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_OTHER;
5971 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5972 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5974 header = mono_method_get_header_checked (method, &cfg->error);
5975 if (!header) {
5976 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
5977 goto exception_exit;
5978 } else {
5979 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
5982 generic_container = mono_method_get_generic_container (method);
5983 sig = mono_method_signature_internal (method);
5984 num_args = sig->hasthis + sig->param_count;
5985 ip = (guchar*)header->code;
5986 cfg->cil_start = ip;
5987 end = ip + header->code_size;
5988 cfg->stat_cil_code_size += header->code_size;
5990 seq_points = cfg->gen_seq_points && cfg->method == method;
5992 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
5993 /* We could hit a seq point before attaching to the JIT (#8338) */
5994 seq_points = FALSE;
5997 if (cfg->prof_coverage) {
5998 if (cfg->compile_aot)
5999 g_error ("Coverage profiling is not supported with AOT.");
6001 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6004 if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->prof_coverage) {
6005 minfo = mono_debug_lookup_method (method);
6006 if (minfo) {
6007 MonoSymSeqPoint *sps;
6008 int i, n_il_offsets;
6010 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
6011 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6012 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6013 sym_seq_points = TRUE;
6014 for (i = 0; i < n_il_offsets; ++i) {
6015 if (sps [i].il_offset < header->code_size)
6016 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
6018 g_free (sps);
6020 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
6021 if (asyncMethod) {
6022 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
6024 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
6025 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
6027 mono_debug_free_method_async_debug_info (asyncMethod);
6029 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) {
6030 /* Methods without line number info like auto-generated property accessors */
6031 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6032 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6033 sym_seq_points = TRUE;
6037 /*
6038 * Methods without init_locals set could cause asserts in various passes
6039 * (#497220). To work around this, we emit dummy initialization opcodes
6040 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6041 * on some platforms.
6042 */
6043 if (cfg->opt & MONO_OPT_UNSAFE)
6044 init_locals = header->init_locals;
6045 else
6046 init_locals = TRUE;
6048 method_definition = method;
6049 while (method_definition->is_inflated) {
6050 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6051 method_definition = imethod->declaring;
6054 /* SkipVerification is not allowed if core-clr is enabled */
6055 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6056 dont_verify = TRUE;
6057 dont_verify_stloc = TRUE;
6060 if (sig->is_inflated)
6061 generic_context = mono_method_get_context (method);
6062 else if (generic_container)
6063 generic_context = &generic_container->context;
6064 cfg->generic_context = generic_context;
6066 if (!cfg->gshared)
6067 g_assert (!sig->has_type_parameters);
6069 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6070 g_assert (method->is_inflated);
6071 g_assert (mono_method_get_context (method)->method_inst);
6073 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6074 g_assert (sig->generic_param_count);
6076 if (cfg->method == method) {
6077 cfg->real_offset = 0;
6078 } else {
6079 cfg->real_offset = inline_offset;
6082 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6083 cfg->cil_offset_to_bb_len = header->code_size;
6085 cfg->current_method = method;
6087 if (cfg->verbose_level > 2)
6088 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6090 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6091 if (sig->hasthis)
6092 param_types [0] = m_class_is_valuetype (method->klass) ? m_class_get_this_arg (method->klass) : m_class_get_byval_arg (method->klass);
6093 for (n = 0; n < sig->param_count; ++n)
6094 param_types [n + sig->hasthis] = sig->params [n];
6095 cfg->arg_types = param_types;
6097 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
6098 if (cfg->method == method) {
6099 /* ENTRY BLOCK */
6100 NEW_BBLOCK (cfg, start_bblock);
6101 cfg->bb_entry = start_bblock;
6102 start_bblock->cil_code = NULL;
6103 start_bblock->cil_length = 0;
6105 /* EXIT BLOCK */
6106 NEW_BBLOCK (cfg, end_bblock);
6107 cfg->bb_exit = end_bblock;
6108 end_bblock->cil_code = NULL;
6109 end_bblock->cil_length = 0;
6110 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6111 g_assert (cfg->num_bblocks == 2);
6113 arg_array = cfg->args;
6115 if (header->num_clauses) {
6116 cfg->spvars = g_hash_table_new (NULL, NULL);
6117 cfg->exvars = g_hash_table_new (NULL, NULL);
6119 /* handle exception clauses */
6120 for (i = 0; i < header->num_clauses; ++i) {
6121 MonoBasicBlock *try_bb;
6122 MonoExceptionClause *clause = &header->clauses [i];
6123 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6125 try_bb->real_offset = clause->try_offset;
6126 try_bb->try_start = TRUE;
6127 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6128 tblock->real_offset = clause->handler_offset;
6129 tblock->flags |= BB_EXCEPTION_HANDLER;
6131 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
6132 mono_create_exvar_for_offset (cfg, clause->handler_offset);
6133 /*
6134 * Linking the try block with the EH block hinders inlining, as we would not be able to
6135 * merge the bblocks from inlining, and it would produce an artificial hole for no good reason.
6136 */
6137 if (COMPILE_LLVM (cfg))
6138 link_bblock (cfg, try_bb, tblock);
6140 if (*(ip + clause->handler_offset) == CEE_POP)
6141 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6143 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6144 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6145 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6146 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6147 MONO_ADD_INS (tblock, ins);
6149 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
6150 /* finally clauses already have a seq point */
6151 /* seq points for filter clauses are emitted below */
6152 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6153 MONO_ADD_INS (tblock, ins);
6156 /* todo: is a fault block unsafe to optimize? */
6157 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6158 tblock->flags |= BB_EXCEPTION_UNSAFE;
6161 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6162 while (p < end) {
6163 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6164 }*/
6165 /* catch and filter blocks get the exception object on the stack */
6166 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6167 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6169 /* mostly like handle_stack_args (), but just sets the input args */
6170 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6171 tblock->in_scount = 1;
6172 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6173 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6175 cfg->cbb = tblock;
6177 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
6178 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
6179 if (!cfg->compile_llvm) {
6180 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
6181 ins->dreg = tblock->in_stack [0]->dreg;
6182 MONO_ADD_INS (tblock, ins);
6184 #else
6185 MonoInst *dummy_use;
6187 /*
6188 * Add a dummy use for the exvar so its liveness info will be
6189 * correct.
6190 */
6191 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6192 #endif
6194 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6195 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6196 MONO_ADD_INS (tblock, ins);
6199 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6200 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6201 tblock->flags |= BB_EXCEPTION_HANDLER;
6202 tblock->real_offset = clause->data.filter_offset;
6203 tblock->in_scount = 1;
6204 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6205 /* The filter block shares the exvar with the handler block */
6206 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6207 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6208 MONO_ADD_INS (tblock, ins);
6212 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6213 clause->data.catch_class &&
6214 cfg->gshared &&
6215 mono_class_check_context_used (clause->data.catch_class)) {
6216 /*
6217 * In shared generic code with catch
6218 * clauses containing type variables
6219 * the exception handling code has to
6220 * be able to get to the rgctx.
6221 * Therefore we have to make sure that
6222 * the vtable/mrgctx argument (for
6223 * static or generic methods) or the
6224 * "this" argument (for non-static
6225 * methods) is live.
6226 */
6227 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6228 mini_method_get_context (method)->method_inst ||
6229 m_class_is_valuetype (method->klass)) {
6230 mono_get_vtable_var (cfg);
6231 } else {
6232 MonoInst *dummy_use;
6234 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6238 } else {
6239 arg_array = g_newa (MonoInst*, num_args);
6240 cfg->cbb = start_bblock;
6241 cfg->args = arg_array;
6242 mono_save_args (cfg, sig, inline_args);
6245 /* FIRST CODE BLOCK */
6246 NEW_BBLOCK (cfg, tblock);
6247 tblock->cil_code = ip;
6248 cfg->cbb = tblock;
6249 cfg->ip = ip;
6251 ADD_BBLOCK (cfg, tblock);
6253 if (cfg->method == method) {
6254 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6255 if (breakpoint_id) {
6256 MONO_INST_NEW (cfg, ins, OP_BREAK);
6257 MONO_ADD_INS (cfg->cbb, ins);
6261 /* we use a separate basic block for the initialization code */
6262 NEW_BBLOCK (cfg, init_localsbb);
6263 if (cfg->method == method)
6264 cfg->bb_init = init_localsbb;
6265 init_localsbb->real_offset = cfg->real_offset;
6266 start_bblock->next_bb = init_localsbb;
6267 init_localsbb->next_bb = cfg->cbb;
6268 link_bblock (cfg, start_bblock, init_localsbb);
6269 link_bblock (cfg, init_localsbb, cfg->cbb);
6270 init_localsbb2 = init_localsbb;
6271 cfg->cbb = init_localsbb;
6273 if (cfg->gsharedvt && cfg->method == method) {
6274 MonoGSharedVtMethodInfo *info;
6275 MonoInst *var, *locals_var;
6276 int dreg;
6278 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6279 info->method = cfg->method;
6280 info->count_entries = 16;
6281 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
6282 cfg->gsharedvt_info = info;
6284 var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
6285 /* prevent it from being register allocated */
6286 //var->flags |= MONO_INST_VOLATILE;
6287 cfg->gsharedvt_info_var = var;
6289 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6290 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6292 /* Allocate locals */
6293 locals_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
6294 /* prevent it from being register allocated */
6295 //locals_var->flags |= MONO_INST_VOLATILE;
6296 cfg->gsharedvt_locals_var = locals_var;
6298 dreg = alloc_ireg (cfg);
6299 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6301 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6302 ins->dreg = locals_var->dreg;
6303 ins->sreg1 = dreg;
6304 MONO_ADD_INS (cfg->cbb, ins);
6305 cfg->gsharedvt_locals_var_ins = ins;
6307 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6309 if (init_locals)
6310 ins->flags |= MONO_INST_INIT;
6314 if (mono_security_core_clr_enabled ()) {
6315 /* check if this is native code, e.g. an icall or a p/invoke */
6316 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6317 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6318 if (wrapped) {
6319 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6320 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6322 /* if this is a native call then it can only be JITted from platform code */
6323 if ((icall || pinvk) && method->klass && m_class_get_image (method->klass)) {
6324 if (!mono_security_core_clr_is_platform_image (m_class_get_image (method->klass))) {
6325 MonoException *ex = icall ? mono_get_exception_security () :
6326 mono_get_exception_method_access ();
6327 emit_throw_exception (cfg, ex);
6334 CHECK_CFG_EXCEPTION;
6336 if (header->code_size == 0)
6337 UNVERIFIED;
6339 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6340 ip = err_pos;
6341 UNVERIFIED;
6344 if (cfg->method == method)
6345 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
6347 for (n = 0; n < header->num_locals; ++n) {
6348 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6349 UNVERIFIED;
6351 class_inits = NULL;
6353 /* We force the vtable variable here for all shared methods
6354 for the possibility that they might show up in a stack
6355 trace where their exact instantiation is needed. */
6356 if (cfg->gshared && method == cfg->method) {
6357 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6358 mini_method_get_context (method)->method_inst ||
6359 m_class_is_valuetype (method->klass)) {
6360 mono_get_vtable_var (cfg);
6361 } else {
6362 /* FIXME: Is there a better way to do this?
6363 We need the variable live for the duration
6364 of the whole method. */
6365 cfg->args [0]->flags |= MONO_INST_VOLATILE;
6369 /* add a check for this != NULL to inlined methods */
6370 if (is_virtual_call) {
6371 MonoInst *arg_ins;
6373 NEW_ARGLOAD (cfg, arg_ins, 0);
6374 MONO_ADD_INS (cfg->cbb, arg_ins);
6375 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6378 skip_dead_blocks = !dont_verify;
6379 if (skip_dead_blocks) {
6380 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
6381 CHECK_CFG_ERROR;
6382 g_assert (bb);
6385 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6386 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6388 ins_flag = 0;
6389 start_new_bblock = 0;
6390 MonoOpcodeEnum il_op; il_op = MonoOpcodeEnum_Invalid;
6392 for (guchar *next_ip = ip; ip < end; ip = next_ip) {
6393 MonoOpcodeEnum previous_il_op = il_op;
6394 const guchar *tmp_ip = ip;
6395 const int op_size = mono_opcode_value_and_size (&tmp_ip, end, &il_op);
6396 CHECK_OPSIZE (op_size);
6397 next_ip += op_size;
6399 if (cfg->method == method)
6400 cfg->real_offset = ip - header->code;
6401 else
6402 cfg->real_offset = inline_offset;
6403 cfg->ip = ip;
6405 context_used = 0;
6407 if (start_new_bblock) {
6408 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
6409 if (start_new_bblock == 2) {
6410 g_assert (ip == tblock->cil_code);
6411 } else {
6412 GET_BBLOCK (cfg, tblock, ip);
6414 cfg->cbb->next_bb = tblock;
6415 cfg->cbb = tblock;
6416 start_new_bblock = 0;
6417 for (i = 0; i < cfg->cbb->in_scount; ++i) {
6418 if (cfg->verbose_level > 3)
6419 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
6420 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
6421 *sp++ = ins;
6423 if (class_inits)
6424 g_slist_free (class_inits);
6425 class_inits = NULL;
6426 } else {
6427 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
6428 link_bblock (cfg, cfg->cbb, tblock);
6429 if (sp != stack_start) {
6430 handle_stack_args (cfg, stack_start, sp - stack_start);
6431 sp = stack_start;
6432 CHECK_UNVERIFIABLE (cfg);
6434 cfg->cbb->next_bb = tblock;
6435 cfg->cbb = tblock;
6436 for (i = 0; i < cfg->cbb->in_scount; ++i) {
6437 if (cfg->verbose_level > 3)
6438 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
6439 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
6440 *sp++ = ins;
6442 g_slist_free (class_inits);
6443 class_inits = NULL;
6447 if (skip_dead_blocks) {
6448 int ip_offset = ip - header->code;
6450 if (ip_offset == bb->end)
6451 bb = bb->next;
6453 if (bb->dead) {
6454 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6456 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6458 if (ip_offset + op_size == bb->end) {
6459 MONO_INST_NEW (cfg, ins, OP_NOP);
6460 MONO_ADD_INS (cfg->cbb, ins);
6461 start_new_bblock = 1;
6463 continue;
6466 /*
6467 * Sequence points are points where the debugger can place a breakpoint.
6468 * Currently, we generate these automatically at points where the IL
6469 * stack is empty.
6470 */
6471 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6472 /*
6473 * Make methods interruptible at the beginning, and at the targets of
6474 * backward branches.
6475 * Also, do this at the start of every bblock in methods with clauses too,
6476 * to be able to handle instructions with imprecise control flow like
6477 * throw/endfinally.
6478 * Backward branches are handled at the end of method-to-ir ().
6479 */
6480 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6481 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
6483 /* Avoid sequence points on empty IL like .volatile */
6484 // FIXME: Enable this
6485 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6486 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6487 if ((sp != stack_start) && !sym_seq_point)
6488 ins->flags |= MONO_INST_NONEMPTY_STACK;
6489 MONO_ADD_INS (cfg->cbb, ins);
6491 if (sym_seq_points)
6492 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
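/*
 * E.g. for a body like "ldarg.0; ldarg.1; add; ret" the IL stack is only
 * empty before offset 0x0, so without symbol seq points that is the one
 * implicit sequence point emitted here.
 */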
6494 if (cfg->prof_coverage) {
6495 guint32 cil_offset = ip - header->code;
6496 gpointer counter = &cfg->coverage_info->data [cil_offset].count;
6497 cfg->coverage_info->data [cil_offset].cil_code = ip;
6499 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
6500 MonoInst *one_ins, *load_ins;
6502 EMIT_NEW_PCONST (cfg, load_ins, counter);
6503 EMIT_NEW_ICONST (cfg, one_ins, 1);
6504 MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
6505 ins->dreg = mono_alloc_ireg (cfg);
6506 ins->inst_basereg = load_ins->dreg;
6507 ins->inst_offset = 0;
6508 ins->sreg2 = one_ins->dreg;
6509 ins->type = STACK_I4;
6510 MONO_ADD_INS (cfg->cbb, ins);
6511 } else {
6512 EMIT_NEW_PCONST (cfg, ins, counter);
6513 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6518 cfg->cbb->real_offset = cfg->real_offset;
6520 if (cfg->verbose_level > 3)
6521 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6523 // Variables shared by CEE_CALLI CEE_CALL CEE_CALLVIRT CEE_JMP.
6524 // Initialize to either what they all need or zero.
6525 gboolean emit_widen = TRUE;
6526 gboolean tailcall = FALSE;
6527 gboolean common_call = FALSE;
6528 MonoInst *keep_this_alive = NULL;
6529 MonoMethod *cmethod = NULL;
6530 MonoMethodSignature *fsig = NULL;
6532 // These are used only in CALL/CALLVIRT but must be initialized also for CALLI,
6533 // since it jumps into CALL/CALLVIRT.
6534 gboolean need_seq_point = FALSE;
6535 gboolean push_res = TRUE;
6536 gboolean skip_ret = FALSE;
6537 gboolean tailcall_remove_ret = FALSE;
6539 // FIXME: split the ~500 lines of field load/store handling into a separate file/function.
6541 MonoOpcodeParameter parameter;
6542 const MonoOpcodeInfo* info = mono_opcode_decode (ip, op_size, il_op, &parameter);
6543 g_assert (info);
6544 n = parameter.i32;
6545 token = parameter.i32;
6546 target = parameter.branch_target;
6548 // Check stack size for pushes/pops, except in the variable cases (-1, e.g. call/ret/newobj).
6549 const int pushes = info->pushes;
6550 const int pops = info->pops;
6551 if (pushes >= 0 && pops >= 0) {
6552 g_assert (pushes - pops <= 1);
6553 if (pushes - pops == 1)
6554 CHECK_STACK_OVF ();
6556 if (pops >= 0)
6557 CHECK_STACK (pops);
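/*
 * E.g. CEE_DUP has pops == 1 and pushes == 2 (net +1), so overflow is
 * checked here, while CEE_CALL has pops == pushes == -1 (variable) and
 * its stack effects are handled in the call code below.
 */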
6559 switch (il_op) {
6560 case MONO_CEE_NOP:
6561 if (seq_points && !sym_seq_points && sp != stack_start) {
6562 /*
6563 * The C# compiler uses these nops to notify the JIT that it should
6564 * insert seq points.
6565 */
6566 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6567 MONO_ADD_INS (cfg->cbb, ins);
6569 if (cfg->keep_cil_nops)
6570 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6571 else
6572 MONO_INST_NEW (cfg, ins, OP_NOP);
6573 MONO_ADD_INS (cfg->cbb, ins);
6574 emitted_funccall_seq_point = FALSE;
6575 break;
6576 case MONO_CEE_BREAK:
6577 if (mini_should_insert_breakpoint (cfg->method)) {
6578 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6579 } else {
6580 MONO_INST_NEW (cfg, ins, OP_NOP);
6581 MONO_ADD_INS (cfg->cbb, ins);
6583 break;
6584 case MONO_CEE_LDARG_0:
6585 case MONO_CEE_LDARG_1:
6586 case MONO_CEE_LDARG_2:
6587 case MONO_CEE_LDARG_3:
6588 case MONO_CEE_LDARG_S:
6589 case MONO_CEE_LDARG:
6590 CHECK_ARG (n);
6591 if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, cfg->arg_types[n])) {
6592 EMIT_NEW_ARGLOADA (cfg, ins, n);
6593 } else {
6594 EMIT_NEW_ARGLOAD (cfg, ins, n);
6596 *sp++ = ins;
6597 break;
6599 case MONO_CEE_LDLOC_0:
6600 case MONO_CEE_LDLOC_1:
6601 case MONO_CEE_LDLOC_2:
6602 case MONO_CEE_LDLOC_3:
6603 case MONO_CEE_LDLOC_S:
6604 case MONO_CEE_LDLOC:
6605 CHECK_LOCAL (n);
6606 if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, header->locals[n])) {
6607 EMIT_NEW_LOCLOADA (cfg, ins, n);
6608 } else {
6609 EMIT_NEW_LOCLOAD (cfg, ins, n);
6611 *sp++ = ins;
6612 break;
6614 case MONO_CEE_STLOC_0:
6615 case MONO_CEE_STLOC_1:
6616 case MONO_CEE_STLOC_2:
6617 case MONO_CEE_STLOC_3:
6618 case MONO_CEE_STLOC_S:
6619 case MONO_CEE_STLOC:
6620 CHECK_LOCAL (n);
6621 --sp;
6622 *sp = convert_value (cfg, header->locals [n], *sp);
6623 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6624 UNVERIFIED;
6625 emit_stloc_ir (cfg, sp, header, n);
6626 inline_costs += 1;
6627 break;
6628 case MONO_CEE_LDARGA_S:
6629 case MONO_CEE_LDARGA:
6630 CHECK_ARG (n);
6631 NEW_ARGLOADA (cfg, ins, n);
6632 MONO_ADD_INS (cfg->cbb, ins);
6633 *sp++ = ins;
6634 break;
6635 case MONO_CEE_STARG_S:
6636 case MONO_CEE_STARG:
6637 --sp;
6638 CHECK_ARG (n);
6639 *sp = convert_value (cfg, param_types [n], *sp);
6640 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
6641 UNVERIFIED;
6642 emit_starg_ir (cfg, sp, n);
6643 break;
6644 case MONO_CEE_LDLOCA:
6645 case MONO_CEE_LDLOCA_S: {
6646 guchar *tmp_ip;
6647 CHECK_LOCAL (n);
6649 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, next_ip, end, n))) {
6650 next_ip = tmp_ip;
6651 il_op = MONO_CEE_INITOBJ;
6652 inline_costs += 1;
6653 break;
6656 EMIT_NEW_LOCLOADA (cfg, ins, n);
6657 *sp++ = ins;
6658 break;
6660 case MONO_CEE_LDNULL:
6661 EMIT_NEW_PCONST (cfg, ins, NULL);
6662 ins->type = STACK_OBJ;
6663 *sp++ = ins;
6664 break;
6665 case MONO_CEE_LDC_I4_M1:
6666 case MONO_CEE_LDC_I4_0:
6667 case MONO_CEE_LDC_I4_1:
6668 case MONO_CEE_LDC_I4_2:
6669 case MONO_CEE_LDC_I4_3:
6670 case MONO_CEE_LDC_I4_4:
6671 case MONO_CEE_LDC_I4_5:
6672 case MONO_CEE_LDC_I4_6:
6673 case MONO_CEE_LDC_I4_7:
6674 case MONO_CEE_LDC_I4_8:
6675 case MONO_CEE_LDC_I4_S:
6676 case MONO_CEE_LDC_I4:
6677 EMIT_NEW_ICONST (cfg, ins, n);
6678 *sp++ = ins;
6679 break;
6680 case MONO_CEE_LDC_I8:
6681 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6682 ins->type = STACK_I8;
6683 ins->dreg = alloc_dreg (cfg, STACK_I8);
6684 ins->inst_l = parameter.i64;
6685 MONO_ADD_INS (cfg->cbb, ins);
6686 *sp++ = ins;
6687 break;
6688 case MONO_CEE_LDC_R4: {
6689 float *f;
6690 gboolean use_aotconst = FALSE;
6692 #ifdef TARGET_POWERPC
6693 /* FIXME: Clean this up */
6694 if (cfg->compile_aot)
6695 use_aotconst = TRUE;
6696 #endif
6697 /* FIXME: we should really allocate this only late in the compilation process */
6698 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
6700 if (use_aotconst) {
6701 MonoInst *cons;
6702 int dreg;
6704 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6706 dreg = alloc_freg (cfg);
6707 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6708 ins->type = cfg->r4_stack_type;
6709 } else {
6710 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6711 ins->type = cfg->r4_stack_type;
6712 ins->dreg = alloc_dreg (cfg, STACK_R8);
6713 ins->inst_p0 = f;
6714 MONO_ADD_INS (cfg->cbb, ins);
6716 *f = parameter.f;
6717 *sp++ = ins;
6718 break;
6720 case MONO_CEE_LDC_R8: {
6721 double *d;
6722 gboolean use_aotconst = FALSE;
6724 #ifdef TARGET_POWERPC
6725 /* FIXME: Clean this up */
6726 if (cfg->compile_aot)
6727 use_aotconst = TRUE;
6728 #endif
6730 /* FIXME: we should really allocate this only late in the compilation process */
6731 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
6733 if (use_aotconst) {
6734 MonoInst *cons;
6735 int dreg;
6737 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6739 dreg = alloc_freg (cfg);
6740 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6741 ins->type = STACK_R8;
6742 } else {
6743 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6744 ins->type = STACK_R8;
6745 ins->dreg = alloc_dreg (cfg, STACK_R8);
6746 ins->inst_p0 = d;
6747 MONO_ADD_INS (cfg->cbb, ins);
6749 *d = parameter.d;
6750 *sp++ = ins;
6751 break;
6753 case MONO_CEE_DUP: {
6754 MonoInst *temp, *store;
6755 sp--;
6756 ins = *sp;
6758 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6759 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6761 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6762 *sp++ = ins;
6764 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6765 *sp++ = ins;
6767 inline_costs += 2;
6768 break;
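/* The dup above spills the value to a temp once and reloads it twice, so
 * the two stack slots are distinct loads instead of aliasing one dreg. */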
6770 case MONO_CEE_POP:
6771 --sp;
6773 #ifdef TARGET_X86
6774 if (sp [0]->type == STACK_R8)
6775 /* we need to pop the value from the x86 FP stack */
6776 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6777 #endif
6778 break;
6779 case MONO_CEE_JMP: {
6780 MonoCallInst *call;
6781 int i, n;
6783 INLINE_FAILURE ("jmp");
6784 GSHAREDVT_FAILURE (il_op);
6786 if (stack_start != sp)
6787 UNVERIFIED;
6788 /* FIXME: check the signature matches */
6789 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6790 CHECK_CFG_ERROR;
6792 if (cfg->gshared && mono_method_check_context_used (cmethod))
6793 GENERIC_SHARING_FAILURE (CEE_JMP);
6795 mini_profiler_emit_tail_call (cfg, cmethod);
6797 fsig = mono_method_signature_internal (cmethod);
6798 n = fsig->param_count + fsig->hasthis;
6799 if (cfg->llvm_only) {
6800 MonoInst **args;
6802 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6803 for (i = 0; i < n; ++i)
6804 EMIT_NEW_ARGLOAD (cfg, args [i], i);
6805 ins = mini_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
6806 /*
6807 * The code in mono-basic-block.c treats the rest of the code as dead, but we
6808 * have to emit a normal return since llvm expects it.
6809 */
6810 if (cfg->ret)
6811 emit_setret (cfg, ins);
6812 MONO_INST_NEW (cfg, ins, OP_BR);
6813 ins->inst_target_bb = end_bblock;
6814 MONO_ADD_INS (cfg->cbb, ins);
6815 link_bblock (cfg, cfg->cbb, end_bblock);
6816 break;
6817 } else {
6818 /* Handle tailcalls similarly to calls */
6819 DISABLE_AOT (cfg);
6821 mini_emit_tailcall_parameters (cfg, fsig);
6822 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6823 call->method = cmethod;
6824 // FIXME Other initialization of the tailcall field occurs after
6825 // it is used. So this is the only "real" use and needs more attention.
6826 call->tailcall = TRUE;
6827 call->signature = fsig;
6828 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6829 call->inst.inst_p0 = cmethod;
6830 for (i = 0; i < n; ++i)
6831 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6833 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
6834 call->vret_var = cfg->vret_addr;
6836 mono_arch_emit_call (cfg, call);
6837 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
6838 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
6839 }
6841 start_new_bblock = 1;
6842 break;
6843 }
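/*
 * Orientation note for CEE_JMP above, with an assumed (not from this tree)
 * IL shape:
 *     .method static int32 Fwd (int32 a, int32 b) { jmp int32 C::Impl (int32, int32) }
 * Every incoming argument is reloaded via EMIT_NEW_ARGLOAD; the llvm-only
 * path then compiles it as a normal call plus return, while the JIT path
 * emits an OP_TAILCALL that reuses the caller's frame.
 */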
6844 case MONO_CEE_CALLI: {
6845 // FIXME tail.calli is problematic because the this pointer's type
6846 // is not in the signature, and we cannot check for a byref valuetype.
6847 MonoInst *addr;
6848 MonoInst *callee = NULL;
6850 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
6851 common_call = TRUE; // i.e. skip_ret/push_res/seq_point logic
6852 cmethod = NULL;
6854 gboolean const inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
6855 ? (next_ip < end && next_ip [0] == CEE_RET)
6856 : ((ins_flag & MONO_INST_TAILCALL) != 0));
6857 ins = NULL;
6859 //GSHAREDVT_FAILURE (il_op);
6860 CHECK_STACK (1);
6861 --sp;
6862 addr = *sp;
6863 g_assert (addr);
6864 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
6865 CHECK_CFG_ERROR;
6867 if (method->dynamic && fsig->pinvoke) {
6868 MonoInst *args [3];
6870 /*
6871 * This is a call through a function pointer using a pinvoke
6872 * signature. Have to create a wrapper and call that instead.
6873 * FIXME: This is very slow, need to create a wrapper at JIT time
6874 * instead based on the signature.
6875 */
6876 EMIT_NEW_IMAGECONST (cfg, args [0], m_class_get_image (method->klass));
6877 EMIT_NEW_PCONST (cfg, args [1], fsig);
6878 args [2] = addr;
6879 // FIXME tailcall?
6880 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6881 }
6883 n = fsig->param_count + fsig->hasthis;
6885 CHECK_STACK (n);
6887 //g_assert (!virtual_ || fsig->hasthis);
6889 sp -= n;
6891 if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp)) {
6892 if (break_on_unverified ())
6893 check_call_signature (cfg, fsig, sp); // Again, step through it.
6894 UNVERIFIED;
6895 }
6897 inline_costs += CALL_COST * MIN(10, num_calls++);
6899 /*
6900 * Making generic calls out of gsharedvt methods.
6901 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
6902 * patching gshared method addresses into a gsharedvt method.
6903 */
6904 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
6905 /*
6906 * We pass the address to the gsharedvt trampoline in the rgctx reg
6907 */
6908 callee = addr;
6909 g_assert (addr); // Doubles as boolean after tailcall check.
6910 }
6912 inst_tailcall && is_supported_tailcall (cfg, ip, method, NULL, fsig,
6913 FALSE/*virtual irrelevant*/, addr != NULL, &tailcall);
6915 if (callee) {
6916 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
6917 /* Not tested */
6918 GSHAREDVT_FAILURE (il_op);
6920 if (cfg->llvm_only)
6921 // FIXME:
6922 GSHAREDVT_FAILURE (il_op);
6924 addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
6925 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, callee, tailcall);
6926 goto calli_end;
6927 }
6929 /* Prevent inlining of methods with indirect calls */
6930 INLINE_FAILURE ("indirect call");
6932 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
6933 MonoJumpInfoType info_type;
6934 gpointer info_data;
6936 /*
6937 * Instead of emitting an indirect call, emit a direct call
6938 * with the contents of the aotconst as the patch info.
6939 */
6940 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
6941 info_type = (MonoJumpInfoType)addr->inst_c1;
6942 info_data = addr->inst_p0;
6943 } else {
6944 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
6945 info_data = addr->inst_right->inst_left;
6946 }
6948 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
6949 // non-JIT icall, mostly builtin, but also user-extensible
6950 tailcall = FALSE;
6951 ins = (MonoInst*)mini_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
6952 NULLIFY_INS (addr);
6953 goto calli_end;
6954 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR
6955 || info_type == MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR) {
6956 tailcall = FALSE;
6957 ins = (MonoInst*)mini_emit_abs_call (cfg, info_type, info_data, fsig, sp);
6958 NULLIFY_INS (addr);
6959 goto calli_end;
6960 }
6961 }
6962 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, NULL, tailcall);
6963 goto calli_end;
6964 }
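/*
 * Sketch of the constant-address case above, assuming the callee address
 * was folded to an OP_PCONST/OP_AOTCONST (the target name is hypothetical):
 *     ldftn void C::SomeHelper ()
 *     calli void ()
 * The address instruction carries a MonoJumpInfoType/data pair, so for the
 * icall-address patch kinds the indirect call is replaced by a patched
 * direct call and the now-dead address instruction is NULLIFY_INS'd.
 */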
6965 case MONO_CEE_CALL:
6966 case MONO_CEE_CALLVIRT: {
6967 MonoInst *addr; addr = NULL;
6968 int array_rank; array_rank = 0;
6969 gboolean virtual_; virtual_ = il_op == MONO_CEE_CALLVIRT;
6970 gboolean pass_imt_from_rgctx; pass_imt_from_rgctx = FALSE;
6971 MonoInst *imt_arg; imt_arg = NULL;
6972 gboolean pass_vtable; pass_vtable = FALSE;
6973 gboolean pass_mrgctx; pass_mrgctx = FALSE;
6974 MonoInst *vtable_arg; vtable_arg = NULL;
6975 gboolean check_this; check_this = FALSE;
6976 gboolean delegate_invoke; delegate_invoke = FALSE;
6977 gboolean direct_icall; direct_icall = FALSE;
6978 gboolean tailcall_calli; tailcall_calli = FALSE;
6980 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
6981 common_call = FALSE;
6983 // variables to help in assertions
6984 gboolean called_is_supported_tailcall; called_is_supported_tailcall = FALSE;
6985 MonoMethod *tailcall_method; tailcall_method = NULL;
6986 MonoMethod *tailcall_cmethod; tailcall_cmethod = NULL;
6987 MonoMethodSignature *tailcall_fsig; tailcall_fsig = NULL;
6988 gboolean tailcall_virtual; tailcall_virtual = FALSE;
6989 gboolean tailcall_extra_arg; tailcall_extra_arg = FALSE;
6991 gboolean inst_tailcall; inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
6992 ? (next_ip < end && next_ip [0] == CEE_RET)
6993 : ((ins_flag & MONO_INST_TAILCALL) != 0));
6994 ins = NULL;
6996 /* Used to pass arguments to called functions */
6997 HandleCallData cdata;
6998 memset (&cdata, 0, sizeof (HandleCallData));
7000 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7001 CHECK_CFG_ERROR;
7003 MonoMethod *cil_method; cil_method = cmethod;
7005 if (constrained_class) {
7006 gboolean constrained_is_generic_param =
7007 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
7008 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
7010 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7011 if (cfg->verbose_level > 2)
7012 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
7013 if (!(constrained_is_generic_param &&
7014 cfg->gshared)) {
7015 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
7016 CHECK_CFG_ERROR;
7017 }
7018 } else {
7019 if (cfg->verbose_level > 2)
7020 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
7022 if (constrained_is_generic_param && cfg->gshared) {
7023 /*
7024 * This is needed since get_method_constrained can't find
7025 * the method in klass representing a type var.
7026 * The type var is guaranteed to be a reference type in this
7027 * case.
7028 */
7029 if (!mini_is_gsharedvt_klass (constrained_class))
7030 g_assert (!m_class_is_valuetype (cmethod->klass));
7031 } else {
7032 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
7033 CHECK_CFG_ERROR;
7034 }
7035 }
7037 if (m_class_is_enumtype (constrained_class) && !strcmp (cmethod->name, "GetHashCode")) {
7038 /* Use the corresponding method from the base type to avoid boxing */
7039 MonoType *base_type = mono_class_enum_basetype_internal (constrained_class);
7040 g_assert (base_type);
7041 constrained_class = mono_class_from_mono_type_internal (base_type);
7042 cmethod = get_method_nofail (constrained_class, cmethod->name, 0, 0);
7043 g_assert (cmethod);
7044 }
7045 }
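/*
 * Illustrative example for the enum special case above (MyEnum is a
 * hypothetical type):
 *     constrained. MyEnum
 *     callvirt instance int32 object::GetHashCode ()
 * With int32 as the underlying type, the constrained class is rewritten to
 * int32 and its GetHashCode is called directly, so the enum value is never
 * boxed.
 */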
7047 if (!dont_verify && !cfg->skip_visibility) {
7048 MonoMethod *target_method = cil_method;
7049 if (method->is_inflated) {
7050 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
7051 CHECK_CFG_ERROR;
7052 }
7053 if (!mono_method_can_access_method (method_definition, target_method) &&
7054 !mono_method_can_access_method (method, cil_method))
7055 emit_method_access_failure (cfg, method, cil_method);
7056 }
7058 if (mono_security_core_clr_enabled ())
7059 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
7061 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT)) {
7062 if (!mono_class_is_interface (method->klass))
7063 emit_bad_image_failure (cfg, method, cil_method);
7064 else
7065 virtual_ = TRUE;
7066 }
7068 {
7069 /*
7070 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7071 * converts them to a callvirt.
7072 *
7073 * tests/bug-515884.il is an example of this behavior
7074 */
7075 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7076 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7077 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7078 virtual_ = TRUE;
7079 }
7081 if (!m_class_is_inited (cmethod->klass))
7082 if (!mono_class_init_internal (cmethod->klass))
7083 TYPE_LOAD_ERROR (cmethod->klass);
7085 fsig = mono_method_signature_internal (cmethod);
7086 if (!fsig)
7087 LOAD_ERROR;
7088 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7089 mini_class_is_system_array (cmethod->klass)) {
7090 array_rank = m_class_get_rank (cmethod->klass);
7091 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg, cmethod)) {
7092 direct_icall = TRUE;
7093 } else if (fsig->pinvoke) {
7094 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
7095 fsig = mono_method_signature_internal (wrapper);
7096 } else if (constrained_class) {
7097 } else {
7098 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
7099 CHECK_CFG_ERROR;
7100 }
7102 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
7103 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
7105 /* See code below */
7106 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
7107 MonoBasicBlock *tbb;
7109 GET_BBLOCK (cfg, tbb, next_ip);
7110 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7111 /*
7112 * We want to extend the try block to cover the call, but we can't do it if the
7113 * call is made directly since it's followed by an exception check.
7114 */
7115 direct_icall = FALSE;
7116 }
7117 }
7119 mono_save_token_info (cfg, image, token, cil_method);
7121 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
7122 need_seq_point = TRUE;
7124 /* Don't support calls made using type arguments for now */
7126 if (cfg->gsharedvt) {
7127 if (mini_is_gsharedvt_signature (fsig))
7128 GSHAREDVT_FAILURE (il_op);
7129 }
7132 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7133 g_assert_not_reached ();
7135 n = fsig->param_count + fsig->hasthis;
7137 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
7138 UNVERIFIED;
7140 if (!cfg->gshared)
7141 g_assert (!mono_method_check_context_used (cmethod));
7143 CHECK_STACK (n);
7145 //g_assert (!virtual_ || fsig->hasthis);
7147 sp -= n;
7149 if (virtual_ && cmethod && sp [0]->opcode == OP_TYPED_OBJREF) {
7150 ERROR_DECL (error);
7152 MonoMethod *new_cmethod = mono_class_get_virtual_method (sp [0]->klass, cmethod, FALSE, error);
7153 mono_error_assert_ok (error);
7154 cmethod = new_cmethod;
7155 virtual_ = FALSE;
7156 }
7158 if (cmethod && m_class_get_image (cmethod->klass) == mono_defaults.corlib && !strcmp (m_class_get_name (cmethod->klass), "ThrowHelper"))
7159 cfg->cbb->out_of_line = TRUE;
7161 cdata.method = method;
7162 cdata.inst_tailcall = inst_tailcall;
7164 /*
7165 * We have the `constrained.' prefix opcode.
7166 */
7167 if (constrained_class) {
7168 ins = handle_constrained_call (cfg, cmethod, fsig, constrained_class, sp, &cdata, &cmethod, &virtual_, &emit_widen);
7169 CHECK_CFG_EXCEPTION;
7170 constrained_class = NULL;
7171 if (ins)
7172 goto call_end;
7173 }
7175 for (int i = 0; i < fsig->param_count; ++i)
7176 sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
7178 if (check_call_signature (cfg, fsig, sp)) {
7179 if (break_on_unverified ())
7180 check_call_signature (cfg, fsig, sp); // Again, step through it.
7181 UNVERIFIED;
7182 }
7184 if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7185 delegate_invoke = TRUE;
7187 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7188 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7189 mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
7190 emit_widen = FALSE;
7191 }
7193 if (inst_tailcall) // FIXME
7194 mono_tailcall_print ("missed tailcall intrins_sharable %s -> %s\n", method->name, cmethod->name);
7195 goto call_end;
7196 }
7198 /*
7199 * Implement a workaround for the inherent races involved in locking:
7200 * Monitor.Enter ()
7201 * try {
7202 * } finally {
7203 * Monitor.Exit ()
7204 * }
7205 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7206 * try block, the Exit () won't be executed, see:
7207 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7208 * To work around this, we extend such try blocks to include the last x bytes
7209 * of the Monitor.Enter () call.
7210 */
7211 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
7212 MonoBasicBlock *tbb;
7214 GET_BBLOCK (cfg, tbb, next_ip);
7215 /*
7216 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7217 * from Monitor.Enter like ArgumentNullException.
7218 */
7219 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7220 /* Mark this bblock as needing to be extended */
7221 tbb->extend_try_block = TRUE;
7222 }
7223 }
7225 /* Conversion to a JIT intrinsic */
7226 if ((ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7227 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7228 mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
7229 emit_widen = FALSE;
7230 }
7231 // FIXME This is only missed if in fact the intrinsic involves a call.
7232 if (inst_tailcall) // FIXME
7233 mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method->name, cmethod->name);
7234 goto call_end;
7235 }
7236 CHECK_CFG_ERROR;
7238 /*
7239 * If the callee is a shared method, then its static cctor
7240 * might not get called after the call was patched.
7241 */
7242 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7243 emit_class_init (cfg, cmethod->klass);
7244 CHECK_TYPELOAD (cmethod->klass);
7245 }
7247 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7249 if (cfg->gshared) {
7250 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7252 context_used = mini_method_check_context_used (cfg, cmethod);
7254 if (context_used && mono_class_is_interface (cmethod->klass)) {
7255 /* Generic method interface
7256 calls are resolved via a
7257 helper function and don't
7258 need an imt. */
7259 if (!cmethod_context || !cmethod_context->method_inst)
7260 pass_imt_from_rgctx = TRUE;
7261 }
7263 /*
7264 * If a shared method calls another
7265 * shared method then the caller must
7266 * have a generic sharing context
7267 * because the magic trampoline
7268 * requires it. FIXME: We shouldn't
7269 * have to force the vtable/mrgctx
7270 * variable here. Instead there
7271 * should be a flag in the cfg to
7272 * request a generic sharing context.
7273 */
7274 if (context_used &&
7275 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cfg->method->klass)))
7276 mono_get_vtable_var (cfg);
7277 }
7279 if (pass_vtable) {
7280 if (context_used) {
7281 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7282 } else {
7283 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, &cfg->error);
7284 CHECK_CFG_ERROR;
7286 CHECK_TYPELOAD (cmethod->klass);
7287 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7288 }
7289 }
7291 if (pass_mrgctx) {
7292 g_assert (!vtable_arg);
7294 if (!cfg->compile_aot) {
7295 /*
7296 * emit_get_rgctx_method () calls mono_class_vtable () so check
7297 * for type load errors before.
7298 */
7299 mono_class_setup_vtable (cmethod->klass);
7300 CHECK_TYPELOAD (cmethod->klass);
7301 }
7303 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7305 /* !marshalbyref is needed to properly handle generic methods + remoting */
7306 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7307 MONO_METHOD_IS_FINAL (cmethod)) &&
7308 !mono_class_is_marshalbyref (cmethod->klass)) {
7309 if (virtual_)
7310 check_this = TRUE;
7311 virtual_ = FALSE;
7312 }
7313 }
7315 if (pass_imt_from_rgctx) {
7316 g_assert (!pass_vtable);
7318 imt_arg = emit_get_rgctx_method (cfg, context_used,
7319 cmethod, MONO_RGCTX_INFO_METHOD);
7320 g_assert (imt_arg);
7321 }
7323 if (check_this)
7324 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7326 /* Calling virtual generic methods */
7328 // These temporaries help detangle "pure" computation of
7329 // inputs to is_supported_tailcall from side effects, so that
7330 // is_supported_tailcall can be computed just once.
7331 gboolean virtual_generic; virtual_generic = FALSE;
7332 gboolean virtual_generic_imt; virtual_generic_imt = FALSE;
7334 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7335 !(MONO_METHOD_IS_FINAL (cmethod) &&
7336 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7337 fsig->generic_param_count &&
7338 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
7339 !cfg->llvm_only) {
7341 g_assert (fsig->is_inflated);
7343 virtual_generic = TRUE;
7345 /* Prevent inlining of methods that contain indirect calls */
7346 INLINE_FAILURE ("virtual generic call");
7348 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7349 GSHAREDVT_FAILURE (il_op);
7351 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
7352 virtual_generic_imt = TRUE;
7353 g_assert (!imt_arg);
7354 if (!context_used)
7355 g_assert (cmethod->is_inflated);
7357 imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
7358 g_assert (imt_arg);
7360 virtual_ = TRUE;
7361 vtable_arg = NULL;
7362 }
7363 }
7365 // Capture some intent before computing tailcall.
7367 gboolean make_generic_call_out_of_gsharedvt_method;
7368 gboolean will_have_imt_arg;
7370 make_generic_call_out_of_gsharedvt_method = FALSE;
7371 will_have_imt_arg = FALSE;
7373 /*
7374 * Making generic calls out of gsharedvt methods.
7375 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7376 * patching gshared method addresses into a gsharedvt method.
7377 */
7378 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
7379 !(m_class_get_rank (cmethod->klass) && m_class_get_byval_arg (cmethod->klass)->type != MONO_TYPE_SZARRAY) &&
7380 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
7382 make_generic_call_out_of_gsharedvt_method = TRUE;
7384 if (virtual_) {
7385 if (fsig->generic_param_count) {
7386 will_have_imt_arg = TRUE;
7387 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
7388 will_have_imt_arg = TRUE;
7389 }
7390 }
7391 }
7393 #ifdef ENABLE_NETCORE
7394 if (save_last_error) {
7395 mono_emit_jit_icall (cfg, mono_marshal_clear_last_error, NULL);
7396 }
7397 #endif
7399 /* Tail prefix / tailcall optimization */
7401 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests.
7402 Inlining and stack traces are not guaranteed however. */
7403 /* FIXME: runtime generic context pointer for jumps? */
7404 /* FIXME: handle this for generic sharing eventually */
7406 // tailcall means "the backend can and will handle it".
7407 // inst_tailcall means the tail. prefix is present.
7408 tailcall_extra_arg = vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass);
7409 tailcall = inst_tailcall && is_supported_tailcall (cfg, ip, method, cmethod, fsig,
7410 virtual_, tailcall_extra_arg, &tailcall_calli);
7411 // Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall).
7412 // Capture values to later assert they don't change.
7413 called_is_supported_tailcall = TRUE;
7414 tailcall_method = method;
7415 tailcall_cmethod = cmethod;
7416 tailcall_fsig = fsig;
7417 tailcall_virtual = virtual_;
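/*
 * For orientation, a minimal (hypothetical) IL shape that makes
 * inst_tailcall true above:
 *     tail. call int32 C::Helper (int32)
 *     ret
 * tailcall additionally requires is_supported_tailcall () to agree, e.g.
 * that any extra imt/vtable argument can actually be passed by the backend.
 */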
7419 if (virtual_generic) {
7420 if (virtual_generic_imt) {
7421 if (tailcall) {
7422 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7423 INLINE_FAILURE ("tailcall");
7424 }
7425 common_call = TRUE;
7426 goto call_end;
7427 }
7429 MonoInst *this_temp, *this_arg_temp, *store;
7430 MonoInst *iargs [4];
7432 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7433 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7434 MONO_ADD_INS (cfg->cbb, store);
7436 /* FIXME: This should be a managed pointer */
7437 this_arg_temp = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
7439 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7440 iargs [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
7442 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7443 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
7445 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7447 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7449 if (inst_tailcall) // FIXME
7450 mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method->name, cmethod->name);
7451 goto call_end;
7452 }
7453 CHECK_CFG_ERROR;
7455 /* Inlining */
7456 if ((cfg->opt & MONO_OPT_INLINE) &&
7457 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7458 mono_method_check_inlining (cfg, cmethod)) {
7459 int costs;
7460 gboolean always = FALSE;
7462 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7463 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7464 /* Prevent inlining of methods that call wrappers */
7465 INLINE_FAILURE ("wrapper call");
7466 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7467 // Neither pinvoke nor icall is likely to be tailcalled.
7468 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
7469 always = TRUE;
7470 }
7472 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
7473 if (costs) {
7474 cfg->real_offset += 5;
7476 if (!MONO_TYPE_IS_VOID (fsig->ret))
7477 /* *sp is already set by inline_method */
7478 ins = *sp;
7480 inline_costs += costs;
7481 // FIXME This is missed if the inlinee contains tail calls that
7482 // would work, but not once inlined into caller.
7483 // This matchingness could be a factor in inlining.
7484 // i.e. Do not inline if it hurts tailcall, do inline
7485 // if it helps and/or is neutral, and helps performance
7486 // using usual heuristics.
7487 // Note that inlining will expose multiple tailcall opportunities
7488 // so the tradeoff is not obvious. If we can tailcall anything
7489 // like desktop, then this factor mostly falls away, except
7490 // that inlining can affect tailcall performance due to
7491 // signature match/mismatch.
7492 if (inst_tailcall) // FIXME
7493 mono_tailcall_print ("missed tailcall inline %s -> %s\n", method->name, cmethod->name);
7494 goto call_end;
7495 }
7496 }
7498 /* Tail recursion elimination */
7499 if (((cfg->opt & MONO_OPT_TAILCALL) || inst_tailcall) && il_op == MONO_CEE_CALL && cmethod == method && next_ip < end && next_ip [0] == CEE_RET && !vtable_arg) {
7500 gboolean has_vtargs = FALSE;
7501 int i;
7503 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7504 INLINE_FAILURE ("tailcall");
7506 /* keep it simple */
7507 for (i = fsig->param_count - 1; !has_vtargs && i >= 0; i--)
7508 has_vtargs = MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod)->params [i]);
7510 if (!has_vtargs) {
7511 if (need_seq_point) {
7512 emit_seq_point (cfg, method, ip, FALSE, TRUE);
7513 need_seq_point = FALSE;
7514 }
7515 for (i = 0; i < n; ++i)
7516 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7518 mini_profiler_emit_tail_call (cfg, cmethod);
7520 MONO_INST_NEW (cfg, ins, OP_BR);
7521 MONO_ADD_INS (cfg->cbb, ins);
7522 tblock = start_bblock->out_bb [0];
7523 link_bblock (cfg, cfg->cbb, tblock);
7524 ins->inst_target_bb = tblock;
7525 start_new_bblock = 1;
7527 /* skip the CEE_RET, too */
7528 if (ip_in_bb (cfg, cfg->cbb, next_ip))
7529 skip_ret = TRUE;
7530 push_res = FALSE;
7531 need_seq_point = FALSE;
7532 goto call_end;
7533 }
7534 }
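/*
 * Sketch of the tail recursion elimination above, assuming no valuetype
 * arguments (hypothetical C# equivalent):
 *     static int Sum (int n, int acc) { ... return Sum (n - 1, acc + n); }
 * Instead of a call, each argument is written back with EMIT_NEW_ARGSTORE
 * and an OP_BR branches to the method's start bblock; the following
 * CEE_RET is then skipped.
 */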
7536 inline_costs += CALL_COST * MIN(10, num_calls++);
7538 /*
7539 * Synchronized wrappers.
7540 * It's hard to determine where to replace a method with its synchronized
7541 * wrapper without causing an infinite recursion. The current solution is
7542 * to add the synchronized wrapper in the trampolines, and to
7543 * change the called method to a dummy wrapper, and resolve that wrapper
7544 * to the real method in mono_jit_compile_method ().
7545 */
7546 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
7547 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
7548 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig)) {
7549 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7550 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7551 }
7552 }
7554 /*
7555 * Making generic calls out of gsharedvt methods.
7556 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7557 * patching gshared method addresses into a gsharedvt method.
7558 */
7559 if (make_generic_call_out_of_gsharedvt_method) {
7560 if (virtual_) {
7561 //if (mono_class_is_interface (cmethod->klass))
7562 //GSHAREDVT_FAILURE (il_op);
7563 // disable for possible remoting calls
7564 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
7565 GSHAREDVT_FAILURE (il_op);
7566 if (fsig->generic_param_count) {
7567 /* virtual generic call */
7568 g_assert (!imt_arg);
7569 g_assert (will_have_imt_arg);
7570 /* Same as the virtual generic case above */
7571 imt_arg = emit_get_rgctx_method (cfg, context_used,
7572 cmethod, MONO_RGCTX_INFO_METHOD);
7573 g_assert (imt_arg);
7574 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7575 vtable_arg = NULL;
7576 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
7577 /* This can happen when we call a fully instantiated iface method */
7578 g_assert (will_have_imt_arg);
7579 imt_arg = emit_get_rgctx_method (cfg, context_used,
7580 cmethod, MONO_RGCTX_INFO_METHOD);
7581 g_assert (imt_arg);
7582 vtable_arg = NULL;
7583 }
7584 }
7586 if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
7587 keep_this_alive = sp [0];
7589 MonoRgctxInfoType info_type;
7591 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7592 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
7593 else
7594 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
7595 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
7597 if (cfg->llvm_only) {
7598 // FIXME: Avoid initializing vtable_arg
7599 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
7600 if (inst_tailcall) // FIXME
7601 mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method->name, cmethod->name);
7602 } else {
7603 tailcall = tailcall_calli;
7604 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
7605 tailcall_remove_ret |= tailcall;
7606 }
7607 goto call_end;
7608 }
7610 /* Generic sharing */
7612 /*
7613 * Use this if the callee is gsharedvt sharable too, since
7614 * at runtime we might find an instantiation so the call cannot
7615 * be patched (the 'no_patch' code path in mini-trampolines.c).
7616 */
7617 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
7618 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7619 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7620 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
7621 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7622 INLINE_FAILURE ("gshared");
7624 g_assert (cfg->gshared && cmethod);
7625 g_assert (!addr);
7627 /*
7628 * We are compiling a call to a
7629 * generic method from shared code,
7630 * which means that we have to look up
7631 * the method in the rgctx and do an
7632 * indirect call.
7633 */
7634 if (fsig->hasthis)
7635 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7637 if (cfg->llvm_only) {
7638 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
7639 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
7640 else
7641 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_FTNDESC);
7642 // FIXME: Avoid initializing imt_arg/vtable_arg
7643 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
7644 if (inst_tailcall) // FIXME
7645 mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method->name, cmethod->name);
7646 } else {
7647 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7648 if (inst_tailcall)
7649 mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli ? "making" : "missed", method->name, cmethod->name);
7650 tailcall = tailcall_calli;
7651 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
7652 tailcall_remove_ret |= tailcall;
7653 }
7654 goto call_end;
7655 }
7657 /* Direct calls to icalls */
7658 if (direct_icall) {
7659 MonoMethod *wrapper;
7660 int costs;
7662 /* Inline the wrapper */
7663 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
7665 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
7666 g_assert (costs > 0);
7667 cfg->real_offset += 5;
7669 if (!MONO_TYPE_IS_VOID (fsig->ret))
7670 /* *sp is already set by inline_method */
7671 ins = *sp;
7673 inline_costs += costs;
7675 if (inst_tailcall) // FIXME
7676 mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method->name, cmethod->name);
7677 goto call_end;
7678 }
7680 /* Array methods */
7681 if (array_rank) {
7682 MonoInst *addr;
7684 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7685 MonoInst *val = sp [fsig->param_count];
7687 if (val->type == STACK_OBJ) {
7688 MonoInst *iargs [2];
7690 iargs [0] = sp [0];
7691 iargs [1] = val;
7693 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7694 }
7696 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7697 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7698 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
7699 mini_emit_write_barrier (cfg, addr, val);
7700 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
7701 GSHAREDVT_FAILURE (il_op);
7702 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7703 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7705 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7706 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7707 if (!m_class_is_valuetype (m_class_get_element_class (cmethod->klass)) && !readonly)
7708 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7709 CHECK_TYPELOAD (cmethod->klass);
7711 readonly = FALSE;
7712 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7713 ins = addr;
7714 } else {
7715 g_assert_not_reached ();
7716 }
7718 emit_widen = FALSE;
7719 if (inst_tailcall) // FIXME
7720 mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method->name, cmethod->name);
7721 goto call_end;
7722 }
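/*
 * Illustrative mapping for the array intrinsics above: accessors such as
 *     call instance float32 float32[,]::Get (int32, int32)
 * have no IL body; mini_emit_ldelema_ins computes the (bounds-checked)
 * element address, and Get/Set/Address then reduce to a load, a store
 * (plus a write barrier for object elements), or the address itself.
 */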
7724 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
7725 if (ins) {
7726 if (inst_tailcall) // FIXME
7727 mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method->name, cmethod->name);
7728 goto call_end;
7729 }
7731 /* Tail prefix / tailcall optimization */
7733 if (tailcall) {
7734 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7735 INLINE_FAILURE ("tailcall");
7736 }
7738 /*
7739 * Virtual calls in llvm-only mode.
7740 */
7741 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
7742 ins = mini_emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
7743 goto call_end;
7744 }
7746 /* Common call */
7747 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
7748 INLINE_FAILURE ("call");
7749 common_call = TRUE;
7751 call_end:
7752 // Check that the decision to tailcall would not have changed.
7753 g_assert (!called_is_supported_tailcall || tailcall_method == method);
7754 // FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway.
7755 // If this still fails, restructure the code, or call tailcall_supported again and assert no change.
7756 g_assert (!called_is_supported_tailcall || !tailcall || tailcall_cmethod == cmethod);
7757 g_assert (!called_is_supported_tailcall || tailcall_fsig == fsig);
7758 g_assert (!called_is_supported_tailcall || tailcall_virtual == virtual_);
7759 g_assert (!called_is_supported_tailcall || tailcall_extra_arg == (vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass)));
7761 if (common_call) // FIXME goto call_end && !common_call often skips tailcall processing.
7762 ins = mini_emit_method_call_full (cfg, cmethod, fsig, tailcall, sp, virtual_ ? sp [0] : NULL,
7763 imt_arg, vtable_arg);
7765 /*
7766 * Handle devirt of some A.B.C calls by replacing the result of A.B with a OP_TYPED_OBJREF instruction, so the .C
7767 * call can be devirtualized above.
7768 */
7769 if (cmethod)
7770 ins = handle_call_res_devirt (cfg, cmethod, ins);
7772 calli_end:
7773 if ((tailcall_remove_ret || (common_call && tailcall)) && !cfg->llvm_only) {
7774 link_bblock (cfg, cfg->cbb, end_bblock);
7775 start_new_bblock = 1;
7777 // FIXME: Eliminate unreachable epilogs
7779 /*
7780 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7781 * only reachable from this call.
7782 */
7783 GET_BBLOCK (cfg, tblock, next_ip);
7784 if (tblock == cfg->cbb || tblock->in_count == 0)
7785 skip_ret = TRUE;
7786 push_res = FALSE;
7787 need_seq_point = FALSE;
7788 }
7790 if (ins_flag & MONO_INST_TAILCALL)
7791 mini_test_tailcall (cfg, tailcall);
7793 /* End of call, INS should contain the result of the call, if any */
7795 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
7796 g_assert (ins);
7797 if (emit_widen)
7798 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7799 else
7800 *sp++ = ins;
7801 }
7803 if (save_last_error) {
7804 save_last_error = FALSE;
7805 #ifdef TARGET_WIN32
7806 // Making icalls etc could clobber the value so emit inline code
7807 // to read last error on Windows.
7808 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
7809 ins->dreg = alloc_dreg (cfg, STACK_I4);
7810 ins->type = STACK_I4;
7811 MONO_ADD_INS (cfg->cbb, ins);
7812 mono_emit_jit_icall (cfg, mono_marshal_set_last_error_windows, &ins);
7813 #else
7814 mono_emit_jit_icall (cfg, mono_marshal_set_last_error, NULL);
7815 #endif
7816 }
7818 if (keep_this_alive) {
7819 MonoInst *dummy_use;
7821 /* See mini_emit_method_call_full () */
7822 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
7823 }
7825 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
7826 /*
7827 * Clang can convert these calls to tailcalls which screw up the stack
7828 * walk. This happens even when the -fno-optimize-sibling-calls
7829 * option is passed to clang.
7830 * Work around this by emitting a dummy call.
7831 */
7832 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
7833 }
7835 CHECK_CFG_EXCEPTION;
7837 if (skip_ret) {
7838 // FIXME When not followed by CEE_RET, correct behavior is to raise an exception.
7839 g_assert (next_ip [0] == CEE_RET);
7840 next_ip += 1;
7841 il_op = MonoOpcodeEnum_Invalid; // Call or ret? Unclear.
7842 }
7843 ins_flag = 0;
7844 constrained_class = NULL;
7846 if (need_seq_point) {
7847 // Check if this is a nested call and remove the non_empty_stack of the last call, only for non-native methods
7848 if (!(method->flags & METHOD_IMPL_ATTRIBUTE_NATIVE)) {
7849 if (emitted_funccall_seq_point) {
7850 if (cfg->last_seq_point)
7851 cfg->last_seq_point->flags |= MONO_INST_NESTED_CALL;
7852 }
7853 else
7854 emitted_funccall_seq_point = TRUE;
7855 }
7856 emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
7857 }
7858 break;
7859 }
7860 case MONO_CEE_RET:
7861 mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
7863 if (cfg->method != method) {
7864 /* return from inlined method */
7865 /*
7866 * If in_count == 0, that means the ret is unreachable due to
7867 * being preceded by a throw. In that case, inline_method () will
7868 * handle setting the return value
7869 * (test case: test_0_inline_throw ()).
7870 */
7871 if (return_var && cfg->cbb->in_count) {
7872 MonoType *ret_type = mono_method_signature_internal (method)->ret;
7874 MonoInst *store;
7875 CHECK_STACK (1);
7876 --sp;
7877 *sp = convert_value (cfg, ret_type, *sp);
7879 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7880 UNVERIFIED;
7882 //g_assert (returnvar != -1);
7883 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7884 cfg->ret_var_set = TRUE;
7885 }
7886 } else {
7887 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
7888 emit_pop_lmf (cfg);
7890 if (cfg->ret) {
7891 MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
7893 if (seq_points && !sym_seq_points) {
7894 /*
7895 * Place a seq point here too even though the IL stack is not
7896 * empty, so a step over on
7897 * call <FOO>
7898 * ret
7899 * will work correctly.
7900 */
7901 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7902 MONO_ADD_INS (cfg->cbb, ins);
7903 }
7905 g_assert (!return_var);
7906 CHECK_STACK (1);
7907 --sp;
7908 *sp = convert_value (cfg, ret_type, *sp);
7910 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7911 UNVERIFIED;
7913 emit_setret (cfg, *sp);
7914 }
7915 }
7916 if (sp != stack_start)
7917 UNVERIFIED;
7918 MONO_INST_NEW (cfg, ins, OP_BR);
7919 ins->inst_target_bb = end_bblock;
7920 MONO_ADD_INS (cfg->cbb, ins);
7921 link_bblock (cfg, cfg->cbb, end_bblock);
7922 start_new_bblock = 1;
7923 break;
7924 case MONO_CEE_BR_S:
7925 MONO_INST_NEW (cfg, ins, OP_BR);
7926 GET_BBLOCK (cfg, tblock, target);
7927 link_bblock (cfg, cfg->cbb, tblock);
7928 ins->inst_target_bb = tblock;
7929 if (sp != stack_start) {
7930 handle_stack_args (cfg, stack_start, sp - stack_start);
7931 sp = stack_start;
7932 CHECK_UNVERIFIABLE (cfg);
7933 }
7934 MONO_ADD_INS (cfg->cbb, ins);
7935 start_new_bblock = 1;
7936 inline_costs += BRANCH_COST;
7937 break;
7938 case MONO_CEE_BEQ_S:
7939 case MONO_CEE_BGE_S:
7940 case MONO_CEE_BGT_S:
7941 case MONO_CEE_BLE_S:
7942 case MONO_CEE_BLT_S:
7943 case MONO_CEE_BNE_UN_S:
7944 case MONO_CEE_BGE_UN_S:
7945 case MONO_CEE_BGT_UN_S:
7946 case MONO_CEE_BLE_UN_S:
7947 case MONO_CEE_BLT_UN_S:
7948 MONO_INST_NEW (cfg, ins, il_op + BIG_BRANCH_OFFSET);
7950 ADD_BINCOND (NULL);
7952 sp = stack_start;
7953 inline_costs += BRANCH_COST;
7954 break;
7955 case MONO_CEE_BR:
7956 MONO_INST_NEW (cfg, ins, OP_BR);
7958 GET_BBLOCK (cfg, tblock, target);
7959 link_bblock (cfg, cfg->cbb, tblock);
7960 ins->inst_target_bb = tblock;
7961 if (sp != stack_start) {
7962 handle_stack_args (cfg, stack_start, sp - stack_start);
7963 sp = stack_start;
7964 CHECK_UNVERIFIABLE (cfg);
7965 }
7967 MONO_ADD_INS (cfg->cbb, ins);
7969 start_new_bblock = 1;
7970 inline_costs += BRANCH_COST;
7971 break;
7972 case MONO_CEE_BRFALSE_S:
7973 case MONO_CEE_BRTRUE_S:
7974 case MONO_CEE_BRFALSE:
7975 case MONO_CEE_BRTRUE: {
7976 MonoInst *cmp;
7977 gboolean is_true = il_op == MONO_CEE_BRTRUE_S || il_op == MONO_CEE_BRTRUE;
7979 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7980 UNVERIFIED;
7982 sp--;
7984 GET_BBLOCK (cfg, tblock, target);
7985 link_bblock (cfg, cfg->cbb, tblock);
7986 GET_BBLOCK (cfg, tblock, next_ip);
7987 link_bblock (cfg, cfg->cbb, tblock);
7989 if (sp != stack_start) {
7990 handle_stack_args (cfg, stack_start, sp - stack_start);
7991 CHECK_UNVERIFIABLE (cfg);
7992 }
7994 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7995 cmp->sreg1 = sp [0]->dreg;
7996 type_from_op (cfg, cmp, sp [0], NULL);
7997 CHECK_TYPE (cmp);
7999 #if SIZEOF_REGISTER == 4
8000 if (cmp->opcode == OP_LCOMPARE_IMM) {
8001 /* Convert it to OP_LCOMPARE */
8002 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8003 ins->type = STACK_I8;
8004 ins->dreg = alloc_dreg (cfg, STACK_I8);
8005 ins->inst_l = 0;
8006 MONO_ADD_INS (cfg->cbb, ins);
8007 cmp->opcode = OP_LCOMPARE;
8008 cmp->sreg2 = ins->dreg;
8009 }
8010 #endif
8011 MONO_ADD_INS (cfg->cbb, cmp);
8013 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8014 type_from_op (cfg, ins, sp [0], NULL);
8015 MONO_ADD_INS (cfg->cbb, ins);
8016 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
8017 GET_BBLOCK (cfg, tblock, target);
8018 ins->inst_true_bb = tblock;
8019 GET_BBLOCK (cfg, tblock, next_ip);
8020 ins->inst_false_bb = tblock;
8021 start_new_bblock = 2;
8023 sp = stack_start;
8024 inline_costs += BRANCH_COST;
8025 break;
8026 }
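/*
 * Sketch of the brtrue/brfalse lowering above: "brtrue target" on an int32
 * becomes roughly
 *     OP_ICOMPARE_IMM sreg, 0
 *     CEE_BNE_UN -> target (brfalse uses CEE_BEQ)
 * i.e. an explicit compare against zero plus a two-way conditional branch
 * with both successor bblocks linked.
 */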
8027 case MONO_CEE_BEQ:
8028 case MONO_CEE_BGE:
8029 case MONO_CEE_BGT:
8030 case MONO_CEE_BLE:
8031 case MONO_CEE_BLT:
8032 case MONO_CEE_BNE_UN:
8033 case MONO_CEE_BGE_UN:
8034 case MONO_CEE_BGT_UN:
8035 case MONO_CEE_BLE_UN:
8036 case MONO_CEE_BLT_UN:
8037 MONO_INST_NEW (cfg, ins, il_op);
8039 ADD_BINCOND (NULL);
8041 sp = stack_start;
8042 inline_costs += BRANCH_COST;
8043 break;
8044 case MONO_CEE_SWITCH: {
8045 MonoInst *src1;
8046 MonoBasicBlock **targets;
8047 MonoBasicBlock *default_bblock;
8048 MonoJumpInfoBBTable *table;
8049 int offset_reg = alloc_preg (cfg);
8050 int target_reg = alloc_preg (cfg);
8051 int table_reg = alloc_preg (cfg);
8052 int sum_reg = alloc_preg (cfg);
8053 gboolean use_op_switch;
8055 n = read32 (ip + 1);
8056 --sp;
8057 src1 = sp [0];
8058 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8059 UNVERIFIED;
8061 ip += 5;
8063 GET_BBLOCK (cfg, default_bblock, next_ip);
8064 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8066 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8067 for (i = 0; i < n; ++i) {
8068 GET_BBLOCK (cfg, tblock, next_ip + (gint32)read32 (ip));
8069 targets [i] = tblock;
8070 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8071 ip += 4;
8072 }
8074 if (sp != stack_start) {
8075 /*
8076 * Link the current bb with the targets as well, so handle_stack_args
8077 * will set their in_stack correctly.
8078 */
8079 link_bblock (cfg, cfg->cbb, default_bblock);
8080 for (i = 0; i < n; ++i)
8081 link_bblock (cfg, cfg->cbb, targets [i]);
8083 handle_stack_args (cfg, stack_start, sp - stack_start);
8084 sp = stack_start;
8085 CHECK_UNVERIFIABLE (cfg);
8087 /* Undo the links */
8088 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
8089 for (i = 0; i < n; ++i)
8090 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
8091 }
8093 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8094 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8096 for (i = 0; i < n; ++i)
8097 link_bblock (cfg, cfg->cbb, targets [i]);
8099 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8100 table->table = targets;
8101 table->table_size = n;
8103 use_op_switch = FALSE;
8104 #ifdef TARGET_ARM
8105 /* ARM implements SWITCH statements differently */
8106 /* FIXME: Make it use the generic implementation */
8107 if (!cfg->compile_aot)
8108 use_op_switch = TRUE;
8109 #endif
8111 if (COMPILE_LLVM (cfg))
8112 use_op_switch = TRUE;
8114 cfg->cbb->has_jump_table = 1;
8116 if (use_op_switch) {
8117 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8118 ins->sreg1 = src1->dreg;
8119 ins->inst_p0 = table;
8120 ins->inst_many_bb = targets;
8121 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
8122 MONO_ADD_INS (cfg->cbb, ins);
8123 } else {
8124 if (TARGET_SIZEOF_VOID_P == 8)
8125 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8126 else
8127 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8129 #if SIZEOF_REGISTER == 8
8130 /* The upper word might not be zero, and we add it to a 64 bit address later */
8131 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8132 #endif
8134 if (cfg->compile_aot) {
8135 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8136 } else {
8137 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8138 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8139 ins->inst_p0 = table;
8140 ins->dreg = table_reg;
8141 MONO_ADD_INS (cfg->cbb, ins);
8144 /* FIXME: Use load_memindex */
8145 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8147 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8148 }
8149 start_new_bblock = 1;
8150 inline_costs += BRANCH_COST * 2;
8151 break;
8152 }
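/*
 * Address arithmetic used by the jump-table path above, as a sketch:
 *     target = *(table + index * TARGET_SIZEOF_VOID_P);
 * which is what the OP_SHL_IMM (by 2 or 3), OP_PADD and LOAD_MEMBASE
 * sequence computes before OP_BR_REG, after the unsigned range check has
 * routed out-of-range indexes to the default bblock.
 */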
8153 case MONO_CEE_LDIND_I1:
8154 case MONO_CEE_LDIND_U1:
8155 case MONO_CEE_LDIND_I2:
8156 case MONO_CEE_LDIND_U2:
8157 case MONO_CEE_LDIND_I4:
8158 case MONO_CEE_LDIND_U4:
8159 case MONO_CEE_LDIND_I8:
8160 case MONO_CEE_LDIND_I:
8161 case MONO_CEE_LDIND_R4:
8162 case MONO_CEE_LDIND_R8:
8163 case MONO_CEE_LDIND_REF:
8164 --sp;
8166 ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (ldind_to_type (il_op)), sp [0], 0, ins_flag);
8167 *sp++ = ins;
8168 ins_flag = 0;
8169 break;
8170 case MONO_CEE_STIND_REF:
8171 case MONO_CEE_STIND_I1:
8172 case MONO_CEE_STIND_I2:
8173 case MONO_CEE_STIND_I4:
8174 case MONO_CEE_STIND_I8:
8175 case MONO_CEE_STIND_R4:
8176 case MONO_CEE_STIND_R8:
8177 case MONO_CEE_STIND_I: {
8178 sp -= 2;
8180 if (ins_flag & MONO_INST_VOLATILE) {
8181 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8182 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
8183 }
8185 if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8)
8186 sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]);
8187 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (il_op), sp [0]->dreg, 0, sp [1]->dreg);
8188 ins->flags |= ins_flag;
8189 ins_flag = 0;
8191 MONO_ADD_INS (cfg->cbb, ins);
8193 if (il_op == MONO_CEE_STIND_REF) {
8194 /* stind.ref must only be used with object references. */
8195 if (sp [1]->type != STACK_OBJ)
8196 UNVERIFIED;
8197 if (cfg->gen_write_barriers && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
8198 mini_emit_write_barrier (cfg, sp [0], sp [1]);
8199 }
8201 inline_costs += 1;
8202 break;
8203 }
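/*
 * Why the write barrier above matters, with a hypothetical IL shape:
 *     ldarg.0   // managed pointer
 *     ldarg.1   // object reference
 *     stind.ref
 * A generational GC has to be told about stores of object references, so
 * any stind.ref whose value is not a known null goes through
 * mini_emit_write_barrier in addition to the plain store.
 */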
8204 case MONO_CEE_MUL:
8205 MONO_INST_NEW (cfg, ins, il_op);
8206 sp -= 2;
8207 ins->sreg1 = sp [0]->dreg;
8208 ins->sreg2 = sp [1]->dreg;
8209 type_from_op (cfg, ins, sp [0], sp [1]);
8210 CHECK_TYPE (ins);
8211 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
8213 /* Use the immediate opcodes if possible */
8214 int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8216 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->inst_c0)) {
8217 if (imm_opcode != -1) {
8218 ins->opcode = imm_opcode;
8219 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8220 ins->sreg2 = -1;
8222 NULLIFY_INS (sp [1]);
8223 }
8224 }
8226 MONO_ADD_INS ((cfg)->cbb, (ins));
8228 *sp++ = mono_decompose_opcode (cfg, ins);
8229 break;
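/*
 * Sketch of the immediate folding above: if the right operand is an
 * OP_ICONST, e.g. from "ldloc.0; ldc.i4 8; mul", the two-register multiply
 * is rewritten to its _IMM form (when mono_arch_is_inst_imm allows the
 * constant) and the constant instruction is nullified instead of emitted.
 */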
8230 case MONO_CEE_ADD:
8231 case MONO_CEE_SUB:
8232 case MONO_CEE_DIV:
8233 case MONO_CEE_DIV_UN:
8234 case MONO_CEE_REM:
8235 case MONO_CEE_REM_UN:
8236 case MONO_CEE_AND:
8237 case MONO_CEE_OR:
8238 case MONO_CEE_XOR:
8239 case MONO_CEE_SHL:
8240 case MONO_CEE_SHR:
8241 case MONO_CEE_SHR_UN: {
8242 MONO_INST_NEW (cfg, ins, il_op);
8243 sp -= 2;
8244 ins->sreg1 = sp [0]->dreg;
8245 ins->sreg2 = sp [1]->dreg;
8246 type_from_op (cfg, ins, sp [0], sp [1]);
8247 CHECK_TYPE (ins);
8248 add_widen_op (cfg, ins, &sp [0], &sp [1]);
8249 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
8251 /* Use the immediate opcodes if possible */
8252 int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8254 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) &&
8255 mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8256 if (imm_opcode != -1) {
8257 ins->opcode = imm_opcode;
8258 if (sp [1]->opcode == OP_I8CONST) {
8259 #if SIZEOF_REGISTER == 8
8260 ins->inst_imm = sp [1]->inst_l;
8261 #else
8262 ins->inst_l = sp [1]->inst_l;
8263 #endif
8264 } else {
8265 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8266 }
8267 ins->sreg2 = -1;
8269 /* Might be followed by an instruction added by add_widen_op */
8270 if (sp [1]->next == NULL)
8271 NULLIFY_INS (sp [1]);
8272 }
8273 }
8274 MONO_ADD_INS ((cfg)->cbb, (ins));
8276 *sp++ = mono_decompose_opcode (cfg, ins);
8277 break;
8278 }
8279 case MONO_CEE_NEG:
8280 case MONO_CEE_NOT:
8281 case MONO_CEE_CONV_I1:
8282 case MONO_CEE_CONV_I2:
8283 case MONO_CEE_CONV_I4:
8284 case MONO_CEE_CONV_R4:
8285 case MONO_CEE_CONV_R8:
8286 case MONO_CEE_CONV_U4:
8287 case MONO_CEE_CONV_I8:
8288 case MONO_CEE_CONV_U8:
8289 case MONO_CEE_CONV_OVF_I8:
8290 case MONO_CEE_CONV_OVF_U8:
8291 case MONO_CEE_CONV_R_UN:
8292 /* Special case this earlier so we have long constants in the IR */
8293 if ((il_op == MONO_CEE_CONV_I8 || il_op == MONO_CEE_CONV_U8) && (sp [-1]->opcode == OP_ICONST)) {
8294 int data = sp [-1]->inst_c0;
8295 sp [-1]->opcode = OP_I8CONST;
8296 sp [-1]->type = STACK_I8;
8297 #if SIZEOF_REGISTER == 8
8298 if (il_op == MONO_CEE_CONV_U8)
8299 sp [-1]->inst_c0 = (guint32)data;
8300 else
8301 sp [-1]->inst_c0 = data;
8302 #else
8303 if (il_op == MONO_CEE_CONV_U8)
8304 sp [-1]->inst_l = (guint32)data;
8305 else
8306 sp [-1]->inst_l = data;
8307 #endif
8308 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8309 }
8310 else {
8311 ADD_UNOP (il_op);
8312 }
8313 break;
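/*
 * Example of the constant special case above: "ldc.i4.m1; conv.i8" turns
 * the existing OP_ICONST directly into an OP_I8CONST holding -1, while
 * conv.u8 zero-extends it to 0xffffffff, so later passes see a long
 * constant rather than a widening op.
 */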
8314 case MONO_CEE_CONV_OVF_I4:
8315 case MONO_CEE_CONV_OVF_I1:
8316 case MONO_CEE_CONV_OVF_I2:
8317 case MONO_CEE_CONV_OVF_I:
8318 case MONO_CEE_CONV_OVF_U:
8319 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
8320 ADD_UNOP (CEE_CONV_OVF_I8);
8321 ADD_UNOP (il_op);
8322 } else {
8323 ADD_UNOP (il_op);
8324 }
8325 break;
8326 case MONO_CEE_CONV_OVF_U1:
8327 case MONO_CEE_CONV_OVF_U2:
8328 case MONO_CEE_CONV_OVF_U4:
8329 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
8330 ADD_UNOP (CEE_CONV_OVF_U8);
8331 ADD_UNOP (il_op);
8332 } else {
8333 ADD_UNOP (il_op);
8334 }
8335 break;
8336 case MONO_CEE_CONV_OVF_I1_UN:
8337 case MONO_CEE_CONV_OVF_I2_UN:
8338 case MONO_CEE_CONV_OVF_I4_UN:
8339 case MONO_CEE_CONV_OVF_I8_UN:
8340 case MONO_CEE_CONV_OVF_U1_UN:
8341 case MONO_CEE_CONV_OVF_U2_UN:
8342 case MONO_CEE_CONV_OVF_U4_UN:
8343 case MONO_CEE_CONV_OVF_U8_UN:
8344 case MONO_CEE_CONV_OVF_I_UN:
8345 case MONO_CEE_CONV_OVF_U_UN:
8346 case MONO_CEE_CONV_U2:
8347 case MONO_CEE_CONV_U1:
8348 case MONO_CEE_CONV_I:
8349 case MONO_CEE_CONV_U:
8350 ADD_UNOP (il_op);
8351 CHECK_CFG_EXCEPTION;
8352 break;
8353 case MONO_CEE_ADD_OVF:
8354 case MONO_CEE_ADD_OVF_UN:
8355 case MONO_CEE_MUL_OVF:
8356 case MONO_CEE_MUL_OVF_UN:
8357 case MONO_CEE_SUB_OVF:
8358 case MONO_CEE_SUB_OVF_UN:
8359 ADD_BINOP (il_op);
8360 break;
8361 case MONO_CEE_CPOBJ:
8362 GSHAREDVT_FAILURE (il_op);
8363 GSHAREDVT_FAILURE (*ip);
8364 klass = mini_get_class (method, token, generic_context);
8365 CHECK_TYPELOAD (klass);
8366 sp -= 2;
8367 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
8368 ins_flag = 0;
8369 break;
8370 case MONO_CEE_LDOBJ: {
8371 int loc_index = -1;
8372 int stloc_len = 0;
8374 --sp;
8375 klass = mini_get_class (method, token, generic_context);
8376 CHECK_TYPELOAD (klass);
8378 /* Optimize the common ldobj+stloc combination */
8379 if (next_ip < end) {
8380 switch (next_ip [0]) {
8381 case MONO_CEE_STLOC_S:
8382 CHECK_OPSIZE (7);
8383 loc_index = next_ip [1];
8384 stloc_len = 2;
8385 break;
8386 case MONO_CEE_STLOC_0:
8387 case MONO_CEE_STLOC_1:
8388 case MONO_CEE_STLOC_2:
8389 case MONO_CEE_STLOC_3:
8390 loc_index = next_ip [0] - CEE_STLOC_0;
8391 stloc_len = 1;
8392 break;
8393 default:
8394 break;
8395 }
8396 }
8398 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, next_ip)) {
8399 CHECK_LOCAL (loc_index);
8401 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), sp [0]->dreg, 0);
8402 ins->dreg = cfg->locals [loc_index]->dreg;
8403 ins->flags |= ins_flag;
8404 il_op = (MonoOpcodeEnum)next_ip [0];
8405 next_ip += stloc_len;
8406 if (ins_flag & MONO_INST_VOLATILE) {
8407 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8408 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
8409 }
8410 ins_flag = 0;
8411 break;
8412 }
8414 /* Optimize the ldobj+stobj combination */
8415 if (next_ip + 4 < end && next_ip [0] == CEE_STOBJ && ip_in_bb (cfg, cfg->cbb, next_ip) && read32 (next_ip + 1) == token) {
8416 CHECK_STACK (1);
8418 sp --;
8420 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
8422 il_op = (MonoOpcodeEnum)next_ip [0];
8423 next_ip += 5;
8424 ins_flag = 0;
8425 break;
8426 }
8428 ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (klass), sp [0], 0, ins_flag);
8429 *sp++ = ins;
8431 ins_flag = 0;
8432 inline_costs += 1;
8433 break;
8434 }
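/*
 * The two peepholes above, sketched with a hypothetical valuetype:
 *     ldarg.0; ldobj MyStruct; stloc.0
 *         -> load straight into the local's dreg
 *     ldarg.0; ldarg.1; ldobj MyStruct; stobj MyStruct (same token)
 *         -> a single mini_emit_memory_copy
 * Both only fire when the following opcode is in the same basic block.
 */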
8435 case MONO_CEE_LDSTR:
8436 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8437 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8438 ins->type = STACK_OBJ;
8439 *sp = ins;
8440 }
8441 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8442 MonoInst *iargs [1];
8443 char *str = (char *)mono_method_get_wrapper_data (method, n);
8445 if (cfg->compile_aot)
8446 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
8447 else
8448 EMIT_NEW_PCONST (cfg, iargs [0], str);
8449 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper_internal, iargs);
8450 } else {
8451 if (cfg->opt & MONO_OPT_SHARED) {
8452 MonoInst *iargs [3];
8454 if (cfg->compile_aot) {
8455 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8456 }
8457 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8458 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8459 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8460 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
8461 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
8462 CHECK_CFG_ERROR;
8463 } else {
8464 if (cfg->cbb->out_of_line) {
8465 MonoInst *iargs [2];
8467 if (image == mono_defaults.corlib) {
8468 /*
8469 * Avoid relocations in AOT and save some space by using a
8470 * version of helper_ldstr specialized to mscorlib.
8471 */
8472 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8473 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8474 } else {
8475 /* Avoid creating the string object */
8476 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8477 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8478 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8479 }
8480 }
8481 else
8482 if (cfg->compile_aot) {
8483 NEW_LDSTRCONST (cfg, ins, image, n);
8484 *sp = ins;
8485 MONO_ADD_INS (cfg->cbb, ins);
8486 }
8487 else {
8488 NEW_PCONST (cfg, ins, NULL);
8489 ins->type = STACK_OBJ;
8490 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
8491 CHECK_CFG_ERROR;
8493 if (!ins->inst_p0)
8494 OUT_OF_MEMORY_FAILURE;
8496 *sp = ins;
8497 MONO_ADD_INS (cfg->cbb, ins);
8498 }
8499 }
8500 }
8502 sp++;
8503 break;
8504 case MONO_CEE_NEWOBJ: {
8505 MonoInst *iargs [2];
8506 MonoMethodSignature *fsig;
8507 MonoInst this_ins;
8508 MonoInst *alloc;
8509 MonoInst *vtable_arg = NULL;
8511 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8512 CHECK_CFG_ERROR;
8514 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8515 CHECK_CFG_ERROR;
8517 mono_save_token_info (cfg, image, token, cmethod);
8519 if (!mono_class_init_internal (cmethod->klass))
8520 TYPE_LOAD_ERROR (cmethod->klass);
8522 context_used = mini_method_check_context_used (cfg, cmethod);
8524 if (!dont_verify && !cfg->skip_visibility) {
8525 MonoMethod *cil_method = cmethod;
8526 MonoMethod *target_method = cil_method;
8528 if (method->is_inflated) {
8529 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8530 CHECK_CFG_ERROR;
8531 }
8533 if (!mono_method_can_access_method (method_definition, target_method) &&
8534 !mono_method_can_access_method (method, cil_method))
8535 emit_method_access_failure (cfg, method, cil_method);
8536 }
8538 if (mono_security_core_clr_enabled ())
8539 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
8541 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8542 emit_class_init (cfg, cmethod->klass);
8543 CHECK_TYPELOAD (cmethod->klass);
8544 }
8547 if (cfg->gsharedvt) {
8548 if (mini_is_gsharedvt_variable_signature (fsig))
8549 GSHAREDVT_FAILURE (il_op);
8550 }
8553 n = fsig->param_count;
8554 CHECK_STACK (n);
8556 /*
8557 * Generate smaller code for the common newobj <exception> instruction in
8558 * argument checking code.
8559 */
8560 if (cfg->cbb->out_of_line && m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
8561 is_exception_class (cmethod->klass) && n <= 2 &&
8562 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8563 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8564 MonoInst *iargs [3];
8566 sp -= n;
8568 EMIT_NEW_ICONST (cfg, iargs [0], m_class_get_type_token (cmethod->klass));
8569 switch (n) {
8570 case 0:
8571 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8572 break;
8573 case 1:
8574 iargs [1] = sp [0];
8575 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8576 break;
8577 case 2:
8578 iargs [1] = sp [0];
8579 iargs [2] = sp [1];
8580 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8581 break;
8582 default:
8583 g_assert_not_reached ();
8584 }
8586 inline_costs += 5;
8587 break;
8588 }
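/*
 * Illustrative example (assuming typical argument-checking code): C# such as
 *
 *     if (arg == null)
 *         throw new ArgumentNullException ("arg");
 *
 * takes this path; rather than expanding the allocation and ctor call in the
 * out-of-line bblock, the type token and up to two string arguments are
 * passed to mono_create_corlib_exception_{0,1,2}, which constructs the
 * exception object at run time.
 */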
8590 /* move the args to allow room for 'this' in the first position */
8591 while (n--) {
8592 --sp;
8593 sp [1] = sp [0];
8594 }
8596 for (int i = 0; i < fsig->param_count; ++i)
8597 sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
8599 /* check_call_signature () requires sp[0] to be set */
8600 this_ins.type = STACK_OBJ;
8601 sp [0] = &this_ins;
8602 if (check_call_signature (cfg, fsig, sp))
8603 UNVERIFIED;
8605 iargs [0] = NULL;
8607 if (mini_class_is_system_array (cmethod->klass)) {
8608 *sp = emit_get_rgctx_method (cfg, context_used,
8609 cmethod, MONO_RGCTX_INFO_METHOD);
8610 /* Optimize the common cases */
8611 MonoJitICallId function = MONO_JIT_ICALL_ZeroIsReserved;
8612 int n = fsig->param_count;
8613 switch (n) {
8614 case 1: function = MONO_JIT_ICALL_mono_array_new_1;
8615 break;
8616 case 2: function = MONO_JIT_ICALL_mono_array_new_2;
8617 break;
8618 case 3: function = MONO_JIT_ICALL_mono_array_new_3;
8619 break;
8620 case 4: function = MONO_JIT_ICALL_mono_array_new_4;
8621 break;
8622 default:
8623 // FIXME Maximum value of param_count? Realistically 64. Fits in imm?
8624 if (!array_new_localalloc_ins) {
8625 MONO_INST_NEW (cfg, array_new_localalloc_ins, OP_LOCALLOC_IMM);
8626 array_new_localalloc_ins->dreg = alloc_preg (cfg);
8627 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8628 MONO_ADD_INS (init_localsbb, array_new_localalloc_ins);
8629 }
8630 array_new_localalloc_ins->inst_imm = MAX (array_new_localalloc_ins->inst_imm, n * sizeof (target_mgreg_t));
8631 int dreg = array_new_localalloc_ins->dreg;
8632 for (int i = 0; i < n; ++i) {
8633 NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
8634 MONO_ADD_INS (cfg->cbb, ins);
8635 }
8636 EMIT_NEW_ICONST (cfg, ins, n);
8637 sp [1] = ins;
8638 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), dreg);
8639 ins->type = STACK_PTR;
8640 sp [2] = ins;
8641 // FIXME Adjust sp by n - 3? Attempts failed.
8642 function = MONO_JIT_ICALL_mono_array_new_n_icall;
8643 break;
8644 }
8645 alloc = mono_emit_jit_icall_id (cfg, function, sp);
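/*
 * For illustration: a C# 'new T[a, b]' reaches this code as a newobj on the
 * two-parameter array ctor, so the switch above picks
 * MONO_JIT_ICALL_mono_array_new_2; ctors with more than four parameters fall
 * back to mono_array_new_n_icall, with the lengths spilled into the
 * localloc'ed buffer set up above.
 */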
8646 } else if (cmethod->string_ctor) {
8647 g_assert (!context_used);
8648 g_assert (!vtable_arg);
8649 /* we simply pass a null pointer */
8650 EMIT_NEW_PCONST (cfg, *sp, NULL);
8651 /* now call the string ctor */
8652 alloc = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
8653 } else {
8654 if (m_class_is_valuetype (cmethod->klass)) {
8655 iargs [0] = mono_compile_create_var (cfg, m_class_get_byval_arg (cmethod->klass), OP_LOCAL);
8656 emit_init_rvar (cfg, iargs [0]->dreg, m_class_get_byval_arg (cmethod->klass));
8657 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8659 alloc = NULL;
8661 /*
8662 * The code generated by mini_emit_virtual_call () expects
8663 * iargs [0] to be a boxed instance, but luckily the vcall
8664 * will be transformed into a normal call there.
8665 */
8666 } else if (context_used) {
8667 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8668 *sp = alloc;
8669 } else {
8670 MonoVTable *vtable = NULL;
8672 if (!cfg->compile_aot)
8673 vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, &cfg->error);
8674 CHECK_CFG_ERROR;
8675 CHECK_TYPELOAD (cmethod->klass);
8677 /*
8678 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8679 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8680 * As a workaround, we call class cctors before allocating objects.
8681 */
8682 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
8683 emit_class_init (cfg, cmethod->klass);
8684 if (cfg->verbose_level > 2)
8685 printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod->klass), m_class_get_name (cmethod->klass));
8686 class_inits = g_slist_prepend (class_inits, cmethod->klass);
8687 }
8689 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8690 *sp = alloc;
8692 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8694 if (alloc)
8695 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8697 /* Now call the actual ctor */
8698 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
8699 CHECK_CFG_EXCEPTION;
8702 if (alloc == NULL) {
8703 /* Valuetype */
8704 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8705 mini_type_to_eval_stack_type (cfg, m_class_get_byval_arg (ins->klass), ins);
8706 *sp++= ins;
8707 } else {
8708 *sp++ = alloc;
8709 }
8711 inline_costs += 5;
8712 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
8713 emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
8714 break;
8715 }
8716 case MONO_CEE_CASTCLASS:
8717 case MONO_CEE_ISINST: {
8718 --sp;
8719 klass = mini_get_class (method, token, generic_context);
8720 CHECK_TYPELOAD (klass);
8721 if (sp [0]->type != STACK_OBJ)
8722 UNVERIFIED;
8724 MONO_INST_NEW (cfg, ins, (il_op == MONO_CEE_ISINST) ? OP_ISINST : OP_CASTCLASS);
8725 ins->dreg = alloc_preg (cfg);
8726 ins->sreg1 = (*sp)->dreg;
8727 ins->klass = klass;
8728 ins->type = STACK_OBJ;
8729 MONO_ADD_INS (cfg->cbb, ins);
8731 CHECK_CFG_EXCEPTION;
8732 *sp++ = ins;
8734 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
8735 break;
8736 }
8737 case MONO_CEE_UNBOX_ANY: {
8738 MonoInst *res, *addr;
8740 --sp;
8741 klass = mini_get_class (method, token, generic_context);
8742 CHECK_TYPELOAD (klass);
8744 mono_save_token_info (cfg, image, token, klass);
8746 context_used = mini_class_check_context_used (cfg, klass);
8748 if (mini_is_gsharedvt_klass (klass)) {
8749 res = handle_unbox_gsharedvt (cfg, klass, *sp);
8750 inline_costs += 2;
8751 } else if (mini_class_is_reference (klass)) {
8752 if (MONO_INS_IS_PCONST_NULL (*sp)) {
8753 EMIT_NEW_PCONST (cfg, res, NULL);
8754 res->type = STACK_OBJ;
8755 } else {
8756 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
8757 res->dreg = alloc_preg (cfg);
8758 res->sreg1 = (*sp)->dreg;
8759 res->klass = klass;
8760 res->type = STACK_OBJ;
8761 MONO_ADD_INS (cfg->cbb, res);
8762 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
8763 }
8764 } else if (mono_class_is_nullable (klass)) {
8765 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
8766 } else {
8767 addr = handle_unbox (cfg, klass, sp, context_used);
8768 /* LDOBJ */
8769 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
8770 res = ins;
8771 inline_costs += 2;
8772 }
8774 *sp ++ = res;
8775 break;
8776 }
8777 case MONO_CEE_BOX: {
8778 MonoInst *val;
8779 MonoClass *enum_class;
8780 MonoMethod *has_flag;
8782 --sp;
8783 val = *sp;
8784 klass = mini_get_class (method, token, generic_context);
8785 CHECK_TYPELOAD (klass);
8787 mono_save_token_info (cfg, image, token, klass);
8789 context_used = mini_class_check_context_used (cfg, klass);
8791 if (mini_class_is_reference (klass)) {
8792 *sp++ = val;
8793 break;
8794 }
8796 val = convert_value (cfg, m_class_get_byval_arg (klass), val);
8798 if (klass == mono_defaults.void_class)
8799 UNVERIFIED;
8800 if (target_type_is_incompatible (cfg, m_class_get_byval_arg (klass), val))
8801 UNVERIFIED;
8802 /* frequent check in generic code: box (struct), brtrue */
8804 /*
8805 * Look for:
8806 *
8807 * <push int/long ptr>
8808 * <push int/long>
8809 * box MyFlags
8810 * constrained. MyFlags
8811 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
8813 * If we find this sequence and the operand types on box and constrained
8814 * are equal, we can emit a specialized instruction sequence instead of
8815 * the very slow HasFlag () call.
8816 * This code sequence is generated by older mcs/csc, the newer one is handled in
8817 * emit_inst_for_method ().
8818 */
8819 guint32 constrained_token;
8820 guint32 callvirt_token;
8822 if ((cfg->opt & MONO_OPT_INTRINS) &&
8823 // FIXME ip_in_bb as we go?
8824 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8825 (ip = il_read_constrained (next_ip, end, &constrained_token)) &&
8826 ip_in_bb (cfg, cfg->cbb, ip) &&
8827 (ip = il_read_callvirt (ip, end, &callvirt_token)) &&
8828 ip_in_bb (cfg, cfg->cbb, ip) &&
8829 m_class_is_enumtype (klass) &&
8830 (enum_class = mini_get_class (method, constrained_token, generic_context)) &&
8831 (has_flag = mini_get_method (cfg, method, callvirt_token, NULL, generic_context)) &&
8832 has_flag->klass == mono_defaults.enum_class &&
8833 !strcmp (has_flag->name, "HasFlag") &&
8834 has_flag->signature->hasthis &&
8835 has_flag->signature->param_count == 1) {
8836 CHECK_TYPELOAD (enum_class);
8838 if (enum_class == klass) {
8839 MonoInst *enum_this, *enum_flag;
8841 next_ip = ip;
8842 il_op = MONO_CEE_CALLVIRT;
8843 --sp;
8845 enum_this = sp [0];
8846 enum_flag = sp [1];
8848 *sp++ = mini_handle_enum_has_flag (cfg, klass, enum_this, -1, enum_flag);
8849 break;
8850 }
8851 }
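/*
 * Illustrative sketch (assuming x and f have the same enum type): C# like
 *
 *     if (x.HasFlag (MyFlags.A)) ...
 *
 * compiled by older mcs/csc yields the box/constrained./callvirt sequence
 * matched above; mini_handle_enum_has_flag () then emits the equivalent of
 *
 *     (x & f) == f
 *
 * on the unboxed values instead of the slow boxing call.
 */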
8853 gboolean is_true;
8855 // FIXME: LLVM can't handle the inconsistent bb linking
8856 if (!mono_class_is_nullable (klass) &&
8857 !mini_is_gsharedvt_klass (klass) &&
8858 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8859 ( (is_true = !!(ip = il_read_brtrue (next_ip, end, &target))) ||
8860 (is_true = !!(ip = il_read_brtrue_s (next_ip, end, &target))) ||
8861 (ip = il_read_brfalse (next_ip, end, &target)) ||
8862 (ip = il_read_brfalse_s (next_ip, end, &target)))) {
8864 int dreg;
8865 MonoBasicBlock *true_bb, *false_bb;
8867 il_op = (MonoOpcodeEnum)next_ip [0];
8868 next_ip = ip;
8870 if (cfg->verbose_level > 3) {
8871 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8872 printf ("<box+brtrue opt>\n");
8873 }
8875 /*
8876 * We need to link both bblocks, since it is needed for handling stack
8877 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8878 * Branching to only one of them would lead to inconsistencies, so
8879 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8880 */
8881 GET_BBLOCK (cfg, true_bb, target);
8882 GET_BBLOCK (cfg, false_bb, next_ip);
8884 mono_link_bblock (cfg, cfg->cbb, true_bb);
8885 mono_link_bblock (cfg, cfg->cbb, false_bb);
8887 if (sp != stack_start) {
8888 handle_stack_args (cfg, stack_start, sp - stack_start);
8889 sp = stack_start;
8890 CHECK_UNVERIFIABLE (cfg);
8891 }
8893 if (COMPILE_LLVM (cfg)) {
8894 dreg = alloc_ireg (cfg);
8895 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8898 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8899 } else {
8900 /* The JIT can't eliminate the iconst+compare */
8901 MONO_INST_NEW (cfg, ins, OP_BR);
8902 ins->inst_target_bb = is_true ? true_bb : false_bb;
8903 MONO_ADD_INS (cfg->cbb, ins);
8904 }
8906 start_new_bblock = 1;
8907 break;
8908 }
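/*
 * Illustrative example: for a non-nullable, non-gsharedvt value type,
 *
 *     box MyStruct
 *     brtrue target
 *
 * is decidable at compile time, since boxing never produces null, so brtrue
 * always branches and brfalse always falls through. The code above therefore
 * skips the box and emits an unconditional OP_BR (or, for LLVM, an ICONST
 * plus compare that the branch opts fold away).
 */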
8910 if (m_class_is_enumtype (klass) && !mini_is_gsharedvt_klass (klass) && !(val->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4)) {
8911 /* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is ran after the long decomp pass */
8912 if (val->opcode == OP_ICONST) {
8913 MONO_INST_NEW (cfg, ins, OP_BOX_ICONST);
8914 ins->type = STACK_OBJ;
8915 ins->klass = klass;
8916 ins->inst_c0 = val->inst_c0;
8917 ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
8918 } else {
8919 MONO_INST_NEW (cfg, ins, OP_BOX);
8920 ins->type = STACK_OBJ;
8921 ins->klass = klass;
8922 ins->sreg1 = val->dreg;
8923 ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
8924 }
8925 MONO_ADD_INS (cfg->cbb, ins);
8926 *sp++ = ins;
8927 /* Create domainvar early so it gets initialized earlier than this code */
8928 if (cfg->opt & MONO_OPT_SHARED)
8929 mono_get_domainvar (cfg);
8930 } else {
8931 *sp++ = mini_emit_box (cfg, val, klass, context_used);
8932 }
8933 CHECK_CFG_EXCEPTION;
8934 inline_costs += 1;
8935 break;
8936 }
8937 case MONO_CEE_UNBOX: {
8938 --sp;
8939 klass = mini_get_class (method, token, generic_context);
8940 CHECK_TYPELOAD (klass);
8942 mono_save_token_info (cfg, image, token, klass);
8944 context_used = mini_class_check_context_used (cfg, klass);
8946 if (mono_class_is_nullable (klass)) {
8947 MonoInst *val;
8949 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8950 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), m_class_get_byval_arg (val->klass));
8952 *sp++= ins;
8953 } else {
8954 ins = handle_unbox (cfg, klass, sp, context_used);
8955 *sp++ = ins;
8956 }
8957 inline_costs += 2;
8958 break;
8959 }
8960 case MONO_CEE_LDFLD:
8961 case MONO_CEE_LDFLDA:
8962 case MONO_CEE_STFLD:
8963 case MONO_CEE_LDSFLD:
8964 case MONO_CEE_LDSFLDA:
8965 case MONO_CEE_STSFLD: {
8966 MonoClassField *field;
8967 #ifndef DISABLE_REMOTING
8968 int costs;
8969 #endif
8970 guint foffset;
8971 gboolean is_instance;
8972 gpointer addr = NULL;
8973 gboolean is_special_static;
8974 MonoType *ftype;
8975 MonoInst *store_val = NULL;
8976 MonoInst *thread_ins;
8978 is_instance = (il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDFLDA || il_op == MONO_CEE_STFLD);
8979 if (is_instance) {
8980 if (il_op == MONO_CEE_STFLD) {
8981 sp -= 2;
8982 store_val = sp [1];
8983 } else {
8984 --sp;
8985 }
8986 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8987 UNVERIFIED;
8988 if (il_op != MONO_CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8989 UNVERIFIED;
8990 } else {
8991 if (il_op == MONO_CEE_STSFLD) {
8992 sp--;
8993 store_val = sp [0];
8994 }
8995 }
8997 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8998 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
8999 klass = field->parent;
9000 }
9001 else {
9002 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
9003 CHECK_CFG_ERROR;
9005 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9006 FIELD_ACCESS_FAILURE (method, field);
9007 mono_class_init_internal (klass);
9009 /* if the class is Critical then transparent code cannot access its fields */
9010 if (!is_instance && mono_security_core_clr_enabled ())
9011 ensure_method_is_allowed_to_access_field (cfg, method, field);
9013 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9014 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9015 if (mono_security_core_clr_enabled ())
9016 ensure_method_is_allowed_to_access_field (cfg, method, field);
9017 */
9019 ftype = mono_field_get_type_internal (field);
9021 /*
9022 * LDFLD etc. is usable on static fields as well, so convert those cases to
9023 * the static case.
9024 */
9025 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
9026 switch (il_op) {
9027 case MONO_CEE_LDFLD:
9028 il_op = MONO_CEE_LDSFLD;
9029 break;
9030 case MONO_CEE_STFLD:
9031 il_op = MONO_CEE_STSFLD;
9032 break;
9033 case MONO_CEE_LDFLDA:
9034 il_op = MONO_CEE_LDSFLDA;
9035 break;
9036 default:
9037 g_assert_not_reached ();
9038 }
9039 is_instance = FALSE;
9040 }
9042 context_used = mini_class_check_context_used (cfg, klass);
9044 if (il_op == MONO_CEE_LDSFLD) {
9045 ins = mini_emit_inst_for_field_load (cfg, field);
9046 if (ins) {
9047 *sp++ = ins;
9048 goto field_access_end;
9049 }
9050 }
9052 /* INSTANCE CASE */
9054 foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset;
9055 if (il_op == MONO_CEE_STFLD) {
9056 sp [1] = convert_value (cfg, field->type, sp [1]);
9057 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9058 UNVERIFIED;
9059 #ifndef DISABLE_REMOTING
9060 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9061 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9062 MonoInst *iargs [5];
9064 GSHAREDVT_FAILURE (il_op);
9066 iargs [0] = sp [0];
9067 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9068 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9069 EMIT_NEW_ICONST (cfg, iargs [3], m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) :
9070 field->offset);
9071 iargs [4] = sp [1];
9073 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9074 costs = inline_method (cfg, stfld_wrapper, mono_method_signature_internal (stfld_wrapper),
9075 iargs, ip, cfg->real_offset, TRUE);
9076 CHECK_CFG_EXCEPTION;
9077 g_assert (costs > 0);
9079 cfg->real_offset += 5;
9081 inline_costs += costs;
9082 } else {
9083 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9084 }
9085 } else
9086 #endif
9087 {
9088 MonoInst *store;
9090 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
9092 if (ins_flag & MONO_INST_VOLATILE) {
9093 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9094 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9095 }
9097 if (mini_is_gsharedvt_klass (klass)) {
9098 MonoInst *offset_ins;
9100 context_used = mini_class_check_context_used (cfg, klass);
9102 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9103 /* The value is offset by 1 */
9104 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9105 dreg = alloc_ireg_mp (cfg);
9106 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9107 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
9108 store = mini_emit_storing_write_barrier (cfg, ins, sp [1]);
9109 } else {
9110 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
9111 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9112 }
9113 } else {
9114 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
9115 /* insert call to write barrier */
9116 MonoInst *ptr;
9117 int dreg;
9119 dreg = alloc_ireg_mp (cfg);
9120 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9121 store = mini_emit_storing_write_barrier (cfg, ptr, sp [1]);
9122 } else {
9123 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9124 }
9125 }
9127 if (sp [0]->opcode != OP_LDADDR)
9128 store->flags |= MONO_INST_FAULT;
9130 store->flags |= ins_flag;
9132 goto field_access_end;
9133 }
9135 #ifndef DISABLE_REMOTING
9136 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9137 MonoMethod *wrapper = (il_op == MONO_CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9138 MonoInst *iargs [4];
9140 GSHAREDVT_FAILURE (il_op);
9142 iargs [0] = sp [0];
9143 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9144 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9145 EMIT_NEW_ICONST (cfg, iargs [3], m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset);
9146 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9147 costs = inline_method (cfg, wrapper, mono_method_signature_internal (wrapper),
9148 iargs, ip, cfg->real_offset, TRUE);
9149 CHECK_CFG_EXCEPTION;
9150 g_assert (costs > 0);
9152 cfg->real_offset += 5;
9154 *sp++ = iargs [0];
9156 inline_costs += costs;
9157 } else {
9158 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9159 *sp++ = ins;
9160 }
9161 } else
9162 #endif
9163 if (is_instance) {
9164 if (sp [0]->type == STACK_VTYPE) {
9165 MonoInst *var;
9167 /* Have to compute the address of the variable */
9169 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9170 if (!var)
9171 var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, sp [0]->dreg);
9172 else
9173 g_assert (var->klass == klass);
9175 EMIT_NEW_VARLOADA (cfg, ins, var, m_class_get_byval_arg (var->klass));
9176 sp [0] = ins;
9177 }
9179 if (il_op == MONO_CEE_LDFLDA) {
9180 if (sp [0]->type == STACK_OBJ) {
9181 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9182 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9183 }
9185 dreg = alloc_ireg_mp (cfg);
9187 if (mini_is_gsharedvt_klass (klass)) {
9188 MonoInst *offset_ins;
9190 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9191 /* The value is offset by 1 */
9192 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9193 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9194 } else {
9195 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9196 }
9197 ins->klass = mono_class_from_mono_type_internal (field->type);
9198 ins->type = STACK_MP;
9199 *sp++ = ins;
9200 } else {
9201 MonoInst *load;
9203 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
9205 #ifdef MONO_ARCH_SIMD_INTRINSICS
9206 if (sp [0]->opcode == OP_LDADDR && m_class_is_simd_type (klass) && cfg->opt & MONO_OPT_SIMD) {
9207 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
9208 if (ins) {
9209 *sp++ = ins;
9210 goto field_access_end;
9211 }
9212 }
9213 #endif
9215 MonoInst *field_add_inst = sp [0];
9216 if (mini_is_gsharedvt_klass (klass)) {
9217 MonoInst *offset_ins;
9219 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9220 /* The value is offset by 1 */
9221 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9222 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
9223 foffset = 0;
9224 }
9226 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
9228 if (sp [0]->opcode != OP_LDADDR)
9229 load->flags |= MONO_INST_FAULT;
9230 *sp++ = load;
9231 }
9232 }
9234 if (is_instance)
9235 goto field_access_end;
9237 /* STATIC CASE */
9238 context_used = mini_class_check_context_used (cfg, klass);
9240 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
9241 mono_error_set_field_missing (&cfg->error, field->parent, field->name, NULL, "Using static instructions with literal field");
9242 CHECK_CFG_ERROR;
9243 }
9245 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9246 * to be called here.
9247 */
9248 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9249 mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
9250 CHECK_CFG_ERROR;
9251 CHECK_TYPELOAD (klass);
9253 mono_domain_lock (cfg->domain);
9254 if (cfg->domain->special_static_fields)
9255 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9256 mono_domain_unlock (cfg->domain);
9257 }
9258 is_special_static = mono_class_field_is_special_static (field);
9260 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
9261 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
9262 else
9263 thread_ins = NULL;
9265 /* Generate IR to compute the field address */
9266 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9267 /*
9268 * Fast access to TLS data
9269 * Inline version of get_thread_static_data () in
9270 * threads.c.
9271 */
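/*
 * Illustrative decoding of the cookie stored in special_static_fields
 * (a sketch matching the masks used below, not code from the original):
 *
 *     guint32 cookie = (guint32)(gsize)addr;
 *     int idx = cookie & 0x3f;                    // slot in thread->static_data
 *     guint32 off = (cookie >> 6) & 0x1ffffff;    // byte offset inside that slot
 *     gpointer p = (char*)thread->static_data [idx] + off;
 *
 * Bit 31 marks the slow-path kinds, which is why this fast path requires
 * ((gsize)addr & 0x80000000) == 0.
 */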
9272 guint32 offset;
9273 int idx, static_data_reg, array_reg, dreg;
9275 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
9276 GSHAREDVT_FAILURE (il_op);
9278 static_data_reg = alloc_ireg (cfg);
9279 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
9281 if (cfg->compile_aot) {
9282 int offset_reg, offset2_reg, idx_reg;
9284 /* For TLS variables, this will return the TLS offset */
9285 EMIT_NEW_SFLDACONST (cfg, ins, field);
9286 offset_reg = ins->dreg;
9287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9288 idx_reg = alloc_ireg (cfg);
9289 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
9290 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2);
9291 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9292 array_reg = alloc_ireg (cfg);
9293 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9294 offset2_reg = alloc_ireg (cfg);
9295 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
9296 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
9297 dreg = alloc_ireg (cfg);
9298 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9299 } else {
9300 offset = (gsize)addr & 0x7fffffff;
9301 idx = offset & 0x3f;
9303 array_reg = alloc_ireg (cfg);
9304 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * TARGET_SIZEOF_VOID_P);
9305 dreg = alloc_ireg (cfg);
9306 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
9307 }
9308 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9309 (cfg->compile_aot && is_special_static) ||
9310 (context_used && is_special_static)) {
9311 MonoInst *iargs [2];
9313 g_assert (field->parent);
9314 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9315 if (context_used) {
9316 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9317 field, MONO_RGCTX_INFO_CLASS_FIELD);
9318 } else {
9319 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9320 }
9321 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9322 } else if (context_used) {
9323 MonoInst *static_data;
9325 /*
9326 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9327 method->klass->name_space, method->klass->name, method->name,
9328 depth, field->offset);
9329 */
9331 if (mono_class_needs_cctor_run (klass, method))
9332 emit_class_init (cfg, klass);
9334 /*
9335 * The pointer we're computing here is
9336 *
9337 * super_info.static_data + field->offset
9338 */
9339 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
9340 klass, MONO_RGCTX_INFO_STATIC_DATA);
9342 if (mini_is_gsharedvt_klass (klass)) {
9343 MonoInst *offset_ins;
9345 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9346 /* The value is offset by 1 */
9347 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9348 dreg = alloc_ireg_mp (cfg);
9349 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9350 } else if (field->offset == 0) {
9351 ins = static_data;
9352 } else {
9353 int addr_reg = mono_alloc_preg (cfg);
9354 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9355 }
9356 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9357 MonoInst *iargs [2];
9359 g_assert (field->parent);
9360 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9361 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9362 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9363 } else {
9364 MonoVTable *vtable = NULL;
9366 if (!cfg->compile_aot)
9367 vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
9368 CHECK_CFG_ERROR;
9369 CHECK_TYPELOAD (klass);
9371 if (!addr) {
9372 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
9373 if (!(g_slist_find (class_inits, klass))) {
9374 emit_class_init (cfg, klass);
9375 if (cfg->verbose_level > 2)
9376 printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass), m_class_get_name (klass), mono_field_get_name (field));
9377 class_inits = g_slist_prepend (class_inits, klass);
9378 }
9379 } else {
9380 if (cfg->run_cctors) {
9381 /* This makes it so that inlining cannot trigger */
9382 /* .cctors: too many apps depend on them */
9383 /* running with a specific order... */
9384 g_assert (vtable);
9385 if (!vtable->initialized && m_class_has_cctor (vtable->klass))
9386 INLINE_FAILURE ("class init");
9387 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
9388 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9389 goto exception_exit;
9390 }
9391 }
9392 }
9393 if (cfg->compile_aot)
9394 EMIT_NEW_SFLDACONST (cfg, ins, field);
9395 else {
9396 g_assert (vtable);
9397 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9398 g_assert (addr);
9399 EMIT_NEW_PCONST (cfg, ins, addr);
9400 }
9401 } else {
9402 MonoInst *iargs [1];
9403 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9404 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9405 }
9406 }
9408 /* Generate IR to do the actual load/store operation */
9410 if ((il_op == MONO_CEE_STFLD || il_op == MONO_CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
9411 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9412 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9413 }
9415 if (il_op == MONO_CEE_LDSFLDA) {
9416 ins->klass = mono_class_from_mono_type_internal (ftype);
9417 ins->type = STACK_PTR;
9418 *sp++ = ins;
9419 } else if (il_op == MONO_CEE_STSFLD) {
9420 MonoInst *store;
9422 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9423 store->flags |= ins_flag;
9424 } else {
9425 gboolean is_const = FALSE;
9426 MonoVTable *vtable = NULL;
9427 gpointer addr = NULL;
9429 if (!context_used) {
9430 vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
9431 CHECK_CFG_ERROR;
9432 CHECK_TYPELOAD (klass);
9433 }
9434 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9435 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9436 int ro_type = ftype->type;
9437 if (!addr)
9438 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9439 if (ro_type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (ftype->data.klass)) {
9440 ro_type = mono_class_enum_basetype_internal (ftype->data.klass)->type;
9441 }
9443 GSHAREDVT_FAILURE (il_op);
9445 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9446 is_const = TRUE;
9447 switch (ro_type) {
9448 case MONO_TYPE_BOOLEAN:
9449 case MONO_TYPE_U1:
9450 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9451 sp++;
9452 break;
9453 case MONO_TYPE_I1:
9454 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9455 sp++;
9456 break;
9457 case MONO_TYPE_CHAR:
9458 case MONO_TYPE_U2:
9459 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9460 sp++;
9461 break;
9462 case MONO_TYPE_I2:
9463 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9464 sp++;
9465 break;
9467 case MONO_TYPE_I4:
9468 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9469 sp++;
9470 break;
9471 case MONO_TYPE_U4:
9472 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9473 sp++;
9474 break;
9475 case MONO_TYPE_I:
9476 case MONO_TYPE_U:
9477 case MONO_TYPE_PTR:
9478 case MONO_TYPE_FNPTR:
9479 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9480 mini_type_to_eval_stack_type ((cfg), field->type, *sp);
9481 sp++;
9482 break;
9483 case MONO_TYPE_STRING:
9484 case MONO_TYPE_OBJECT:
9485 case MONO_TYPE_CLASS:
9486 case MONO_TYPE_SZARRAY:
9487 case MONO_TYPE_ARRAY:
9488 if (!mono_gc_is_moving ()) {
9489 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9490 mini_type_to_eval_stack_type ((cfg), field->type, *sp);
9491 sp++;
9492 } else {
9493 is_const = FALSE;
9494 }
9495 break;
9496 case MONO_TYPE_I8:
9497 case MONO_TYPE_U8:
9498 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9499 sp++;
9500 break;
9501 case MONO_TYPE_R4:
9502 case MONO_TYPE_R8:
9503 case MONO_TYPE_VALUETYPE:
9504 default:
9505 is_const = FALSE;
9506 break;
9507 }
9508 }
9510 if (!is_const) {
9511 MonoInst *load;
9513 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9514 load->flags |= ins_flag;
9515 *sp++ = load;
9516 }
9517 }
9519 field_access_end:
9520 if ((il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
9521 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9522 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9523 }
9525 ins_flag = 0;
9526 break;
9527 }
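/*
 * Illustrative summary of the barrier placement implemented above, per
 * ECMA-335 I.12.6.7: a volatile store gets a release barrier before the
 * store and a volatile load gets an acquire barrier after the load, e.g.
 *
 *     volatile. stsfld F   =>   MONO_MEMORY_BARRIER_REL; store F
 *     volatile. ldsfld F   =>   load F; MONO_MEMORY_BARRIER_ACQ
 */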
9528 case MONO_CEE_STOBJ:
9529 sp -= 2;
9530 klass = mini_get_class (method, token, generic_context);
9531 CHECK_TYPELOAD (klass);
9533 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9534 mini_emit_memory_store (cfg, m_class_get_byval_arg (klass), sp [0], sp [1], ins_flag);
9535 ins_flag = 0;
9536 inline_costs += 1;
9537 break;
9539 /*
9540 * Array opcodes
9541 */
9542 case MONO_CEE_NEWARR: {
9543 MonoInst *len_ins;
9544 const char *data_ptr;
9545 int data_size = 0;
9546 guint32 field_token;
9548 --sp;
9550 klass = mini_get_class (method, token, generic_context);
9551 CHECK_TYPELOAD (klass);
9552 if (m_class_get_byval_arg (klass)->type == MONO_TYPE_VOID)
9553 UNVERIFIED;
9555 context_used = mini_class_check_context_used (cfg, klass);
9557 if (sp [0]->type == STACK_I8 || (TARGET_SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9558 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
9559 ins->sreg1 = sp [0]->dreg;
9560 ins->type = STACK_I4;
9561 ins->dreg = alloc_ireg (cfg);
9562 MONO_ADD_INS (cfg->cbb, ins);
9563 *sp = mono_decompose_opcode (cfg, ins);
9564 }
9566 if (context_used) {
9567 MonoInst *args [3];
9568 MonoClass *array_class = mono_class_create_array (klass, 1);
9569 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
9571 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
9573 /* vtable */
9574 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
9575 array_class, MONO_RGCTX_INFO_VTABLE);
9576 /* array len */
9577 args [1] = sp [0];
9579 if (managed_alloc)
9580 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9581 else
9582 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
9583 } else {
9584 if (cfg->opt & MONO_OPT_SHARED) {
9585 /* Decompose now to avoid problems with references to the domainvar */
9586 MonoInst *iargs [3];
9588 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9589 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9590 iargs [2] = sp [0];
9592 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
9593 } else {
9594 /* Decompose later since it is needed by abcrem */
9595 MonoClass *array_type = mono_class_create_array (klass, 1);
9596 mono_class_vtable_checked (cfg->domain, array_type, &cfg->error);
9597 CHECK_CFG_ERROR;
9598 CHECK_TYPELOAD (array_type);
9600 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9601 ins->dreg = alloc_ireg_ref (cfg);
9602 ins->sreg1 = sp [0]->dreg;
9603 ins->inst_newa_class = klass;
9604 ins->type = STACK_OBJ;
9605 ins->klass = array_type;
9606 MONO_ADD_INS (cfg->cbb, ins);
9607 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
9608 cfg->cbb->needs_decompose = TRUE;
9610 /* Needed so mono_emit_load_get_addr () gets called */
9611 mono_get_got_var (cfg);
9612 }
9613 }
9615 len_ins = sp [0];
9616 ip += 5;
9617 *sp++ = ins;
9618 inline_costs += 1;
9620 /*
9621 * we inline/optimize the initialization sequence if possible.
9622 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9623 * for small sizes open code the memcpy
9624 * ensure the rva field is big enough
9625 */
9626 if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end
9627 && ip_in_bb (cfg, cfg->cbb, next_ip)
9628 && (len_ins->opcode == OP_ICONST)
9629 && (data_ptr = initialize_array_data (cfg, method,
9630 cfg->compile_aot, next_ip, end, klass,
9631 len_ins->inst_c0, &data_size, &field_token,
9632 &il_op, &next_ip))) {
9633 MonoMethod *memcpy_method = mini_get_memcpy_method ();
9634 MonoInst *iargs [3];
9635 int add_reg = alloc_ireg_mp (cfg);
9637 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
9638 if (cfg->compile_aot) {
9639 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, m_class_get_image (method->klass), GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9640 } else {
9641 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9642 }
9643 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9644 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9645 }
9647 break;
9648 }
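/*
 * Illustrative example (assuming typical compiler output for array
 * initializers): C# like 'new int[] { 1, 2, 3 }' compiles to
 *
 *     ldc.i4.3
 *     newarr int32
 *     dup
 *     ldtoken <PrivateImplementationDetails>-backed RVA field
 *     call RuntimeHelpers::InitializeArray
 *
 * initialize_array_data () recognizes this sequence when the length is an
 * OP_ICONST and replaces it with the direct memcpy from the RVA data
 * emitted above.
 */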
9649 case MONO_CEE_LDLEN:
9650 --sp;
9651 if (sp [0]->type != STACK_OBJ)
9652 UNVERIFIED;
9654 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9655 ins->dreg = alloc_preg (cfg);
9656 ins->sreg1 = sp [0]->dreg;
9657 ins->inst_imm = MONO_STRUCT_OFFSET (MonoArray, max_length);
9658 ins->type = STACK_I4;
9659 /* This flag will be inherited by the decomposition */
9660 ins->flags |= MONO_INST_FAULT | MONO_INST_INVARIANT_LOAD;
9661 MONO_ADD_INS (cfg->cbb, ins);
9662 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
9663 cfg->cbb->needs_decompose = TRUE;
9664 *sp++ = ins;
9665 break;
9666 case MONO_CEE_LDELEMA:
9667 sp -= 2;
9668 if (sp [0]->type != STACK_OBJ)
9669 UNVERIFIED;
9671 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9673 klass = mini_get_class (method, token, generic_context);
9674 CHECK_TYPELOAD (klass);
9675 /* we need to make sure that this array is exactly the type it needs
9676 * to be for correctness. the wrappers are lax with their usage
9677 * so we need to ignore them here
9678 */
9679 if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9680 MonoClass *array_class = mono_class_create_array (klass, 1);
9681 mini_emit_check_array_type (cfg, sp [0], array_class);
9682 CHECK_TYPELOAD (array_class);
9683 }
9685 readonly = FALSE;
9686 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9687 *sp++ = ins;
9688 break;
9689 case MONO_CEE_LDELEM:
9690 case MONO_CEE_LDELEM_I1:
9691 case MONO_CEE_LDELEM_U1:
9692 case MONO_CEE_LDELEM_I2:
9693 case MONO_CEE_LDELEM_U2:
9694 case MONO_CEE_LDELEM_I4:
9695 case MONO_CEE_LDELEM_U4:
9696 case MONO_CEE_LDELEM_I8:
9697 case MONO_CEE_LDELEM_I:
9698 case MONO_CEE_LDELEM_R4:
9699 case MONO_CEE_LDELEM_R8:
9700 case MONO_CEE_LDELEM_REF: {
9701 MonoInst *addr;
9703 sp -= 2;
9705 if (il_op == MONO_CEE_LDELEM) {
9706 klass = mini_get_class (method, token, generic_context);
9707 CHECK_TYPELOAD (klass);
9708 mono_class_init_internal (klass);
9709 }
9710 else
9711 klass = array_access_to_klass (il_op);
9713 if (sp [0]->type != STACK_OBJ)
9714 UNVERIFIED;
9716 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9718 if (mini_is_gsharedvt_variable_klass (klass)) {
9719 // FIXME-VT: OP_ICONST optimization
9720 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9721 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
9722 ins->opcode = OP_LOADV_MEMBASE;
9723 } else if (sp [1]->opcode == OP_ICONST) {
9724 int array_reg = sp [0]->dreg;
9725 int index_reg = sp [1]->dreg;
9726 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
9728 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
9729 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
9731 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9732 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset);
9733 } else {
9734 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9735 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
9736 }
9737 *sp++ = ins;
9738 break;
9739 }
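/*
 * Worked example for the constant-index fast path above (illustrative):
 * for 'ldc.i4.2; ldelem.i4' on an int32 array, the element address folds to
 *
 *     offset = 4 * 2 + MONO_STRUCT_OFFSET (MonoArray, vector)
 *
 * so a single bounds check plus one load at that immediate offset replaces
 * the generic mini_emit_ldelema_1_ins () address computation.
 */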
9740 case MONO_CEE_STELEM_I:
9741 case MONO_CEE_STELEM_I1:
9742 case MONO_CEE_STELEM_I2:
9743 case MONO_CEE_STELEM_I4:
9744 case MONO_CEE_STELEM_I8:
9745 case MONO_CEE_STELEM_R4:
9746 case MONO_CEE_STELEM_R8:
9747 case MONO_CEE_STELEM_REF:
9748 case MONO_CEE_STELEM: {
9749 sp -= 3;
9751 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9753 if (il_op == MONO_CEE_STELEM) {
9754 klass = mini_get_class (method, token, generic_context);
9755 CHECK_TYPELOAD (klass);
9756 mono_class_init_internal (klass);
9757 }
9758 else
9759 klass = array_access_to_klass (il_op);
9761 if (sp [0]->type != STACK_OBJ)
9762 UNVERIFIED;
9764 sp [2] = convert_value (cfg, m_class_get_byval_arg (klass), sp [2]);
9765 mini_emit_array_store (cfg, klass, sp, TRUE);
9767 inline_costs += 1;
9768 break;
9769 }
9770 case MONO_CEE_CKFINITE: {
9771 --sp;
9773 if (cfg->llvm_only) {
9774 MonoInst *iargs [1];
9776 iargs [0] = sp [0];
9777 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
9778 } else {
9779 sp [0] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.double_class), sp [0]);
9780 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9781 ins->sreg1 = sp [0]->dreg;
9782 ins->dreg = alloc_freg (cfg);
9783 ins->type = STACK_R8;
9784 MONO_ADD_INS (cfg->cbb, ins);
9786 *sp++ = mono_decompose_opcode (cfg, ins);
9787 }
9789 break;
9790 }
9791 case MONO_CEE_REFANYVAL: {
9792 MonoInst *src_var, *src;
9794 int klass_reg = alloc_preg (cfg);
9795 int dreg = alloc_preg (cfg);
9797 GSHAREDVT_FAILURE (il_op);
9799 MONO_INST_NEW (cfg, ins, il_op);
9800 --sp;
9801 klass = mini_get_class (method, token, generic_context);
9802 CHECK_TYPELOAD (klass);
9804 context_used = mini_class_check_context_used (cfg, klass);
9806 // FIXME:
9807 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9808 if (!src_var)
9809 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
9810 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
9813 if (context_used) {
9814 MonoInst *klass_ins;
9816 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
9817 klass, MONO_RGCTX_INFO_KLASS);
9819 // FIXME:
9820 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9821 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9822 } else {
9823 mini_emit_class_check (cfg, klass_reg, klass);
9824 }
9825 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
9826 ins->type = STACK_MP;
9827 ins->klass = klass;
9828 *sp++ = ins;
9829 break;
9830 }
9831 case MONO_CEE_MKREFANY: {
9832 MonoInst *loc, *addr;
9834 GSHAREDVT_FAILURE (il_op);
9836 MONO_INST_NEW (cfg, ins, il_op);
9837 --sp;
9838 klass = mini_get_class (method, token, generic_context);
9839 CHECK_TYPELOAD (klass);
9841 context_used = mini_class_check_context_used (cfg, klass);
9843 loc = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL);
9844 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9846 MonoInst *const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9847 int type_reg = alloc_preg (cfg);
9849 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9850 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, m_class_offsetof_byval_arg ());
9851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9855 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9856 ins->type = STACK_VTYPE;
9857 ins->klass = mono_defaults.typed_reference_class;
9858 *sp++ = ins;
9859 break;
9860 }
9861 case MONO_CEE_LDTOKEN: {
9862 gpointer handle;
9863 MonoClass *handle_class;
9865 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9866 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9867 handle = mono_method_get_wrapper_data (method, n);
9868 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
9869 if (handle_class == mono_defaults.typehandle_class)
9870 handle = m_class_get_byval_arg ((MonoClass*)handle);
9871 }
9872 else {
9873 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
9874 CHECK_CFG_ERROR;
9875 }
9876 if (!handle)
9877 LOAD_ERROR;
9878 mono_class_init_internal (handle_class);
9879 if (cfg->gshared) {
9880 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9881 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9882 /* This case handles ldtoken
9883 of an open type, like for
9884 typeof(Gen<>). */
9885 context_used = 0;
9886 } else if (handle_class == mono_defaults.typehandle_class) {
9887 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type_internal ((MonoType *)handle));
9888 } else if (handle_class == mono_defaults.fieldhandle_class)
9889 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
9890 else if (handle_class == mono_defaults.methodhandle_class)
9891 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
9892 else
9893 g_assert_not_reached ();
9894 }
9896 if ((cfg->opt & MONO_OPT_SHARED) &&
9897 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9898 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9899 MonoInst *addr, *vtvar, *iargs [3];
9900 int method_context_used;
9902 method_context_used = mini_method_check_context_used (cfg, method);
9904 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
9906 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9907 EMIT_NEW_ICONST (cfg, iargs [1], n);
9908 if (method_context_used) {
9909 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9910 method, MONO_RGCTX_INFO_METHOD);
9911 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9912 } else {
9913 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9914 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9915 }
9916 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9918 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9920 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9921 } else {
9922 if ((next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) &&
9923 ((next_ip [0] == CEE_CALL) || (next_ip [0] == CEE_CALLVIRT)) &&
9924 (cmethod = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context)) &&
9925 (cmethod->klass == mono_defaults.systemtype_class) &&
9926 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9927 MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle);
9929 mono_class_init_internal (tclass);
9930 if (context_used) {
9931 ins = mini_emit_get_rgctx_klass (cfg, context_used,
9932 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9933 } else if (cfg->compile_aot) {
9934 if (method->wrapper_type) {
9935 error_init (error); //got to do it since there are multiple conditionals below
9936 if (mono_class_get_checked (m_class_get_image (tclass), m_class_get_type_token (tclass), error) == tclass && !generic_context) {
9937 /* Special case for static synchronized wrappers */
9938 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (tclass), m_class_get_type_token (tclass), generic_context);
9939 } else {
9940 mono_error_cleanup (error); /* FIXME don't swallow the error */
9941 /* FIXME: n is not a normal token */
9942 DISABLE_AOT (cfg);
9943 EMIT_NEW_PCONST (cfg, ins, NULL);
9944 }
9945 } else {
9946 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9947 }
9948 } else {
9949 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
9950 CHECK_CFG_ERROR;
9951 EMIT_NEW_PCONST (cfg, ins, rt);
9952 }
9953 ins->type = STACK_OBJ;
9954 ins->klass = cmethod->klass;
9955 il_op = (MonoOpcodeEnum)next_ip [0];
9956 next_ip += 5;
9957 } else {
9958 MonoInst *addr, *vtvar;
9960 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
9962 if (context_used) {
9963 if (handle_class == mono_defaults.typehandle_class) {
9964 ins = mini_emit_get_rgctx_klass (cfg, context_used,
9965 mono_class_from_mono_type_internal ((MonoType *)handle),
9966 MONO_RGCTX_INFO_TYPE);
9967 } else if (handle_class == mono_defaults.methodhandle_class) {
9968 ins = emit_get_rgctx_method (cfg, context_used,
9969 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
9970 } else if (handle_class == mono_defaults.fieldhandle_class) {
9971 ins = emit_get_rgctx_field (cfg, context_used,
9972 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
9973 } else {
9974 g_assert_not_reached ();
9975 }
9976 } else if (cfg->compile_aot) {
9977 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
9978 } else {
9979 EMIT_NEW_PCONST (cfg, ins, handle);
9980 }
9981 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9982 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9983 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9984 }
9985 }
9987 *sp++ = ins;
9988 break;
9989 }
9990 case MONO_CEE_THROW:
9991 if (sp [-1]->type != STACK_OBJ)
9992 UNVERIFIED;
9994 MONO_INST_NEW (cfg, ins, OP_THROW);
9995 --sp;
9996 ins->sreg1 = sp [0]->dreg;
9997 cfg->cbb->out_of_line = TRUE;
9998 MONO_ADD_INS (cfg->cbb, ins);
9999 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10000 MONO_ADD_INS (cfg->cbb, ins);
10001 sp = stack_start;
10003 link_bblock (cfg, cfg->cbb, end_bblock);
10004 start_new_bblock = 1;
10005 /* This can complicate code generation for llvm since the return value might not be defined */
10006 if (COMPILE_LLVM (cfg))
10007 INLINE_FAILURE ("throw");
10008 break;
10009 case MONO_CEE_ENDFINALLY:
10010 if (!ip_in_finally_clause (cfg, ip - header->code))
10011 UNVERIFIED;
10012 /* mono_save_seq_point_info () depends on this */
10013 if (sp != stack_start)
10014 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10015 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10016 MONO_ADD_INS (cfg->cbb, ins);
10017 start_new_bblock = 1;
10019 /*
10020 * Control will leave the method so empty the stack, otherwise
10021 * the next basic block will start with a nonempty stack.
10022 */
10023 while (sp != stack_start) {
10024 sp--;
10025 }
10026 break;
10027 case MONO_CEE_LEAVE:
10028 case MONO_CEE_LEAVE_S: {
10029 GList *handlers;
10031 /* empty the stack */
10032 g_assert (sp >= stack_start);
10033 sp = stack_start;
10035 /*
10036 * If this leave statement is in a catch block, check for a
10037 * pending exception, and rethrow it if necessary.
10038 * We avoid doing this in runtime invoke wrappers, since those are called
10039 * by native code which expects the wrapper to catch all exceptions.
10040 */
10041 for (i = 0; i < header->num_clauses; ++i) {
10042 MonoExceptionClause *clause = &header->clauses [i];
10044 /*
10045 * Use <= in the final comparison to handle clauses with multiple
10046 * leave statements, like in bug #78024.
10047 * The ordering of the exception clauses guarantees that we find the
10048 * innermost clause.
10049 */
10050 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((il_op == MONO_CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10051 MonoInst *exc_ins;
10052 MonoBasicBlock *dont_throw;
10054 /*
10055 MonoInst *load;
10057 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10058 */
10060 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10062 NEW_BBLOCK (cfg, dont_throw);
10064 /*
10065 * Currently, we always rethrow the abort exception, despite the
10066 * fact that this is not correct. See thread6.cs for an example.
10067 * But propagating the abort exception is more important than
10068 * getting the semantics right.
10069 */
10070 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10072 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10074 MONO_START_BB (cfg, dont_throw);
10075 }
10076 }
10078 #ifdef ENABLE_LLVM
10079 cfg->cbb->try_end = (intptr_t)(ip - header->code);
10080 #endif
10082 if ((handlers = mono_find_leave_clauses (cfg, ip, target))) {
10083 GList *tmp;
10084 /*
10085 * For each finally clause that we exit we need to invoke the finally block.
10086 * After each invocation we need to add try holes for all the clauses that
10087 * we already exited.
10088 */
10089 for (tmp = handlers; tmp; tmp = tmp->next) {
10090 MonoLeaveClause *leave = (MonoLeaveClause *) tmp->data;
10091 MonoExceptionClause *clause = leave->clause;
10093 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
10094 continue;
10096 MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
10097 MonoBasicBlock *dont_throw;
10099 /*
10100 * Emit instrumentation code before linking the basic blocks below as this
10101 * will alter cfg->cbb.
10102 */
10103 mini_profiler_emit_call_finally (cfg, header, ip, leave->index, clause);
10105 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10106 g_assert (tblock);
10107 link_bblock (cfg, cfg->cbb, tblock);
10109 MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
10111 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10112 ins->inst_target_bb = tblock;
10113 ins->inst_eh_blocks = tmp;
10114 MONO_ADD_INS (cfg->cbb, ins);
10115 cfg->cbb->has_call_handler = 1;
10117 /* Throw exception if exvar is set */
10118 /* FIXME Do we need this for calls from catch/filter ? */
10119 NEW_BBLOCK (cfg, dont_throw);
10120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
10121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10122 mono_emit_jit_icall (cfg, ves_icall_thread_finish_async_abort, NULL);
10123 cfg->cbb->clause_holes = tmp;
10125 MONO_START_BB (cfg, dont_throw);
10126 cfg->cbb->clause_holes = tmp;
10128 if (COMPILE_LLVM (cfg)) {
10129 MonoBasicBlock *target_bb;
10131 /*
10132 * Link the finally bblock with the target, since it will
10133 * conceptually branch there.
10134 */
10135 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
10136 GET_BBLOCK (cfg, target_bb, target);
10137 link_bblock (cfg, tblock, target_bb);
10138 }
10139 }
10140 }
10142 MONO_INST_NEW (cfg, ins, OP_BR);
10143 MONO_ADD_INS (cfg->cbb, ins);
10144 GET_BBLOCK (cfg, tblock, target);
10145 link_bblock (cfg, cfg->cbb, tblock);
10146 ins->inst_target_bb = tblock;
10148 start_new_bblock = 1;
10149 break;
10150 }
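/*
 * Illustrative sketch of what the leave handling above produces for
 *
 *     try { ... leave L; } finally { ... endfinally }
 *
 * an OP_CALL_HANDLER into the finally bblock (with clause holes recorded),
 * a check of the abort-exception variable on return from the handler, and
 * then an OP_BR to the bblock for L, so pending thread aborts are rethrown
 * rather than swallowed by the leave.
 */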
10152 /*
10153 * Mono specific opcodes
10154 */
10156 case MONO_CEE_MONO_ICALL: {
10157 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10158 const MonoJitICallId jit_icall_id = (MonoJitICallId)token;
10159 MonoJitICallInfo * const info = mono_find_jit_icall_info (jit_icall_id);
10161 CHECK_STACK (info->sig->param_count);
10162 sp -= info->sig->param_count;
10164 if (token == MONO_JIT_ICALL_mono_threads_attach_coop) {
10165 MonoInst *addr;
10166 MonoBasicBlock *next_bb;
10168 if (cfg->compile_aot) {
10169 /*
10169 /*
10170 * This is called on unattached threads, so it cannot go through the trampoline
10171 * infrastructure. Use an indirect call through a got slot initialized at load time
10172 * instead.
10173 */
10174 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, GUINT_TO_POINTER (jit_icall_id));
10175 ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
10176 } else {
10177 ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
10178 }
10180 /*
10181 * Parts of the initlocals code need to come after this, since it might call methods like memset.
10182 */
10183 init_localsbb2 = cfg->cbb;
10184 NEW_BBLOCK (cfg, next_bb);
10185 MONO_START_BB (cfg, next_bb);
10186 } else {
10187 ins = mono_emit_jit_icall_id (cfg, jit_icall_id, sp);
10190 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10191 *sp++ = ins;
10193 inline_costs += CALL_COST * MIN(10, num_calls++);
10194 break;
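/*
 * Stack effect of the wrapper-only MONO_CEE_MONO_ICALL above: pop
 * info->sig->param_count values, emit a direct call to the JIT icall, and
 * push the result unless the signature returns void. The
 * mono_threads_attach_coop special case exists because that icall runs on
 * still-unattached threads: under AOT it is reached through a GOT slot
 * initialized at load time instead of a trampoline.
 */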
10197 MonoJumpInfoType ldptr_type;
10199 case MONO_CEE_MONO_LDPTR_CARD_TABLE:
10200 ldptr_type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
10201 goto mono_ldptr;
10202 case MONO_CEE_MONO_LDPTR_NURSERY_START:
10203 ldptr_type = MONO_PATCH_INFO_GC_NURSERY_START;
10204 goto mono_ldptr;
10205 case MONO_CEE_MONO_LDPTR_NURSERY_BITS:
10206 ldptr_type = MONO_PATCH_INFO_GC_NURSERY_BITS;
10207 goto mono_ldptr;
10208 case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG:
10209 ldptr_type = MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG;
10210 goto mono_ldptr;
10211 case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
10212 ldptr_type = MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT;
10213 mono_ldptr:
10214 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10215 ins = mini_emit_runtime_constant (cfg, ldptr_type, NULL);
10216 *sp++ = ins;
10217 inline_costs += CALL_COST * MIN(10, num_calls++);
10218 break;
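/*
 * The MONO_CEE_MONO_LDPTR_* opcodes above are a table lookup of runtime
 * constants, e.g. (illustrative):
 *   MONO_CEE_MONO_LDPTR_CARD_TABLE -> MONO_PATCH_INFO_GC_CARD_TABLE_ADDR
 * mini_emit_runtime_constant () resolves these in an AOT-friendly way,
 * unlike the plain MONO_CEE_MONO_LDPTR case below, which embeds a raw
 * pointer and therefore has to disable AOT.
 */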
10220 case MONO_CEE_MONO_LDPTR: {
10221 gpointer ptr;
10223 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10224 ptr = mono_method_get_wrapper_data (method, token);
10225 EMIT_NEW_PCONST (cfg, ins, ptr);
10226 *sp++ = ins;
10227 inline_costs += CALL_COST * MIN(10, num_calls++);
10228 /* Can't embed random pointers into AOT code */
10229 DISABLE_AOT (cfg);
10230 break;
10232 case MONO_CEE_MONO_JIT_ICALL_ADDR:
10233 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10234 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, GUINT_TO_POINTER (token));
10235 *sp++ = ins;
10236 inline_costs += CALL_COST * MIN(10, num_calls++);
10237 break;
10239 case MONO_CEE_MONO_ICALL_ADDR: {
10240 MonoMethod *cmethod;
10241 gpointer ptr;
10243 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10245 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
10247 if (cfg->compile_aot) {
10248 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
10249 /*
10250 * This is generated by emit_native_wrapper () to resolve the pinvoke address
10251 * before the call, it's not needed when using direct pinvoke.
10252 * This is not an optimization, but it's used to avoid looking up pinvokes
10253 * on platforms which don't support dlopen ().
10254 */
10255 EMIT_NEW_PCONST (cfg, ins, NULL);
10256 } else {
10257 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10259 } else {
10260 ptr = mono_lookup_internal_call (cmethod);
10261 g_assert (ptr);
10262 EMIT_NEW_PCONST (cfg, ins, ptr);
10264 *sp++ = ins;
10265 break;
10267 case MONO_CEE_MONO_VTADDR: {
10268 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10269 MonoInst *src_var, *src;
10271 --sp;
10273 // FIXME:
10274 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10275 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10276 *sp++ = src;
10277 break;
10279 case MONO_CEE_MONO_NEWOBJ: {
10280 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10281 MonoInst *iargs [2];
10283 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10284 mono_class_init_internal (klass);
10285 NEW_DOMAINCONST (cfg, iargs [0]);
10286 MONO_ADD_INS (cfg->cbb, iargs [0]);
10287 NEW_CLASSCONST (cfg, iargs [1], klass);
10288 MONO_ADD_INS (cfg->cbb, iargs [1]);
10289 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
10290 inline_costs += CALL_COST * MIN(10, num_calls++);
10291 break;
10293 case MONO_CEE_MONO_OBJADDR:
10294 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10295 --sp;
10296 MONO_INST_NEW (cfg, ins, OP_MOVE);
10297 ins->dreg = alloc_ireg_mp (cfg);
10298 ins->sreg1 = sp [0]->dreg;
10299 ins->type = STACK_MP;
10300 MONO_ADD_INS (cfg->cbb, ins);
10301 *sp++ = ins;
10302 break;
10303 case MONO_CEE_MONO_LDNATIVEOBJ:
10304 /*
10305 * Similar to LDOBJ, but instead loads the unmanaged
10306 * representation of the vtype to the stack.
10307 */
10308 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10309 --sp;
10310 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10311 g_assert (m_class_is_valuetype (klass));
10312 mono_class_init_internal (klass);
10315 MonoInst *src, *dest, *temp;
10317 src = sp [0];
10318 temp = mono_compile_create_var (cfg, m_class_get_byval_arg (klass), OP_LOCAL);
10319 temp->backend.is_pinvoke = 1;
10320 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10321 mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
10323 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10324 dest->type = STACK_VTYPE;
10325 dest->klass = klass;
10327 *sp ++ = dest;
10329 break;
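/*
 * I.e. the managed vtype is copied into a temporary whose
 * backend.is_pinvoke flag makes its size and layout follow the unmanaged
 * (marshalled) representation; the temporary is then reloaded as the
 * STACK_VTYPE value pushed above.
 */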
10330 case MONO_CEE_MONO_RETOBJ: {
10331 /*
10332 * Same as RET, but return the native representation of a vtype
10333 * to the caller.
10334 */
10335 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10336 g_assert (cfg->ret);
10337 g_assert (mono_method_signature_internal (method)->pinvoke);
10338 --sp;
10340 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10342 if (!cfg->vret_addr) {
10343 g_assert (cfg->ret_var_is_local);
10345 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10346 } else {
10347 EMIT_NEW_RETLOADA (cfg, ins);
10349 mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
10351 if (sp != stack_start)
10352 UNVERIFIED;
10354 mini_profiler_emit_leave (cfg, sp [0]);
10356 MONO_INST_NEW (cfg, ins, OP_BR);
10357 ins->inst_target_bb = end_bblock;
10358 MONO_ADD_INS (cfg->cbb, ins);
10359 link_bblock (cfg, cfg->cbb, end_bblock);
10360 start_new_bblock = 1;
10361 break;
10363 case MONO_CEE_MONO_SAVE_LMF:
10364 case MONO_CEE_MONO_RESTORE_LMF:
10365 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10366 break;
10367 case MONO_CEE_MONO_CLASSCONST:
10368 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10369 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10370 *sp++ = ins;
10371 inline_costs += CALL_COST * MIN(10, num_calls++);
10372 break;
10373 case MONO_CEE_MONO_NOT_TAKEN:
10374 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10375 cfg->cbb->out_of_line = TRUE;
10376 break;
10377 case MONO_CEE_MONO_TLS: {
10378 MonoTlsKey key;
10380 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10381 key = (MonoTlsKey)n;
10382 g_assert (key < TLS_KEY_NUM);
10384 ins = mono_create_tls_get (cfg, key);
10385 g_assert (ins);
10386 ins->type = STACK_PTR;
10387 *sp++ = ins;
10388 break;
10390 case MONO_CEE_MONO_DYN_CALL: {
10391 MonoCallInst *call;
10393 /* It would be easier to call a trampoline, but that would put an
10394 * extra frame on the stack, confusing exception handling. So
10395 * implement it inline using an opcode for now.
10396 */
10398 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10399 if (!cfg->dyn_call_var) {
10400 cfg->dyn_call_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
10401 /* prevent it from being register allocated */
10402 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
10405 /* Has to use a call inst since local regalloc expects it */
10406 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10407 ins = (MonoInst*)call;
10408 sp -= 2;
10409 ins->sreg1 = sp [0]->dreg;
10410 ins->sreg2 = sp [1]->dreg;
10411 MONO_ADD_INS (cfg->cbb, ins);
10413 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
10414 /* OP_DYN_CALL might need to allocate a dynamically sized param area */
10415 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10417 inline_costs += CALL_COST * MIN(10, num_calls++);
10418 break;
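/*
 * Rough operand convention of the OP_DYN_CALL built above: sreg1 and sreg2
 * receive the two pointers popped from the IL stack (the argument buffer
 * and the return-value buffer set up by the wrapper). dyn_call_var is
 * marked MONO_INST_VOLATILE so it is never register allocated, and
 * MONO_CFG_HAS_ALLOCA is set because the backend may need a dynamically
 * sized param area.
 */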
10420 case MONO_CEE_MONO_MEMORY_BARRIER: {
10421 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10422 mini_emit_memory_barrier (cfg, (int)n);
10423 break;
10425 case MONO_CEE_MONO_ATOMIC_STORE_I4: {
10426 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10427 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
10429 sp -= 2;
10431 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
10432 ins->dreg = sp [0]->dreg;
10433 ins->sreg1 = sp [1]->dreg;
10434 ins->backend.memory_barrier_kind = (int)n;
10435 MONO_ADD_INS (cfg->cbb, ins);
10436 break;
10438 case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR: {
10439 CHECK_STACK (1);
10440 --sp;
10442 dreg = alloc_preg (cfg);
10443 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
10444 *sp++ = ins;
10445 break;
10447 case MONO_CEE_MONO_CALLI_EXTRA_ARG: {
10448 MonoInst *addr;
10449 MonoMethodSignature *fsig;
10450 MonoInst *arg;
10452 /*
10453 * This is the same as CEE_CALLI, but passes an additional argument
10454 * to the called method in llvmonly mode.
10455 * This is only used by delegate invoke wrappers to call the
10456 * actual delegate method.
10457 */
10458 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
10460 ins = NULL;
10462 cmethod = NULL;
10463 CHECK_STACK (1);
10464 --sp;
10465 addr = *sp;
10466 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
10467 CHECK_CFG_ERROR;
10469 if (cfg->llvm_only)
10470 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
10472 n = fsig->param_count + fsig->hasthis + 1;
10474 CHECK_STACK (n);
10476 sp -= n;
10477 arg = sp [n - 1];
10479 if (cfg->llvm_only) {
10480 /*
10481 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
10482 * cconv. This is set by mono_init_delegate ().
10483 */
10484 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
10485 MonoInst *callee = addr;
10486 MonoInst *call, *localloc_ins;
10487 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
10488 int low_bit_reg = alloc_preg (cfg);
10490 NEW_BBLOCK (cfg, is_gsharedvt_bb);
10491 NEW_BBLOCK (cfg, end_bb);
10493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
10494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
10495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
10497 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
10498 addr = emit_get_rgctx_sig (cfg, context_used,
10499 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
10500 /*
10501 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
10502 */
10503 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
10504 ins->dreg = alloc_preg (cfg);
10505 ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
10506 MONO_ADD_INS (cfg->cbb, ins);
10507 localloc_ins = ins;
10508 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10509 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
10510 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
10512 call = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
10513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10515 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
10516 MONO_START_BB (cfg, is_gsharedvt_bb);
10517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
10518 ins = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
10519 ins->dreg = call->dreg;
10521 MONO_START_BB (cfg, end_bb);
10522 } else {
10523 /* Caller uses a normal calling conv */
10525 MonoInst *callee = addr;
10526 MonoInst *call, *localloc_ins;
10527 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
10528 int low_bit_reg = alloc_preg (cfg);
10530 NEW_BBLOCK (cfg, is_gsharedvt_bb);
10531 NEW_BBLOCK (cfg, end_bb);
10533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
10534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
10535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
10537 /* Normal case: callee uses a normal cconv, no conversion is needed */
10538 call = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
10539 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10540 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
10541 MONO_START_BB (cfg, is_gsharedvt_bb);
10542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
10543 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
10544 MONO_ADD_INS (cfg->cbb, addr);
10545 /*
10546 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
10547 */
10548 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
10549 ins->dreg = alloc_preg (cfg);
10550 ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
10551 MONO_ADD_INS (cfg->cbb, ins);
10552 localloc_ins = ins;
10553 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10554 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
10555 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
10557 ins = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
10558 ins->dreg = call->dreg;
10559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10561 MONO_START_BB (cfg, end_bb);
10563 } else {
10564 /* Same as CEE_CALLI */
10565 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
10566 /*
10567 * We pass the address to the gsharedvt trampoline in the rgctx reg
10568 */
10569 MonoInst *callee = addr;
10571 addr = emit_get_rgctx_sig (cfg, context_used,
10572 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
10573 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
10574 } else {
10575 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
10579 if (!MONO_TYPE_IS_VOID (fsig->ret))
10580 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10582 CHECK_CFG_EXCEPTION;
10584 ins_flag = 0;
10585 constrained_class = NULL;
10586 break;
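/*
 * Tagging scheme handled above (illustrative): the extra argument set up
 * by mono_init_delegate () has its lowest bit set when the callee expects
 * the gsharedvt cconv. The bit is tested with OP_PAND_IMM and cleared with
 * OP_PXOR_IMM before calling; when caller and callee conventions disagree,
 * the call is routed through a gsharedvt in/out wrapper that takes a
 * stack-allocated <callee, arg> pair as its extra argument.
 */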
10588 case MONO_CEE_MONO_LDDOMAIN:
10589 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10590 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
10591 *sp++ = ins;
10592 break;
10593 case MONO_CEE_MONO_SAVE_LAST_ERROR:
10594 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10596 // Just an IL prefix; it sets this flag, which is picked up by call instructions.
10597 save_last_error = TRUE;
10598 break;
10599 case MONO_CEE_MONO_GET_RGCTX_ARG:
10600 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10602 mono_create_rgctx_var (cfg);
10604 MONO_INST_NEW (cfg, ins, OP_MOVE);
10605 ins->dreg = alloc_dreg (cfg, STACK_PTR);
10606 ins->sreg1 = cfg->rgctx_var->dreg;
10607 ins->type = STACK_PTR;
10608 MONO_ADD_INS (cfg->cbb, ins);
10610 *sp++ = ins;
10611 break;
10613 case MONO_CEE_ARGLIST: {
10614 /* somewhat similar to LDTOKEN */
10615 MonoInst *addr, *vtvar;
10616 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.argumenthandle_class), OP_LOCAL);
10618 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10619 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10621 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10622 ins->type = STACK_VTYPE;
10623 ins->klass = mono_defaults.argumenthandle_class;
10624 *sp++ = ins;
10625 break;
10627 case MONO_CEE_CEQ:
10628 case MONO_CEE_CGT:
10629 case MONO_CEE_CGT_UN:
10630 case MONO_CEE_CLT:
10631 case MONO_CEE_CLT_UN: {
10632 MonoInst *cmp, *arg1, *arg2;
10634 sp -= 2;
10635 arg1 = sp [0];
10636 arg2 = sp [1];
10638 /*
10639 * The following transforms:
10640 * CEE_CEQ into OP_CEQ
10641 * CEE_CGT into OP_CGT
10642 * CEE_CGT_UN into OP_CGT_UN
10643 * CEE_CLT into OP_CLT
10644 * CEE_CLT_UN into OP_CLT_UN
10645 */
10646 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10648 MONO_INST_NEW (cfg, ins, cmp->opcode);
10649 cmp->sreg1 = arg1->dreg;
10650 cmp->sreg2 = arg2->dreg;
10651 type_from_op (cfg, cmp, arg1, arg2);
10652 CHECK_TYPE (cmp);
10653 add_widen_op (cfg, cmp, &arg1, &arg2);
10654 if ((arg1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
10655 cmp->opcode = OP_LCOMPARE;
10656 else if (arg1->type == STACK_R4)
10657 cmp->opcode = OP_RCOMPARE;
10658 else if (arg1->type == STACK_R8)
10659 cmp->opcode = OP_FCOMPARE;
10660 else
10661 cmp->opcode = OP_ICOMPARE;
10662 MONO_ADD_INS (cfg->cbb, cmp);
10663 ins->type = STACK_I4;
10664 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
10665 type_from_op (cfg, ins, arg1, arg2);
10667 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
10668 /*
10669 * The backends expect the fceq opcodes to do the
10670 * comparison too.
10671 */
10672 ins->sreg1 = cmp->sreg1;
10673 ins->sreg2 = cmp->sreg2;
10674 NULLIFY_INS (cmp);
10676 MONO_ADD_INS (cfg->cbb, ins);
10677 *sp++ = ins;
10678 break;
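/*
 * So each comparison is lowered to two instructions: a compare whose
 * opcode is picked from the operand stack type (OP_ICOMPARE, OP_LCOMPARE,
 * OP_RCOMPARE or OP_FCOMPARE) followed by a setcc-style opcode computed as
 * (OP_CEQ - CEE_CEQ) + ip [1], then adjusted by type_from_op (). For R4/R8
 * operands the setcc opcode performs the comparison itself, which is why
 * the compare is nullified above.
 */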
10680 case MONO_CEE_LDFTN: {
10681 MonoInst *argconst;
10682 MonoMethod *cil_method;
10684 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10685 CHECK_CFG_ERROR;
10687 mono_class_init_internal (cmethod->klass);
10689 mono_save_token_info (cfg, image, n, cmethod);
10691 context_used = mini_method_check_context_used (cfg, cmethod);
10693 cil_method = cmethod;
10694 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10695 emit_method_access_failure (cfg, method, cil_method);
10697 if (mono_security_core_clr_enabled ())
10698 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10700 /*
10701 * Optimize the common case of ldftn+delegate creation
10702 */
10703 if ((sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
10704 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
10705 if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
10706 MonoInst *target_ins, *handle_ins;
10707 MonoMethod *invoke;
10708 int invoke_context_used;
10710 invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
10711 if (!invoke || !mono_method_signature_internal (invoke))
10712 LOAD_ERROR;
10714 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10716 target_ins = sp [-1];
10718 if (mono_security_core_clr_enabled ())
10719 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
10721 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10722 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10723 if (mono_method_signature_internal (invoke)->param_count == mono_method_signature_internal (cmethod)->param_count) {
10724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10725 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10729 if ((invoke_context_used == 0 || !cfg->gsharedvt) || cfg->llvm_only) {
10730 if (cfg->verbose_level > 3)
10731 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
10732 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, FALSE))) {
10733 sp --;
10734 *sp = handle_ins;
10735 CHECK_CFG_EXCEPTION;
10736 sp ++;
10737 next_ip += 5;
10738 il_op = MONO_CEE_NEWOBJ;
10739 break;
10740 } else {
10741 CHECK_CFG_ERROR;
10747 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10748 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10749 *sp++ = ins;
10751 inline_costs += CALL_COST * MIN(10, num_calls++);
10752 break;
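/*
 * The IL pattern recognized above is, illustratively:
 *   ldftn  <some method>
 *   newobj instance void SomeDelegate::.ctor (object, native int)
 * When the next opcode is such a delegate ctor, handle_delegate_ctor ()
 * creates the delegate directly and both IL instructions are consumed;
 * otherwise the generic mono_ldftn icall pushes the function pointer.
 */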
10754 case MONO_CEE_LDVIRTFTN: {
10755 MonoInst *args [2];
10757 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10758 CHECK_CFG_ERROR;
10760 mono_class_init_internal (cmethod->klass);
10762 context_used = mini_method_check_context_used (cfg, cmethod);
10764 if (mono_security_core_clr_enabled ())
10765 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10767 /*
10768 * Optimize the common case of ldvirtftn+delegate creation
10769 */
10770 if (previous_il_op == MONO_CEE_DUP && (sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
10771 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
10772 if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
10773 MonoInst *target_ins, *handle_ins;
10774 MonoMethod *invoke;
10775 int invoke_context_used;
10776 const gboolean is_virtual = (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) != 0;
10778 invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
10779 if (!invoke || !mono_method_signature_internal (invoke))
10780 LOAD_ERROR;
10782 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10784 target_ins = sp [-1];
10786 if (mono_security_core_clr_enabled ())
10787 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
10789 if (invoke_context_used == 0 || !cfg->gsharedvt || cfg->llvm_only) {
10790 if (cfg->verbose_level > 3)
10791 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
10792 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, is_virtual))) {
10793 sp -= 2;
10794 *sp = handle_ins;
10795 CHECK_CFG_EXCEPTION;
10796 next_ip += 5;
10797 previous_il_op = MONO_CEE_NEWOBJ;
10798 sp ++;
10799 break;
10800 } else {
10801 CHECK_CFG_ERROR;
10807 --sp;
10808 args [0] = *sp;
10810 args [1] = emit_get_rgctx_method (cfg, context_used,
10811 cmethod, MONO_RGCTX_INFO_METHOD);
10813 if (context_used)
10814 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10815 else
10816 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10818 inline_costs += CALL_COST * MIN(10, num_calls++);
10819 break;
10821 case MONO_CEE_LOCALLOC: {
10822 MonoBasicBlock *non_zero_bb, *end_bb;
10823 int alloc_ptr = alloc_preg (cfg);
10824 --sp;
10825 if (sp != stack_start)
10826 UNVERIFIED;
10827 if (cfg->method != method)
10828 /*
10829 * Inlining this into a loop in a parent could lead to
10830 * stack overflows, which is different behavior from the
10831 * non-inlined case; thus disable inlining in this case.
10832 */
10833 INLINE_FAILURE("localloc");
10835 NEW_BBLOCK (cfg, non_zero_bb);
10836 NEW_BBLOCK (cfg, end_bb);
10838 /* if size != zero */
10839 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10840 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
10842 // size is zero, so the result is NULL
10843 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
10844 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10846 MONO_START_BB (cfg, non_zero_bb);
10847 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10848 ins->dreg = alloc_ptr;
10849 ins->sreg1 = sp [0]->dreg;
10850 ins->type = STACK_PTR;
10851 MONO_ADD_INS (cfg->cbb, ins);
10853 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10854 if (init_locals)
10855 ins->flags |= MONO_INST_INIT;
10857 MONO_START_BB (cfg, end_bb);
10858 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
10859 ins->type = STACK_PTR;
10861 *sp++ = ins;
10862 break;
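/*
 * The control flow built above corresponds roughly to:
 *   result = (size == 0) ? NULL : alloca (size);
 * OP_LOCALLOC is only reached with a non-zero size, and MONO_INST_INIT
 * makes the backend zero the allocated block when init-locals is set.
 */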
10864 case MONO_CEE_ENDFILTER: {
10865 MonoExceptionClause *clause, *nearest;
10866 int cc;
10868 --sp;
10869 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10870 UNVERIFIED;
10871 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10872 ins->sreg1 = (*sp)->dreg;
10873 MONO_ADD_INS (cfg->cbb, ins);
10874 start_new_bblock = 1;
10876 nearest = NULL;
10877 for (cc = 0; cc < header->num_clauses; ++cc) {
10878 clause = &header->clauses [cc];
10879 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10880 ((next_ip - header->code) > clause->data.filter_offset && (next_ip - header->code) <= clause->handler_offset) &&
10881 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
10882 nearest = clause;
10884 g_assert (nearest);
10885 if ((next_ip - header->code) != nearest->handler_offset)
10886 UNVERIFIED;
10888 break;
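/*
 * The loop above picks the filter clause whose filter region contains
 * this endfilter, i.e. (illustrative)
 *   filter_offset < (next_ip - header->code) <= handler_offset
 * and the check afterwards requires endfilter to be exactly the last
 * instruction of that filter block.
 */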
10890 case MONO_CEE_UNALIGNED_:
10891 ins_flag |= MONO_INST_UNALIGNED;
10892 /* FIXME: record alignment? we can assume 1 for now */
10893 break;
10894 case MONO_CEE_VOLATILE_:
10895 ins_flag |= MONO_INST_VOLATILE;
10896 break;
10897 case MONO_CEE_TAIL_:
10898 ins_flag |= MONO_INST_TAILCALL;
10899 cfg->flags |= MONO_CFG_HAS_TAILCALL;
10900 /* Can't inline tailcalls at this time */
10901 inline_costs += 100000;
10902 break;
10903 case MONO_CEE_INITOBJ:
10904 --sp;
10905 klass = mini_get_class (method, token, generic_context);
10906 CHECK_TYPELOAD (klass);
10907 if (mini_class_is_reference (klass))
10908 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10909 else
10910 mini_emit_initobj (cfg, *sp, NULL, klass);
10911 inline_costs += 1;
10912 break;
10913 case MONO_CEE_CONSTRAINED_:
10914 constrained_class = mini_get_class (method, token, generic_context);
10915 CHECK_TYPELOAD (constrained_class);
10916 break;
10917 case MONO_CEE_CPBLK:
10918 sp -= 3;
10919 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
10920 ins_flag = 0;
10921 inline_costs += 1;
10922 break;
10923 case MONO_CEE_INITBLK:
10924 sp -= 3;
10925 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
10926 ins_flag = 0;
10927 inline_costs += 1;
10928 break;
10929 case MONO_CEE_NO_:
10930 if (ip [2] & 1)
10931 ins_flag |= MONO_INST_NOTYPECHECK;
10932 if (ip [2] & 2)
10933 ins_flag |= MONO_INST_NORANGECHECK;
10934 /* we ignore the no-nullcheck for now since we
10935 * really do it explicitly only when doing callvirt->call
10936 */
10937 break;
10938 case MONO_CEE_RETHROW: {
10939 MonoInst *load;
10940 int handler_offset = -1;
10942 for (i = 0; i < header->num_clauses; ++i) {
10943 MonoExceptionClause *clause = &header->clauses [i];
10944 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10945 handler_offset = clause->handler_offset;
10946 break;
10950 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
10952 if (handler_offset == -1)
10953 UNVERIFIED;
10955 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10956 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10957 ins->sreg1 = load->dreg;
10958 MONO_ADD_INS (cfg->cbb, ins);
10960 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10961 MONO_ADD_INS (cfg->cbb, ins);
10963 sp = stack_start;
10964 link_bblock (cfg, cfg->cbb, end_bblock);
10965 start_new_bblock = 1;
10966 break;
10968 case MONO_CEE_MONO_RETHROW: {
10969 if (sp [-1]->type != STACK_OBJ)
10970 UNVERIFIED;
10972 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10973 --sp;
10974 ins->sreg1 = sp [0]->dreg;
10975 cfg->cbb->out_of_line = TRUE;
10976 MONO_ADD_INS (cfg->cbb, ins);
10977 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10978 MONO_ADD_INS (cfg->cbb, ins);
10979 sp = stack_start;
10981 link_bblock (cfg, cfg->cbb, end_bblock);
10982 start_new_bblock = 1;
10983 /* This can complicate code generation for llvm since the return value might not be defined */
10984 if (COMPILE_LLVM (cfg))
10985 INLINE_FAILURE ("mono_rethrow");
10986 break;
10988 case MONO_CEE_SIZEOF: {
10989 guint32 val;
10990 int ialign;
10992 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) {
10993 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
10994 CHECK_CFG_ERROR;
10996 val = mono_type_size (type, &ialign);
10997 EMIT_NEW_ICONST (cfg, ins, val);
10998 } else {
10999 MonoClass *klass = mini_get_class (method, token, generic_context);
11000 CHECK_TYPELOAD (klass);
11002 if (mini_is_gsharedvt_klass (klass)) {
11003 ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_SIZEOF);
11004 ins->type = STACK_I4;
11005 } else {
11006 val = mono_type_size (m_class_get_byval_arg (klass), &ialign);
11007 EMIT_NEW_ICONST (cfg, ins, val);
11011 *sp++ = ins;
11012 break;
11014 case MONO_CEE_REFANYTYPE: {
11015 MonoInst *src_var, *src;
11017 GSHAREDVT_FAILURE (il_op);
11019 --sp;
11021 // FIXME:
11022 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11023 if (!src_var)
11024 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
11025 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11026 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (mono_defaults.typehandle_class), src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
11027 *sp++ = ins;
11028 break;
11030 case MONO_CEE_READONLY_:
11031 readonly = TRUE;
11032 break;
11034 case MONO_CEE_UNUSED56:
11035 case MONO_CEE_UNUSED57:
11036 case MONO_CEE_UNUSED70:
11037 case MONO_CEE_UNUSED:
11038 case MONO_CEE_UNUSED99:
11039 case MONO_CEE_UNUSED58:
11040 case MONO_CEE_UNUSED1:
11041 UNVERIFIED;
11043 default:
11044 g_warning ("opcode 0x%02x not handled", il_op);
11045 UNVERIFIED;
11048 if (start_new_bblock != 1)
11049 UNVERIFIED;
11051 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
11052 if (cfg->cbb->next_bb) {
11053 /* This could already be set because of inlining, #693905 */
11054 MonoBasicBlock *bb = cfg->cbb;
11056 while (bb->next_bb)
11057 bb = bb->next_bb;
11058 bb->next_bb = end_bblock;
11059 } else {
11060 cfg->cbb->next_bb = end_bblock;
11063 if (cfg->method == method && cfg->domainvar) {
11064 MonoInst *store;
11065 MonoInst *get_domain;
11067 cfg->cbb = init_localsbb;
11069 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11070 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11071 MONO_ADD_INS (cfg->cbb, store);
11072 cfg->domainvar_inited = TRUE;
11075 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11076 if (cfg->compile_aot)
11077 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11078 mono_get_got_var (cfg);
11079 #endif
11081 if (cfg->method == method && cfg->got_var)
11082 mono_emit_load_got_addr (cfg);
11084 if (init_localsbb) {
11085 cfg->cbb = init_localsbb;
11086 cfg->ip = NULL;
11087 for (i = 0; i < header->num_locals; ++i) {
11088 /*
11089 * Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (),
11090 * which need the trampoline code to work.
11091 */
11092 if (MONO_TYPE_ISSTRUCT (header->locals [i]))
11093 cfg->cbb = init_localsbb2;
11094 else
11095 cfg->cbb = init_localsbb;
11096 emit_init_local (cfg, i, header->locals [i], init_locals);
11100 if (cfg->init_ref_vars && cfg->method == method) {
11101 /* Emit initialization for ref vars */
11102 // FIXME: Avoid duplication initialization for IL locals.
11103 for (i = 0; i < cfg->num_varinfo; ++i) {
11104 MonoInst *ins = cfg->varinfo [i];
11106 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11107 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11111 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
11112 cfg->cbb = init_localsbb;
11113 emit_push_lmf (cfg);
11116 cfg->cbb = init_localsbb;
11117 mini_profiler_emit_enter (cfg);
11119 if (seq_points) {
11120 MonoBasicBlock *bb;
11122 /*
11123 * Make seq points at backward branch targets interruptible.
11124 */
11125 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11126 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11127 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11130 /* Add a sequence point for method entry/exit events */
11131 if (seq_points && cfg->gen_sdb_seq_points) {
11132 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11133 MONO_ADD_INS (init_localsbb, ins);
11134 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11135 MONO_ADD_INS (cfg->bb_exit, ins);
11136 }
11138 /*
11139 * Add seq points for IL offsets which have line number info but for which no seq
11140 * point was generated during JITting because the code they refer to was dead (#11880).
11141 */
11142 if (sym_seq_points) {
11143 for (i = 0; i < header->code_size; ++i) {
11144 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11145 MonoInst *ins;
11147 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11148 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11153 cfg->ip = NULL;
11155 if (cfg->method == method) {
11156 compute_bb_regions (cfg);
11157 } else {
11158 MonoBasicBlock *bb;
11159 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
11160 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
11161 bb->real_offset = inline_offset;
11165 if (inline_costs < 0) {
11166 char *mname;
11168 /* Method is too large */
11169 mname = mono_method_full_name (method, TRUE);
11170 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
11171 g_free (mname);
11174 if ((cfg->verbose_level > 2) && (cfg->method == method))
11175 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11177 goto cleanup;
11179 mono_error_exit:
11180 if (cfg->verbose_level > 3)
11181 g_print ("exiting due to error");
11183 g_assert (!mono_error_ok (&cfg->error));
11184 goto cleanup;
11186 exception_exit:
11187 if (cfg->verbose_level > 3)
11188 g_print ("exiting due to exception");
11190 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11191 goto cleanup;
11193 unverified:
11194 if (cfg->verbose_level > 3)
11195 g_print ("exiting due to invalid il");
11197 set_exception_type_from_invalid_il (cfg, method, ip);
11198 goto cleanup;
11200 cleanup:
11201 g_slist_free (class_inits);
11202 mono_basic_block_free (original_bb);
11203 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
11204 if (cfg->exception_type)
11205 return -1;
11206 else
11207 return inline_costs;
11208 }
11210 static int
11211 store_membase_reg_to_store_membase_imm (int opcode)
11213 switch (opcode) {
11214 case OP_STORE_MEMBASE_REG:
11215 return OP_STORE_MEMBASE_IMM;
11216 case OP_STOREI1_MEMBASE_REG:
11217 return OP_STOREI1_MEMBASE_IMM;
11218 case OP_STOREI2_MEMBASE_REG:
11219 return OP_STOREI2_MEMBASE_IMM;
11220 case OP_STOREI4_MEMBASE_REG:
11221 return OP_STOREI4_MEMBASE_IMM;
11222 case OP_STOREI8_MEMBASE_REG:
11223 return OP_STOREI8_MEMBASE_IMM;
11224 default:
11225 g_assert_not_reached ();
11228 return -1;
11229 }
11231 int
11232 mono_op_to_op_imm (int opcode)
11234 switch (opcode) {
11235 case OP_IADD:
11236 return OP_IADD_IMM;
11237 case OP_ISUB:
11238 return OP_ISUB_IMM;
11239 case OP_IDIV:
11240 return OP_IDIV_IMM;
11241 case OP_IDIV_UN:
11242 return OP_IDIV_UN_IMM;
11243 case OP_IREM:
11244 return OP_IREM_IMM;
11245 case OP_IREM_UN:
11246 return OP_IREM_UN_IMM;
11247 case OP_IMUL:
11248 return OP_IMUL_IMM;
11249 case OP_IAND:
11250 return OP_IAND_IMM;
11251 case OP_IOR:
11252 return OP_IOR_IMM;
11253 case OP_IXOR:
11254 return OP_IXOR_IMM;
11255 case OP_ISHL:
11256 return OP_ISHL_IMM;
11257 case OP_ISHR:
11258 return OP_ISHR_IMM;
11259 case OP_ISHR_UN:
11260 return OP_ISHR_UN_IMM;
11262 case OP_LADD:
11263 return OP_LADD_IMM;
11264 case OP_LSUB:
11265 return OP_LSUB_IMM;
11266 case OP_LAND:
11267 return OP_LAND_IMM;
11268 case OP_LOR:
11269 return OP_LOR_IMM;
11270 case OP_LXOR:
11271 return OP_LXOR_IMM;
11272 case OP_LSHL:
11273 return OP_LSHL_IMM;
11274 case OP_LSHR:
11275 return OP_LSHR_IMM;
11276 case OP_LSHR_UN:
11277 return OP_LSHR_UN_IMM;
11278 #if SIZEOF_REGISTER == 8
11279 case OP_LMUL:
11280 return OP_LMUL_IMM;
11281 case OP_LREM:
11282 return OP_LREM_IMM;
11283 #endif
11285 case OP_COMPARE:
11286 return OP_COMPARE_IMM;
11287 case OP_ICOMPARE:
11288 return OP_ICOMPARE_IMM;
11289 case OP_LCOMPARE:
11290 return OP_LCOMPARE_IMM;
11292 case OP_STORE_MEMBASE_REG:
11293 return OP_STORE_MEMBASE_IMM;
11294 case OP_STOREI1_MEMBASE_REG:
11295 return OP_STOREI1_MEMBASE_IMM;
11296 case OP_STOREI2_MEMBASE_REG:
11297 return OP_STOREI2_MEMBASE_IMM;
11298 case OP_STOREI4_MEMBASE_REG:
11299 return OP_STOREI4_MEMBASE_IMM;
11301 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11302 case OP_X86_PUSH:
11303 return OP_X86_PUSH_IMM;
11304 case OP_X86_COMPARE_MEMBASE_REG:
11305 return OP_X86_COMPARE_MEMBASE_IMM;
11306 #endif
11307 #if defined(TARGET_AMD64)
11308 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11309 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11310 #endif
11311 case OP_VOIDCALL_REG:
11312 return OP_VOIDCALL;
11313 case OP_CALL_REG:
11314 return OP_CALL;
11315 case OP_LCALL_REG:
11316 return OP_LCALL;
11317 case OP_FCALL_REG:
11318 return OP_FCALL;
11319 case OP_LOCALLOC:
11320 return OP_LOCALLOC_IMM;
11323 return -1;
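/*
 * Illustrative use of mono_op_to_op_imm (): when one operand of a binop is
 * a known constant, later passes can rewrite, e.g.,
 *   OP_ICONST r2 <- 5 ; OP_IADD r3 <- r1, r2
 * into
 *   OP_IADD_IMM r3 <- r1 [5]
 * A return value of -1 means the opcode has no immediate form.
 */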
11326 static int
11327 stind_to_store_membase (int opcode)
11329 switch (opcode) {
11330 case MONO_CEE_STIND_I1:
11331 return OP_STOREI1_MEMBASE_REG;
11332 case MONO_CEE_STIND_I2:
11333 return OP_STOREI2_MEMBASE_REG;
11334 case MONO_CEE_STIND_I4:
11335 return OP_STOREI4_MEMBASE_REG;
11336 case MONO_CEE_STIND_I:
11337 case MONO_CEE_STIND_REF:
11338 return OP_STORE_MEMBASE_REG;
11339 case MONO_CEE_STIND_I8:
11340 return OP_STOREI8_MEMBASE_REG;
11341 case MONO_CEE_STIND_R4:
11342 return OP_STORER4_MEMBASE_REG;
11343 case MONO_CEE_STIND_R8:
11344 return OP_STORER8_MEMBASE_REG;
11345 default:
11346 g_assert_not_reached ();
11349 return -1;
11350 }
11352 int
11353 mono_load_membase_to_load_mem (int opcode)
11355 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11356 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11357 switch (opcode) {
11358 case OP_LOAD_MEMBASE:
11359 return OP_LOAD_MEM;
11360 case OP_LOADU1_MEMBASE:
11361 return OP_LOADU1_MEM;
11362 case OP_LOADU2_MEMBASE:
11363 return OP_LOADU2_MEM;
11364 case OP_LOADI4_MEMBASE:
11365 return OP_LOADI4_MEM;
11366 case OP_LOADU4_MEMBASE:
11367 return OP_LOADU4_MEM;
11368 #if SIZEOF_REGISTER == 8
11369 case OP_LOADI8_MEMBASE:
11370 return OP_LOADI8_MEM;
11371 #endif
11373 #endif
11375 return -1;
11378 static inline int
11379 op_to_op_dest_membase (int store_opcode, int opcode)
11381 #if defined(TARGET_X86)
11382 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
11383 return -1;
11385 switch (opcode) {
11386 case OP_IADD:
11387 return OP_X86_ADD_MEMBASE_REG;
11388 case OP_ISUB:
11389 return OP_X86_SUB_MEMBASE_REG;
11390 case OP_IAND:
11391 return OP_X86_AND_MEMBASE_REG;
11392 case OP_IOR:
11393 return OP_X86_OR_MEMBASE_REG;
11394 case OP_IXOR:
11395 return OP_X86_XOR_MEMBASE_REG;
11396 case OP_ADD_IMM:
11397 case OP_IADD_IMM:
11398 return OP_X86_ADD_MEMBASE_IMM;
11399 case OP_SUB_IMM:
11400 case OP_ISUB_IMM:
11401 return OP_X86_SUB_MEMBASE_IMM;
11402 case OP_AND_IMM:
11403 case OP_IAND_IMM:
11404 return OP_X86_AND_MEMBASE_IMM;
11405 case OP_OR_IMM:
11406 case OP_IOR_IMM:
11407 return OP_X86_OR_MEMBASE_IMM;
11408 case OP_XOR_IMM:
11409 case OP_IXOR_IMM:
11410 return OP_X86_XOR_MEMBASE_IMM;
11411 case OP_MOVE:
11412 return OP_NOP;
11414 #endif
11416 #if defined(TARGET_AMD64)
11417 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
11418 return -1;
11420 switch (opcode) {
11421 case OP_IADD:
11422 return OP_X86_ADD_MEMBASE_REG;
11423 case OP_ISUB:
11424 return OP_X86_SUB_MEMBASE_REG;
11425 case OP_IAND:
11426 return OP_X86_AND_MEMBASE_REG;
11427 case OP_IOR:
11428 return OP_X86_OR_MEMBASE_REG;
11429 case OP_IXOR:
11430 return OP_X86_XOR_MEMBASE_REG;
11431 case OP_IADD_IMM:
11432 return OP_X86_ADD_MEMBASE_IMM;
11433 case OP_ISUB_IMM:
11434 return OP_X86_SUB_MEMBASE_IMM;
11435 case OP_IAND_IMM:
11436 return OP_X86_AND_MEMBASE_IMM;
11437 case OP_IOR_IMM:
11438 return OP_X86_OR_MEMBASE_IMM;
11439 case OP_IXOR_IMM:
11440 return OP_X86_XOR_MEMBASE_IMM;
11441 case OP_LADD:
11442 return OP_AMD64_ADD_MEMBASE_REG;
11443 case OP_LSUB:
11444 return OP_AMD64_SUB_MEMBASE_REG;
11445 case OP_LAND:
11446 return OP_AMD64_AND_MEMBASE_REG;
11447 case OP_LOR:
11448 return OP_AMD64_OR_MEMBASE_REG;
11449 case OP_LXOR:
11450 return OP_AMD64_XOR_MEMBASE_REG;
11451 case OP_ADD_IMM:
11452 case OP_LADD_IMM:
11453 return OP_AMD64_ADD_MEMBASE_IMM;
11454 case OP_SUB_IMM:
11455 case OP_LSUB_IMM:
11456 return OP_AMD64_SUB_MEMBASE_IMM;
11457 case OP_AND_IMM:
11458 case OP_LAND_IMM:
11459 return OP_AMD64_AND_MEMBASE_IMM;
11460 case OP_OR_IMM:
11461 case OP_LOR_IMM:
11462 return OP_AMD64_OR_MEMBASE_IMM;
11463 case OP_XOR_IMM:
11464 case OP_LXOR_IMM:
11465 return OP_AMD64_XOR_MEMBASE_IMM;
11466 case OP_MOVE:
11467 return OP_NOP;
11469 #endif
11471 return -1;
11474 static inline int
11475 op_to_op_store_membase (int store_opcode, int opcode)
11477 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11478 switch (opcode) {
11479 case OP_ICEQ:
11480 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11481 return OP_X86_SETEQ_MEMBASE;
11482 case OP_CNE:
11483 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11484 return OP_X86_SETNE_MEMBASE;
11486 #endif
11488 return -1;
11491 static inline int
11492 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
11494 #ifdef TARGET_X86
11495 /* FIXME: This has sign extension issues */
11496 /*
11497 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11498 return OP_X86_COMPARE_MEMBASE8_IMM;
11499 */
11501 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11502 return -1;
11504 switch (opcode) {
11505 case OP_X86_PUSH:
11506 return OP_X86_PUSH_MEMBASE;
11507 case OP_COMPARE_IMM:
11508 case OP_ICOMPARE_IMM:
11509 return OP_X86_COMPARE_MEMBASE_IMM;
11510 case OP_COMPARE:
11511 case OP_ICOMPARE:
11512 return OP_X86_COMPARE_MEMBASE_REG;
11514 #endif
11516 #ifdef TARGET_AMD64
11517 /* FIXME: This has sign extension issues */
11518 /*
11519 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11520 return OP_X86_COMPARE_MEMBASE8_IMM;
11521 */
11523 switch (opcode) {
11524 case OP_X86_PUSH:
11525 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
11526 return OP_X86_PUSH_MEMBASE;
11527 break;
11528 /* FIXME: This only works for 32 bit immediates
11529 case OP_COMPARE_IMM:
11530 case OP_LCOMPARE_IMM:
11531 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11532 return OP_AMD64_COMPARE_MEMBASE_IMM;
11533 */
11534 case OP_ICOMPARE_IMM:
11535 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11536 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11537 break;
11538 case OP_COMPARE:
11539 case OP_LCOMPARE:
11540 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
11541 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11542 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
11543 return OP_AMD64_COMPARE_MEMBASE_REG;
11544 break;
11545 case OP_ICOMPARE:
11546 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11547 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11548 break;
11550 #endif
11552 return -1;
11555 static inline int
11556 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
11558 #ifdef TARGET_X86
11559 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11560 return -1;
11562 switch (opcode) {
11563 case OP_COMPARE:
11564 case OP_ICOMPARE:
11565 return OP_X86_COMPARE_REG_MEMBASE;
11566 case OP_IADD:
11567 return OP_X86_ADD_REG_MEMBASE;
11568 case OP_ISUB:
11569 return OP_X86_SUB_REG_MEMBASE;
11570 case OP_IAND:
11571 return OP_X86_AND_REG_MEMBASE;
11572 case OP_IOR:
11573 return OP_X86_OR_REG_MEMBASE;
11574 case OP_IXOR:
11575 return OP_X86_XOR_REG_MEMBASE;
11577 #endif
11579 #ifdef TARGET_AMD64
11580 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
11581 switch (opcode) {
11582 case OP_ICOMPARE:
11583 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11584 case OP_IADD:
11585 return OP_X86_ADD_REG_MEMBASE;
11586 case OP_ISUB:
11587 return OP_X86_SUB_REG_MEMBASE;
11588 case OP_IAND:
11589 return OP_X86_AND_REG_MEMBASE;
11590 case OP_IOR:
11591 return OP_X86_OR_REG_MEMBASE;
11592 case OP_IXOR:
11593 return OP_X86_XOR_REG_MEMBASE;
11595 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
11596 switch (opcode) {
11597 case OP_COMPARE:
11598 case OP_LCOMPARE:
11599 return OP_AMD64_COMPARE_REG_MEMBASE;
11600 case OP_LADD:
11601 return OP_AMD64_ADD_REG_MEMBASE;
11602 case OP_LSUB:
11603 return OP_AMD64_SUB_REG_MEMBASE;
11604 case OP_LAND:
11605 return OP_AMD64_AND_REG_MEMBASE;
11606 case OP_LOR:
11607 return OP_AMD64_OR_REG_MEMBASE;
11608 case OP_LXOR:
11609 return OP_AMD64_XOR_REG_MEMBASE;
11612 #endif
11614 return -1;
11615 }
11617 int
11618 mono_op_to_op_imm_noemul (int opcode)
11620 switch (opcode) {
11621 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11622 case OP_LSHR:
11623 case OP_LSHL:
11624 case OP_LSHR_UN:
11625 return -1;
11626 #endif
11627 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11628 case OP_IDIV:
11629 case OP_IDIV_UN:
11630 case OP_IREM:
11631 case OP_IREM_UN:
11632 return -1;
11633 #endif
11634 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
11635 case OP_IMUL:
11636 return -1;
11637 #endif
11638 default:
11639 return mono_op_to_op_imm (opcode);
11640 }
11641 }
11643 /**
11644 * mono_handle_global_vregs:
11645 *
11646 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11647 * for them.
11648 */
11649 void
11650 mono_handle_global_vregs (MonoCompile *cfg)
11652 gint32 *vreg_to_bb;
11653 MonoBasicBlock *bb;
11654 int i, pos;
11656 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
11658 #ifdef MONO_ARCH_SIMD_INTRINSICS
11659 if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION)
11660 mono_simd_simplify_indirection (cfg);
11661 #endif
11663 /* Find local vregs used in more than one bb */
11664 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11665 MonoInst *ins = bb->code;
11666 int block_num = bb->block_num;
11668 if (cfg->verbose_level > 2)
11669 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11671 cfg->cbb = bb;
11672 for (; ins; ins = ins->next) {
11673 const char *spec = INS_INFO (ins->opcode);
11674 int regtype = 0, regindex;
11675 gint32 prev_bb;
11677 if (G_UNLIKELY (cfg->verbose_level > 2))
11678 mono_print_ins (ins);
11680 g_assert (ins->opcode >= MONO_CEE_LAST);
11682 for (regindex = 0; regindex < 4; regindex ++) {
11683 int vreg = 0;
11685 if (regindex == 0) {
11686 regtype = spec [MONO_INST_DEST];
11687 if (regtype == ' ')
11688 continue;
11689 vreg = ins->dreg;
11690 } else if (regindex == 1) {
11691 regtype = spec [MONO_INST_SRC1];
11692 if (regtype == ' ')
11693 continue;
11694 vreg = ins->sreg1;
11695 } else if (regindex == 2) {
11696 regtype = spec [MONO_INST_SRC2];
11697 if (regtype == ' ')
11698 continue;
11699 vreg = ins->sreg2;
11700 } else if (regindex == 3) {
11701 regtype = spec [MONO_INST_SRC3];
11702 if (regtype == ' ')
11703 continue;
11704 vreg = ins->sreg3;
11707 #if SIZEOF_REGISTER == 4
11708 /* In the LLVM case, the long opcodes are not decomposed */
11709 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11710 /*
11711 * Since some instructions reference the original long vreg,
11712 * and some reference the two component vregs, it is quite hard
11713 * to determine when it needs to be global. So be conservative.
11714 */
11715 if (!get_vreg_to_inst (cfg, vreg)) {
11716 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
11718 if (cfg->verbose_level > 2)
11719 printf ("LONG VREG R%d made global.\n", vreg);
11720 }
11722 /*
11723 * Make the component vregs volatile since the optimizations can
11724 * get confused otherwise.
11725 */
11726 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
11727 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
11729 #endif
11731 g_assert (vreg != -1);
11733 prev_bb = vreg_to_bb [vreg];
11734 if (prev_bb == 0) {
11735 /* 0 is a valid block num */
11736 vreg_to_bb [vreg] = block_num + 1;
11737 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11738 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11739 continue;
11741 if (!get_vreg_to_inst (cfg, vreg)) {
11742 if (G_UNLIKELY (cfg->verbose_level > 2))
11743 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11745 switch (regtype) {
11746 case 'i':
11747 if (vreg_is_ref (cfg, vreg))
11748 mono_compile_create_var_for_vreg (cfg, mono_get_object_type (), OP_LOCAL, vreg);
11749 else
11750 mono_compile_create_var_for_vreg (cfg, mono_get_int_type (), OP_LOCAL, vreg);
11751 break;
11752 case 'l':
11753 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
11754 break;
11755 case 'f':
11756 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL, vreg);
11757 break;
11758 case 'v':
11759 case 'x':
11760 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL, vreg);
11761 break;
11762 default:
11763 g_assert_not_reached ();
11767 /* Flag as having been used in more than one bb */
11768 vreg_to_bb [vreg] = -1;
11774 /* If a variable is used in only one bblock, convert it into a local vreg */
11775 for (i = 0; i < cfg->num_varinfo; i++) {
11776 MonoInst *var = cfg->varinfo [i];
11777 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11779 switch (var->type) {
11780 case STACK_I4:
11781 case STACK_OBJ:
11782 case STACK_PTR:
11783 case STACK_MP:
11784 case STACK_VTYPE:
11785 #if SIZEOF_REGISTER == 8
11786 case STACK_I8:
11787 #endif
11788 #if !defined(TARGET_X86)
11789 /* Enabling this screws up the fp stack on x86 */
11790 case STACK_R8:
11791 #endif
11792 if (mono_arch_is_soft_float ())
11793 break;
11796 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
11797 break;
11800 /* Arguments are implicitly global */
11801 /* Putting R4 vars into registers doesn't work currently */
11802 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
11803 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (m_class_get_byval_arg (var->klass)->type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
11804 /*
11805 * Make sure that the variable's liveness interval doesn't contain a call, since
11806 * that would cause the lvreg to be spilled, making the whole optimization
11807 * useless.
11808 */
11809 /* This is too slow for JIT compilation */
11810 #if 0
11811 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11812 MonoInst *ins;
11813 int def_index, call_index, ins_index;
11814 gboolean spilled = FALSE;
11816 def_index = -1;
11817 call_index = -1;
11818 ins_index = 0;
11819 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11820 const char *spec = INS_INFO (ins->opcode);
11822 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11823 def_index = ins_index;
11825 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11826 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
11827 if (call_index > def_index) {
11828 spilled = TRUE;
11829 break;
11833 if (MONO_IS_CALL (ins))
11834 call_index = ins_index;
11836 ins_index ++;
11839 if (spilled)
11840 break;
11842 #endif
11844 if (G_UNLIKELY (cfg->verbose_level > 2))
11845 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11846 var->flags |= MONO_INST_IS_DEAD;
11847 cfg->vreg_to_inst [var->dreg] = NULL;
11849 break;
11850 }
11851 }
11853 /*
11854 * Compress the varinfo and vars tables so the liveness computation is faster and
11855 * takes up less space.
11856 */
11857 pos = 0;
11858 for (i = 0; i < cfg->num_varinfo; ++i) {
11859 MonoInst *var = cfg->varinfo [i];
11860 if (pos < i && cfg->locals_start == i)
11861 cfg->locals_start = pos;
11862 if (!(var->flags & MONO_INST_IS_DEAD)) {
11863 if (pos < i) {
11864 cfg->varinfo [pos] = cfg->varinfo [i];
11865 cfg->varinfo [pos]->inst_c0 = pos;
11866 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11867 cfg->vars [pos].idx = pos;
11868 #if SIZEOF_REGISTER == 4
11869 if (cfg->varinfo [pos]->type == STACK_I8) {
11870 /* Modify the two component vars too */
11871 MonoInst *var1;
11873 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
11874 var1->inst_c0 = pos;
11875 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
11876 var1->inst_c0 = pos;
11878 #endif
11880 pos ++;
11883 cfg->num_varinfo = pos;
11884 if (cfg->locals_start > cfg->num_varinfo)
11885 cfg->locals_start = cfg->num_varinfo;
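/*
 * At this point cfg->varinfo is dense again: surviving variables keep
 * their relative order, their inst_c0/idx fields are rewritten to the new
 * positions, and locals_start has been pulled down so it still marks the
 * first local.
 */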
11886 }
11888 /**
11889 * mono_allocate_gsharedvt_vars:
11890 *
11891 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
11892 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
11893 */
11894 void
11895 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
11897 int i;
11899 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
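/*
 * Encoding used in gsharedvt_vreg_to_idx below (the zero default comes
 * from the alloc0 above, see also its use in the spill pass):
 *   0        -> the vreg has no gsharedvt entry
 *   idx + 1  -> a local stored at entries [idx]
 *   -1       -> an argument passed by ref (OP_GSHAREDVT_ARG_REGOFFSET)
 */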
11901 for (i = 0; i < cfg->num_varinfo; ++i) {
11902 MonoInst *ins = cfg->varinfo [i];
11903 int idx;
11905 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
11906 if (i >= cfg->locals_start) {
11907 /* Local */
11908 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
11909 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
11910 ins->opcode = OP_GSHAREDVT_LOCAL;
11911 ins->inst_imm = idx;
11912 } else {
11913 /* Arg */
11914 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
11915 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
11916 }
11917 }
11918 }
11919 }
11921 /**
11922 * mono_spill_global_vars:
11923 *
11924 * Generate spill code for variables which are not allocated to registers,
11925 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11926 * code is generated which could be optimized by the local optimization passes.
11927 */
11928 void
11929 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11931 MonoBasicBlock *bb;
11932 char spec2 [16];
11933 int orig_next_vreg;
11934 guint32 *vreg_to_lvreg;
11935 guint32 *lvregs;
11936 guint32 i, lvregs_len, lvregs_size;
11937 gboolean dest_has_lvreg = FALSE;
11938 MonoStackType stacktypes [128];
11939 MonoInst **live_range_start, **live_range_end;
11940 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11942 *need_local_opts = FALSE;
11944 memset (spec2, 0, sizeof (spec2));
11946 /* FIXME: Move this function to mini.c */
11947 stacktypes [(int)'i'] = STACK_PTR;
11948 stacktypes [(int)'l'] = STACK_I8;
11949 stacktypes [(int)'f'] = STACK_R8;
11950 #ifdef MONO_ARCH_SIMD_INTRINSICS
11951 stacktypes [(int)'x'] = STACK_VTYPE;
11952 #endif
11954 #if SIZEOF_REGISTER == 4
11955 /* Create MonoInsts for longs */
11956 for (i = 0; i < cfg->num_varinfo; i++) {
11957 MonoInst *ins = cfg->varinfo [i];
11959 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11960 switch (ins->type) {
11961 case STACK_R8:
11962 case STACK_I8: {
11963 MonoInst *tree;
11965 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11966 break;
11968 g_assert (ins->opcode == OP_REGOFFSET);
11970 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
11971 g_assert (tree);
11972 tree->opcode = OP_REGOFFSET;
11973 tree->inst_basereg = ins->inst_basereg;
11974 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11976 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
11977 g_assert (tree);
11978 tree->opcode = OP_REGOFFSET;
11979 tree->inst_basereg = ins->inst_basereg;
11980 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11981 break;
11983 default:
11984 break;
11988 #endif
11990 if (cfg->compute_gc_maps) {
11991 /* registers need liveness info even for non-ref vars */
11992 for (i = 0; i < cfg->num_varinfo; i++) {
11993 MonoInst *ins = cfg->varinfo [i];
11995 if (ins->opcode == OP_REGVAR)
11996 ins->flags |= MONO_INST_GC_TRACK;
12000 /* FIXME: widening and truncation */
12002 /*
12003 * As an optimization, when a variable allocated to the stack is first loaded into
12004 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12005 * the variable again.
12006 */
12007 orig_next_vreg = cfg->next_vreg;
12008 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12009 lvregs_size = 1024;
12010 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
12011 lvregs_len = 0;
12013 /*
12014 * These arrays contain the first and last instructions accessing a given
12015 * variable.
12016 * Since we emit bblocks in the same order we process them here, and we
12017 * don't split live ranges, these will precisely describe the live range of
12018 * the variable, i.e. the instruction range where a valid value can be found
12019 * in the variable's location.
12020 * The live range is computed using the liveness info computed by the liveness pass.
12021 * We can't use vmv->range, since that is an abstract live range, and we need
12022 * one which is instruction precise.
12023 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12024 */
12025 /* FIXME: Only do this if debugging info is requested */
12026 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12027 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12028 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12029 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;
		lvregs_len = 0;

		cfg->cbb = bb;
		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)
				continue;

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = (MonoInst *)ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					}
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					} else
						NOT_IMPLEMENTED;
				} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
					/* gsharedvt arg passed by ref */
					g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);

					ins->opcode = OP_LOAD_MEMBASE;
					ins->inst_basereg = var->inst_basereg;
					ins->inst_offset = var->inst_offset;
				} else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
					MonoInst *load, *load2, *load3;
					int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
					int reg1, reg2, reg3;
					MonoInst *info_var = cfg->gsharedvt_info_var;
					MonoInst *locals_var = cfg->gsharedvt_locals_var;

					/*
					 * gsharedvt local.
					 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
					 */
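
					/*
					 * A rough sketch (not emitted verbatim) of the sequence built below:
					 *
					 *   load reg1 <- info_var                (the MonoGSharedVtMethodRuntimeInfo *)
					 *   load reg2 <- [reg1 + offsetof (entries) + idx * sizeof (gpointer)]
					 *   load reg3 <- locals_var              (locals area base address)
					 *   padd dreg <- reg3 + reg2
					 */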

					g_assert (var->opcode == OP_GSHAREDVT_LOCAL);

					g_assert (info_var);
					g_assert (locals_var);

					/* Mark the instruction used to compute the locals var as used */
					cfg->gsharedvt_locals_var_ins = NULL;

					/* Load the offset */
					if (info_var->opcode == OP_REGOFFSET) {
						reg1 = alloc_ireg (cfg);
						NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
					} else if (info_var->opcode == OP_REGVAR) {
						load = NULL;
						reg1 = info_var->dreg;
					} else {
						g_assert_not_reached ();
					}
					reg2 = alloc_ireg (cfg);
					NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
					/* Load the locals area address */
					reg3 = alloc_ireg (cfg);
					if (locals_var->opcode == OP_REGOFFSET) {
						NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
					} else if (locals_var->opcode == OP_REGVAR) {
						NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
					} else {
						g_assert_not_reached ();
					}
					/* Compute the address */
					ins->opcode = OP_PADD;
					ins->sreg1 = reg3;
					ins->sreg2 = reg2;

					mono_bblock_insert_before_ins (bb, ins, load3);
					mono_bblock_insert_before_ins (bb, load3, load2);
					if (load)
						mono_bblock_insert_before_ins (bb, load2, load);
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;
				}

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);
			}

			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();
			}

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is a
			 * src register.
			 * FIXME:
			 */
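			/*
			 * Example (hypothetical registers): for "storei4_membase_reg [R5 + 8] <- R6"
			 * the spec lists the base register R5 as the dreg even though it is only
			 * read. The swap below moves it into sreg2 and builds spec2 accordingly, so
			 * the sreg handling code sees it as an ordinary source; the swap is undone
			 * after the sregs have been processed (see "if (store)" further down).
			 */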
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < num_sregs; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");
			}

			/***************/
			/*    DREG     */
			/***************/
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;
			int dreg_using_dest_to_membase_op = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						dreg_using_dest_to_membase_op = ins->dreg;
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;
					}
					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;
					}

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

#if SIZEOF_REGISTER != 8
					if (regtype == 'l') {
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					}
					else
#endif
					{
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;
						}
					}
				}

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;
				}

				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
					MonoInst *tmp;

					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
					tmp->inst_c1 = dreg;
					mono_bblock_insert_after_ins (bb, def_ins, tmp);
				}
			}

			/************/
			/*  SREGS   */
			/************/
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;

						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
							MonoInst *tmp;

							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
							/* var->dreg is a hreg */
							tmp->inst_c1 = sreg;
							mono_bblock_insert_after_ins (bb, ins, tmp);
						}

						continue;
					}

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;
					}

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;
								}
								g_assert (sreg != -1);
								if (var->dreg == dreg_using_dest_to_membase_op) {
									if (cfg->verbose_level > 2)
										printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg);
								} else {
									vreg_to_lvreg [var->dreg] = sreg;
								}
								if (lvregs_len >= lvregs_size) {
									guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
									memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
									lvregs = new_lvregs;
									lvregs_size *= 2;
								}
								lvregs [lvregs_len ++] = var->dreg;
							}
						}

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

#if SIZEOF_REGISTER != 8
						if (regtype == 'l') {
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
						else
#endif
						{
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
					}

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;
					}

					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
						MonoInst *tmp;

						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
						tmp->inst_c1 = var->dreg;
						mono_bblock_insert_after_ins (bb, ins, tmp);
					}
				}
			}
			mono_inst_set_src_registers (ins, sregs);

			if (dest_has_lvreg) {
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				if (lvregs_len >= lvregs_size) {
					guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
					memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
					lvregs = new_lvregs;
					lvregs_size *= 2;
				}
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;
			}

			if (store) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
			}

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);
			}

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);
		}

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;
				}

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;
				}
			}
		}
	}

	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			}
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
			}
		}
	}

	if (cfg->gsharedvt_locals_var_ins) {
		/* Nullify if unused */
		cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
		cfg->gsharedvt_locals_var_ins->inst_imm = 0;
	}

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
}

/*
 * FIXME:
 * - use 'iadd' instead of 'int_add'
 * - handling ovf opcodes: decompose in method_to_ir.
 * - unify iregs/fregs
 *   -> partly done, the missing parts are:
 *   - a more complete unification would involve unifying the hregs as well, so
 *     code wouldn't need if (fp) all over the place. but that would mean the hregs
 *     would no longer map to the machine hregs, so the code generators would need to
 *     be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
 *     wouldn't work any more. Duplicating the code in mono_local_regalloc () into
 *     fp/non-fp branches speeds it up by about 15%.
 * - use sext/zext opcodes instead of shifts
 * - add OP_ICALL
 * - get rid of TEMPLOADs if possible and use vregs instead
 * - clean up usage of OP_P/OP_ opcodes
 * - cleanup usage of DUMMY_USE
 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
 *   stack
 * - set the stack type and allocate a dreg in the EMIT_NEW macros
 * - get rid of all the <foo>2 stuff when the new JIT is ready.
 * - make sure handle_stack_args () is called before the branch is emitted
 * - when the new IR is done, get rid of all unused stuff
 * - COMPARE/BEQ as separate instructions or unify them ?
 *   - keeping them separate allows specialized compare instructions like
 *     compare_imm, compare_membase
 *   - most back ends unify fp compare+branch, fp compare+ceq
 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
 * - handle long shift opts on 32 bit platforms somehow: they require
 *   3 sregs (2 for arg1 and 1 for arg2)
 * - make byref a 'normal' type.
 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
 *   variable if needed.
 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
 *   like inline_method.
 * - remove inlining restrictions
 * - fix LNEG and enable cfold of INEG
 * - generalize x86 optimizations like ldelema as a peephole optimization
 * - add store_mem_imm for amd64
 * - optimize the loading of the interruption flag in the managed->native wrappers
 * - avoid special handling of OP_NOP in passes
 * - move code inserting instructions into one function/macro.
 * - try a coalescing phase after liveness analysis
 * - add float -> vreg conversion + local optimizations on !x86
 * - figure out how to handle decomposed branches during optimizations, ie.
 *   compare+branch, op_jump_table+op_br etc.
 * - promote RuntimeXHandles to vregs
 * - vtype cleanups:
 *   - add a NEW_VARLOADA_VREG macro
 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
 *   accessing vtype fields.
 * - get rid of I8CONST on 64 bit platforms
 * - dealing with the increase in code size due to branches created during opcode
 *   decomposition:
 *   - use extended basic blocks
 *     - all parts of the JIT
 *     - handle_global_vregs () && local regalloc
 *   - avoid introducing global vregs during decomposition, like 'vtable' in isinst
 *   - sources of increase in code size:
 *     - vtypes
 *     - long compares
 *     - isinst and castclass
 *     - lvregs not allocated to global registers even if used multiple times
 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
 *   meaningful.
 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
 * - add all micro optimizations from the old JIT
 * - put tree optimizations into the deadce pass
 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
 *   specific function.
 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
 *   fcompare + branchCC.
 * - create a helper function for allocating a stack slot, taking into account
 *   MONO_CFG_HAS_SPILLUP.
 * - merge r68207.
 * - optimize mono_regstate2_alloc_int/float.
 * - fix the pessimistic handling of variables accessed in exception handler blocks.
 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
 *   parts of the tree could be separated by other instructions, killing the tree
 *   arguments, or stores killing loads etc. Also, should we fold loads into other
 *   instructions if the result of the load is used multiple times ?
 * - make the REM_IMM optimization in mini-x86.c arch-independent.
 * - LAST MERGE: 108395.
 * - when returning vtypes in registers, generate IR and append it to the end of the
 *   last bb instead of doing it in the epilog.
 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
 */

/*

NOTES
-----

- When to decompose opcodes:
  - earlier: this makes some optimizations hard to implement, since the low level IR
    no longer contains the necessary information. But it is easier to do.
  - later: harder to implement, enables more optimizations.
- Branches inside bblocks:
  - created when decomposing complex opcodes.
    - branches to another bblock: harmless, but not tracked by the branch
      optimizations, so need to branch to a label at the start of the bblock.
    - branches to inside the same bblock: very problematic, trips up the local
      reg allocator. Can be fixed by splitting the current bblock, but that is a
      complex operation, since some local vregs can become global vregs etc.
- Local/global vregs:
  - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
    local register allocator.
  - global vregs: used in more than one bblock. Have an associated MonoMethodVar
    structure, created by mono_create_var (). Assigned to hregs or the stack by
    the global register allocator. (See the illustrative sketch after these notes.)
- When to do optimizations like alu->alu_imm:
  - earlier -> saves work later on since the IR will be smaller/simpler
  - later -> can work on more instructions
- Handling of valuetypes:
  - When a vtype is pushed on the stack, a new temporary is created, an
    instruction computing its address (LDADDR) is emitted and pushed on
    the stack. Need to optimize cases when the vtype is used immediately as in
    argument passing, stloc etc.
- Instead of the to_end stuff in the old JIT, simply call the function handling
  the values on the stack before emitting the last instruction of the bb.
*/
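
/*
 * Illustrative sketch (not part of the original source) of the local/global vreg
 * distinction described above, with hypothetical vreg numbers:
 *
 *   BB1: iconst R10 <- 1          R10 is only used inside BB1 -> local vreg,
 *        move   R11 <- R10        handled by the local register allocator.
 *   BB2: iadd   R12 <- R11 R11    R11 is live across a bblock boundary -> global
 *                                 vreg; mono_handle_global_vregs () gives it a
 *                                 MonoMethodVar via mono_create_var (), and the
 *                                 global allocator assigns it an hreg or stack slot.
 */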

#else /* !DISABLE_JIT */

MONO_EMPTY_SOURCE_FILE (method_to_ir);
#endif /* !DISABLE_JIT */