Fix CHECK_CANARY_FOR_OBJECT. (#11957)
[mono-project.git] / mono / mini / method-to-ir.c
blob fd0227caabc8acaead19fe735413b547c5256414
/**
 * \file
 * Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>
#include <mono/utils/mono-compiler.h>
#include "mini.h"

#ifndef DISABLE_JIT

#include <signal.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <math.h>
#include <string.h>
#include <ctype.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif

#include <mono/utils/memcheck.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/assembly.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/class-abi-details.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/exception-internals.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/debug-internals.h>
#include <mono/metadata/gc-internals.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-error-internals.h>
#include <mono/metadata/mono-basic-block.h>
#include <mono/metadata/reflection-internals.h>
#include <mono/utils/mono-threads-coop.h>
#include <mono/utils/mono-utils-debug.h>
#include <mono/utils/mono-logger-internals.h>
#include <mono/metadata/verify-internals.h>

#include "trace.h"

#include "ir-emit.h"

#include "jit-icalls.h"
#include "jit.h"
#include "debugger-agent.h"
#include "seq-points.h"
#include "aot-compiler.h"
#include "mini-llvm.h"
#include "mini-runtime.h"
#define BRANCH_COST 10
#define CALL_COST 10
/* Used for the JIT */
#define INLINE_LENGTH_LIMIT 20
/* Used for the LLVM JIT */
#define LLVM_JIT_INLINE_LENGTH_LIMIT 100
static const gboolean debug_tailcall = FALSE;          // logging
static const gboolean debug_tailcall_try_all = FALSE;  // consider any call followed by ret

gboolean
mono_tailcall_print_enabled (void)
{
	return debug_tailcall || MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG, MONO_TRACE_TAILCALL);
}

void
mono_tailcall_print (const char *format, ...)
{
	if (!mono_tailcall_print_enabled ())
		return;
	va_list args;
	va_start (args, format);
	g_printv (format, args);
	va_end (args);
}
/* These have 'cfg' as an implicit argument */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
		inline_failure (cfg, msg); \
		goto exception_exit; \
	} \
} while (0)
#define CHECK_CFG_EXCEPTION do {\
	if (cfg->exception_type != MONO_EXCEPTION_NONE) \
		goto exception_exit; \
} while (0)
#define FIELD_ACCESS_FAILURE(method, field) do { \
	field_access_failure ((cfg), (method), (field)); \
	goto exception_exit; \
} while (0)
#define GENERIC_SHARING_FAILURE(opcode) do { \
	if (cfg->gshared) { \
		gshared_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
} while (0)
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
} while (0)
#define OUT_OF_MEMORY_FAILURE do { \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
	mono_error_set_out_of_memory (&cfg->error, ""); \
	goto exception_exit; \
} while (0)
#define DISABLE_AOT(cfg) do { \
	if ((cfg)->verbose_level >= 2) \
		printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
	(cfg)->disable_aot = TRUE; \
} while (0)
#define LOAD_ERROR do { \
	break_on_unverified (); \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
	goto exception_exit; \
} while (0)

#define TYPE_LOAD_ERROR(klass) do { \
	cfg->exception_ptr = klass; \
	LOAD_ERROR; \
} while (0)

#define CHECK_CFG_ERROR do {\
	if (!mono_error_ok (&cfg->error)) { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
		goto mono_error_exit; \
	} \
} while (0)
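
/*
 * A sketch of how these macros are typically used inside the IL decode loop
 * (cfg, cmethod and the exception_exit/mono_error_exit labels are assumed to
 * be in scope there, as they are in mono_method_to_ir ()):
 *
 *   cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
 *   CHECK_CFG_ERROR;           // turns a set cfg->error into a jump to mono_error_exit
 *   if (!cmethod)
 *           LOAD_ERROR;        // records MONO_EXCEPTION_TYPE_LOAD and exits
 *   INLINE_FAILURE ("call");   // aborts compilation when inlining this method
 */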
static int stind_to_store_membase (int opcode);

int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);

static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
			  guchar *ip, guint real_offset, gboolean inline_always);
static MonoInst*
convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins);

/* helper methods signatures */
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_jit_thread_attach;
static MonoMethodSignature *helper_sig_get_tls_tramp;
static MonoMethodSignature *helper_sig_set_tls_tramp;
MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;

/* type loading helpers */
static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
static GENERATE_GET_CLASS_WITH_CACHE (iequatable, "System", "IEquatable`1")
static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer, "System.Collections.Generic", "GenericEqualityComparer`1");
/*
 * Instruction metadata
 */
#ifdef MINI_OP
#undef MINI_OP
#endif
#ifdef MINI_OP3
#undef MINI_OP3
#endif
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P
#define LREG IREG
#else
#define LREG 'l'
#endif
/* keep in sync with the enum in mini.h */
const char
mini_ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3

#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
 */
const gint8 mini_ins_sreg_counts[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
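
/*
 * The two tables above use the "X-macro" pattern: mini-ops.h is just a list
 * of MINI_OP()/MINI_OP3() invocations, so redefining the macros before each
 * include expands the same opcode list into different data. For example, an
 * entry of the form
 *
 *   MINI_OP(OP_IADD, "int_add", IREG, IREG, IREG)
 *
 * contributes 'i', 'i', 'i', ' ' to mini_ins_info and 2 to
 * mini_ins_sreg_counts.
 */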
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
	return alloc_ireg (cfg);
}

guint32
mono_alloc_lreg (MonoCompile *cfg)
{
	return alloc_lreg (cfg);
}

guint32
mono_alloc_freg (MonoCompile *cfg)
{
	return alloc_freg (cfg);
}

guint32
mono_alloc_preg (MonoCompile *cfg)
{
	return alloc_preg (cfg);
}

guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
	return alloc_dreg (cfg, stack_type);
}

/*
 * mono_alloc_ireg_ref:
 *
 *   Allocate an IREG, and mark it as holding a GC ref.
 */
guint32
mono_alloc_ireg_ref (MonoCompile *cfg)
{
	return alloc_ireg_ref (cfg);
}

/*
 * mono_alloc_ireg_mp:
 *
 *   Allocate an IREG, and mark it as holding a managed pointer.
 */
guint32
mono_alloc_ireg_mp (MonoCompile *cfg)
{
	return alloc_ireg_mp (cfg);
}

/*
 * mono_alloc_ireg_copy:
 *
 *   Allocate an IREG with the same GC type as VREG.
 */
guint32
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
{
	if (vreg_is_ref (cfg, vreg))
		return alloc_ireg_ref (cfg);
	else if (vreg_is_mp (cfg, vreg))
		return alloc_ireg_mp (cfg);
	else
		return alloc_ireg (cfg);
}
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
	if (type->byref)
		return OP_MOVE;

	type = mini_get_underlying_type (type);
handle_enum:
	switch (type->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
		return OP_MOVE;
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
		return OP_MOVE;
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return OP_MOVE;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return OP_MOVE;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return OP_MOVE;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
		return OP_MOVE;
#else
		return OP_LMOVE;
#endif
	case MONO_TYPE_R4:
		return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
	case MONO_TYPE_R8:
		return OP_FMOVE;
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass)) {
			type = mono_class_enum_basetype_internal (type->data.klass);
			goto handle_enum;
		}
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
			return OP_XMOVE;
		return OP_VMOVE;
	case MONO_TYPE_TYPEDBYREF:
		return OP_VMOVE;
	case MONO_TYPE_GENERICINST:
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
			return OP_XMOVE;
		type = m_class_get_byval_arg (type->data.generic_class->container_class);
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (cfg->gshared);
		if (mini_type_var_is_vt (type))
			return OP_VMOVE;
		else
			return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
	default:
		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
	}
	return -1;
}
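
/*
 * For example, an int64 value is moved with OP_LMOVE on a 32 bit target but
 * with a plain OP_MOVE when SIZEOF_REGISTER == 8, and a SIMD-sized valuetype
 * (MONO_CLASS_IS_SIMD) uses OP_XMOVE (an XREG move) instead of the generic
 * OP_VMOVE.
 */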
void
mono_print_bb (MonoBasicBlock *bb, const char *msg)
{
	int i;
	MonoInst *tree;
	GString *str = g_string_new ("");

	g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	g_string_append_printf (str, ", OUT: ");
	for (i = 0; i < bb->out_count; ++i)
		g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	g_string_append_printf (str, " ]\n");

	g_print ("%s", str->str);
	g_string_free (str, TRUE);

	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
}

void
mono_create_helper_signatures (void)
{
	helper_sig_domain_get = mono_create_icall_signature ("ptr");
	helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
	helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
	helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
	helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
	helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
}
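
/*
 * As far as we can tell, mono_create_icall_signature () parses a space
 * separated list where the first token is the return type and the rest are
 * the parameter types; e.g. "void ptr" above describes a helper that takes a
 * single pointer argument and returns nothing.
 */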
static MONO_NEVER_INLINE gboolean
break_on_unverified (void)
{
	if (mini_get_debug_options ()->break_on_unverified) {
		G_BREAKPOINT ();
		return TRUE;
	}
	return FALSE;
}

static void
clear_cfg_error (MonoCompile *cfg)
{
	mono_error_cleanup (&cfg->error);
	error_init (&cfg->error);
}

static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
{
	char *method_fname = mono_method_full_name (method, TRUE);
	char *field_fname = mono_field_full_name (field);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
	mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
	g_free (method_fname);
	g_free (field_fname);
}

static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
{
	if (cfg->verbose_level >= 2)
		printf ("inline failed: %s\n", msg);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}

static MONO_NEVER_INLINE void
gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
	if (cfg->verbose_level > 2)
		printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name (opcode), line);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}

static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
{
	cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg->current_method->klass), m_class_get_name (cfg->current_method->klass), cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
	if (cfg->verbose_level >= 2)
		printf ("%s\n", cfg->exception_message);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
}

void
mini_set_inline_failure (MonoCompile *cfg, const char *msg)
{
	if (cfg->verbose_level >= 2)
		printf ("inline failed: %s\n", msg);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
}
/*
 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
 * foo<T> (int i) { ldarg.0; box T; }
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2) \
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	break_on_unverified (); \
	goto unverified; \
} while (0)

#define GET_BBLOCK(cfg,tblock,ip) do { \
	(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
	if (!(tblock)) { \
		if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
		NEW_BBLOCK (cfg, (tblock)); \
		(tblock)->cil_code = (ip); \
		ADD_BBLOCK (cfg, (tblock)); \
	} \
} while (0)
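
/*
 * Typical use while decoding IL (a sketch; 'target' is a branch target
 * address computed from the IL stream, as in the ADD_BINCOND macro below):
 *
 *   MonoBasicBlock *tblock;
 *   GET_BBLOCK (cfg, tblock, target);
 *   link_bblock (cfg, cfg->cbb, tblock);
 */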
/* Emit conversions so both operands of a binary opcode are of the same type */
static void
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
{
	MonoInst *arg1 = *arg1_ref;
	MonoInst *arg2 = *arg2_ref;

	if (cfg->r4fp &&
		((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
		 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
		MonoInst *conv;

		/* Mixing r4/r8 is allowed by the spec */
		if (arg1->type == STACK_R4) {
			int dreg = alloc_freg (cfg);

			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
			conv->type = STACK_R8;
			ins->sreg1 = dreg;
			*arg1_ref = conv;
		}
		if (arg2->type == STACK_R4) {
			int dreg = alloc_freg (cfg);

			EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
			conv->type = STACK_R8;
			ins->sreg2 = dreg;
			*arg2_ref = conv;
		}
	}

#if SIZEOF_REGISTER == 8
	/* FIXME: Need to add many more cases */
	if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
		MonoInst *widen;

		int dr = alloc_preg (cfg);
		EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
		(ins)->sreg2 = widen->dreg;
	}
#endif
}

#define ADD_BINOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	sp -= 2; \
	ins->sreg1 = sp [0]->dreg; \
	ins->sreg2 = sp [1]->dreg; \
	type_from_op (cfg, ins, sp [0], sp [1]); \
	CHECK_TYPE (ins); \
	/* Have to insert a widening op */ \
	add_widen_op (cfg, ins, &sp [0], &sp [1]); \
	ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	*sp++ = mono_decompose_opcode ((cfg), (ins)); \
} while (0)

#define ADD_UNOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	sp--; \
	ins->sreg1 = sp [0]->dreg; \
	type_from_op (cfg, ins, sp [0], NULL); \
	CHECK_TYPE (ins); \
	(ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	*sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)

#define ADD_BINCOND(next_block) do { \
	MonoInst *cmp; \
	sp -= 2; \
	MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
	cmp->sreg1 = sp [0]->dreg; \
	cmp->sreg2 = sp [1]->dreg; \
	add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
	type_from_op (cfg, cmp, sp [0], sp [1]); \
	CHECK_TYPE (cmp); \
	type_from_op (cfg, ins, sp [0], sp [1]); \
	ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
	GET_BBLOCK (cfg, tblock, target); \
	link_bblock (cfg, cfg->cbb, tblock); \
	ins->inst_true_bb = tblock; \
	if ((next_block)) { \
		link_bblock (cfg, cfg->cbb, (next_block)); \
		ins->inst_false_bb = (next_block); \
		start_new_bblock = 1; \
	} else { \
		GET_BBLOCK (cfg, tblock, next_ip); \
		link_bblock (cfg, cfg->cbb, tblock); \
		ins->inst_false_bb = tblock; \
		start_new_bblock = 2; \
	} \
	if (sp != stack_start) { \
		handle_stack_args (cfg, stack_start, sp - stack_start); \
		CHECK_UNVERIFIABLE (cfg); \
	} \
	MONO_ADD_INS (cfg->cbb, cmp); \
	MONO_ADD_INS (cfg->cbb, ins); \
} while (0)
/**
 * link_bblock: Links two basic blocks
 *
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block and the 'to' argument is the block
 * that control flow reaches after 'from'.
 */
static void
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	MonoBasicBlock **newa;
	int i, found;

#if 0
	if (from->cil_code) {
		if (to->cil_code)
			printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
		else
			printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
	} else {
		if (to->cil_code)
			printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
		else
			printf ("edge from entry to exit\n");
	}
#endif

	found = FALSE;
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
		for (i = 0; i < from->out_count; ++i) {
			newa [i] = from->out_bb [i];
		}
		newa [i] = to;
		from->out_count++;
		from->out_bb = newa;
	}

	found = FALSE;
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
		for (i = 0; i < to->in_count; ++i) {
			newa [i] = to->in_bb [i];
		}
		newa [i] = from;
		to->in_count++;
		to->in_bb = newa;
	}
}

void
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	link_bblock (cfg, from, to);
}
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region);

static void
mark_bb_in_region (MonoCompile *cfg, guint region, uint32_t start, uint32_t end)
{
	MonoBasicBlock *bb = cfg->cil_offset_to_bb [start];

	//start must exist in cil_offset_to_bb as those are il offsets used by EH which should have GET_BBLOCK early.
	g_assert (bb);

	if (cfg->verbose_level > 1)
		g_print ("FIRST BB for %d is BB_%d\n", start, bb->block_num);
	for (; bb && bb->real_offset < end; bb = bb->next_bb) {
		//no one claimed this bb, take it.
		if (bb->region == -1) {
			bb->region = region;
			continue;
		}

		//current region is an early handler, bail
		if ((bb->region & (0xf << 4)) != MONO_REGION_TRY) {
			continue;
		}

		//current region is a try, only overwrite if new region is a handler
		if ((region & (0xf << 4)) != MONO_REGION_TRY) {
			bb->region = region;
		}
	}

	if (cfg->spvars)
		mono_create_spvar_for_region (cfg, region);
}

static void
compute_bb_regions (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	MonoMethodHeader *header = cfg->header;
	int i;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
		bb->region = -1;

	for (i = 0; i < header->num_clauses; ++i) {
		MonoExceptionClause *clause = &header->clauses [i];

		if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
			mark_bb_in_region (cfg, ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags, clause->data.filter_offset, clause->handler_offset);

		guint handler_region;
		if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
			handler_region = ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
		else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
			handler_region = ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
		else
			handler_region = ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;

		mark_bb_in_region (cfg, handler_region, clause->handler_offset, clause->handler_offset + clause->handler_len);
		mark_bb_in_region (cfg, ((i + 1) << 8) | clause->flags, clause->try_offset, clause->try_offset + clause->try_len);
	}

	if (cfg->verbose_level > 2) {
		MonoBasicBlock *bb;
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			g_print ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
	}
}
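
/*
 * The region value packs the clause index and the clause kind: the bits from
 * 8 up hold (clause index + 1) and the low byte holds the MONO_REGION_* kind
 * together with the clause flags. So, for example, the try part of the first
 * clause gets region 0x100 | clause->flags, while the finally handler of the
 * third clause gets 0x300 | MONO_REGION_FINALLY | clause->flags.
 */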
static gboolean
ip_in_finally_clause (MonoCompile *cfg, int offset)
{
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	int i;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
			continue;

		if (MONO_OFFSET_IN_HANDLER (clause, offset))
			return TRUE;
	}
	return FALSE;
}

/* Find clauses between ip and target, from inner to outer */
static GList*
mono_find_leave_clauses (MonoCompile *cfg, guchar *ip, guchar *target)
{
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	int i;
	GList *res = NULL;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			MonoLeaveClause *leave = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLeaveClause));
			leave->index = i;
			leave->clause = clause;

			res = g_list_append_mempool (cfg->mempool, res, leave);
		}
	}
	return res;
}

static void
mono_create_spvar_for_region (MonoCompile *cfg, int region)
{
	MonoInst *var;

	var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
	if (var)
		return;

	var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;

	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
}

MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
	return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}

static MonoInst*
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
{
	MonoInst *var;

	var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
	if (var)
		return var;

	var = mono_compile_create_var (cfg, mono_get_object_type (), OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_VOLATILE;

	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);

	return var;
}
/*
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 */
void
mini_type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
{
	MonoClass *klass;

	type = mini_get_underlying_type (type);
	inst->klass = klass = mono_class_from_mono_type_internal (type);
	if (type->byref) {
		inst->type = STACK_MP;
		return;
	}

handle_enum:
	switch (type->type) {
	case MONO_TYPE_VOID:
		inst->type = STACK_INV;
		return;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		inst->type = STACK_I4;
		return;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
		return;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		return;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		inst->type = STACK_I8;
		return;
	case MONO_TYPE_R4:
		inst->type = cfg->r4_stack_type;
		break;
	case MONO_TYPE_R8:
		inst->type = STACK_R8;
		return;
	case MONO_TYPE_VALUETYPE:
		if (m_class_is_enumtype (type->data.klass)) {
			type = mono_class_enum_basetype_internal (type->data.klass);
			goto handle_enum;
		} else {
			inst->klass = klass;
			inst->type = STACK_VTYPE;
			return;
		}
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
		return;
	case MONO_TYPE_GENERICINST:
		type = m_class_get_byval_arg (type->data.generic_class->container_class);
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (cfg->gshared);
		if (mini_is_gsharedvt_type (type)) {
			g_assert (cfg->gsharedvt);
			inst->type = STACK_VTYPE;
		} else {
			mini_type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
		}
		return;
	default:
		g_error ("unknown type 0x%02x in eval stack type", type->type);
	}
}
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 */
static const char
bin_num_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
	{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
};
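
/*
 * These tables are indexed by the STACK_* type of the two operands (see the
 * "Inv i L p F & O vt r4" legend on bin_comp_table below). For example,
 * bin_num_table [STACK_I4] [STACK_PTR] == STACK_PTR says that adding an
 * int32 to a native int yields a native int, while STACK_INV marks the
 * pairing as invalid IL.
 */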
static const char
neg_table [] = {
	STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
};

/* reduce the size of this table */
static const char
bin_int_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};

static const char
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/*	Inv i  L  p  F  &  O  vt r4 */
	{0},
	{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
	{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
	{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
	{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
	{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
	{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
	{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
};

/* reduce the size of this table */
static const char
shift_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode.
 */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
static const guint16
binops_op_map [STACK_MAX] = {
	0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
};

/* handles from CEE_NEG to CEE_CONV_U8 */
static const guint16
unops_op_map [STACK_MAX] = {
	0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
};

/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
static const guint16
ovfops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
};

/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
static const guint16
ovf2ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
};

/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
static const guint16
ovf3ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
};

/* handles from CEE_BEQ to CEE_BLT_UN */
static const guint16
beqops_op_map [STACK_MAX] = {
	0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
};

/* handles from CEE_CEQ to CEE_CLT_UN */
static const guint16
ceqops_op_map [STACK_MAX] = {
	0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
};
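
/*
 * Each map entry is the distance between a type specific opcode and the
 * generic CIL opcode, so type_from_op () below can specialize an opcode with
 * a single addition. E.g. for an add whose result type is STACK_I4:
 *
 *   ins->opcode += binops_op_map [ins->type];
 *   // MONO_CEE_ADD + (OP_IADD - CEE_ADD) == OP_IADD
 */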
/*
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 */
static void
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
	switch (ins->opcode) {
	/* binops */
	case MONO_CEE_ADD:
	case MONO_CEE_SUB:
	case MONO_CEE_MUL:
	case MONO_CEE_DIV:
	case MONO_CEE_REM:
		/* FIXME: check unverifiable args for STACK_MP */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case MONO_CEE_DIV_UN:
	case MONO_CEE_REM_UN:
	case MONO_CEE_AND:
	case MONO_CEE_OR:
	case MONO_CEE_XOR:
		ins->type = bin_int_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case MONO_CEE_SHL:
	case MONO_CEE_SHR:
	case MONO_CEE_SHR_UN:
		ins->type = shift_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
	case OP_ICOMPARE:
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE;
		else if (src1->type == STACK_R4)
			ins->opcode = OP_RCOMPARE;
		else if (src1->type == STACK_R8)
			ins->opcode = OP_FCOMPARE;
		else
			ins->opcode = OP_ICOMPARE;
		break;
	case OP_ICOMPARE_IMM:
		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE_IMM;
		break;
	case MONO_CEE_BEQ:
	case MONO_CEE_BGE:
	case MONO_CEE_BGT:
	case MONO_CEE_BLE:
	case MONO_CEE_BLT:
	case MONO_CEE_BNE_UN:
	case MONO_CEE_BGE_UN:
	case MONO_CEE_BGT_UN:
	case MONO_CEE_BLE_UN:
	case MONO_CEE_BLT_UN:
		ins->opcode += beqops_op_map [src1->type];
		break;
	case OP_CEQ:
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		break;
	case OP_CGT:
	case OP_CGT_UN:
	case OP_CLT:
	case OP_CLT_UN:
		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		break;
	/* unops */
	case MONO_CEE_NEG:
		ins->type = neg_table [src1->type];
		ins->opcode += unops_op_map [ins->type];
		break;
	case MONO_CEE_NOT:
		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
			ins->type = src1->type;
		else
			ins->type = STACK_INV;
		ins->opcode += unops_op_map [ins->type];
		break;
	case MONO_CEE_CONV_I1:
	case MONO_CEE_CONV_I2:
	case MONO_CEE_CONV_I4:
	case MONO_CEE_CONV_U4:
		ins->type = STACK_I4;
		ins->opcode += unops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_R_UN:
		ins->type = STACK_R8;
		switch (src1->type) {
		case STACK_I4:
		case STACK_PTR:
			ins->opcode = OP_ICONV_TO_R_UN;
			break;
		case STACK_I8:
			ins->opcode = OP_LCONV_TO_R_UN;
			break;
		}
		break;
	case MONO_CEE_CONV_OVF_I1:
	case MONO_CEE_CONV_OVF_U1:
	case MONO_CEE_CONV_OVF_I2:
	case MONO_CEE_CONV_OVF_U2:
	case MONO_CEE_CONV_OVF_I4:
	case MONO_CEE_CONV_OVF_U4:
		ins->type = STACK_I4;
		ins->opcode += ovf3ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_OVF_I_UN:
	case MONO_CEE_CONV_OVF_U_UN:
		ins->type = STACK_PTR;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_OVF_I1_UN:
	case MONO_CEE_CONV_OVF_I2_UN:
	case MONO_CEE_CONV_OVF_I4_UN:
	case MONO_CEE_CONV_OVF_U1_UN:
	case MONO_CEE_CONV_OVF_U2_UN:
	case MONO_CEE_CONV_OVF_U4_UN:
		ins->type = STACK_I4;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_U:
		ins->type = STACK_PTR;
		switch (src1->type) {
		case STACK_I4:
			ins->opcode = OP_ICONV_TO_U;
			break;
		case STACK_PTR:
		case STACK_MP:
		case STACK_OBJ:
#if TARGET_SIZEOF_VOID_P == 8
			ins->opcode = OP_LCONV_TO_U;
#else
			ins->opcode = OP_MOVE;
#endif
			break;
		case STACK_I8:
			ins->opcode = OP_LCONV_TO_U;
			break;
		case STACK_R8:
			ins->opcode = OP_FCONV_TO_U;
			break;
		case STACK_R4:
			if (TARGET_SIZEOF_VOID_P == 8)
				ins->opcode = OP_RCONV_TO_U8;
			else
				ins->opcode = OP_RCONV_TO_U4;
			break;
		}
		break;
	case MONO_CEE_CONV_I8:
	case MONO_CEE_CONV_U8:
		ins->type = STACK_I8;
		ins->opcode += unops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_OVF_I8:
	case MONO_CEE_CONV_OVF_U8:
		ins->type = STACK_I8;
		ins->opcode += ovf3ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_OVF_U8_UN:
	case MONO_CEE_CONV_OVF_I8_UN:
		ins->type = STACK_I8;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_R4:
		ins->type = cfg->r4_stack_type;
		ins->opcode += unops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_R8:
		ins->type = STACK_R8;
		ins->opcode += unops_op_map [src1->type];
		break;
	case OP_CKFINITE:
		ins->type = STACK_R8;
		break;
	case MONO_CEE_CONV_U2:
	case MONO_CEE_CONV_U1:
		ins->type = STACK_I4;
		ins->opcode += ovfops_op_map [src1->type];
		break;
	case MONO_CEE_CONV_I:
	case MONO_CEE_CONV_OVF_I:
	case MONO_CEE_CONV_OVF_U:
		ins->type = STACK_PTR;
		ins->opcode += ovfops_op_map [src1->type];
		break;
	case MONO_CEE_ADD_OVF:
	case MONO_CEE_ADD_OVF_UN:
	case MONO_CEE_MUL_OVF:
	case MONO_CEE_MUL_OVF_UN:
	case MONO_CEE_SUB_OVF:
	case MONO_CEE_SUB_OVF_UN:
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += ovfops_op_map [src1->type];
		if (ins->type == STACK_R8)
			ins->type = STACK_INV;
		break;
	case OP_LOAD_MEMBASE:
		ins->type = STACK_PTR;
		break;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
	case OP_LOADI4_MEMBASE:
	case OP_LOADU4_MEMBASE:
		ins->type = STACK_PTR;
		break;
	case OP_LOADI8_MEMBASE:
		ins->type = STACK_I8;
		break;
	case OP_LOADR4_MEMBASE:
		ins->type = cfg->r4_stack_type;
		break;
	case OP_LOADR8_MEMBASE:
		ins->type = STACK_R8;
		break;
	default:
		g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
		break;
	}

	if (ins->type == STACK_MP)
		ins->klass = mono_defaults.object_class;
}
void
mini_type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
{
	type_from_op (cfg, ins, src1, src2);
}

static MonoClass*
ldind_to_type (int op)
{
	switch (op) {
	case MONO_CEE_LDIND_I1: return mono_defaults.sbyte_class;
	case MONO_CEE_LDIND_U1: return mono_defaults.byte_class;
	case MONO_CEE_LDIND_I2: return mono_defaults.int16_class;
	case MONO_CEE_LDIND_U2: return mono_defaults.uint16_class;
	case MONO_CEE_LDIND_I4: return mono_defaults.int32_class;
	case MONO_CEE_LDIND_U4: return mono_defaults.uint32_class;
	case MONO_CEE_LDIND_I8: return mono_defaults.int64_class;
	case MONO_CEE_LDIND_I: return mono_defaults.int_class;
	case MONO_CEE_LDIND_R4: return mono_defaults.single_class;
	case MONO_CEE_LDIND_R8: return mono_defaults.double_class;
	case MONO_CEE_LDIND_REF: return mono_defaults.object_class; //FIXME we should try to return a more specific type
	default: g_error ("Unknown ldind type %d", op);
	}
}
#if 0

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

static int
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
{
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
{
	if (!cfg->domainvar) {
		/* Make sure we don't generate references after checking whether to init this */
		g_assert (!cfg->domainvar_inited);
		cfg->domainvar = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		/* Avoid optimizing it away */
		cfg->domainvar->flags |= MONO_INST_VOLATILE;
	}
	return cfg->domainvar;
}

/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling.
 */
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
	if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
		return NULL;
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
	}
	return cfg->got_var;
}

static void
mono_create_rgctx_var (MonoCompile *cfg)
{
	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
	}
}

static MonoInst *
mono_get_vtable_var (MonoCompile *cfg)
{
	g_assert (cfg->gshared);

	mono_create_rgctx_var (cfg);

	return cfg->rgctx_var;
}
static MonoType*
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return mono_get_int32_type ();
	case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
	case STACK_PTR: return mono_get_int_type ();
	case STACK_R4: return m_class_get_byval_arg (mono_defaults.single_class);
	case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
	case STACK_MP:
		return m_class_get_this_arg (ins->klass);
	case STACK_OBJ: return mono_get_object_type ();
	case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
	default:
		g_error ("stack type %d to monotype not handled\n", ins->type);
	}
	return NULL;
}

static G_GNUC_UNUSED int
type_to_stack_type (MonoCompile *cfg, MonoType *t)
{
	t = mono_type_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return STACK_I4;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return STACK_OBJ;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return STACK_I8;
	case MONO_TYPE_R4:
		return cfg->r4_stack_type;
	case MONO_TYPE_R8:
		return STACK_R8;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		return STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (t))
			return STACK_VTYPE;
		else
			return STACK_OBJ;
		break;
	default:
		g_assert_not_reached ();
	}
	return -1;
}
static MonoClass*
array_access_to_klass (int opcode)
{
	switch (opcode) {
	case MONO_CEE_LDELEM_U1:
		return mono_defaults.byte_class;
	case MONO_CEE_LDELEM_U2:
		return mono_defaults.uint16_class;
	case MONO_CEE_LDELEM_I:
	case MONO_CEE_STELEM_I:
		return mono_defaults.int_class;
	case MONO_CEE_LDELEM_I1:
	case MONO_CEE_STELEM_I1:
		return mono_defaults.sbyte_class;
	case MONO_CEE_LDELEM_I2:
	case MONO_CEE_STELEM_I2:
		return mono_defaults.int16_class;
	case MONO_CEE_LDELEM_I4:
	case MONO_CEE_STELEM_I4:
		return mono_defaults.int32_class;
	case MONO_CEE_LDELEM_U4:
		return mono_defaults.uint32_class;
	case MONO_CEE_LDELEM_I8:
	case MONO_CEE_STELEM_I8:
		return mono_defaults.int64_class;
	case MONO_CEE_LDELEM_R4:
	case MONO_CEE_STELEM_R4:
		return mono_defaults.single_class;
	case MONO_CEE_LDELEM_R8:
	case MONO_CEE_STELEM_R8:
		return mono_defaults.double_class;
	case MONO_CEE_LDELEM_REF:
	case MONO_CEE_STELEM_REF:
		return mono_defaults.object_class;
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * We try to share variables when possible
 */
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;
	MonoType *type;

	type = type_from_stack_type (ins);

	/* inlining can result in deeper stacks */
	if (cfg->inlined_method || slot >= cfg->header->max_stack)
		return mono_compile_create_var (cfg, type, OP_LOCAL);

	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type, OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		res = mono_compile_create_var (cfg, type, OP_LOCAL);
	}
	return res;
}

static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
	}
}
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single join point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the join point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
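/*
 * Example: if a bblock ends with one int32 left on the stack and branches to
 * a join point, this function allocates (or reuses) a shared local, emits
 * "local = value" before the branch, and records the local in the target's
 * in_stack so the value is reloaded from there when the target is compiled.
 */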
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpose, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		sp [i] = convert_value (cfg, locals [i]->inst_vtype, sp [i]);
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					sp [i] = convert_value (cfg, outb->in_stack [i]->inst_vtype, sp [i]);
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
MonoInst*
mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
{
	MonoInst *ins;

	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
	} else {
		MonoJumpInfo ji;
		gpointer target;
		ERROR_DECL (error);

		ji.type = patch_type;
		ji.data.target = data;
		target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, error);
		mono_error_assert_ok (error);

		EMIT_NEW_PCONST (cfg, ins, target);
	}
	return ins;
}
static MonoInst*
mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
{
	int tls_offset = mono_tls_get_tls_offset (key);

	if (cfg->compile_aot)
		return NULL;

	if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_TLS_GET);
		ins->dreg = mono_alloc_preg (cfg);
		ins->inst_offset = tls_offset;
		return ins;
	}
	return NULL;
}

static MonoInst*
mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
{
	MonoInst *fast_tls = NULL;

	if (!mini_get_debug_options ()->use_fallback_tls)
		fast_tls = mono_create_fast_tls_getter (cfg, key);

	if (fast_tls) {
		MONO_ADD_INS (cfg->cbb, fast_tls);
		return fast_tls;
	}

	if (cfg->compile_aot) {
		MonoInst *addr;
		/*
		 * tls getters are critical pieces of code and we don't want to resolve them
		 * through the standard plt/tramp mechanism since we might expose ourselves
		 * to crashes and infinite recursions.
		 */
		EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
		return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
	} else {
		gpointer getter = mono_tls_get_tls_getter (key, FALSE);
		return mono_emit_jit_icall (cfg, getter, NULL);
	}
}
/*
 * emit_push_lmf:
 *
 *   Emit IR to push the current LMF onto the LMF stack.
 */
static void
emit_push_lmf (MonoCompile *cfg)
{
	/*
	 * Emit IR to push the LMF:
	 * lmf_addr = <lmf_addr from tls>
	 * lmf->lmf_addr = lmf_addr
	 * lmf->prev_lmf = *lmf_addr
	 * *lmf_addr = lmf
	 */
	MonoInst *ins, *lmf_ins;

	if (!cfg->lmf_ir)
		return;

	int lmf_reg, prev_lmf_reg;
	/*
	 * Store lmf_addr in a variable, so it can be allocated to a global register.
	 */
	if (!cfg->lmf_addr_var)
		cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);

#ifdef HOST_WIN32
	ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
	g_assert (ins);
	int jit_tls_dreg = ins->dreg;

	lmf_reg = alloc_preg (cfg);
	EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
#else
	lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
	g_assert (lmf_ins);
#endif
	lmf_ins->dreg = cfg->lmf_addr_var->dreg;

	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
	lmf_reg = ins->dreg;

	prev_lmf_reg = alloc_preg (cfg);
	/* Save previous_lmf */
	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
	/* Set new lmf */
	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
}

/*
 * emit_pop_lmf:
 *
 *   Emit IR to pop the current LMF from the LMF stack.
 */
static void
emit_pop_lmf (MonoCompile *cfg)
{
	int lmf_reg, lmf_addr_reg;
	MonoInst *ins;

	if (!cfg->lmf_ir)
		return;

	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
	lmf_reg = ins->dreg;

	int prev_lmf_reg;
	/*
	 * Emit IR to pop the LMF:
	 * *(lmf->lmf_addr) = lmf->prev_lmf
	 */
	/* This could be called before emit_push_lmf () */
	if (!cfg->lmf_addr_var)
		cfg->lmf_addr_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
	lmf_addr_reg = cfg->lmf_addr_var->dreg;

	prev_lmf_reg = alloc_preg (cfg);
	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
}
1809 * target_type_is_incompatible:
1810 * @cfg: MonoCompile context
1812 * Check that the item @arg on the evaluation stack can be stored
1813 * in the target type (can be a local, or field, etc).
1814 * The cfg arg can be used to check if we need verification or just
1815 * validity checks.
1817 * Returns: non-0 value if arg can't be stored on a target.
1819 static int
1820 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1822 MonoType *simple_type;
1823 MonoClass *klass;
1825 if (target->byref) {
1826 /* FIXME: check that the pointed to types match */
1827 if (arg->type == STACK_MP) {
1828 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1829 MonoClass *target_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target))));
1830 MonoClass *source_class_lowered = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass)));
1832 /* if the target is native int& or X* or same type */
1833 if (target->type == MONO_TYPE_I || target->type == MONO_TYPE_PTR || target_class_lowered == source_class_lowered)
1834 return 0;
1836 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1837 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered)) &&
1838 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1839 return 0;
1840 return 1;
1842 if (arg->type == STACK_PTR)
1843 return 0;
1844 return 1;
1847 simple_type = mini_get_underlying_type (target);
1848 switch (simple_type->type) {
1849 case MONO_TYPE_VOID:
1850 return 1;
1851 case MONO_TYPE_I1:
1852 case MONO_TYPE_U1:
1853 case MONO_TYPE_I2:
1854 case MONO_TYPE_U2:
1855 case MONO_TYPE_I4:
1856 case MONO_TYPE_U4:
1857 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1858 return 1;
1859 return 0;
1860 case MONO_TYPE_PTR:
1861 /* STACK_MP is needed when setting pinned locals */
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1863 return 1;
1864 return 0;
1865 case MONO_TYPE_I:
1866 case MONO_TYPE_U:
1867 case MONO_TYPE_FNPTR:
1869 * Some opcodes like ldloca return 'transient pointers' which can be stored
1870 * in native int. (#688008).
1872 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1873 return 1;
1874 return 0;
1875 case MONO_TYPE_CLASS:
1876 case MONO_TYPE_STRING:
1877 case MONO_TYPE_OBJECT:
1878 case MONO_TYPE_SZARRAY:
1879 case MONO_TYPE_ARRAY:
1880 if (arg->type != STACK_OBJ)
1881 return 1;
1882 /* FIXME: check type compatibility */
1883 return 0;
1884 case MONO_TYPE_I8:
1885 case MONO_TYPE_U8:
1886 if (arg->type != STACK_I8)
1887 return 1;
1888 return 0;
1889 case MONO_TYPE_R4:
1890 if (arg->type != cfg->r4_stack_type)
1891 return 1;
1892 return 0;
1893 case MONO_TYPE_R8:
1894 if (arg->type != STACK_R8)
1895 return 1;
1896 return 0;
1897 case MONO_TYPE_VALUETYPE:
1898 if (arg->type != STACK_VTYPE)
1899 return 1;
1900 klass = mono_class_from_mono_type_internal (simple_type);
1901 if (klass != arg->klass)
1902 return 1;
1903 return 0;
1904 case MONO_TYPE_TYPEDBYREF:
1905 if (arg->type != STACK_VTYPE)
1906 return 1;
1907 klass = mono_class_from_mono_type_internal (simple_type);
1908 if (klass != arg->klass)
1909 return 1;
1910 return 0;
1911 case MONO_TYPE_GENERICINST:
1912 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1913 MonoClass *target_class;
1914 if (arg->type != STACK_VTYPE)
1915 return 1;
1916 klass = mono_class_from_mono_type_internal (simple_type);
1917 target_class = mono_class_from_mono_type_internal (target);
1918 /* The second case is needed when doing partial sharing */
1919 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg->klass))))
1920 return 1;
1921 return 0;
1922 } else {
1923 if (arg->type != STACK_OBJ)
1924 return 1;
1925 /* FIXME: check type compatibility */
1926 return 0;
1928 case MONO_TYPE_VAR:
1929 case MONO_TYPE_MVAR:
1930 g_assert (cfg->gshared);
1931 if (mini_type_var_is_vt (simple_type)) {
1932 if (arg->type != STACK_VTYPE)
1933 return 1;
1934 } else {
1935 if (arg->type != STACK_OBJ)
1936 return 1;
1938 return 0;
1939 default:
1940 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1942 return 1;
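/*
 * Usage sketch (hypothetical caller; 'local_type' and 'value' are illustrative
 * names, not identifiers from this file):
 *
 *   if (target_type_is_incompatible (cfg, local_type, value))
 *           goto unverified; // the stack item cannot be stored in the target
 */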
1946 * convert_value:
1948 * Emit some implicit conversions which are not part of the .net spec, but are allowed by MS.NET.
1950 static MonoInst*
1951 convert_value (MonoCompile *cfg, MonoType *type, MonoInst *ins)
1953 if (!cfg->r4fp)
1954 return ins;
1955 type = mini_get_underlying_type (type);
1956 switch (type->type) {
1957 case MONO_TYPE_R4:
1958 if (ins->type == STACK_R8) {
1959 int dreg = alloc_freg (cfg);
1960 MonoInst *conv;
1961 EMIT_NEW_UNALU (cfg, conv, OP_FCONV_TO_R4, dreg, ins->dreg);
1962 conv->type = STACK_R4;
1963 return conv;
1965 break;
1966 case MONO_TYPE_R8:
1967 if (ins->type == STACK_R4) {
1968 int dreg = alloc_freg (cfg);
1969 MonoInst *conv;
1970 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, ins->dreg);
1971 conv->type = STACK_R8;
1972 return conv;
1974 break;
1975 default:
1976 break;
1978 return ins;
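/*
 * Example (a sketch; 'float_type' and 'value' are illustrative names): storing
 * a STACK_R8 value into an R4 destination goes through
 *
 *   value = convert_value (cfg, float_type, value); // emits OP_FCONV_TO_R4
 */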
1982 * Prepare arguments for passing to a function call.
1983 * Return a non-zero value if the arguments can't be passed to the given
1984 * signature.
1985 * The type checks are not yet complete and some conversions may need
1986 * casts on 32 or 64 bit architectures.
1988 * FIXME: implement this using target_type_is_incompatible ()
1990 static gboolean
1991 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1993 MonoType *simple_type;
1994 int i;
1996 if (sig->hasthis) {
1997 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1998 return TRUE;
1999 args++;
2001 for (i = 0; i < sig->param_count; ++i) {
2002 if (sig->params [i]->byref) {
2003 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2004 return TRUE;
2005 continue;
2007 simple_type = mini_get_underlying_type (sig->params [i]);
2008 handle_enum:
2009 switch (simple_type->type) {
2010 case MONO_TYPE_VOID:
2011 return TRUE;
2012 case MONO_TYPE_I1:
2013 case MONO_TYPE_U1:
2014 case MONO_TYPE_I2:
2015 case MONO_TYPE_U2:
2016 case MONO_TYPE_I4:
2017 case MONO_TYPE_U4:
2018 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2019 return TRUE;
2020 continue;
2021 case MONO_TYPE_I:
2022 case MONO_TYPE_U:
2023 case MONO_TYPE_PTR:
2024 case MONO_TYPE_FNPTR:
2025 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2026 return TRUE;
2027 continue;
2028 case MONO_TYPE_CLASS:
2029 case MONO_TYPE_STRING:
2030 case MONO_TYPE_OBJECT:
2031 case MONO_TYPE_SZARRAY:
2032 case MONO_TYPE_ARRAY:
2033 if (args [i]->type != STACK_OBJ)
2034 return TRUE;
2035 continue;
2036 case MONO_TYPE_I8:
2037 case MONO_TYPE_U8:
2038 if (args [i]->type != STACK_I8)
2039 return TRUE;
2040 continue;
2041 case MONO_TYPE_R4:
2042 if (args [i]->type != cfg->r4_stack_type)
2043 return TRUE;
2044 continue;
2045 case MONO_TYPE_R8:
2046 if (args [i]->type != STACK_R8)
2047 return TRUE;
2048 continue;
2049 case MONO_TYPE_VALUETYPE:
2050 if (m_class_is_enumtype (simple_type->data.klass)) {
2051 simple_type = mono_class_enum_basetype_internal (simple_type->data.klass);
2052 goto handle_enum;
2054 if (args [i]->type != STACK_VTYPE)
2055 return TRUE;
2056 continue;
2057 case MONO_TYPE_TYPEDBYREF:
2058 if (args [i]->type != STACK_VTYPE)
2059 return TRUE;
2060 continue;
2061 case MONO_TYPE_GENERICINST:
2062 simple_type = m_class_get_byval_arg (simple_type->data.generic_class->container_class);
2063 goto handle_enum;
2064 case MONO_TYPE_VAR:
2065 case MONO_TYPE_MVAR:
2066 /* gsharedvt */
2067 if (args [i]->type != STACK_VTYPE)
2068 return TRUE;
2069 continue;
2070 default:
2071 g_error ("unknown type 0x%02x in check_call_signature",
2072 simple_type->type);
2075 return FALSE;
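/*
 * Usage sketch (hypothetical call site; 'fsig' and 'sp' are illustrative
 * names):
 *
 *   if (check_call_signature (cfg, fsig, sp))
 *           goto unverified; // stack types don't match the callee signature
 */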
2078 MonoJumpInfo *
2079 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2081 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2083 ji->ip.i = ip;
2084 ji->type = type;
2085 ji->data.target = target;
2087 return ji;
2091 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2093 if (cfg->gshared)
2094 return mono_class_check_context_used (klass);
2095 else
2096 return 0;
2100 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2102 if (cfg->gshared)
2103 return mono_method_check_context_used (method);
2104 else
2105 return 0;
2109 * check_method_sharing:
2111 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2113 static void
2114 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2116 gboolean pass_vtable = FALSE;
2117 gboolean pass_mrgctx = FALSE;
2119 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cmethod->klass)) &&
2120 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2121 gboolean sharable = FALSE;
2123 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2124 sharable = TRUE;
2127 * Pass vtable iff target method might
2128 * be shared, which means that sharing
2129 * is enabled for its class and its
2130 * context is sharable (and it's not a
2131 * generic method).
2133 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2134 pass_vtable = TRUE;
2137 if (mini_method_needs_mrgctx (cmethod)) {
2138 g_assert (!pass_vtable);
2140 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2141 pass_mrgctx = TRUE;
2142 } else {
2143 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod)))
2144 pass_mrgctx = TRUE;
2148 if (out_pass_vtable)
2149 *out_pass_vtable = pass_vtable;
2150 if (out_pass_mrgctx)
2151 *out_pass_mrgctx = pass_mrgctx;
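/*
 * Usage sketch, mirroring handle_unbox_nullable () below:
 *
 *   gboolean pass_vtable, pass_mrgctx;
 *   check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
 *   // pass_vtable  -> pass the MonoVTable of cmethod->klass as the rgctx arg
 *   // pass_mrgctx  -> pass a method runtime generic context instead
 */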
2154 static gboolean
2155 direct_icalls_enabled (MonoCompile *cfg)
2157 return FALSE;
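/* The early return above disables direct icalls unconditionally; the checks
 * below are currently unreachable. */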
2159 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2160 #ifdef TARGET_AMD64
2161 if (cfg->compile_llvm && !cfg->llvm_only)
2162 return FALSE;
2163 #endif
2164 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2165 return FALSE;
2166 return TRUE;
2169 MonoInst*
2170 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2173 * Call the jit icall without a wrapper if possible.
2174 * The wrapper is needed to be able to do stack walks for asynchronously suspended
2175 * threads when debugging.
2177 if (direct_icalls_enabled (cfg)) {
2178 char *name;
2179 int costs;
2181 if (!info->wrapper_method) {
2182 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2183 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2184 g_free (name);
2185 mono_memory_barrier ();
2189 * Inline the wrapper method, which is basically a call to the C icall, and
2190 * an exception check.
2192 costs = inline_method (cfg, info->wrapper_method, NULL,
2193 args, NULL, il_offset, TRUE);
2194 g_assert (costs > 0);
2195 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
2197 return args [0];
2198 } else {
2199 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2203 static MonoInst*
2204 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2206 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2207 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2208 int widen_op = -1;
2211 * Native code might return non-register-sized integers
2212 * without initializing the upper bits.
2214 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2215 case OP_LOADI1_MEMBASE:
2216 widen_op = OP_ICONV_TO_I1;
2217 break;
2218 case OP_LOADU1_MEMBASE:
2219 widen_op = OP_ICONV_TO_U1;
2220 break;
2221 case OP_LOADI2_MEMBASE:
2222 widen_op = OP_ICONV_TO_I2;
2223 break;
2224 case OP_LOADU2_MEMBASE:
2225 widen_op = OP_ICONV_TO_U2;
2226 break;
2227 default:
2228 break;
2231 if (widen_op != -1) {
2232 int dreg = alloc_preg (cfg);
2233 MonoInst *widen;
2235 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2236 widen->type = ins->type;
2237 ins = widen;
2242 return ins;
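/*
 * Example (a sketch): a pinvoke returning 'short' is widened after the call,
 * roughly as if by
 *
 *   gint32 ret = (gint16) native_call (); // OP_ICONV_TO_I2 on the result reg
 */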
2245 static MonoInst*
2246 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2247 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2249 static void
2250 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2252 MonoInst *args [2];
2253 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2254 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2255 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2258 static MonoMethod*
2259 get_method_nofail (MonoClass *klass, const char *method_name, int num_params, int flags)
2261 MonoMethod *method;
2262 ERROR_DECL (error);
2263 method = mono_class_get_method_from_name_checked (klass, method_name, num_params, flags, error);
2264 mono_error_assert_ok (error);
2265 g_assertf (method, "Could not lookup method %s in %s", method_name, m_class_get_name (klass));
2266 return method;
2269 MonoMethod*
2270 mini_get_memcpy_method (void)
2272 static MonoMethod *memcpy_method = NULL;
2273 if (!memcpy_method) {
2274 memcpy_method = get_method_nofail (mono_defaults.string_class, "memcpy", 3, 0);
2275 if (!memcpy_method)
2276 g_error ("Old corlib found. Install a new one");
2278 return memcpy_method;
2281 void
2282 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2284 int card_table_shift_bits;
2285 target_mgreg_t card_table_mask;
2286 guint8 *card_table;
2287 MonoInst *dummy_use;
2288 int nursery_shift_bits;
2289 size_t nursery_size;
2291 if (!cfg->gen_write_barriers)
2292 return;
2294 //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
2296 card_table = mono_gc_get_target_card_table (&card_table_shift_bits, &card_table_mask);
2298 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2300 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2301 MonoInst *wbarrier;
2303 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2304 wbarrier->sreg1 = ptr->dreg;
2305 wbarrier->sreg2 = value->dreg;
2306 MONO_ADD_INS (cfg->cbb, wbarrier);
2307 } else if (card_table) {
2308 int offset_reg = alloc_preg (cfg);
2309 int card_reg;
2310 MonoInst *ins;
2313 * We emit a fast, lightweight write barrier. It always marks cards, as in the concurrent
2314 * collector case, so for the serial collector it might slightly slow down nursery
2315 * collections. We also expect the host system and the target system to have the same card
2316 * table configuration, which is the case if they have the same pointer size.
2319 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2320 if (card_table_mask)
2321 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2323 /* We can't use PADD_IMM since the card table might end up at a high address and amd64 doesn't support
2324 * immediates larger than 32 bits.
2326 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2327 card_reg = ins->dreg;
2329 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2330 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2331 } else {
2332 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2333 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2336 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
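/*
 * For reference, the card-table fast path emitted above corresponds to this C
 * pseudocode (a sketch; 'card_table' is the runtime-provided table):
 *
 *   size_t offset = (size_t) ptr >> card_table_shift_bits;
 *   if (card_table_mask)
 *           offset &= card_table_mask;
 *   card_table [offset] = 1; // mark the card containing 'ptr' as dirty
 */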
2339 MonoMethod*
2340 mini_get_memset_method (void)
2342 static MonoMethod *memset_method = NULL;
2343 if (!memset_method) {
2344 memset_method = get_method_nofail (mono_defaults.string_class, "memset", 3, 0);
2345 if (!memset_method)
2346 g_error ("Old corlib found. Install a new one");
2348 return memset_method;
2351 void
2352 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2354 MonoInst *iargs [3];
2355 int n;
2356 guint32 align;
2357 MonoMethod *memset_method;
2358 MonoInst *size_ins = NULL;
2359 MonoInst *bzero_ins = NULL;
2360 static MonoMethod *bzero_method;
2362 /* FIXME: Optimize this for the case when dest is an LDADDR */
2363 mono_class_init_internal (klass);
2364 if (mini_is_gsharedvt_klass (klass)) {
2365 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2366 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
2367 if (!bzero_method)
2368 bzero_method = get_method_nofail (mono_defaults.string_class, "bzero_aligned_1", 2, 0);
2369 g_assert (bzero_method);
2370 iargs [0] = dest;
2371 iargs [1] = size_ins;
2372 mini_emit_calli (cfg, mono_method_signature_internal (bzero_method), iargs, bzero_ins, NULL, NULL);
2373 return;
2376 klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));
2378 n = mono_class_value_size (klass, &align);
2380 if (n <= TARGET_SIZEOF_VOID_P * 8) {
2381 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2383 else {
2384 memset_method = mini_get_memset_method ();
2385 iargs [0] = dest;
2386 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2387 EMIT_NEW_ICONST (cfg, iargs [2], n);
2388 mono_emit_method_call (cfg, memset_method, iargs, NULL);
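/*
 * For non-gsharedvt types this reduces to zeroing the value, as if by
 * (sketch):
 *
 *   memset (dest, 0, n); // inlined when n <= TARGET_SIZEOF_VOID_P * 8
 */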
2392 static gboolean
2393 context_used_is_mrgctx (MonoCompile *cfg, int context_used)
2395 /* gshared dim methods use an mrgctx */
2396 if (mini_method_is_default_method (cfg->method))
2397 return context_used != 0;
2398 return context_used & MONO_GENERIC_CONTEXT_USED_METHOD;
2402 * emit_get_rgctx:
2404 * Emit IR to return either the this pointer for instance methods,
2405 * or the mrgctx for static methods.
2407 static MonoInst*
2408 emit_get_rgctx (MonoCompile *cfg, int context_used)
2410 MonoInst *this_ins = NULL;
2411 MonoMethod *method = cfg->method;
2413 g_assert (cfg->gshared);
2415 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2416 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2417 !m_class_is_valuetype (method->klass))
2418 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, mono_get_object_type ());
2420 if (context_used_is_mrgctx (cfg, context_used)) {
2421 MonoInst *mrgctx_loc, *mrgctx_var;
2423 if (!mini_method_is_default_method (method)) {
2424 g_assert (!this_ins);
2425 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2428 mrgctx_loc = mono_get_vtable_var (cfg);
2429 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2431 return mrgctx_var;
2432 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || m_class_is_valuetype (method->klass)) {
2433 MonoInst *vtable_loc, *vtable_var;
2435 g_assert (!this_ins);
2437 vtable_loc = mono_get_vtable_var (cfg);
2438 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2440 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2441 MonoInst *mrgctx_var = vtable_var;
2442 int vtable_reg;
2444 vtable_reg = alloc_preg (cfg);
2445 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2446 vtable_var->type = STACK_PTR;
2449 return vtable_var;
2450 } else {
2451 MonoInst *ins;
2452 int vtable_reg;
2454 vtable_reg = alloc_preg (cfg);
2455 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2456 return ins;
2460 static MonoJumpInfoRgctxEntry *
2461 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2463 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2464 res->method = method;
2465 res->in_mrgctx = in_mrgctx;
2466 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2467 res->data->type = patch_type;
2468 res->data->data.target = patch_data;
2469 res->info_type = info_type;
2471 return res;
2474 static inline MonoInst*
2475 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2477 MonoInst *args [16];
2478 MonoInst *call;
2480 // FIXME: No fastpath since the slot is not a compile time constant
2481 args [0] = rgctx;
2482 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
2483 if (entry->in_mrgctx)
2484 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
2485 else
2486 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
2487 return call;
2488 #if 0
2490 * FIXME: This can be called during decompose, which is a problem since it creates
2491 * new bblocks.
2492 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
2494 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
2495 gboolean mrgctx;
2496 MonoBasicBlock *is_null_bb, *end_bb;
2497 MonoInst *res, *ins, *call;
2498 MonoInst *args[16];
2500 slot = mini_get_rgctx_entry_slot (entry);
2502 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
2503 index = MONO_RGCTX_SLOT_INDEX (slot);
2504 if (mrgctx)
2505 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P;
2506 for (depth = 0; ; ++depth) {
2507 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
2509 if (index < size - 1)
2510 break;
2511 index -= size - 1;
2514 NEW_BBLOCK (cfg, end_bb);
2515 NEW_BBLOCK (cfg, is_null_bb);
2517 if (mrgctx) {
2518 rgctx_reg = rgctx->dreg;
2519 } else {
2520 rgctx_reg = alloc_preg (cfg);
2522 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
2523 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
2524 NEW_BBLOCK (cfg, is_null_bb);
2526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
2527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2530 for (i = 0; i < depth; ++i) {
2531 int array_reg = alloc_preg (cfg);
2533 /* load ptr to next array */
2534 if (mrgctx && i == 0)
2535 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
2536 else
2537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
2538 rgctx_reg = array_reg;
2539 /* is the ptr null? */
2540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
2541 /* if yes, jump to actual trampoline */
2542 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2545 /* fetch slot */
2546 val_reg = alloc_preg (cfg);
2547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * TARGET_SIZEOF_VOID_P);
2548 /* is the slot null? */
2549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
2550 /* if yes, jump to actual trampoline */
2551 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2553 /* Fastpath */
2554 res_reg = alloc_preg (cfg);
2555 MONO_INST_NEW (cfg, ins, OP_MOVE);
2556 ins->dreg = res_reg;
2557 ins->sreg1 = val_reg;
2558 MONO_ADD_INS (cfg->cbb, ins);
2559 res = ins;
2560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2562 /* Slowpath */
2563 MONO_START_BB (cfg, is_null_bb);
2564 args [0] = rgctx;
2565 EMIT_NEW_ICONST (cfg, args [1], index);
2566 if (mrgctx)
2567 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
2568 else
2569 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
2570 MONO_INST_NEW (cfg, ins, OP_MOVE);
2571 ins->dreg = res_reg;
2572 ins->sreg1 = call->dreg;
2573 MONO_ADD_INS (cfg->cbb, ins);
2574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
2576 MONO_START_BB (cfg, end_bb);
2578 return res;
2579 #endif
2583 * emit_rgctx_fetch:
2585 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
2586 * given by RGCTX.
2588 static MonoInst*
2589 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2591 if (cfg->llvm_only)
2592 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
2593 else
2594 return mini_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2598 * mini_emit_get_rgctx_klass:
2600 * Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit
2601 * normal constants, else emit a load from the rgctx.
2603 MonoInst*
2604 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2605 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2607 if (!context_used) {
2608 MonoInst *ins;
2610 switch (rgctx_type) {
2611 case MONO_RGCTX_INFO_KLASS:
2612 EMIT_NEW_CLASSCONST (cfg, ins, klass);
2613 return ins;
2614 default:
2615 g_assert_not_reached ();
2619 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2620 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2622 return emit_rgctx_fetch (cfg, rgctx, entry);
2625 static MonoInst*
2626 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
2627 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
2629 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
2630 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2632 return emit_rgctx_fetch (cfg, rgctx, entry);
2635 static MonoInst*
2636 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
2637 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2639 MonoJumpInfoGSharedVtCall *call_info;
2640 MonoJumpInfoRgctxEntry *entry;
2641 MonoInst *rgctx;
2643 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
2644 call_info->sig = sig;
2645 call_info->method = cmethod;
2647 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
2648 rgctx = emit_get_rgctx (cfg, context_used);
2650 return emit_rgctx_fetch (cfg, rgctx, entry);
2654 * emit_get_rgctx_virt_method:
2656 * Return data for method VIRT_METHOD for a receiver of type KLASS.
2658 static MonoInst*
2659 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
2660 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
2662 MonoJumpInfoVirtMethod *info;
2663 MonoJumpInfoRgctxEntry *entry;
2664 MonoInst *rgctx;
2666 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
2667 info->klass = klass;
2668 info->method = virt_method;
2670 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
2671 rgctx = emit_get_rgctx (cfg, context_used);
2673 return emit_rgctx_fetch (cfg, rgctx, entry);
2676 static MonoInst*
2677 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
2678 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
2680 MonoJumpInfoRgctxEntry *entry;
2681 MonoInst *rgctx;
2683 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
2684 rgctx = emit_get_rgctx (cfg, context_used);
2686 return emit_rgctx_fetch (cfg, rgctx, entry);
2690 * emit_get_rgctx_method:
2692 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2693 * normal constants, else emit a load from the rgctx.
2695 static MonoInst*
2696 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2697 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2699 if (!context_used) {
2700 MonoInst *ins;
2702 switch (rgctx_type) {
2703 case MONO_RGCTX_INFO_METHOD:
2704 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2705 return ins;
2706 case MONO_RGCTX_INFO_METHOD_RGCTX:
2707 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2708 return ins;
2709 default:
2710 g_assert_not_reached ();
2712 } else {
2713 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2714 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2716 return emit_rgctx_fetch (cfg, rgctx, entry);
2720 static MonoInst*
2721 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2722 MonoClassField *field, MonoRgctxInfoType rgctx_type)
2724 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_FIELD, field, rgctx_type);
2725 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
2727 return emit_rgctx_fetch (cfg, rgctx, entry);
2730 MonoInst*
2731 mini_emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2732 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2734 return emit_get_rgctx_method (cfg, context_used, cmethod, rgctx_type);
2737 static int
2738 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
2740 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
2741 MonoRuntimeGenericContextInfoTemplate *template_;
2742 int i, idx;
2744 g_assert (info);
2746 for (i = 0; i < info->num_entries; ++i) {
2747 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
2749 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
2750 return i;
2753 if (info->num_entries == info->count_entries) {
2754 MonoRuntimeGenericContextInfoTemplate *new_entries;
2755 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
2757 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
2759 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
2760 info->entries = new_entries;
2761 info->count_entries = new_count_entries;
2764 idx = info->num_entries;
2765 template_ = &info->entries [idx];
2766 template_->info_type = rgctx_type;
2767 template_->data = data;
2769 info->num_entries ++;
2771 return idx;
2775 * emit_get_gsharedvt_info:
2777 * This is similar to the emit_get_rgctx_* functions, but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
2779 static MonoInst*
2780 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
2782 MonoInst *ins;
2783 int idx, dreg;
2785 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
2786 /* Load info->entries [idx] */
2787 dreg = alloc_preg (cfg);
2788 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
2790 return ins;
2793 MonoInst*
2794 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
2796 return emit_get_gsharedvt_info (cfg, m_class_get_byval_arg (klass), rgctx_type);
2800 * On return the caller must check @klass for load errors.
2802 static void
2803 emit_class_init (MonoCompile *cfg, MonoClass *klass)
2805 MonoInst *vtable_arg;
2806 int context_used;
2808 context_used = mini_class_check_context_used (cfg, klass);
2810 if (context_used) {
2811 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
2812 klass, MONO_RGCTX_INFO_VTABLE);
2813 } else {
2814 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
2815 if (!is_ok (&cfg->error)) {
2816 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2817 return;
2820 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2823 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
2824 MonoInst *ins;
2827 * Using an opcode instead of emitting IR here allows hiding the call inside the opcode,
2828 * so it doesn't have to clobber any regs and it doesn't break basic blocks.
2830 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
2831 ins->sreg1 = vtable_arg->dreg;
2832 MONO_ADD_INS (cfg->cbb, ins);
2833 } else {
2834 int inited_reg;
2835 MonoBasicBlock *inited_bb;
2837 inited_reg = alloc_ireg (cfg);
2839 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
2841 NEW_BBLOCK (cfg, inited_bb);
2843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
2844 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
2846 mono_emit_jit_icall (cfg, mono_generic_class_init, &vtable_arg);
2848 MONO_START_BB (cfg, inited_bb);
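/*
 * The fallback path above is roughly equivalent to (C pseudocode sketch):
 *
 *   if (!vtable->initialized)
 *           mono_generic_class_init (vtable);
 */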
2852 static void
2853 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
2855 MonoInst *ins;
2857 if (cfg->gen_seq_points && cfg->method == method) {
2858 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
2859 if (nonempty_stack)
2860 ins->flags |= MONO_INST_NONEMPTY_STACK;
2861 MONO_ADD_INS (cfg->cbb, ins);
2865 void
2866 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
2868 if (mini_get_debug_options ()->better_cast_details) {
2869 int vtable_reg = alloc_preg (cfg);
2870 int klass_reg = alloc_preg (cfg);
2871 MonoBasicBlock *is_null_bb = NULL;
2872 MonoInst *tls_get;
2874 if (null_check) {
2875 NEW_BBLOCK (cfg, is_null_bb);
2877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2878 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2881 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
2882 if (!tls_get) {
2883 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
2884 exit (1);
2887 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2888 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
2890 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2892 MonoInst *class_ins = mini_emit_get_rgctx_klass (cfg, mini_class_check_context_used (cfg, klass), klass, MONO_RGCTX_INFO_KLASS);
2893 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), class_ins->dreg);
2895 if (null_check)
2896 MONO_START_BB (cfg, is_null_bb);
2900 void
2901 mini_reset_cast_details (MonoCompile *cfg)
2903 /* Reset the variables holding the cast details */
2904 if (mini_get_debug_options ()->better_cast_details) {
2905 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
2906 /* It is enough to reset the from field */
2907 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
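/*
 * The save/reset pair brackets an emitted type check so a failing cast can
 * report both classes, e.g. (see handle_unbox () below):
 *
 *   mini_save_cast_details (cfg, klass, obj_reg, FALSE);
 *   mini_emit_class_check (cfg, eclass_reg, klass);
 *   mini_reset_cast_details (cfg);
 */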
2912 * On return the caller must check @array_class for load errors
2914 static void
2915 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2917 int vtable_reg = alloc_preg (cfg);
2918 int context_used;
2920 context_used = mini_class_check_context_used (cfg, array_class);
2922 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
2924 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2926 if (cfg->opt & MONO_OPT_SHARED) {
2927 int class_reg = alloc_preg (cfg);
2928 MonoInst *ins;
2930 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
2931 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
2932 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
2933 } else if (context_used) {
2934 MonoInst *vtable_ins;
2936 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2937 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2938 } else {
2939 if (cfg->compile_aot) {
2940 int vt_reg;
2941 MonoVTable *vtable;
2943 if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, &cfg->error))) {
2944 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2945 return;
2947 vt_reg = alloc_preg (cfg);
2948 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2949 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2950 } else {
2951 MonoVTable *vtable;
2952 if (!(vtable = mono_class_vtable_checked (cfg->domain, array_class, &cfg->error))) {
2953 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2954 return;
2956 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, (gssize)vtable);
2960 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
2962 mini_reset_cast_details (cfg);
2966 * Handles unbox of a Nullable<T>. If context_used is non-zero, then shared
2967 * generic code is generated.
2969 static MonoInst*
2970 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2972 MonoMethod* method;
2974 if (m_class_is_enumtype (mono_class_get_nullable_param (klass)))
2975 method = get_method_nofail (klass, "UnboxExact", 1, 0);
2976 else
2977 method = get_method_nofail (klass, "Unbox", 1, 0);
2978 g_assert (method);
2980 if (context_used) {
2981 MonoInst *rgctx, *addr;
2983 /* FIXME: What if the class is shared? We might not
2984 have to get the address of the method from the
2985 RGCTX. */
2986 addr = emit_get_rgctx_method (cfg, context_used, method,
2987 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2988 if (cfg->llvm_only) {
2989 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature_internal (method));
2990 return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
2991 } else {
2992 rgctx = emit_get_rgctx (cfg, context_used);
2994 return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
2996 } else {
2997 gboolean pass_vtable, pass_mrgctx;
2998 MonoInst *rgctx_arg = NULL;
3000 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3001 g_assert (!pass_mrgctx);
3003 if (pass_vtable) {
3004 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, &cfg->error);
3006 mono_error_assert_ok (&cfg->error);
3007 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3010 return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3014 static MonoInst*
3015 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3017 MonoInst *add;
3018 int obj_reg;
3019 int vtable_reg = alloc_dreg (cfg, STACK_PTR);
3020 int klass_reg = alloc_dreg (cfg, STACK_PTR);
3021 int eclass_reg = alloc_dreg (cfg, STACK_PTR);
3022 int rank_reg = alloc_dreg (cfg, STACK_I4);
3024 obj_reg = sp [0]->dreg;
3025 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3026 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3028 /* FIXME: generics */
3029 g_assert (m_class_get_rank (klass) == 0);
3031 // Check rank == 0
3032 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3033 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3036 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, m_class_offsetof_element_class ());
3038 if (context_used) {
3039 MonoInst *element_class;
3041 /* This assertion is from the unboxcast insn */
3042 g_assert (m_class_get_rank (klass) == 0);
3044 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3045 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3047 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3048 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3049 } else {
3050 mini_save_cast_details (cfg, m_class_get_element_class (klass), obj_reg, FALSE);
3051 mini_emit_class_check (cfg, eclass_reg, m_class_get_element_class (klass));
3052 mini_reset_cast_details (cfg);
3055 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, MONO_ABI_SIZEOF (MonoObject));
3056 MONO_ADD_INS (cfg->cbb, add);
3057 add->type = STACK_MP;
3058 add->klass = klass;
3060 return add;
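/*
 * i.e. the returned managed pointer is just the object address plus the
 * object header size (sketch):
 *
 *   data = (guint8 *) obj + MONO_ABI_SIZEOF (MonoObject);
 */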
3063 static MonoInst*
3064 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3066 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3067 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3068 MonoInst *ins;
3069 int dreg, addr_reg;
3071 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3073 /* obj */
3074 args [0] = obj;
3076 /* klass */
3077 args [1] = klass_inst;
3079 /* CASTCLASS */
3080 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3082 NEW_BBLOCK (cfg, is_ref_bb);
3083 NEW_BBLOCK (cfg, is_nullable_bb);
3084 NEW_BBLOCK (cfg, end_bb);
3085 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3086 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3087 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3090 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3092 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3093 addr_reg = alloc_dreg (cfg, STACK_MP);
3095 /* Non-ref case */
3096 /* UNBOX */
3097 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, MONO_ABI_SIZEOF (MonoObject));
3098 MONO_ADD_INS (cfg->cbb, addr);
3100 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3102 /* Ref case */
3103 MONO_START_BB (cfg, is_ref_bb);
3105 /* Save the ref to a temporary */
3106 dreg = alloc_ireg (cfg);
3107 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, m_class_get_byval_arg (klass));
3108 addr->dreg = addr_reg;
3109 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3110 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3112 /* Nullable case */
3113 MONO_START_BB (cfg, is_nullable_bb);
3116 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3117 MonoInst *unbox_call;
3118 MonoMethodSignature *unbox_sig;
3120 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3121 unbox_sig->ret = m_class_get_byval_arg (klass);
3122 unbox_sig->param_count = 1;
3123 unbox_sig->params [0] = mono_get_object_type ();
3125 if (cfg->llvm_only)
3126 unbox_call = mini_emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3127 else
3128 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3130 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, m_class_get_byval_arg (klass));
3131 addr->dreg = addr_reg;
3134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3136 /* End */
3137 MONO_START_BB (cfg, end_bb);
3139 /* LDOBJ */
3140 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr_reg, 0);
3142 return ins;
3146 * Returns NULL and sets the cfg exception on error.
3148 static MonoInst*
3149 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3151 MonoInst *iargs [2];
3152 void *alloc_ftn;
3154 if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_ABSTRACT) {
3155 char* full_name = mono_type_get_full_name (klass);
3156 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3157 mono_error_set_member_access (&cfg->error, "Cannot create an abstract class: %s", full_name);
3158 g_free (full_name);
3159 return NULL;
3162 if (context_used) {
3163 MonoInst *data;
3164 MonoRgctxInfoType rgctx_info;
3165 MonoInst *iargs [2];
3166 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3168 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
3170 if (cfg->opt & MONO_OPT_SHARED)
3171 rgctx_info = MONO_RGCTX_INFO_KLASS;
3172 else
3173 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3174 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3176 if (cfg->opt & MONO_OPT_SHARED) {
3177 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3178 iargs [1] = data;
3179 alloc_ftn = (gpointer)ves_icall_object_new;
3180 } else {
3181 iargs [0] = data;
3182 alloc_ftn = (gpointer)ves_icall_object_new_specific;
3185 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3186 if (known_instance_size) {
3187 int size = mono_class_instance_size (klass);
3188 if (size < MONO_ABI_SIZEOF (MonoObject))
3189 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3191 EMIT_NEW_ICONST (cfg, iargs [1], size);
3193 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3196 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3199 if (cfg->opt & MONO_OPT_SHARED) {
3200 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3201 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3203 alloc_ftn = (gpointer)ves_icall_object_new;
3204 } else if (cfg->compile_aot && cfg->cbb->out_of_line && m_class_get_type_token (klass) && m_class_get_image (klass) == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3205 /* This happens often in argument checking code, eg. throw new FooException... */
3206 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3207 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (m_class_get_type_token (klass)));
3208 alloc_ftn = (gpointer)mono_helper_newobj_mscorlib;
3209 } else {
3210 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
3212 if (!is_ok (&cfg->error)) {
3213 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3214 return NULL;
3217 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3219 if (managed_alloc) {
3220 int size = mono_class_instance_size (klass);
3221 if (size < MONO_ABI_SIZEOF (MonoObject))
3222 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3224 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3225 EMIT_NEW_ICONST (cfg, iargs [1], size);
3226 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3228 alloc_ftn = (gpointer)ves_icall_object_new_specific;
3229 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3232 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3236 * Returns NULL and sets the cfg exception on error.
3238 MonoInst*
3239 mini_emit_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3241 MonoInst *alloc, *ins;
3243 if (G_UNLIKELY (m_class_is_byreflike (klass))) {
3244 mono_error_set_bad_image (&cfg->error, m_class_get_image (cfg->method->klass), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass), m_class_get_name (klass));
3245 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3246 return NULL;
3249 if (mono_class_is_nullable (klass)) {
3250 MonoMethod* method = get_method_nofail (klass, "Box", 1, 0);
3252 if (context_used) {
3253 if (cfg->llvm_only && cfg->gsharedvt) {
3254 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3255 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3256 return mini_emit_llvmonly_calli (cfg, mono_method_signature_internal (method), &val, addr);
3257 } else {
3258 /* FIXME: What if the class is shared? We might not
3259 have to get the method address from the RGCTX. */
3260 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3261 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3262 MonoInst *rgctx = emit_get_rgctx (cfg, context_used);
3264 return mini_emit_calli (cfg, mono_method_signature_internal (method), &val, addr, NULL, rgctx);
3266 } else {
3267 gboolean pass_vtable, pass_mrgctx;
3268 MonoInst *rgctx_arg = NULL;
3270 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3271 g_assert (!pass_mrgctx);
3273 if (pass_vtable) {
3274 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, &cfg->error);
3276 mono_error_assert_ok (&cfg->error);
3277 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3280 return mini_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3284 if (mini_is_gsharedvt_klass (klass)) {
3285 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3286 MonoInst *res, *is_ref, *src_var, *addr;
3287 int dreg;
3289 dreg = alloc_ireg (cfg);
3291 NEW_BBLOCK (cfg, is_ref_bb);
3292 NEW_BBLOCK (cfg, is_nullable_bb);
3293 NEW_BBLOCK (cfg, end_bb);
3294 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3295 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3296 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3298 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3301 /* Non-ref case */
3302 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3303 if (!alloc)
3304 return NULL;
3305 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
3306 ins->opcode = OP_STOREV_MEMBASE;
3308 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3309 res->type = STACK_OBJ;
3310 res->klass = klass;
3311 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3313 /* Ref case */
3314 MONO_START_BB (cfg, is_ref_bb);
3316 /* val is a vtype, so we have to load the value manually */
3317 src_var = get_vreg_to_inst (cfg, val->dreg);
3318 if (!src_var)
3319 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, val->dreg);
3320 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3321 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3322 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3324 /* Nullable case */
3325 MONO_START_BB (cfg, is_nullable_bb);
3328 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
3329 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3330 MonoInst *box_call;
3331 MonoMethodSignature *box_sig;
3334 * klass is Nullable<T>, so we need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3335 * construct that method at JIT time, so we have to do things by hand.
3337 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3338 box_sig->ret = mono_get_object_type ();
3339 box_sig->param_count = 1;
3340 box_sig->params [0] = m_class_get_byval_arg (klass);
3342 if (cfg->llvm_only)
3343 box_call = mini_emit_llvmonly_calli (cfg, box_sig, &val, addr);
3344 else
3345 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3346 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3347 res->type = STACK_OBJ;
3348 res->klass = klass;
3351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3353 MONO_START_BB (cfg, end_bb);
3355 return res;
3358 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3359 if (!alloc)
3360 return NULL;
3362 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), alloc->dreg, MONO_ABI_SIZEOF (MonoObject), val->dreg);
3363 return alloc;
3366 static gboolean
3367 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
3369 if (cmethod->klass == mono_defaults.systemtype_class) {
3370 if (!strcmp (cmethod->name, "GetType"))
3371 return TRUE;
3373 return FALSE;
3376 G_GNUC_UNUSED MonoInst*
3377 mini_handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, int enum_val_reg, MonoInst *enum_flag)
3379 MonoType *enum_type = mono_type_get_underlying_type (m_class_get_byval_arg (klass));
3380 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
3381 gboolean is_i4;
3383 switch (enum_type->type) {
3384 case MONO_TYPE_I8:
3385 case MONO_TYPE_U8:
3386 #if SIZEOF_REGISTER == 8
3387 case MONO_TYPE_I:
3388 case MONO_TYPE_U:
3389 #endif
3390 is_i4 = FALSE;
3391 break;
3392 default:
3393 is_i4 = TRUE;
3394 break;
3398 MonoInst *load = NULL, *and_, *cmp, *ceq;
3399 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3400 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
3401 int dest_reg = alloc_ireg (cfg);
3403 if (enum_this) {
3404 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
3405 } else {
3406 g_assert (enum_val_reg != -1);
3407 enum_reg = enum_val_reg;
3409 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
3410 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
3411 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
3413 ceq->type = STACK_I4;
3415 if (!is_i4) {
3416 load = load ? mono_decompose_opcode (cfg, load) : NULL;
3417 and_ = mono_decompose_opcode (cfg, and_);
3418 cmp = mono_decompose_opcode (cfg, cmp);
3419 ceq = mono_decompose_opcode (cfg, ceq);
3422 return ceq;
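/*
 * This computes the moral equivalent of Enum.HasFlag without boxing (sketch):
 *
 *   result = ((value & flag) == flag) ? 1 : 0;
 */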
3426 static MonoInst*
3427 emit_get_rgctx_dele_tramp (MonoCompile *cfg, int context_used,
3428 MonoClass *klass, MonoMethod *virt_method, gboolean _virtual, MonoRgctxInfoType rgctx_type)
3430 MonoDelegateClassMethodPair *info;
3431 MonoJumpInfoRgctxEntry *entry;
3432 MonoInst *rgctx;
3434 info = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
3435 info->klass = klass;
3436 info->method = virt_method;
3437 info->is_virtual = _virtual;
3439 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used_is_mrgctx (cfg, context_used), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, info, rgctx_type);
3440 rgctx = emit_get_rgctx (cfg, context_used);
3442 return emit_rgctx_fetch (cfg, rgctx, entry);
3447 * Returns NULL and sets the cfg exception on error.
3449 static G_GNUC_UNUSED MonoInst*
3450 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int target_method_context_used, int invoke_context_used, gboolean virtual_)
3452 MonoInst *ptr;
3453 int dreg;
3454 gpointer trampoline;
3455 MonoInst *obj, *tramp_ins;
3456 MonoDomain *domain;
3457 guint8 **code_slot;
3459 if (virtual_ && !cfg->llvm_only) {
3460 MonoMethod *invoke = mono_get_delegate_invoke_internal (klass);
3461 g_assert (invoke);
3463 //FIXME verify & fix any issue with removing invoke_context_used restriction
3464 if (invoke_context_used || !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke), target_method_context_used ? NULL : method))
3465 return NULL;
3468 obj = handle_alloc (cfg, klass, FALSE, invoke_context_used);
3469 if (!obj)
3470 return NULL;
3472 /* Inline the contents of mono_delegate_ctor */
3474 /* Set target field */
3475 /* Optimize away setting of NULL target */
3476 if (!MONO_INS_IS_PCONST_NULL (target)) {
3477 if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) {
3478 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target->dreg, 0);
3479 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
3481 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3482 if (cfg->gen_write_barriers) {
3483 dreg = alloc_preg (cfg);
3484 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
3485 mini_emit_write_barrier (cfg, ptr, target);
3489 /* Set method field */
3490 if (!(target_method_context_used || invoke_context_used) || cfg->llvm_only) {
3491 // If compiling with gsharing enabled, it's faster to load the method from the delegate trampoline info than to use an rgctx slot
3492 MonoInst *method_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD);
3493 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3497 * To avoid looking up the compiled code belonging to the target method
3498 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3499 * store it, and we fill it after the method has been compiled.
3501 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3502 MonoInst *code_slot_ins;
3504 if (target_method_context_used) {
3505 code_slot_ins = emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3506 } else {
3507 domain = mono_domain_get ();
3508 mono_domain_lock (domain);
3509 if (!domain_jit_info (domain)->method_code_hash)
3510 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3511 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3512 if (!code_slot) {
3513 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
3514 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3516 mono_domain_unlock (domain);
3518 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
3520 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3523 if (cfg->llvm_only) {
3524 if (virtual_) {
3525 MonoInst *args [ ] = {
3526 obj,
3527 target,
3528 emit_get_rgctx_method (cfg, target_method_context_used, method, MONO_RGCTX_INFO_METHOD)
3530 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
3531 } else {
3532 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, &obj);
3535 return obj;
3537 if (target_method_context_used || invoke_context_used) {
3538 tramp_ins = emit_get_rgctx_dele_tramp (cfg, target_method_context_used | invoke_context_used, klass, method, virtual_, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO);
3540 // This is emitted as a constant store for the non-shared case.
3541 // We copy from the delegate trampoline info as it's faster than an rgctx fetch.
3542 dreg = alloc_preg (cfg);
3543 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method));
3544 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), dreg);
3545 } else if (cfg->compile_aot) {
3546 MonoDelegateClassMethodPair *del_tramp;
3548 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
3549 del_tramp->klass = klass;
3550 del_tramp->method = method;
3551 del_tramp->is_virtual = virtual_;
3552 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
3553 } else {
3554 if (virtual_)
3555 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, method);
3556 else
3557 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, method);
3558 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3561 /* Set invoke_impl field */
3562 if (virtual_) {
3563 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3564 } else {
3565 dreg = alloc_preg (cfg);
3566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
3567 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
3569 dreg = alloc_preg (cfg);
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
3571 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
3574 dreg = alloc_preg (cfg);
3575 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
3576 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
3578 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3580 return obj;
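/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va () icall, which uses a
 * vararg calling convention, so the cfg is flagged with MONO_CFG_HAS_VARARGS
 * and LLVM compilation is disabled for the method.
 */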
3583 static MonoInst*
3584 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, guchar *ip)
3586 MonoJitICallInfo *info;
3588 /* Need to register the icall so it gets an icall wrapper */
3589 info = mono_get_array_new_va_icall (rank);
3591 cfg->flags |= MONO_CFG_HAS_VARARGS;
3593 /* mono_array_new_va () needs a vararg calling convention */
3594 cfg->exception_message = g_strdup ("array-new");
3595 cfg->disable_llvm = TRUE;
3597 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3598 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3602 * handle_constrained_gsharedvt_call:
3604 * Handle constrained calls where the receiver is a gsharedvt type.
3605 * Return the instruction representing the call. Set the cfg exception on failure.
3607 static MonoInst*
3608 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
3609 gboolean *ref_emit_widen)
3611 MonoInst *ins = NULL;
3612 gboolean emit_widen = *ref_emit_widen;
3613 gboolean supported;
3616 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
3617 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
3618 * pack the arguments into an array, and do the rest of the work in an icall.
3620 supported = ((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!m_class_is_valuetype (cmethod->klass) && m_class_get_image (cmethod->klass) != mono_defaults.corlib));
3621 if (supported)
3622 supported = (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret));
3623 if (supported) {
3624 if (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1)) {
3625 supported = TRUE;
3626 } else {
3627 /* Allow scalar parameters and a gsharedvt first parameter */
3628 supported = MONO_TYPE_IS_PRIMITIVE (fsig->params [0]) || MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]);
3629 if (supported) {
3630 for (int i = 1; i < fsig->param_count; ++i) {
3631 if (!(fsig->params [i]->byref || MONO_TYPE_IS_PRIMITIVE (fsig->params [i]) || MONO_TYPE_IS_REFERENCE (fsig->params [i])))
3632 supported = FALSE;
3637 if (supported) {
3638 MonoInst *args [16];
3641 * This case handles calls to
3642 * - object:ToString()/Equals()/GetHashCode(),
3643 * - System.IComparable<T>:CompareTo()
3644 * - System.IEquatable<T>:Equals ()
3645 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
3648 args [0] = sp [0];
3649 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
3650 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
3652 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
3653 if (fsig->hasthis && fsig->param_count) {
3654 /* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean deref_arg, gpointer *args) */
3655 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
3656 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
3657 ins->dreg = alloc_preg (cfg);
3658 ins->inst_imm = fsig->param_count * sizeof (target_mgreg_t);
3659 MONO_ADD_INS (cfg->cbb, ins);
3660 args [4] = ins;
3662 /* Only the first argument is allowed to be gsharedvt */
3663 /* args [3] = deref_arg */
3664 if (mini_is_gsharedvt_type (fsig->params [0])) {
3665 int deref_arg_reg;
3666 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type_internal (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3667 deref_arg_reg = alloc_preg (cfg);
3668 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
3669 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
3670 } else {
3671 EMIT_NEW_ICONST (cfg, args [3], 0);
3674 for (int i = 0; i < fsig->param_count; ++i) {
3675 int addr_reg;
3677 if (mini_is_gsharedvt_type (fsig->params [i]) || MONO_TYPE_IS_PRIMITIVE (fsig->params [i])) {
3678 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [i + 1]->dreg, fsig->params [i]);
3679 addr_reg = ins->dreg;
3680 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), addr_reg);
3681 } else {
3682 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, i * sizeof (target_mgreg_t), sp [i + 1]->dreg);
3685 } else {
3686 EMIT_NEW_ICONST (cfg, args [3], 0);
3687 EMIT_NEW_ICONST (cfg, args [4], 0);
3689 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
3690 emit_widen = FALSE;
3692 if (mini_is_gsharedvt_type (fsig->ret)) {
3693 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type_internal (fsig->ret), ins);
3694 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig->ret))) {
3695 MonoInst *add;
3697 /* Unbox */
3698 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, MONO_ABI_SIZEOF (MonoObject));
3699 MONO_ADD_INS (cfg->cbb, add);
3700 /* Load value */
3701 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
3702 MONO_ADD_INS (cfg->cbb, ins);
3703 /* ins represents the call result */
3705 } else {
3706 GSHAREDVT_FAILURE (CEE_CALLVIRT);
3709 *ref_emit_widen = emit_widen;
3711 return ins;
3713 exception_exit:
3714 return NULL;
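/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by adding an OP_LOAD_GOTADDR
 * at the start of the entry bblock, and keep the variable alive for the whole
 * method with a dummy use in the exit bblock.
 */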
3717 static void
3718 mono_emit_load_got_addr (MonoCompile *cfg)
3720 MonoInst *getaddr, *dummy_use;
3722 if (!cfg->got_var || cfg->got_var_allocated)
3723 return;
3725 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3726 getaddr->cil_code = cfg->header->code;
3727 getaddr->dreg = cfg->got_var->dreg;
3729 /* Add it to the start of the first bblock */
3730 if (cfg->bb_entry->code) {
3731 getaddr->next = cfg->bb_entry->code;
3732 cfg->bb_entry->code = getaddr;
3734 else
3735 MONO_ADD_INS (cfg->bb_entry, getaddr);
3737 cfg->got_var_allocated = TRUE;
3740 * Add a dummy use to keep the got_var alive, since real uses might
3741 * only be generated by the back ends.
3742 * Add it to end_bblock, so the variable's lifetime covers the whole
3743 * method.
3744 * It would be better to make the usage of the got var explicit in all
3745 * cases when the backend needs it (i.e. calls, throw etc.), so this
3746 * wouldn't be needed.
3748 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3749 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3752 static int inline_limit, llvm_jit_inline_limit;
3753 static gboolean inline_limit_inited;
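/*
 * mono_method_check_inlining:
 *
 *   Return TRUE if METHOD is eligible for inlining into the method being
 * compiled: its body must fit under the inline size limit (tunable at run
 * time, e.g. MONO_INLINELIMIT=40 mono app.exe), it must not have exception
 * clauses, and its class initializer must either be runnable right away or
 * not be needed, since inlining cannot insert a mono_runtime_class_init ()
 * call into the inlined code.
 */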
3755 static gboolean
3756 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3758 MonoMethodHeaderSummary header;
3759 MonoVTable *vtable;
3760 int limit;
3761 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3762 MonoMethodSignature *sig = mono_method_signature_internal (method);
3763 int i;
3764 #endif
3766 if (cfg->disable_inline)
3767 return FALSE;
3768 if (cfg->gsharedvt)
3769 return FALSE;
3771 if (cfg->inline_depth > 10)
3772 return FALSE;
3774 if (!mono_method_get_header_summary (method, &header))
3775 return FALSE;
3777 /* runtime, icall and pinvoke are checked by the summary call */
3778 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3779 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3780 (mono_class_is_marshalbyref (method->klass)) ||
3781 header.has_clauses)
3782 return FALSE;
3784 /* also consider num_locals? */
3785 /* Do the size check early to avoid creating vtables */
3786 if (!inline_limit_inited) {
3787 char *inlinelimit;
3788 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
3789 inline_limit = atoi (inlinelimit);
3790 llvm_jit_inline_limit = inline_limit;
3791 g_free (inlinelimit);
3792 } else {
3793 inline_limit = INLINE_LENGTH_LIMIT;
3794 llvm_jit_inline_limit = LLVM_JIT_INLINE_LENGTH_LIMIT;
3796 inline_limit_inited = TRUE;
3799 if (COMPILE_LLVM (cfg) && !cfg->compile_aot)
3800 limit = llvm_jit_inline_limit;
3801 else
3802 limit = inline_limit;
3803 if (header.code_size >= limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
3804 return FALSE;
3807 * if we can initialize the class of the method right away, we do,
3808 * otherwise we don't allow inlining if the class needs initialization,
3809 * since it would mean inserting a call to mono_runtime_class_init()
3810 * inside the inlined code
3812 if (cfg->gshared && m_class_has_cctor (method->klass) && mini_class_check_context_used (cfg, method->klass))
3813 return FALSE;
3815 if (!(cfg->opt & MONO_OPT_SHARED)) {
3816 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
3817 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
3818 if (m_class_has_cctor (method->klass)) {
3819 ERROR_DECL (error);
3820 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3821 if (!is_ok (error)) {
3822 mono_error_cleanup (error);
3823 return FALSE;
3825 if (!cfg->compile_aot) {
3826 if (!mono_runtime_class_init_full (vtable, error)) {
3827 mono_error_cleanup (error);
3828 return FALSE;
3832 } else if (mono_class_is_before_field_init (method->klass)) {
3833 if (cfg->run_cctors && m_class_has_cctor (method->klass)) {
3834 ERROR_DECL (error);
3835 /* FIXME: it would be easier and lazier to just use mono_class_try_get_vtable */
3836 if (!m_class_get_runtime_info (method->klass))
3837 /* No vtable created yet */
3838 return FALSE;
3839 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3840 if (!is_ok (error)) {
3841 mono_error_cleanup (error);
3842 return FALSE;
3844 /* This makes it so that inlining cannot trigger */
3845 /* .cctors: too many apps depend on them */
3846 /* running in a specific order... */
3847 if (!vtable->initialized)
3848 return FALSE;
3849 if (!mono_runtime_class_init_full (vtable, error)) {
3850 mono_error_cleanup (error);
3851 return FALSE;
3854 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3855 ERROR_DECL (error);
3856 if (!m_class_get_runtime_info (method->klass))
3857 /* No vtable created yet */
3858 return FALSE;
3859 vtable = mono_class_vtable_checked (cfg->domain, method->klass, error);
3860 if (!is_ok (error)) {
3861 mono_error_cleanup (error);
3862 return FALSE;
3864 if (!vtable->initialized)
3865 return FALSE;
3867 } else {
3869 * If we're compiling for shared code
3870 * the cctor will need to be run at aot method load time, for example,
3871 * or at the end of the compilation of the inlining method.
3873 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
3874 return FALSE;
3877 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3878 if (mono_arch_is_soft_float ()) {
3879 /* FIXME: */
3880 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3881 return FALSE;
3882 for (i = 0; i < sig->param_count; ++i)
3883 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3884 return FALSE;
3886 #endif
3888 if (g_list_find (cfg->dont_inline, method))
3889 return FALSE;
3891 if (mono_profiler_get_call_instrumentation_flags (method))
3892 return FALSE;
3894 if (mono_profiler_coverage_instrumentation_enabled (method))
3895 return FALSE;
3897 return TRUE;
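/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires emitting a check
 * which runs the class initializer of KLASS first.
 */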
3900 static gboolean
3901 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
3903 if (!cfg->compile_aot) {
3904 g_assert (vtable);
3905 if (vtable->initialized)
3906 return FALSE;
3909 if (mono_class_is_before_field_init (klass)) {
3910 if (cfg->method == method)
3911 return FALSE;
3914 if (!mono_class_needs_cctor_run (klass, method))
3915 return FALSE;
3917 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
3918 /* The initialization is already done before the method is called */
3919 return FALSE;
3921 return TRUE;
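/*
 * mini_emit_sext_index_reg:
 *
 *   Return a register holding INDEX converted to the native pointer width,
 * emitting a sign extension (or an i8->i4 conversion on 32 bit) when needed,
 * so the index can take part in address computations.
 */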
static int
3925 mini_emit_sext_index_reg (MonoCompile *cfg, MonoInst *index)
3927 int index_reg = index->dreg;
3928 int index2_reg;
3930 #if SIZEOF_REGISTER == 8
3931 /* The array reg is 64 bits but the index reg is only 32 */
3932 if (COMPILE_LLVM (cfg)) {
3934 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
3935 * during OP_BOUNDS_CHECK decomposition, and in the implementation
3936 * of OP_X86_LEA for llvm.
3938 index2_reg = index_reg;
3939 } else {
3940 index2_reg = alloc_preg (cfg);
3941 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3943 #else
3944 if (index->type == STACK_I8) {
3945 index2_reg = alloc_preg (cfg);
3946 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3947 } else {
3948 index2_reg = index_reg;
3950 #endif
3952 return index2_reg;
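/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of the element at INDEX of the rank 1 array ARR, i.e.
 *   addr = arr + MONO_STRUCT_OFFSET (MonoArray, vector) + index * element_size,
 * with an optional bounds check. On x86/amd64 a single LEA covers the common
 * element sizes; for gsharedvt the element size is loaded from the rgctx.
 */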
3955 MonoInst*
3956 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3958 MonoInst *ins;
3959 guint32 size;
3960 int mult_reg, add_reg, array_reg, index2_reg;
3961 int context_used;
3963 if (mini_is_gsharedvt_variable_klass (klass)) {
3964 size = -1;
3965 } else {
3966 mono_class_init_internal (klass);
3967 size = mono_class_array_element_size (klass);
3970 mult_reg = alloc_preg (cfg);
3971 array_reg = arr->dreg;
3973 index2_reg = mini_emit_sext_index_reg (cfg, index);
3975 if (bcheck)
3976 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3978 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3979 if (size == 1 || size == 2 || size == 4 || size == 8) {
3980 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3982 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
3983 ins->klass = m_class_get_element_class (klass);
3984 ins->type = STACK_MP;
3986 return ins;
3988 #endif
3990 add_reg = alloc_ireg_mp (cfg);
3992 if (size == -1) {
3993 MonoInst *rgctx_ins;
3995 /* gsharedvt */
3996 g_assert (cfg->gshared);
3997 context_used = mini_class_check_context_used (cfg, klass);
3998 g_assert (context_used);
3999 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4000 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4001 } else {
4002 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4004 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4005 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4006 ins->klass = m_class_get_element_class (klass);
4007 ins->type = STACK_MP;
4008 MONO_ADD_INS (cfg->cbb, ins);
4010 return ins;
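/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of an element of the rank 2 array ARR: load the bounds
 * array, subtract the lower bound of each dimension, range check both indexes,
 * then address (realidx1 * length2 + realidx2) * element_size into the vector.
 */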
4013 static MonoInst*
4014 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4016 int bounds_reg = alloc_preg (cfg);
4017 int add_reg = alloc_ireg_mp (cfg);
4018 int mult_reg = alloc_preg (cfg);
4019 int mult2_reg = alloc_preg (cfg);
4020 int low1_reg = alloc_preg (cfg);
4021 int low2_reg = alloc_preg (cfg);
4022 int high1_reg = alloc_preg (cfg);
4023 int high2_reg = alloc_preg (cfg);
4024 int realidx1_reg = alloc_preg (cfg);
4025 int realidx2_reg = alloc_preg (cfg);
4026 int sum_reg = alloc_preg (cfg);
4027 int index1, index2, tmpreg;
4028 MonoInst *ins;
4029 guint32 size;
4031 mono_class_init_internal (klass);
4032 size = mono_class_array_element_size (klass);
4034 index1 = index_ins1->dreg;
4035 index2 = index_ins2->dreg;
4037 #if SIZEOF_REGISTER == 8
4038 /* The array reg is 64 bits but the index reg is only 32 */
4039 if (COMPILE_LLVM (cfg)) {
4040 /* Not needed */
4041 } else {
4042 tmpreg = alloc_preg (cfg);
4043 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4044 index1 = tmpreg;
4045 tmpreg = alloc_preg (cfg);
4046 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4047 index2 = tmpreg;
4049 #else
4050 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4051 tmpreg = -1;
4052 #endif
4054 /* range checking */
4055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4056 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4058 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4059 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4060 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4061 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4062 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4063 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4064 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4066 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4067 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4068 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4069 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4070 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4071 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4072 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4074 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4075 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4076 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4077 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4078 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4080 ins->type = STACK_MP;
4081 ins->klass = klass;
4082 MONO_ADD_INS (cfg->cbb, ins);
4084 return ins;
4087 static MonoInst*
4088 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, guchar *ip, gboolean is_set)
4090 int rank;
4091 MonoInst *addr;
4092 MonoMethod *addr_method;
4093 int element_size;
4094 MonoClass *eclass = m_class_get_element_class (cmethod->klass);
4096 rank = mono_method_signature_internal (cmethod)->param_count - (is_set? 1: 0);
4098 if (rank == 1)
4099 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4101 /* emit_ldelema_2 depends on OP_LMUL */
4102 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4103 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4106 if (mini_is_gsharedvt_variable_klass (eclass))
4107 element_size = 0;
4108 else
4109 element_size = mono_class_array_element_size (eclass);
4110 addr_method = mono_marshal_get_array_address (rank, element_size);
4111 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4113 return addr;
4116 static gboolean
4117 mini_class_is_reference (MonoClass *klass)
4119 return mini_type_is_reference (m_class_get_byval_arg (klass));
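/*
 * mini_emit_array_store:
 *
 *   Emit a stelem from SP (array, index, value). For reference element types,
 * SAFETY_CHECKS routes the store through the virtual stelemref helper, which
 * performs the array covariance check; otherwise the value is stored through
 * the element address, followed by a write barrier for reference elements.
 */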
4122 MonoInst*
4123 mini_emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4125 if (safety_checks && mini_class_is_reference (klass) &&
4126 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4127 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4128 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4129 MonoInst *iargs [3];
4131 if (!helper->slot)
4132 mono_class_setup_vtable (obj_array);
4133 g_assert (helper->slot);
4135 if (sp [0]->type != STACK_OBJ)
4136 return NULL;
4137 if (sp [2]->type != STACK_OBJ)
4138 return NULL;
4140 iargs [2] = sp [2];
4141 iargs [1] = sp [1];
4142 iargs [0] = sp [0];
4144 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4145 } else {
4146 MonoInst *ins;
4148 if (mini_is_gsharedvt_variable_klass (klass)) {
4149 MonoInst *addr;
4151 // FIXME-VT: OP_ICONST optimization
4152 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4153 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
4154 ins->opcode = OP_STOREV_MEMBASE;
4155 } else if (sp [1]->opcode == OP_ICONST) {
4156 int array_reg = sp [0]->dreg;
4157 int index_reg = sp [1]->dreg;
4158 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4160 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4161 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4163 if (safety_checks)
4164 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4165 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset, sp [2]->dreg);
4166 } else {
4167 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4168 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0, sp [2]->dreg);
4169 if (mini_class_is_reference (klass))
4170 mini_emit_write_barrier (cfg, addr, sp [2]);
4172 return ins;
4176 MonoInst*
4177 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
4179 MonoInst *ins = NULL;
4180 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4181 MONO_ADD_INS (cfg->cbb, ins);
4182 ins->backend.memory_barrier_kind = kind;
4184 return ins;
4188 * This entry point could be used later for arbitrary method
4189 * redirection.
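 * Currently the only redirection is routing String.InternalAllocateStr () to
 * the GC's managed string allocator, when one is available.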
4191 inline static MonoInst*
4192 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4193 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
4195 if (method->klass == mono_defaults.string_class) {
4196 /* managed string allocation support */
4197 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
4198 MonoInst *iargs [2];
4199 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, method->klass, &cfg->error);
4200 MonoMethod *managed_alloc = NULL;
4202 mono_error_assert_ok (&cfg->error); /* Should not fail since it is System.String */
4203 #ifndef MONO_CROSS_COMPILE
4204 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
4205 #endif
4206 if (!managed_alloc)
4207 return NULL;
4208 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4209 iargs [1] = args [0];
4210 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
4213 return NULL;
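/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into newly created local variables, which
 * then serve as cfg->args while the callee is being inlined.
 */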
4216 static void
4217 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4219 MonoInst *store, *temp;
4220 int i;
4222 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4223 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4226 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4227 * would be different from the MonoInsts used to represent arguments, and
4228 * the ldelema implementation can't deal with that.
4229 * Solution: When ldelema is used on an inline argument, create a var for
4230 * it, emit ldelema on that var, and emit the saving code below in
4231 * inline_method () if needed.
4233 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4234 cfg->args [i] = temp;
4235 /* This uses cfg->args [i] which is set by the preceding line */
4236 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4237 store->cil_code = sp [0]->cil_code;
4238 sp++;
4242 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4243 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4245 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4246 static gboolean
4247 check_inline_called_method_name_limit (MonoMethod *called_method)
4249 int strncmp_result;
4250 static const char *limit = NULL;
4252 if (limit == NULL) {
4253 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4255 if (limit_string != NULL)
4256 limit = limit_string;
4257 else
4258 limit = "";
4261 if (limit [0] != '\0') {
4262 char *called_method_name = mono_method_full_name (called_method, TRUE);
4264 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4265 g_free (called_method_name);
4267 //return (strncmp_result <= 0);
4268 return (strncmp_result == 0);
4269 } else {
4270 return TRUE;
4273 #endif
4275 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4276 static gboolean
4277 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4279 int strncmp_result;
4280 static const char *limit = NULL;
4282 if (limit == NULL) {
4283 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4284 if (limit_string != NULL) {
4285 limit = limit_string;
4286 } else {
4287 limit = "";
4291 if (limit [0] != '\0') {
4292 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4294 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4295 g_free (caller_method_name);
4297 //return (strncmp_result <= 0);
4298 return (strncmp_result == 0);
4299 } else {
4300 return TRUE;
4303 #endif
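/*
 * emit_init_rvar:
 *
 *   Emit code to initialize the vreg DREG to the zero value of RTYPE, picking
 * the constant or vzero opcode matching the type's stack type.
 */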
4305 static void
4306 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
4308 static double r8_0 = 0.0;
4309 static float r4_0 = 0.0;
4310 MonoInst *ins;
4311 int t;
4313 rtype = mini_get_underlying_type (rtype);
4314 t = rtype->type;
4316 if (rtype->byref) {
4317 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
4318 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
4319 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4320 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
4321 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
4322 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
4323 MONO_INST_NEW (cfg, ins, OP_R4CONST);
4324 ins->type = STACK_R4;
4325 ins->inst_p0 = (void*)&r4_0;
4326 ins->dreg = dreg;
4327 MONO_ADD_INS (cfg->cbb, ins);
4328 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
4329 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4330 ins->type = STACK_R8;
4331 ins->inst_p0 = (void*)&r8_0;
4332 ins->dreg = dreg;
4333 MONO_ADD_INS (cfg->cbb, ins);
4334 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
4335 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
4336 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
4337 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
4338 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type_internal (rtype));
4339 } else {
4340 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
4344 static void
4345 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
4347 int t;
4349 rtype = mini_get_underlying_type (rtype);
4350 t = rtype->type;
4352 if (rtype->byref) {
4353 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
4354 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
4355 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
4356 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
4357 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
4358 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
4359 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
4360 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
4361 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
4362 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
4363 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
4364 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
4365 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
4366 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
4367 } else {
4368 emit_init_rvar (cfg, dreg, rtype);
4372 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
4373 static void
4374 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
4376 MonoInst *var = cfg->locals [local];
4377 if (COMPILE_SOFT_FLOAT (cfg)) {
4378 MonoInst *store;
4379 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
4380 emit_init_rvar (cfg, reg, type);
4381 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
4382 } else {
4383 if (init)
4384 emit_init_rvar (cfg, var->dreg, type);
4385 else
4386 emit_dummy_init_rvar (cfg, var->dreg, type);
int
4391 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
4393 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
4397 * inline_method:
4399 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
4401 static int
4402 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4403 guchar *ip, guint real_offset, gboolean inline_always)
4405 ERROR_DECL (error);
4406 MonoInst *ins, *rvar = NULL;
4407 MonoMethodHeader *cheader;
4408 MonoBasicBlock *ebblock, *sbblock;
4409 int i, costs;
4410 MonoMethod *prev_inlined_method;
4411 MonoInst **prev_locals, **prev_args;
4412 MonoType **prev_arg_types;
4413 guint prev_real_offset;
4414 GHashTable *prev_cbb_hash;
4415 MonoBasicBlock **prev_cil_offset_to_bb;
4416 MonoBasicBlock *prev_cbb;
4417 const guchar *prev_ip;
4418 guchar *prev_cil_start;
4419 guint32 prev_cil_offset_to_bb_len;
4420 MonoMethod *prev_current_method;
4421 MonoGenericContext *prev_generic_context;
4422 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
4424 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4426 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4427 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4428 return 0;
4429 #endif
4430 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4431 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4432 return 0;
4433 #endif
4435 if (!fsig)
4436 fsig = mono_method_signature_internal (cmethod);
4438 if (cfg->verbose_level > 2)
4439 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4441 if (!cmethod->inline_info) {
4442 cfg->stat_inlineable_methods++;
4443 cmethod->inline_info = 1;
4446 /* allocate local variables */
4447 cheader = mono_method_get_header_checked (cmethod, error);
4448 if (!cheader) {
4449 if (inline_always) {
4450 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
4451 mono_error_move (&cfg->error, error);
4452 } else {
4453 mono_error_cleanup (error);
4455 return 0;
4458 /* Must verify before creating locals, as broken IL can cause the JIT to assert. */
4459 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4460 mono_metadata_free_mh (cheader);
4461 return 0;
4464 /* allocate space to store the return value */
4465 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4466 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4469 prev_locals = cfg->locals;
4470 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4471 for (i = 0; i < cheader->num_locals; ++i)
4472 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4474 /* allocate start and end blocks */
4475 /* This is needed so if the inline is aborted, we can clean up */
4476 NEW_BBLOCK (cfg, sbblock);
4477 sbblock->real_offset = real_offset;
4479 NEW_BBLOCK (cfg, ebblock);
4480 ebblock->block_num = cfg->num_bblocks++;
4481 ebblock->real_offset = real_offset;
4483 prev_args = cfg->args;
4484 prev_arg_types = cfg->arg_types;
4485 prev_inlined_method = cfg->inlined_method;
4486 prev_ret_var_set = cfg->ret_var_set;
4487 prev_real_offset = cfg->real_offset;
4488 prev_cbb_hash = cfg->cbb_hash;
4489 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4490 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4491 prev_cil_start = cfg->cil_start;
4492 prev_ip = cfg->ip;
4493 prev_cbb = cfg->cbb;
4494 prev_current_method = cfg->current_method;
4495 prev_generic_context = cfg->generic_context;
4496 prev_disable_inline = cfg->disable_inline;
4498 cfg->inlined_method = cmethod;
4499 cfg->ret_var_set = FALSE;
4500 cfg->inline_depth ++;
4502 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
4503 virtual_ = TRUE;
4505 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
4507 ret_var_set = cfg->ret_var_set;
4509 cfg->inlined_method = prev_inlined_method;
4510 cfg->real_offset = prev_real_offset;
4511 cfg->cbb_hash = prev_cbb_hash;
4512 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4513 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4514 cfg->cil_start = prev_cil_start;
4515 cfg->ip = prev_ip;
4516 cfg->locals = prev_locals;
4517 cfg->args = prev_args;
4518 cfg->arg_types = prev_arg_types;
4519 cfg->current_method = prev_current_method;
4520 cfg->generic_context = prev_generic_context;
4521 cfg->ret_var_set = prev_ret_var_set;
4522 cfg->disable_inline = prev_disable_inline;
4523 cfg->inline_depth --;
4525 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
4526 if (cfg->verbose_level > 2)
4527 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4529 mono_error_assert_ok (&cfg->error);
4531 cfg->stat_inlined_methods++;
4533 /* always add some code to avoid block split failures */
4534 MONO_INST_NEW (cfg, ins, OP_NOP);
4535 MONO_ADD_INS (prev_cbb, ins);
4537 prev_cbb->next_bb = sbblock;
4538 link_bblock (cfg, prev_cbb, sbblock);
4541 * Get rid of the begin and end bblocks if possible to aid local
4542 * optimizations.
4544 if (prev_cbb->out_count == 1)
4545 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4547 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4548 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4550 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4551 MonoBasicBlock *prev = ebblock->in_bb [0];
4553 if (prev->next_bb == ebblock) {
4554 mono_merge_basic_blocks (cfg, prev, ebblock);
4555 cfg->cbb = prev;
4556 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4557 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4558 cfg->cbb = prev_cbb;
4560 } else {
4561 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
4562 cfg->cbb = ebblock;
4564 } else {
4566 * It's possible that the rvar is set in some prev bblock, but not in others.
4567 * (#1835).
4569 if (rvar) {
4570 MonoBasicBlock *bb;
4572 for (i = 0; i < ebblock->in_count; ++i) {
4573 bb = ebblock->in_bb [i];
4575 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
4576 cfg->cbb = bb;
4578 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
4583 cfg->cbb = ebblock;
4586 if (rvar) {
4588 * If the inlined method contains only a throw, then the ret var is not
4589 * set, so set it to a dummy value.
4591 if (!ret_var_set)
4592 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
4594 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4595 *sp++ = ins;
4597 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4598 return costs + 1;
4599 } else {
4600 if (cfg->verbose_level > 2) {
4601 const char *msg = mono_error_get_message (&cfg->error);
4602 printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod, TRUE), costs, msg ? msg : "");
4604 cfg->exception_type = MONO_EXCEPTION_NONE;
4606 clear_cfg_error (cfg);
4608 /* This gets rid of the newly added bblocks */
4609 cfg->cbb = prev_cbb;
4611 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4612 return 0;
4616 * Some of these comments may well be out-of-date.
4617 * Design decisions: we do a single pass over the IL code (and we do bblock
4618 * splitting/merging in the few cases when it's required: a back jump to an IL
4619 * address that was not already seen as a bblock starting point).
4620 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4621 * Complex operations are decomposed into simpler ones right away. We need to let the
4622 * arch-specific code peek and poke inside this process somehow (except when the
4623 * optimizations can take advantage of the full semantic info of coarse opcodes).
4624 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4625 * MonoInst->opcode initially is the IL opcode or some simplification of that
4626 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4627 * opcode with value bigger than OP_LAST.
4628 * At this point the IR can be handed over to an interpreter, a dumb code generator
4629 * or to the optimizing code generator that will translate it to SSA form.
4631 * Profiling directed optimizations.
4632 * We may compile by default with few or no optimizations and instrument the code,
4633 * or the user may indicate which methods to optimize the most, either in a config file
4634 * or through repeated runs where the compiler applies the optimizations offline to
4635 * each method and then decides if it was worth it.
4638 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4639 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4640 #define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED
4641 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4642 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4643 #define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED
4644 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4645 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
4647 /* opcode offset from the br.s style opcodes to the corresponding br ones */
4648 #define BIG_BRANCH_OFFSET 13
4650 static gboolean
4651 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4653 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4655 return b == NULL || b == bb;
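/*
 * get_basic_blocks:
 *
 *   Do a pass over the IL between START and END, creating a bblock at every
 * branch target and fall-through point, and marking the bblocks containing a
 * throw as out-of-line. Return nonzero and set *pos on invalid IL.
 */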
4658 static int
4659 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, guchar *start, guchar *end, guchar **pos)
4661 guchar *ip = start;
4662 guchar *target;
4663 int i;
4664 guint cli_addr;
4665 MonoBasicBlock *bblock;
4666 const MonoOpcode *opcode;
4668 while (ip < end) {
4669 cli_addr = ip - start;
4670 i = mono_opcode_value ((const guint8 **)&ip, end);
4671 if (i < 0)
4672 UNVERIFIED;
4673 opcode = &mono_opcodes [i];
4674 switch (opcode->argument) {
4675 case MonoInlineNone:
4676 ip++;
4677 break;
4678 case MonoInlineString:
4679 case MonoInlineType:
4680 case MonoInlineField:
4681 case MonoInlineMethod:
4682 case MonoInlineTok:
4683 case MonoInlineSig:
4684 case MonoShortInlineR:
4685 case MonoInlineI:
4686 ip += 5;
4687 break;
4688 case MonoInlineVar:
4689 ip += 3;
4690 break;
4691 case MonoShortInlineVar:
4692 case MonoShortInlineI:
4693 ip += 2;
4694 break;
4695 case MonoShortInlineBrTarget:
4696 target = start + cli_addr + 2 + (signed char)ip [1];
4697 GET_BBLOCK (cfg, bblock, target);
4698 ip += 2;
4699 if (ip < end)
4700 GET_BBLOCK (cfg, bblock, ip);
4701 break;
4702 case MonoInlineBrTarget:
4703 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4704 GET_BBLOCK (cfg, bblock, target);
4705 ip += 5;
4706 if (ip < end)
4707 GET_BBLOCK (cfg, bblock, ip);
4708 break;
4709 case MonoInlineSwitch: {
4710 guint32 n = read32 (ip + 1);
4711 guint32 j;
4712 ip += 5;
4713 cli_addr += 5 + 4 * n;
4714 target = start + cli_addr;
4715 GET_BBLOCK (cfg, bblock, target);
4717 for (j = 0; j < n; ++j) {
4718 target = start + cli_addr + (gint32)read32 (ip);
4719 GET_BBLOCK (cfg, bblock, target);
4720 ip += 4;
4722 break;
4724 case MonoInlineR:
4725 case MonoInlineI8:
4726 ip += 9;
4727 break;
4728 default:
4729 g_assert_not_reached ();
4732 if (i == CEE_THROW) {
4733 guchar *bb_start = ip - 1;
4735 /* Find the start of the bblock containing the throw */
4736 bblock = NULL;
4737 while ((bb_start >= start) && !bblock) {
4738 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4739 bb_start --;
4741 if (bblock)
4742 bblock->out_of_line = 1;
4745 return 0;
4746 unverified:
4747 exception_exit:
4748 *pos = ip;
4749 return 1;
4752 static inline MonoMethod *
4753 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
4755 MonoMethod *method;
4757 error_init (error);
4759 if (m->wrapper_type != MONO_WRAPPER_NONE) {
4760 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
4761 if (context) {
4762 method = mono_class_inflate_generic_method_checked (method, context, error);
4764 } else {
4765 method = mono_get_method_checked (m_class_get_image (m->klass), token, klass, context, error);
4768 return method;
4771 static inline MonoMethod *
4772 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4774 ERROR_DECL (error);
4775 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : error);
4777 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (m_class_get_byval_arg (method->klass))) {
4778 mono_error_set_bad_image (&cfg->error, m_class_get_image (cfg->method->klass), "Method with open type while not compiling gshared");
4779 method = NULL;
4782 if (!method && !cfg)
4783 mono_error_cleanup (error); /* FIXME don't swallow the error */
4785 return method;
4788 static inline MonoMethodSignature*
4789 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
4791 MonoMethodSignature *fsig;
4793 error_init (error);
4794 if (method->wrapper_type != MONO_WRAPPER_NONE) {
4795 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
4796 } else {
4797 fsig = mono_metadata_parse_signature_checked (m_class_get_image (method->klass), token, error);
4798 return_val_if_nok (error, NULL);
4800 if (context) {
4801 fsig = mono_inflate_generic_signature (fsig, context, error);
4803 return fsig;
4806 static MonoMethod*
4807 throw_exception (void)
4809 static MonoMethod *method = NULL;
4811 if (!method) {
4812 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4813 method = get_method_nofail (secman->securitymanager, "ThrowException", 1, 0);
4815 g_assert (method);
4816 return method;
4819 static void
4820 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4822 MonoMethod *thrower = throw_exception ();
4823 MonoInst *args [1];
4825 EMIT_NEW_PCONST (cfg, args [0], ex);
4826 mono_emit_method_call (cfg, thrower, args, NULL);
4830 * Return the original method if a wrapper is specified. We can only access
4831 * the custom attributes from the original method.
4833 static MonoMethod*
4834 get_original_method (MonoMethod *method)
4836 if (method->wrapper_type == MONO_WRAPPER_NONE)
4837 return method;
4839 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4840 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4841 return NULL;
4843 /* in other cases we need to find the original method */
4844 return mono_marshal_method_from_wrapper (method);
4847 static void
4848 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
4850 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4851 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4852 if (ex)
4853 emit_throw_exception (cfg, ex);
4856 static void
4857 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4859 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4860 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4861 if (ex)
4862 emit_throw_exception (cfg, ex);
4865 static guchar*
4866 il_read_op (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op)
4867 // If ip is desired_il_op, return the next ip, else NULL.
4869 if (G_LIKELY (ip < end) && G_UNLIKELY (*ip == first_byte)) {
4870 MonoOpcodeEnum il_op = MonoOpcodeEnum_Invalid;
4871 // mono_opcode_value_and_size updates ip, but not in the expected way.
4872 const guchar *temp_ip = ip;
4873 const int size = mono_opcode_value_and_size (&temp_ip, end, &il_op);
4874 return (G_LIKELY (size > 0) && G_UNLIKELY (il_op == desired_il_op)) ? (ip + size) : NULL;
4876 return NULL;
4879 static guchar*
4880 il_read_op_and_token (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, guint32 *token)
4882 ip = il_read_op (ip, end, first_byte, desired_il_op);
4883 if (ip)
4884 *token = read32 (ip - 4); // could be +1 or +2 from start
4885 return ip;
4888 static guchar*
4889 il_read_branch_and_target (guchar *ip, guchar *end, guchar first_byte, MonoOpcodeEnum desired_il_op, int size, guchar **target)
4891 ip = il_read_op (ip, end, first_byte, desired_il_op);
4892 if (ip) {
4893 gint32 delta = 0;
4894 switch (size) {
4895 case 1:
4896 delta = (signed char)ip [-1];
4897 break;
4898 case 4:
4899 delta = (gint32)read32 (ip - 4);
4900 break;
4902 // FIXME verify it is within the function and start of an instruction.
4903 *target = ip + delta;
4904 return ip;
4906 return NULL;
4909 #define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target))
4910 #define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target))
4911 #define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target))
4912 #define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target))
4913 #define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP))
4914 #define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token))
4915 #define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token))
4916 #define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token))
4917 #define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token))
4918 #define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token))
4919 #define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token))
4922 * Check that the IL instructions at ip are the array initialization
4923 * sequence and return the pointer to the data and the size.
4925 static const char*
4926 initialize_array_data (MonoCompile *cfg, MonoMethod *method, gboolean aot, guchar *ip,
4927 guchar *end, MonoClass *klass, guint32 len, int *out_size,
4928 guint32 *out_field_token, MonoOpcodeEnum *il_op, guchar **next_ip)
4931 * newarr[System.Int32]
4932 * dup
4933 * ldtoken field valuetype ...
4934 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
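 *
 * This is typically what the C# compiler emits for an array initializer like
 * 'new int [] { 1, 2, 3 }', with the raw data stored in an RVA field of
 * <PrivateImplementationDetails>.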
4937 guint32 token;
4938 guint32 field_token;
4940 if ((ip = il_read_dup (ip, end))
4941 && ip_in_bb (cfg, cfg->cbb, ip)
4942 && (ip = il_read_ldtoken (ip, end, &field_token))
4943 && IS_FIELD_DEF (field_token)
4944 && ip_in_bb (cfg, cfg->cbb, ip)
4945 && (ip = il_read_call (ip, end, &token))) {
4946 ERROR_DECL (error);
4947 guint32 rva;
4948 const char *data_ptr;
4949 int size = 0;
4950 MonoMethod *cmethod;
4951 MonoClass *dummy_class;
4952 MonoClassField *field = mono_field_from_token_checked (m_class_get_image (method->klass), field_token, &dummy_class, NULL, error);
4953 int dummy_align;
4955 if (!field) {
4956 mono_error_cleanup (error); /* FIXME don't swallow the error */
4957 return NULL;
4960 *out_field_token = field_token;
4962 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4963 if (!cmethod)
4964 return NULL;
4965 if (strcmp (cmethod->name, "InitializeArray") || strcmp (m_class_get_name (cmethod->klass), "RuntimeHelpers") || m_class_get_image (cmethod->klass) != mono_defaults.corlib)
4966 return NULL;
4967 switch (mini_get_underlying_type (m_class_get_byval_arg (klass))->type) {
4968 case MONO_TYPE_I1:
4969 case MONO_TYPE_U1:
4970 size = 1; break;
4971 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4972 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4973 case MONO_TYPE_I2:
4974 case MONO_TYPE_U2:
4975 size = 2; break;
4976 case MONO_TYPE_I4:
4977 case MONO_TYPE_U4:
4978 case MONO_TYPE_R4:
4979 size = 4; break;
4980 case MONO_TYPE_R8:
4981 case MONO_TYPE_I8:
4982 case MONO_TYPE_U8:
4983 size = 8; break;
4984 #endif
4985 default:
4986 return NULL;
4988 size *= len;
4989 if (size > mono_type_size (field->type, &dummy_align))
4990 return NULL;
4991 *out_size = size;
4992 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4993 MonoImage *method_klass_image = m_class_get_image (method->klass);
4994 if (!image_is_dynamic (method_klass_image)) {
4995 guint32 field_index = mono_metadata_token_index (field_token);
4996 mono_metadata_field_info (method_klass_image, field_index - 1, NULL, &rva, NULL);
4997 data_ptr = mono_image_rva_map (method_klass_image, rva);
4998 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4999 /* for aot code we do the lookup on load */
5000 if (aot && data_ptr)
5001 data_ptr = (const char *)GUINT_TO_POINTER (rva);
5002 } else {
5003 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5004 g_assert (!aot);
5005 data_ptr = mono_field_get_data (field);
5007 if (!data_ptr)
5008 return NULL;
5009 *il_op = MONO_CEE_CALL;
5010 *next_ip = ip;
5011 return data_ptr;
5013 return NULL;
5016 static void
5017 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, guchar *ip)
5019 ERROR_DECL (error);
5020 char *method_fname = mono_method_full_name (method, TRUE);
5021 char *method_code;
5022 MonoMethodHeader *header = mono_method_get_header_checked (method, error);
5024 if (!header) {
5025 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error));
5026 mono_error_cleanup (error);
5027 } else if (header->code_size == 0)
5028 method_code = g_strdup ("method body is empty.");
5029 else
5030 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5031 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
5032 g_free (method_fname);
5033 g_free (method_code);
5034 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
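/*
 * mono_type_to_stloc_coerce:
 *
 *   Return the conversion opcode needed to narrow the I4-widened value on the
 * evaluation stack when storing it to a local of type TYPE, or 0 if no
 * coercion is needed.
 */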
5037 guint32
5038 mono_type_to_stloc_coerce (MonoType *type)
5040 if (type->byref)
5041 return 0;
5043 type = mini_get_underlying_type (type);
5044 handle_enum:
5045 switch (type->type) {
5046 case MONO_TYPE_I1:
5047 return OP_ICONV_TO_I1;
5048 case MONO_TYPE_U1:
5049 return OP_ICONV_TO_U1;
5050 case MONO_TYPE_I2:
5051 return OP_ICONV_TO_I2;
5052 case MONO_TYPE_U2:
5053 return OP_ICONV_TO_U2;
5054 case MONO_TYPE_I4:
5055 case MONO_TYPE_U4:
5056 case MONO_TYPE_I:
5057 case MONO_TYPE_U:
5058 case MONO_TYPE_PTR:
5059 case MONO_TYPE_FNPTR:
5060 case MONO_TYPE_CLASS:
5061 case MONO_TYPE_STRING:
5062 case MONO_TYPE_OBJECT:
5063 case MONO_TYPE_SZARRAY:
5064 case MONO_TYPE_ARRAY:
5065 case MONO_TYPE_I8:
5066 case MONO_TYPE_U8:
5067 case MONO_TYPE_R4:
5068 case MONO_TYPE_R8:
5069 case MONO_TYPE_TYPEDBYREF:
5070 case MONO_TYPE_GENERICINST:
5071 return 0;
5072 case MONO_TYPE_VALUETYPE:
5073 if (m_class_is_enumtype (type->data.klass)) {
5074 type = mono_class_enum_basetype_internal (type->data.klass);
5075 goto handle_enum;
5077 return 0;
5078 case MONO_TYPE_VAR:
5079 case MONO_TYPE_MVAR: //TODO I believe we don't need to handle gsharedvt as there won't be a match and, for example, u1 is not covariant to u32
5080 return 0;
5081 default:
5082 g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type->type);
5084 return -1;
5087 static void
5088 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5090 MonoInst *ins;
5091 guint32 coerce_op = mono_type_to_stloc_coerce (header->locals [n]);
5093 if (coerce_op) {
5094 if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
5095 if (cfg->verbose_level > 2)
5096 printf ("Found existing coercing is enough for stloc\n");
5097 } else {
5098 MONO_INST_NEW (cfg, ins, coerce_op);
5099 ins->dreg = alloc_ireg (cfg);
5100 ins->sreg1 = sp [0]->dreg;
5101 ins->type = STACK_I4;
5102 ins->klass = mono_class_from_mono_type_internal (header->locals [n]);
5103 MONO_ADD_INS (cfg->cbb, ins);
5104 *sp = mono_decompose_opcode (cfg, ins);
5109 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5110 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5111 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5112 /* Optimize reg-reg moves away */
5114 * Can't optimize other opcodes, since sp[0] might point to
5115 * the last ins of a decomposed opcode.
5117 sp [0]->dreg = (cfg)->locals [n]->dreg;
5118 } else {
5119 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5123 static void
5124 emit_starg_ir (MonoCompile *cfg, MonoInst **sp, int n)
5126 MonoInst *ins;
5127 guint32 coerce_op = mono_type_to_stloc_coerce (cfg->arg_types [n]);
5129 if (coerce_op) {
5130 if (cfg->cbb->last_ins == sp [0] && sp [0]->opcode == coerce_op) {
5131 if (cfg->verbose_level > 2)
5132 printf ("Found existing coercing is enough for starg\n");
5133 } else {
5134 MONO_INST_NEW (cfg, ins, coerce_op);
5135 ins->dreg = alloc_ireg (cfg);
5136 ins->sreg1 = sp [0]->dreg;
5137 ins->type = STACK_I4;
5138 ins->klass = mono_class_from_mono_type_internal (cfg->arg_types [n]);
5139 MONO_ADD_INS (cfg->cbb, ins);
5140 *sp = mono_decompose_opcode (cfg, ins);
5144 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5148 * ldloca inhibits many optimizations so try to get rid of it in common
5149 * cases.
5151 static guchar *
5152 emit_optimized_ldloca_ir (MonoCompile *cfg, guchar *ip, guchar *end, int local)
5154 guint32 token;
5155 MonoClass *klass;
5156 MonoType *type;
5158 guchar *start = ip;
5160 if ((ip = il_read_initobj (ip, end, &token)) && ip_in_bb (cfg, cfg->cbb, start + 1)) {
5161 /* From the INITOBJ case */
5162 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5163 CHECK_TYPELOAD (klass);
5164 type = mini_get_underlying_type (m_class_get_byval_arg (klass));
5165 emit_init_local (cfg, local, type, TRUE);
5166 return ip;
5168 exception_exit:
5169 return NULL;
5172 static MonoInst*
5173 handle_call_res_devirt (MonoCompile *cfg, MonoMethod *cmethod, MonoInst *call_res)
5176 * Devirt EqualityComparer.Default.Equals () calls for some types.
5177 * The corefx code expects these calls to be devirtualized.
5178 * This depends on the implementation of EqualityComparer.Default, which is
5179 * in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs
5181 if (FALSE &&
5182 m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
5183 !strcmp (m_class_get_name (cmethod->klass), "EqualityComparer`1") &&
5184 !strcmp (cmethod->name, "get_Default")) {
5185 MonoType *param_type = mono_class_get_generic_class (cmethod->klass)->context.class_inst->type_argv [0];
5186 MonoClass *inst;
5187 MonoGenericContext ctx;
5188 MonoType *args [16];
5189 ERROR_DECL (error);
5191 memset (&ctx, 0, sizeof (ctx));
5193 args [0] = param_type;
5194 ctx.class_inst = mono_metadata_get_generic_inst (1, args);
5196 inst = mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx, error);
5197 mono_error_assert_ok (error);
5199 /* EqualityComparer<T>.Default returns specific types depending on T */
5200 // FIXME: Add more
5201 /* 1. Implements IEquatable<T> */
5202 /*
5203 * Can't use this for string as it might use a different comparer:
5204 *
5205 * #if MOBILE
5206 * // Breaks .net serialization compatibility
5207 * if (t == typeof (string))
5208 * return (EqualityComparer<T>)(object)new InternalStringComparer ();
5209 * #endif
5210 */
5211 if (mono_class_is_assignable_from_internal (inst, mono_class_from_mono_type_internal (param_type)) && param_type->type != MONO_TYPE_STRING) {
5212 MonoInst *typed_objref;
5213 MonoClass *gcomparer_inst;
5215 memset (&ctx, 0, sizeof (ctx));
5217 args [0] = param_type;
5218 ctx.class_inst = mono_metadata_get_generic_inst (1, args);
5220 MonoClass *gcomparer = mono_class_get_geqcomparer_class ();
5221 g_assert (gcomparer);
5222 gcomparer_inst = mono_class_inflate_generic_class_checked (gcomparer, &ctx, error);
5223 mono_error_assert_ok (error);
5225 MONO_INST_NEW (cfg, typed_objref, OP_TYPED_OBJREF);
5226 typed_objref->type = STACK_OBJ;
5227 typed_objref->dreg = alloc_ireg_ref (cfg);
5228 typed_objref->sreg1 = call_res->dreg;
5229 typed_objref->klass = gcomparer_inst;
5230 MONO_ADD_INS (cfg->cbb, typed_objref);
5232 call_res = typed_objref;
5234 /* Force decompose */
5235 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
5236 cfg->cbb->needs_decompose = TRUE;
5240 return call_res;
5243 static gboolean
5244 is_exception_class (MonoClass *klass)
5246 if (G_LIKELY (m_class_get_supertypes (klass)))
5247 return mono_class_has_parent_fast (klass, mono_defaults.exception_class);
5248 while (klass) {
5249 if (klass == mono_defaults.exception_class)
5250 return TRUE;
5251 klass = m_class_get_parent (klass);
5253 return FALSE;
5256 /*
5257 * is_jit_optimizer_disabled:
5258 *
5259 * Determine whether M's assembly has a DebuggableAttribute with the
5260 * IsJITOptimizerDisabled flag set.
5261 */
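/*
* For illustration (not from the original source): the attribute decoded
* below is emitted by compilers in debug builds, e.g.
*
*     [assembly: Debuggable (true, true)]
*
* using the two-boolean constructor DebuggableAttribute (bool
* isJITTrackingEnabled, bool isJITOptimizerDisabled); the second argument
* is the flag read here.
*/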
5262 static gboolean
5263 is_jit_optimizer_disabled (MonoMethod *m)
5265 ERROR_DECL (error);
5266 MonoAssembly *ass = m_class_get_image (m->klass)->assembly;
5267 MonoCustomAttrInfo* attrs;
5268 MonoClass *klass;
5269 int i;
5270 gboolean val = FALSE;
5272 g_assert (ass);
5273 if (ass->jit_optimizer_disabled_inited)
5274 return ass->jit_optimizer_disabled;
5276 klass = mono_class_try_get_debuggable_attribute_class ();
5278 if (!klass) {
5279 /* Linked away */
5280 ass->jit_optimizer_disabled = FALSE;
5281 mono_memory_barrier ();
5282 ass->jit_optimizer_disabled_inited = TRUE;
5283 return FALSE;
5286 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, error);
5287 mono_error_cleanup (error); /* FIXME don't swallow the error */
5288 if (attrs) {
5289 for (i = 0; i < attrs->num_attrs; ++i) {
5290 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5291 const gchar *p;
5292 MonoMethodSignature *sig;
5294 if (!attr->ctor || attr->ctor->klass != klass)
5295 continue;
5296 /* Decode the attribute. See reflection.c */
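/*
* A sketch of the blob layout assumed below (ECMA-335 II.23.3): a 2-byte
* little-endian prolog 0x0001, then the fixed ctor arguments in order;
* booleans are one byte each, so for Debuggable (bool, bool) the flag we
* want is the second byte after the prolog.
*/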
5297 p = (const char*)attr->data;
5298 g_assert (read16 (p) == 0x0001);
5299 p += 2;
5301 // FIXME: Support named parameters
5302 sig = mono_method_signature_internal (attr->ctor);
5303 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5304 continue;
5305 /* Two boolean arguments */
5306 p ++;
5307 val = *p;
5309 mono_custom_attrs_free (attrs);
5312 ass->jit_optimizer_disabled = val;
5313 mono_memory_barrier ();
5314 ass->jit_optimizer_disabled_inited = TRUE;
5316 return val;
5319 gboolean
5320 mono_is_supported_tailcall_helper (gboolean value, const char *svalue)
5322 if (!value)
5323 mono_tailcall_print ("%s %s\n", __func__, svalue);
5324 return value;
5327 static gboolean
5328 mono_is_not_supported_tailcall_helper (gboolean value, const char *svalue, MonoMethod *method, MonoMethod *cmethod)
5330 // Return value, printing if it inhibits tailcall.
5332 if (value && mono_tailcall_print_enabled ()) {
5333 const char *lparen = strchr (svalue, ' ') ? "(" : "";
5334 const char *rparen = *lparen ? ")" : "";
5335 mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__, method->name, cmethod->name, lparen, svalue, rparen, value);
5337 return value;
5340 #define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod))
5342 static gboolean
5343 is_supported_tailcall (MonoCompile *cfg, const guint8 *ip, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig,
5344 gboolean virtual_, gboolean extra_arg, gboolean *ptailcall_calli)
5346 // Some checks apply to "regular", some to "calli", some to both.
5347 // To ease burden on caller, always compute regular and calli.
5349 gboolean tailcall = TRUE;
5350 gboolean tailcall_calli = TRUE;
5352 if (IS_NOT_SUPPORTED_TAILCALL (virtual_ && !cfg->backend->have_op_tailcall_membase))
5353 tailcall = FALSE;
5355 if (IS_NOT_SUPPORTED_TAILCALL (!cfg->backend->have_op_tailcall_reg))
5356 tailcall_calli = FALSE;
5358 if (!tailcall && !tailcall_calli)
5359 goto exit;
5361 // FIXME in calli, there is no type for the this parameter,
5362 // so we assume it might be a valuetype; in future we should issue a range
5363 // check to rule out pointing to the frame (for other reference parameters also)
5365 if ( IS_NOT_SUPPORTED_TAILCALL (cmethod && fsig->hasthis && m_class_is_valuetype (cmethod->klass)) // This might point to the current method's stack. Emit range check?
5366 || IS_NOT_SUPPORTED_TAILCALL (cmethod && (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL))
5367 || IS_NOT_SUPPORTED_TAILCALL (fsig->pinvoke) // i.e. if !cmethod (calli)
5368 || IS_NOT_SUPPORTED_TAILCALL (cfg->method->save_lmf)
5369 || IS_NOT_SUPPORTED_TAILCALL (!cmethod && fsig->hasthis) // FIXME could be valuetype to current frame; range check
5370 || IS_NOT_SUPPORTED_TAILCALL (cmethod && cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5372 // http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/
5374 // 1. Non-generic non-static methods of reference types have access to the
5375 // RGCTX via the “this” argument (this->vtable->rgctx).
5376 // 2. (a) Non-generic static methods of reference types and (b) non-generic methods
5377 // of value types need to be passed a pointer to the caller’s class’s VTable in the MONO_ARCH_RGCTX_REG register.
5378 // 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register.
5378 // 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register
5380 // That is what vtable_arg is here (always?).
5382 // Passing vtable_arg uses (requires?) a volatile non-parameter register,
5383 // such as AMD64 rax, r10, r11, or the return register on many architectures.
5384 // ARM32 does not always clearly have such a register. ARM32's return register
5385 // is a parameter register.
5386 // iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly
5387 // important. Linux/arm32 is less clear.
5388 // ARM32's scratch r12 might work but only with much collateral change.
5390 // Imagine F1 calls F2, and F2 tailcalls F3.
5391 // F2 and F3 are managed. F1 is native.
5392 // Without a tailcall, F2 can save and restore everything needed for F1.
5393 // However if the extra parameter were in a non-volatile, such as ARM32 V5/R8,
5394 // F3 cannot easily restore it for F1 in the current scheme, where the
5395 // extra parameter is not merely an extra parameter, but is passed
5396 // "outside of the ABI".
5398 // If all native to managed transitions are intercepted and wrapped (w/o tailcall),
5399 // then they can preserve this register and the rest of the managed callgraph
5400 // can treat it as volatile.
5402 // Interface method dispatch has the same problem (imt_arg).
5404 || IS_NOT_SUPPORTED_TAILCALL (extra_arg && !cfg->backend->have_volatile_non_param_register)
5405 || IS_NOT_SUPPORTED_TAILCALL (cfg->gsharedvt)
5407 tailcall_calli = FALSE;
5408 tailcall = FALSE;
5409 goto exit;
5412 for (int i = 0; i < fsig->param_count; ++i) {
5413 if (IS_NOT_SUPPORTED_TAILCALL (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)) {
5414 tailcall_calli = FALSE;
5415 tailcall = FALSE; // These can point to the current method's stack. Emit range check?
5416 goto exit;
5420 MonoMethodSignature *caller_signature;
5421 MonoMethodSignature *callee_signature;
5422 caller_signature = mono_method_signature_internal (method);
5423 callee_signature = cmethod ? mono_method_signature_internal (cmethod) : fsig;
5425 g_assert (caller_signature);
5426 g_assert (callee_signature);
5428 // Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped.
5429 // The main troublesome conversions are double <=> float.
5430 // CoreCLR allows some conversions here, such as integer truncation.
5431 // Likewise I <=> I4/I8 and U <=> U4/U8 would be ok, for matching sizes.
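// For illustration (not from the original source): a callee whose IL return
// type is float32, tail-called from a caller returning float64, would skip
// the float->double widening normally done in emit_move_return_value, hence
// the exact-match requirement below.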
5432 if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature->ret)->type != mini_get_underlying_type (callee_signature->ret)->type)
5433 || IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg, caller_signature, callee_signature, virtual_))) {
5434 tailcall_calli = FALSE;
5435 tailcall = FALSE;
5436 goto exit;
5439 /* Debugging support */
5440 #if 0
5441 if (!mono_debug_count ()) {
5442 tailcall_calli = FALSE;
5443 tailcall = FALSE;
5444 goto exit;
5446 #endif
5447 // See check_sp in mini_emit_calli_full.
5448 if (tailcall_calli && IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg)))
5449 tailcall_calli = FALSE;
5450 exit:
5451 mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n",
5452 mono_opcode_name (*ip), method->name, cmethod ? cmethod->name : "calli", tailcall, tailcall_calli,
5453 cfg->gshared, extra_arg, virtual_);
5455 *ptailcall_calli = tailcall_calli;
5456 return tailcall;
5459 /*
5460 * is_addressable_valuetype_load
5461 *
5462 * Returns true if a previous load can be done without doing an extra copy, given the new instruction ip and the type of the object being loaded, ldtype.
5463 */
5464 static gboolean
5465 is_addressable_valuetype_load (MonoCompile* cfg, guint8* ip, MonoType* ldtype)
5467 /* Avoid loading a struct just to load one of its fields */
5468 gboolean is_load_instruction = (*ip == CEE_LDFLD);
5469 gboolean is_in_previous_bb = ip_in_bb (cfg, cfg->cbb, ip);
5470 gboolean is_struct = MONO_TYPE_ISSTRUCT (ldtype);
5471 return is_load_instruction && is_in_previous_bb && is_struct;
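/*
* For illustration (not from the original source): for C# such as
*
*     int x = someLocalStruct.Field;
*
* this lets the following LDFLD consume the address of the struct instead
* of a full copy of it pushed on the evaluation stack.
*/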
5474 /*
5475 * handle_ctor_call:
5476 *
5477 * Handle calls made to ctors from NEWOBJ opcodes.
5478 */
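/*
* For illustration (not from the original source): C# `new Foo (42)`
* compiles to
*
*     ldc.i4.s 42
*     newobj   instance void Foo::.ctor(int32)
*
* and the ctor call made on behalf of that NEWOBJ is what is handled here.
*/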
5479 static void
5480 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
5481 MonoInst **sp, guint8 *ip, int *inline_costs)
5483 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
5485 if (m_class_is_valuetype (cmethod->klass) && mono_class_generic_sharing_enabled (cmethod->klass) &&
5486 mono_method_is_generic_sharable (cmethod, TRUE)) {
5487 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
5488 mono_class_vtable_checked (cfg->domain, cmethod->klass, &cfg->error);
5489 CHECK_CFG_ERROR;
5490 CHECK_TYPELOAD (cmethod->klass);
5492 vtable_arg = emit_get_rgctx_method (cfg, context_used,
5493 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
5494 } else {
5495 if (context_used) {
5496 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
5497 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
5498 } else {
5499 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, &cfg->error);
5500 CHECK_CFG_ERROR;
5501 CHECK_TYPELOAD (cmethod->klass);
5502 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
5507 /* Avoid virtual calls to ctors if possible */
5508 if (mono_class_is_marshalbyref (cmethod->klass))
5509 callvirt_this_arg = sp [0];
5511 if (cmethod && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
5512 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
5513 CHECK_CFG_EXCEPTION;
5514 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
5515 mono_method_check_inlining (cfg, cmethod) &&
5516 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
5517 int costs;
5519 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
5520 cfg->real_offset += 5;
5522 *inline_costs += costs - 5;
5523 } else {
5524 INLINE_FAILURE ("inline failure");
5525 // FIXME-VT: Clean this up
5526 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
5527 GSHAREDVT_FAILURE(*ip);
5528 mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
5530 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
5531 MonoInst *addr;
5533 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
5535 if (cfg->llvm_only) {
5536 // FIXME: Avoid initializing vtable_arg
5537 mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5538 } else {
5539 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
5541 } else if (context_used &&
5542 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
5543 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
5544 MonoInst *cmethod_addr;
5546 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
5548 if (cfg->llvm_only) {
5549 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
5550 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
5551 mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5552 } else {
5553 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
5554 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
5556 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
5558 } else {
5559 INLINE_FAILURE ("ctor call");
5560 ins = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
5561 callvirt_this_arg, NULL, vtable_arg);
5563 exception_exit:
5564 mono_error_exit:
5565 return;
5568 typedef struct {
5569 MonoMethod *method;
5570 gboolean inst_tailcall;
5571 } HandleCallData;
5573 /*
5574 * handle_constrained_call:
5575 *
5576 * Handle constrained calls. Return a MonoInst* representing the call or NULL.
5577 * May overwrite sp [0] and modify the ref_... parameters.
5578 */
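/*
* For illustration (not from the original source): the prefix handled here
* comes from generic code such as `void M<T> (T t) { t.ToString (); }`,
* which compiles to
*
*     ldarga.s t
*     constrained. !!T
*     callvirt instance string object::ToString ()
*/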
5579 static MonoInst*
5580 handle_constrained_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoClass *constrained_class, MonoInst **sp,
5581 HandleCallData *cdata, MonoMethod **ref_cmethod, gboolean *ref_virtual, gboolean *ref_emit_widen)
5583 MonoInst *ins, *addr;
5584 MonoMethod *method = cdata->method;
5585 gboolean constrained_partial_call = FALSE;
5586 gboolean constrained_is_generic_param =
5587 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
5588 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
5590 if (constrained_is_generic_param && cfg->gshared) {
5591 if (!mini_is_gsharedvt_klass (constrained_class)) {
5592 g_assert (!m_class_is_valuetype (cmethod->klass));
5593 if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class)))
5594 constrained_partial_call = TRUE;
5598 if (mini_is_gsharedvt_klass (constrained_class)) {
5599 if ((cmethod->klass != mono_defaults.object_class) && m_class_is_valuetype (constrained_class) && m_class_is_valuetype (cmethod->klass)) {
5600 /* The 'Own method' case below */
5601 } else if (m_class_get_image (cmethod->klass) != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !m_class_is_valuetype (cmethod->klass)) {
5602 /* 'The type parameter is instantiated as a reference type' case below. */
5603 } else {
5604 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, ref_emit_widen);
5605 CHECK_CFG_EXCEPTION;
5606 g_assert (ins);
5607 if (cdata->inst_tailcall) // FIXME
5608 mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method->name, cmethod->name);
5609 return ins;
5613 if (constrained_partial_call) {
5614 gboolean need_box = TRUE;
5616 /*
5617 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
5618 * called method is not known at compile time either. The called method could end up being
5619 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
5620 * to box the receiver.
5621 * A simple solution would be to box always and make a normal virtual call, but that would
5622 * be bad performance-wise.
5623 */
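/*
* For illustration (not from the original source): for `T t; ... t.GetType ();`
* the call lands on Object::GetType (), defined on a parent class, so a
* valuetype receiver must be boxed; a method defined on T itself needs no box.
*/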
5624 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
5625 /*
5626 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing is necessary.
5627 */
5628 need_box = FALSE;
5631 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) {
5632 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
5633 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5634 ins->klass = constrained_class;
5635 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5636 CHECK_CFG_EXCEPTION;
5637 } else if (need_box) {
5638 MonoInst *box_type;
5639 MonoBasicBlock *is_ref_bb, *end_bb;
5640 MonoInst *nonbox_call, *addr;
5642 /*
5643 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
5644 * if needed.
5645 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
5646 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
5647 */
5648 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
5650 NEW_BBLOCK (cfg, is_ref_bb);
5651 NEW_BBLOCK (cfg, end_bb);
5653 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
5654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
5655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
5657 /* Non-ref case */
5658 if (cfg->llvm_only)
5659 /* addr is an ftndesc in this case */
5660 nonbox_call = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5661 else
5662 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5664 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5666 /* Ref case */
5667 MONO_START_BB (cfg, is_ref_bb);
5668 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5669 ins->klass = constrained_class;
5670 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5671 CHECK_CFG_EXCEPTION;
5672 if (cfg->llvm_only)
5673 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5674 else
5675 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5679 MONO_START_BB (cfg, end_bb);
5680 cfg->cbb = end_bb;
5682 nonbox_call->dreg = ins->dreg;
5683 if (cdata->inst_tailcall) // FIXME
5684 mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method->name, cmethod->name);
5685 return ins;
5686 } else {
5687 g_assert (mono_class_is_interface (cmethod->klass));
5688 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
5689 if (cfg->llvm_only)
5690 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
5691 else
5692 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
5693 if (cdata->inst_tailcall) // FIXME
5694 mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method->name, cmethod->name);
5695 return ins;
5697 } else if (m_class_is_valuetype (constrained_class) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == m_class_get_parent (mono_defaults.enum_class) || cmethod->klass == mono_defaults.enum_class)) {
5698 /*
5699 * The type parameter is instantiated as a valuetype,
5700 * but that type doesn't override the method we're
5701 * calling, so we need to box `this'.
5702 */
5703 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5704 ins->klass = constrained_class;
5705 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5706 CHECK_CFG_EXCEPTION;
5707 } else if (!m_class_is_valuetype (constrained_class)) {
5708 int dreg = alloc_ireg_ref (cfg);
5710 /*
5711 * The type parameter is instantiated as a reference
5712 * type. We have a managed pointer on the stack, so
5713 * we need to dereference it here.
5714 */
5715 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
5716 ins->type = STACK_OBJ;
5717 sp [0] = ins;
5718 } else {
5719 if (m_class_is_valuetype (cmethod->klass)) {
5720 /* Own method */
5721 } else {
5722 /* Interface method */
5723 int ioffset, slot;
5725 mono_class_setup_vtable (constrained_class);
5726 CHECK_TYPELOAD (constrained_class);
5727 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
5728 if (ioffset == -1)
5729 TYPE_LOAD_ERROR (constrained_class);
5730 slot = mono_method_get_vtable_slot (cmethod);
5731 if (slot == -1)
5732 TYPE_LOAD_ERROR (cmethod->klass);
5733 cmethod = m_class_get_vtable (constrained_class) [ioffset + slot];
5734 *ref_cmethod = cmethod;
5736 if (cmethod->klass == mono_defaults.enum_class) {
5737 /* Enum implements some interfaces, so treat this as the first case */
5738 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (constrained_class), sp [0]->dreg, 0);
5739 ins->klass = constrained_class;
5740 sp [0] = mini_emit_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
5741 CHECK_CFG_EXCEPTION;
5744 *ref_virtual = FALSE;
5747 exception_exit:
5748 return NULL;
5751 static void
5752 emit_setret (MonoCompile *cfg, MonoInst *val)
5754 MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (cfg->method)->ret);
5755 MonoInst *ins;
5757 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
5758 MonoInst *ret_addr;
5760 if (!cfg->vret_addr) {
5761 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
5762 } else {
5763 EMIT_NEW_RETLOADA (cfg, ret_addr);
5765 MonoClass *ret_class = mono_class_from_mono_type_internal (ret_type);
5766 if (MONO_CLASS_IS_SIMD (cfg, ret_class))
5767 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREX_MEMBASE, ret_addr->dreg, 0, val->dreg);
5768 else
5769 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
5770 ins->klass = ret_class;
5772 } else {
5773 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5774 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
5775 MonoInst *iargs [1];
5776 MonoInst *conv;
5778 iargs [0] = val;
5779 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
5780 mono_arch_emit_setret (cfg, cfg->method, conv);
5781 } else {
5782 mono_arch_emit_setret (cfg, cfg->method, val);
5784 #else
5785 mono_arch_emit_setret (cfg, cfg->method, val);
5786 #endif
5790 typedef union _MonoOpcodeParameter {
5791 gint32 i32;
5792 gint64 i64;
5793 float f;
5794 double d;
5795 guchar *branch_target;
5796 } MonoOpcodeParameter;
5798 typedef struct _MonoOpcodeInfo {
5799 guint constant : 4; // private
5800 gint pops : 3; // public -1 means variable
5801 gint pushes : 3; // public -1 means variable
5802 } MonoOpcodeInfo;
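/*
* For illustration (not from the original source): for CEE_LDC_I4_3 the
* OPDEF table below yields { constant = 3 + 1, pops = 0, pushes = 1 }, so
* mono_opcode_decode () reports the implicit operand 3 and a net stack
* growth of one slot.
*/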
5804 static inline const MonoOpcodeInfo*
5805 mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter)
5807 #define Push0 (0)
5808 #define Pop0 (0)
5809 #define Push1 (1)
5810 #define Pop1 (1)
5811 #define PushI (1)
5812 #define PopI (1)
5813 #define PushI8 (1)
5814 #define PopI8 (1)
5815 #define PushRef (1)
5816 #define PopRef (1)
5817 #define PushR4 (1)
5818 #define PopR4 (1)
5819 #define PushR8 (1)
5820 #define PopR8 (1)
5821 #define VarPush (-1)
5822 #define VarPop (-1)
5824 static const MonoOpcodeInfo mono_opcode_info [ ] = {
5825 #define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes },
5826 #include "mono/cil/opcode.def"
5827 #undef OPDEF
5828 };
5830 #undef Push0
5831 #undef Pop0
5832 #undef Push1
5833 #undef Pop1
5834 #undef PushI
5835 #undef PopI
5836 #undef PushI8
5837 #undef PopI8
5838 #undef PushRef
5839 #undef PopRef
5840 #undef PushR4
5841 #undef PopR4
5842 #undef PushR8
5843 #undef PopR8
5844 #undef VarPush
5845 #undef VarPop
5847 gint32 delta;
5848 guchar *next_ip = ip + op_size;
5850 const MonoOpcodeInfo *info = &mono_opcode_info [il_op];
5852 switch (mono_opcodes [il_op].argument) {
5853 case MonoInlineNone:
5854 parameter->i32 = (int)info->constant - 1;
5855 break;
5856 case MonoInlineString:
5857 case MonoInlineType:
5858 case MonoInlineField:
5859 case MonoInlineMethod:
5860 case MonoInlineTok:
5861 case MonoInlineSig:
5862 case MonoShortInlineR:
5863 case MonoInlineI:
5864 parameter->i32 = read32 (next_ip - 4);
5865 // FIXME check token type?
5866 break;
5867 case MonoShortInlineI:
5868 parameter->i32 = (signed char)next_ip [-1];
5869 break;
5870 case MonoInlineVar:
5871 parameter->i32 = read16 (next_ip - 2);
5872 break;
5873 case MonoShortInlineVar:
5874 parameter->i32 = next_ip [-1];
5875 break;
5876 case MonoInlineR:
5877 case MonoInlineI8:
5878 parameter->i64 = read64 (next_ip - 8);
5879 break;
5880 case MonoShortInlineBrTarget:
5881 delta = (signed char)next_ip [-1];
5882 goto branch_target;
5883 case MonoInlineBrTarget:
5884 delta = (gint32)read32 (next_ip - 4);
5885 branch_target:
5886 parameter->branch_target = delta + next_ip;
5887 break;
5888 case MonoInlineSwitch: // complicated
5889 break;
5890 default:
5891 g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument);
5893 return info;
5896 /**
5897 * mono_method_to_ir:
5898 *
5899 * Translate the .NET IL into linear IR.
5900 *
5901 * @start_bblock: if not NULL, the starting basic block, used during inlining.
5902 * @end_bblock: if not NULL, the ending basic block, used during inlining.
5903 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
5904 * @inline_args: if not NULL, contains the arguments to the inline call
5905 * @inline_offset: if not zero, the real offset from the inline call.
5906 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
5907 *
5908 * This method is used to turn ECMA IL into Mono's internal Linear IR
5909 * representation. It is used both for entire methods, as well as for
5910 * inlining existing methods. In the former case, the @start_bblock,
5911 * @end_bblock, @return_var and @inline_args are all set to NULL, and
5912 * @inline_offset is set to zero.
5913 *
5914 * Returns: the inline cost, or -1 if there was an error processing this method.
5915 */
5916 int
5917 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5918 MonoInst *return_var, MonoInst **inline_args,
5919 guint inline_offset, gboolean is_virtual_call)
5921 ERROR_DECL (error);
5922 MonoInst *ins, **sp, **stack_start;
5923 MonoBasicBlock *tblock = NULL;
5924 MonoBasicBlock *init_localsbb = NULL, *init_localsbb2 = NULL;
5925 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5926 MonoMethod *method_definition;
5927 MonoInst **arg_array;
5928 MonoMethodHeader *header;
5929 MonoImage *image;
5930 guint32 token, ins_flag;
5931 MonoClass *klass;
5932 MonoClass *constrained_class = NULL;
5933 guchar *ip, *end, *target, *err_pos;
5934 MonoMethodSignature *sig;
5935 MonoGenericContext *generic_context = NULL;
5936 MonoGenericContainer *generic_container = NULL;
5937 MonoType **param_types;
5938 int i, n, start_new_bblock, dreg;
5939 int num_calls = 0, inline_costs = 0;
5940 int breakpoint_id = 0;
5941 guint num_args;
5942 GSList *class_inits = NULL;
5943 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5944 int context_used;
5945 gboolean init_locals, seq_points, skip_dead_blocks;
5946 gboolean sym_seq_points = FALSE;
5947 MonoDebugMethodInfo *minfo;
5948 MonoBitSet *seq_point_locs = NULL;
5949 MonoBitSet *seq_point_set_locs = NULL;
5951 cfg->disable_inline = is_jit_optimizer_disabled (method);
5953 image = m_class_get_image (method->klass);
5955 /* serialization and xdomain stuff may need access to private fields and methods */
5956 dont_verify = image->assembly->corlib_internal? TRUE: FALSE;
5957 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5958 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5959 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5960 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5961 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5963 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5964 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5965 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_OTHER;
5966 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5967 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5969 header = mono_method_get_header_checked (method, &cfg->error);
5970 if (!header) {
5971 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
5972 goto exception_exit;
5973 } else {
5974 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
5977 generic_container = mono_method_get_generic_container (method);
5978 sig = mono_method_signature_internal (method);
5979 num_args = sig->hasthis + sig->param_count;
5980 ip = (guchar*)header->code;
5981 cfg->cil_start = ip;
5982 end = ip + header->code_size;
5983 cfg->stat_cil_code_size += header->code_size;
5985 seq_points = cfg->gen_seq_points && cfg->method == method;
5987 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
5988 /* We could hit a seq point before attaching to the JIT (#8338) */
5989 seq_points = FALSE;
5992 if (cfg->prof_coverage) {
5993 if (cfg->compile_aot)
5994 g_error ("Coverage profiling is not supported with AOT.");
5996 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5999 if ((cfg->gen_sdb_seq_points && cfg->method == method) || cfg->prof_coverage) {
6000 minfo = mono_debug_lookup_method (method);
6001 if (minfo) {
6002 MonoSymSeqPoint *sps;
6003 int i, n_il_offsets;
6005 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
6006 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6007 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6008 sym_seq_points = TRUE;
6009 for (i = 0; i < n_il_offsets; ++i) {
6010 if (sps [i].il_offset < header->code_size)
6011 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
6013 g_free (sps);
6015 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
6016 if (asyncMethod) {
6017 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
6018 {
6019 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
6020 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
6021 }
6022 mono_debug_free_method_async_debug_info (asyncMethod);
6024 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (m_class_get_image (method->klass))) {
6025 /* Methods without line number info like auto-generated property accessors */
6026 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6027 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6028 sym_seq_points = TRUE;
6032 /*
6033 * Methods without init_locals set could cause asserts in various passes
6034 * (#497220). To work around this, we emit dummy initialization opcodes
6035 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6036 * on some platforms.
6037 */
6038 if (cfg->opt & MONO_OPT_UNSAFE)
6039 init_locals = header->init_locals;
6040 else
6041 init_locals = TRUE;
6043 method_definition = method;
6044 while (method_definition->is_inflated) {
6045 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6046 method_definition = imethod->declaring;
6049 /* SkipVerification is not allowed if core-clr is enabled */
6050 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6051 dont_verify = TRUE;
6052 dont_verify_stloc = TRUE;
6055 if (sig->is_inflated)
6056 generic_context = mono_method_get_context (method);
6057 else if (generic_container)
6058 generic_context = &generic_container->context;
6059 cfg->generic_context = generic_context;
6061 if (!cfg->gshared)
6062 g_assert (!sig->has_type_parameters);
6064 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6065 g_assert (method->is_inflated);
6066 g_assert (mono_method_get_context (method)->method_inst);
6068 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6069 g_assert (sig->generic_param_count);
6071 if (cfg->method == method) {
6072 cfg->real_offset = 0;
6073 } else {
6074 cfg->real_offset = inline_offset;
6077 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6078 cfg->cil_offset_to_bb_len = header->code_size;
6080 cfg->current_method = method;
6082 if (cfg->verbose_level > 2)
6083 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6085 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6086 if (sig->hasthis)
6087 param_types [0] = m_class_is_valuetype (method->klass) ? m_class_get_this_arg (method->klass) : m_class_get_byval_arg (method->klass);
6088 for (n = 0; n < sig->param_count; ++n)
6089 param_types [n + sig->hasthis] = sig->params [n];
6090 cfg->arg_types = param_types;
6092 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
6093 if (cfg->method == method) {
6094 /* ENTRY BLOCK */
6095 NEW_BBLOCK (cfg, start_bblock);
6096 cfg->bb_entry = start_bblock;
6097 start_bblock->cil_code = NULL;
6098 start_bblock->cil_length = 0;
6100 /* EXIT BLOCK */
6101 NEW_BBLOCK (cfg, end_bblock);
6102 cfg->bb_exit = end_bblock;
6103 end_bblock->cil_code = NULL;
6104 end_bblock->cil_length = 0;
6105 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6106 g_assert (cfg->num_bblocks == 2);
6108 arg_array = cfg->args;
6110 if (header->num_clauses) {
6111 cfg->spvars = g_hash_table_new (NULL, NULL);
6112 cfg->exvars = g_hash_table_new (NULL, NULL);
6114 /* handle exception clauses */
6115 for (i = 0; i < header->num_clauses; ++i) {
6116 MonoBasicBlock *try_bb;
6117 MonoExceptionClause *clause = &header->clauses [i];
6118 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6120 try_bb->real_offset = clause->try_offset;
6121 try_bb->try_start = TRUE;
6122 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6123 tblock->real_offset = clause->handler_offset;
6124 tblock->flags |= BB_EXCEPTION_HANDLER;
6126 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
6127 mono_create_exvar_for_offset (cfg, clause->handler_offset);
6128 /*
6129 * Linking the try block with the EH block hinders inlining, as we won't be able to
6130 * merge the bblocks from inlining, and it produces an artificial hole for no good reason.
6131 */
6132 if (COMPILE_LLVM (cfg))
6133 link_bblock (cfg, try_bb, tblock);
6135 if (*(ip + clause->handler_offset) == CEE_POP)
6136 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6138 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6139 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6140 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6141 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6142 MONO_ADD_INS (tblock, ins);
6144 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
6145 /* finally clauses already have a seq point */
6146 /* seq points for filter clauses are emitted below */
6147 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6148 MONO_ADD_INS (tblock, ins);
6151 /* todo: is a fault block unsafe to optimize? */
6152 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6153 tblock->flags |= BB_EXCEPTION_UNSAFE;
6156 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6157 while (p < end) {
6158 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6159 }*/
6160 /* catch and filter blocks get the exception object on the stack */
6161 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6162 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6164 /* mostly like handle_stack_args (), but just sets the input args */
6165 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6166 tblock->in_scount = 1;
6167 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6168 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6170 cfg->cbb = tblock;
6172 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
6173 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
6174 if (!cfg->compile_llvm) {
6175 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
6176 ins->dreg = tblock->in_stack [0]->dreg;
6177 MONO_ADD_INS (tblock, ins);
6179 #else
6180 MonoInst *dummy_use;
6183 * Add a dummy use for the exvar so its liveness info will be
6184 * correct.
6186 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6187 #endif
6189 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6190 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6191 MONO_ADD_INS (tblock, ins);
6194 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6195 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6196 tblock->flags |= BB_EXCEPTION_HANDLER;
6197 tblock->real_offset = clause->data.filter_offset;
6198 tblock->in_scount = 1;
6199 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6200 /* The filter block shares the exvar with the handler block */
6201 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6202 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6203 MONO_ADD_INS (tblock, ins);
6207 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6208 clause->data.catch_class &&
6209 cfg->gshared &&
6210 mono_class_check_context_used (clause->data.catch_class)) {
6211 /*
6212 * In shared generic code with catch
6213 * clauses containing type variables
6214 * the exception handling code has to
6215 * be able to get to the rgctx.
6216 * Therefore we have to make sure that
6217 * the vtable/mrgctx argument (for
6218 * static or generic methods) or the
6219 * "this" argument (for non-static
6220 * methods) are live.
6221 */
6222 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6223 mini_method_get_context (method)->method_inst ||
6224 m_class_is_valuetype (method->klass)) {
6225 mono_get_vtable_var (cfg);
6226 } else {
6227 MonoInst *dummy_use;
6229 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6233 } else {
6234 arg_array = g_newa (MonoInst*, num_args);
6235 cfg->cbb = start_bblock;
6236 cfg->args = arg_array;
6237 mono_save_args (cfg, sig, inline_args);
6240 /* FIRST CODE BLOCK */
6241 NEW_BBLOCK (cfg, tblock);
6242 tblock->cil_code = ip;
6243 cfg->cbb = tblock;
6244 cfg->ip = ip;
6246 ADD_BBLOCK (cfg, tblock);
6248 if (cfg->method == method) {
6249 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6250 if (breakpoint_id) {
6251 MONO_INST_NEW (cfg, ins, OP_BREAK);
6252 MONO_ADD_INS (cfg->cbb, ins);
6256 /* we use a separate basic block for the initialization code */
6257 NEW_BBLOCK (cfg, init_localsbb);
6258 if (cfg->method == method)
6259 cfg->bb_init = init_localsbb;
6260 init_localsbb->real_offset = cfg->real_offset;
6261 start_bblock->next_bb = init_localsbb;
6262 init_localsbb->next_bb = cfg->cbb;
6263 link_bblock (cfg, start_bblock, init_localsbb);
6264 link_bblock (cfg, init_localsbb, cfg->cbb);
6265 init_localsbb2 = init_localsbb;
6266 cfg->cbb = init_localsbb;
6268 if (cfg->gsharedvt && cfg->method == method) {
6269 MonoGSharedVtMethodInfo *info;
6270 MonoInst *var, *locals_var;
6271 int dreg;
6273 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6274 info->method = cfg->method;
6275 info->count_entries = 16;
6276 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
6277 cfg->gsharedvt_info = info;
6279 var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
6280 /* prevent it from being register allocated */
6281 //var->flags |= MONO_INST_VOLATILE;
6282 cfg->gsharedvt_info_var = var;
6284 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
6285 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
6287 /* Allocate locals */
6288 locals_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
6289 /* prevent it from being register allocated */
6290 //locals_var->flags |= MONO_INST_VOLATILE;
6291 cfg->gsharedvt_locals_var = locals_var;
6293 dreg = alloc_ireg (cfg);
6294 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
6296 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
6297 ins->dreg = locals_var->dreg;
6298 ins->sreg1 = dreg;
6299 MONO_ADD_INS (cfg->cbb, ins);
6300 cfg->gsharedvt_locals_var_ins = ins;
6302 cfg->flags |= MONO_CFG_HAS_ALLOCA;
6304 if (init_locals)
6305 ins->flags |= MONO_INST_INIT;
6309 if (mono_security_core_clr_enabled ()) {
6310 /* check if this is native code, e.g. an icall or a p/invoke */
6311 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6312 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6313 if (wrapped) {
6314 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6315 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6317 /* if this is a native call then it can only be JITted from platform code */
6318 if ((icall || pinvk) && method->klass && m_class_get_image (method->klass)) {
6319 if (!mono_security_core_clr_is_platform_image (m_class_get_image (method->klass))) {
6320 MonoException *ex = icall ? mono_get_exception_security () :
6321 mono_get_exception_method_access ();
6322 emit_throw_exception (cfg, ex);
6329 CHECK_CFG_EXCEPTION;
6331 if (header->code_size == 0)
6332 UNVERIFIED;
6334 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6335 ip = err_pos;
6336 UNVERIFIED;
6339 if (cfg->method == method)
6340 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
6342 for (n = 0; n < header->num_locals; ++n) {
6343 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6344 UNVERIFIED;
6346 class_inits = NULL;
6348 /* We force the vtable variable here for all shared methods
6349 for the possibility that they might show up in a stack
6350 trace where their exact instantiation is needed. */
6351 if (cfg->gshared && method == cfg->method) {
6352 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6353 mini_method_get_context (method)->method_inst ||
6354 m_class_is_valuetype (method->klass)) {
6355 mono_get_vtable_var (cfg);
6356 } else {
6357 /* FIXME: Is there a better way to do this?
6358 We need the variable live for the duration
6359 of the whole method. */
6360 cfg->args [0]->flags |= MONO_INST_VOLATILE;
6364 /* add a check for this != NULL to inlined methods */
6365 if (is_virtual_call) {
6366 MonoInst *arg_ins;
6368 NEW_ARGLOAD (cfg, arg_ins, 0);
6369 MONO_ADD_INS (cfg->cbb, arg_ins);
6370 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6373 skip_dead_blocks = !dont_verify;
6374 if (skip_dead_blocks) {
6375 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
6376 CHECK_CFG_ERROR;
6377 g_assert (bb);
6380 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6381 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6383 ins_flag = 0;
6384 start_new_bblock = 0;
6385 MonoOpcodeEnum il_op; il_op = MonoOpcodeEnum_Invalid;
6387 for (guchar *next_ip = ip; ip < end; ip = next_ip) {
6388 MonoOpcodeEnum previous_il_op = il_op;
6389 const guchar *tmp_ip = ip;
6390 const int op_size = mono_opcode_value_and_size (&tmp_ip, end, &il_op);
6391 CHECK_OPSIZE (op_size);
6392 next_ip += op_size;
6394 if (cfg->method == method)
6395 cfg->real_offset = ip - header->code;
6396 else
6397 cfg->real_offset = inline_offset;
6398 cfg->ip = ip;
6400 context_used = 0;
6402 if (start_new_bblock) {
6403 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
6404 if (start_new_bblock == 2) {
6405 g_assert (ip == tblock->cil_code);
6406 } else {
6407 GET_BBLOCK (cfg, tblock, ip);
6409 cfg->cbb->next_bb = tblock;
6410 cfg->cbb = tblock;
6411 start_new_bblock = 0;
6412 for (i = 0; i < cfg->cbb->in_scount; ++i) {
6413 if (cfg->verbose_level > 3)
6414 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
6415 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
6416 *sp++ = ins;
6418 if (class_inits)
6419 g_slist_free (class_inits);
6420 class_inits = NULL;
6421 } else {
6422 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
6423 link_bblock (cfg, cfg->cbb, tblock);
6424 if (sp != stack_start) {
6425 handle_stack_args (cfg, stack_start, sp - stack_start);
6426 sp = stack_start;
6427 CHECK_UNVERIFIABLE (cfg);
6429 cfg->cbb->next_bb = tblock;
6430 cfg->cbb = tblock;
6431 for (i = 0; i < cfg->cbb->in_scount; ++i) {
6432 if (cfg->verbose_level > 3)
6433 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
6434 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
6435 *sp++ = ins;
6437 g_slist_free (class_inits);
6438 class_inits = NULL;
6442 if (skip_dead_blocks) {
6443 int ip_offset = ip - header->code;
6445 if (ip_offset == bb->end)
6446 bb = bb->next;
6448 if (bb->dead) {
6449 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6451 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6453 if (ip_offset + op_size == bb->end) {
6454 MONO_INST_NEW (cfg, ins, OP_NOP);
6455 MONO_ADD_INS (cfg->cbb, ins);
6456 start_new_bblock = 1;
6458 continue;
6461 /*
6462 * Sequence points are points where the debugger can place a breakpoint.
6463 * Currently, we generate these automatically at points where the IL
6464 * stack is empty.
6465 */
6466 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6467 /*
6468 * Make methods interruptible at the beginning, and at the targets of
6469 * backward branches.
6470 * Also, do this at the start of every bblock in methods with clauses too,
6471 * to be able to handle instructions with imprecise control flow like
6472 * throw/endfinally.
6473 * Backward branches are handled at the end of method-to-ir ().
6474 */
6475 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6476 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
6478 /* Avoid sequence points on empty IL like .volatile */
6479 // FIXME: Enable this
6480 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6481 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6482 if ((sp != stack_start) && !sym_seq_point)
6483 ins->flags |= MONO_INST_NONEMPTY_STACK;
6484 MONO_ADD_INS (cfg->cbb, ins);
6486 if (sym_seq_points)
6487 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
6489 if (cfg->prof_coverage) {
6490 guint32 cil_offset = ip - header->code;
6491 gpointer counter = &cfg->coverage_info->data [cil_offset].count;
6492 cfg->coverage_info->data [cil_offset].cil_code = ip;
6494 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4)) {
6495 MonoInst *one_ins, *load_ins;
6497 EMIT_NEW_PCONST (cfg, load_ins, counter);
6498 EMIT_NEW_ICONST (cfg, one_ins, 1);
6499 MONO_INST_NEW (cfg, ins, OP_ATOMIC_ADD_I4);
6500 ins->dreg = mono_alloc_ireg (cfg);
6501 ins->inst_basereg = load_ins->dreg;
6502 ins->inst_offset = 0;
6503 ins->sreg2 = one_ins->dreg;
6504 ins->type = STACK_I4;
6505 MONO_ADD_INS (cfg->cbb, ins);
6506 } else {
6507 EMIT_NEW_PCONST (cfg, ins, counter);
6508 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6513 cfg->cbb->real_offset = cfg->real_offset;
6515 if (cfg->verbose_level > 3)
6516 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6518 // Variables shared by CEE_CALLI CEE_CALL CEE_CALLVIRT CEE_JMP.
6519 // Initialize to either what they all need or zero.
6520 gboolean emit_widen = TRUE;
6521 gboolean tailcall = FALSE;
6522 gboolean common_call = FALSE;
6523 MonoInst *keep_this_alive = NULL;
6524 MonoMethod *cmethod = NULL;
6525 MonoMethodSignature *fsig = NULL;
6527 // These are used only in CALL/CALLVIRT but must be initialized also for CALLI,
6528 // since it jumps into CALL/CALLVIRT.
6529 gboolean need_seq_point = FALSE;
6530 gboolean push_res = TRUE;
6531 gboolean skip_ret = FALSE;
6532 gboolean tailcall_remove_ret = FALSE;
6534 // FIXME split the ~500 lines of load/store field handling into a separate file/function.
6536 MonoOpcodeParameter parameter;
6537 const MonoOpcodeInfo* info = mono_opcode_decode (ip, op_size, il_op, &parameter);
6538 g_assert (info);
6539 n = parameter.i32;
6540 token = parameter.i32;
6541 target = parameter.branch_target;
6543 // Check stack size for push/pop, except in the variable (-1) cases like call/ret/newobj.
6544 const int pushes = info->pushes;
6545 const int pops = info->pops;
6546 if (pushes >= 0 && pops >= 0) {
6547 g_assert (pushes - pops <= 1);
6548 if (pushes - pops == 1)
6549 CHECK_STACK_OVF ();
6551 if (pops >= 0)
6552 CHECK_STACK (pops);
6554 switch (il_op) {
6555 case MONO_CEE_NOP:
6556 if (seq_points && !sym_seq_points && sp != stack_start) {
6557 /*
6558 * The C# compiler uses these nops to notify the JIT that it should
6559 * insert seq points.
6560 */
6561 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6562 MONO_ADD_INS (cfg->cbb, ins);
6564 if (cfg->keep_cil_nops)
6565 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6566 else
6567 MONO_INST_NEW (cfg, ins, OP_NOP);
6568 MONO_ADD_INS (cfg->cbb, ins);
6569 break;
6570 case MONO_CEE_BREAK:
6571 if (mini_should_insert_breakpoint (cfg->method)) {
6572 ins = mono_emit_jit_icall (cfg, mini_get_dbg_callbacks ()->user_break, NULL);
6573 } else {
6574 MONO_INST_NEW (cfg, ins, OP_NOP);
6576 MONO_ADD_INS (cfg->cbb, ins);
6577 break;
6578 case MONO_CEE_LDARG_0:
6579 case MONO_CEE_LDARG_1:
6580 case MONO_CEE_LDARG_2:
6581 case MONO_CEE_LDARG_3:
6582 case MONO_CEE_LDARG_S:
6583 case MONO_CEE_LDARG:
6584 CHECK_ARG (n);
6585 if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, cfg->arg_types[n])) {
6586 EMIT_NEW_ARGLOADA (cfg, ins, n);
6587 } else {
6588 EMIT_NEW_ARGLOAD (cfg, ins, n);
6590 *sp++ = ins;
6591 break;
6593 case MONO_CEE_LDLOC_0:
6594 case MONO_CEE_LDLOC_1:
6595 case MONO_CEE_LDLOC_2:
6596 case MONO_CEE_LDLOC_3:
6597 case MONO_CEE_LDLOC_S:
6598 case MONO_CEE_LDLOC:
6599 CHECK_LOCAL (n);
6600 if (next_ip < end && is_addressable_valuetype_load (cfg, next_ip, header->locals[n])) {
6601 EMIT_NEW_LOCLOADA (cfg, ins, n);
6602 } else {
6603 EMIT_NEW_LOCLOAD (cfg, ins, n);
6605 *sp++ = ins;
6606 break;
6608 case MONO_CEE_STLOC_0:
6609 case MONO_CEE_STLOC_1:
6610 case MONO_CEE_STLOC_2:
6611 case MONO_CEE_STLOC_3:
6612 case MONO_CEE_STLOC_S:
6613 case MONO_CEE_STLOC:
6614 CHECK_LOCAL (n);
6615 --sp;
6616 *sp = convert_value (cfg, header->locals [n], *sp);
6617 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6618 UNVERIFIED;
6619 emit_stloc_ir (cfg, sp, header, n);
6620 inline_costs += 1;
6621 break;
6622 case MONO_CEE_LDARGA_S:
6623 case MONO_CEE_LDARGA:
6624 CHECK_ARG (n);
6625 NEW_ARGLOADA (cfg, ins, n);
6626 MONO_ADD_INS (cfg->cbb, ins);
6627 *sp++ = ins;
6628 break;
6629 case MONO_CEE_STARG_S:
6630 case MONO_CEE_STARG:
6631 --sp;
6632 CHECK_ARG (n);
6633 *sp = convert_value (cfg, param_types [n], *sp);
6634 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
6635 UNVERIFIED;
6636 emit_starg_ir (cfg, sp, n);
6637 break;
6638 case MONO_CEE_LDLOCA:
6639 case MONO_CEE_LDLOCA_S: {
6640 guchar *tmp_ip;
6641 CHECK_LOCAL (n);
6643 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, next_ip, end, n))) {
6644 next_ip = tmp_ip;
6645 il_op = MONO_CEE_INITOBJ;
6646 inline_costs += 1;
6647 break;
6650 EMIT_NEW_LOCLOADA (cfg, ins, n);
6651 *sp++ = ins;
6652 break;
6654 case MONO_CEE_LDNULL:
6655 EMIT_NEW_PCONST (cfg, ins, NULL);
6656 ins->type = STACK_OBJ;
6657 *sp++ = ins;
6658 break;
6659 case MONO_CEE_LDC_I4_M1:
6660 case MONO_CEE_LDC_I4_0:
6661 case MONO_CEE_LDC_I4_1:
6662 case MONO_CEE_LDC_I4_2:
6663 case MONO_CEE_LDC_I4_3:
6664 case MONO_CEE_LDC_I4_4:
6665 case MONO_CEE_LDC_I4_5:
6666 case MONO_CEE_LDC_I4_6:
6667 case MONO_CEE_LDC_I4_7:
6668 case MONO_CEE_LDC_I4_8:
6669 case MONO_CEE_LDC_I4_S:
6670 case MONO_CEE_LDC_I4:
6671 EMIT_NEW_ICONST (cfg, ins, n);
6672 *sp++ = ins;
6673 break;
6674 case MONO_CEE_LDC_I8:
6675 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6676 ins->type = STACK_I8;
6677 ins->dreg = alloc_dreg (cfg, STACK_I8);
6678 ins->inst_l = parameter.i64;
6679 MONO_ADD_INS (cfg->cbb, ins);
6680 *sp++ = ins;
6681 break;
6682 case MONO_CEE_LDC_R4: {
6683 float *f;
6684 gboolean use_aotconst = FALSE;
6686 #ifdef TARGET_POWERPC
6687 /* FIXME: Clean this up */
6688 if (cfg->compile_aot)
6689 use_aotconst = TRUE;
6690 #endif
6691 /* FIXME: we should really allocate this only late in the compilation process */
6692 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
6694 if (use_aotconst) {
6695 MonoInst *cons;
6696 int dreg;
6698 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6700 dreg = alloc_freg (cfg);
6701 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6702 ins->type = cfg->r4_stack_type;
6703 } else {
6704 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6705 ins->type = cfg->r4_stack_type;
6706 ins->dreg = alloc_dreg (cfg, STACK_R8);
6707 ins->inst_p0 = f;
6708 MONO_ADD_INS (cfg->cbb, ins);
6710 *f = parameter.f;
6711 *sp++ = ins;
6712 break;
6714 case MONO_CEE_LDC_R8: {
6715 double *d;
6716 gboolean use_aotconst = FALSE;
6718 #ifdef TARGET_POWERPC
6719 /* FIXME: Clean this up */
6720 if (cfg->compile_aot)
6721 use_aotconst = TRUE;
6722 #endif
6724 /* FIXME: we should really allocate this only late in the compilation process */
6725 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
6727 if (use_aotconst) {
6728 MonoInst *cons;
6729 int dreg;
6731 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6733 dreg = alloc_freg (cfg);
6734 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6735 ins->type = STACK_R8;
6736 } else {
6737 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6738 ins->type = STACK_R8;
6739 ins->dreg = alloc_dreg (cfg, STACK_R8);
6740 ins->inst_p0 = d;
6741 MONO_ADD_INS (cfg->cbb, ins);
6743 *d = parameter.d;
6744 *sp++ = ins;
6745 break;
6747 case MONO_CEE_DUP: {
6748 MonoInst *temp, *store;
6749 sp--;
6750 ins = *sp;
6752 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6753 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6755 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6756 *sp++ = ins;
6758 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6759 *sp++ = ins;
6761 inline_costs += 2;
6762 break;
6764 case MONO_CEE_POP:
6765 --sp;
6767 #ifdef TARGET_X86
6768 if (sp [0]->type == STACK_R8)
6769 /* we need to pop the value from the x86 FP stack */
6770 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6771 #endif
6772 break;
6773 case MONO_CEE_JMP: {
6774 MonoCallInst *call;
6775 int i, n;
6777 INLINE_FAILURE ("jmp");
6778 GSHAREDVT_FAILURE (il_op);
6780 if (stack_start != sp)
6781 UNVERIFIED;
6782 /* FIXME: check the signature matches */
6783 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6784 CHECK_CFG_ERROR;
6786 if (cfg->gshared && mono_method_check_context_used (cmethod))
6787 GENERIC_SHARING_FAILURE (CEE_JMP);
6789 mini_profiler_emit_tail_call (cfg, cmethod);
6791 fsig = mono_method_signature_internal (cmethod);
6792 n = fsig->param_count + fsig->hasthis;
6793 if (cfg->llvm_only) {
6794 MonoInst **args;
6796 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6797 for (i = 0; i < n; ++i)
6798 EMIT_NEW_ARGLOAD (cfg, args [i], i);
6799 ins = mini_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
6801 * The code in mono-basic-block.c treats the rest of the code as dead, but we
6802 * have to emit a normal return since llvm expects it.
6804 if (cfg->ret)
6805 emit_setret (cfg, ins);
6806 MONO_INST_NEW (cfg, ins, OP_BR);
6807 ins->inst_target_bb = end_bblock;
6808 MONO_ADD_INS (cfg->cbb, ins);
6809 link_bblock (cfg, cfg->cbb, end_bblock);
6810 break;
6811 } else {
6812 /* Handle tailcalls similarly to calls */
6813 DISABLE_AOT (cfg);
6815 mini_emit_tailcall_parameters (cfg, fsig);
6816 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6817 call->method = cmethod;
6818 // FIXME Other initialization of the tailcall field occurs after
6819 // it is used. So this is the only "real" use and needs more attention.
6820 call->tailcall = TRUE;
6821 call->signature = fsig;
6822 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6823 call->inst.inst_p0 = cmethod;
6824 for (i = 0; i < n; ++i)
6825 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6827 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
6828 call->vret_var = cfg->vret_addr;
6830 mono_arch_emit_call (cfg, call);
6831 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
6832 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
6835 start_new_bblock = 1;
6836 break;
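/*
 * Editor's note: calli pops the callee address off the stack. Dynamic
 * methods calling through pinvoke signatures go via a native wrapper,
 * gsharedvt signatures via an out trampoline, and constant addresses
 * (OP_PCONST/OP_AOTCONST/OP_GOT_ENTRY) are converted back into direct calls.
 */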
6838 case MONO_CEE_CALLI: {
6839 // FIXME tail.calli is problematic because the this pointer's type
6840 // is not in the signature, and we cannot check for a byref valuetype.
6841 MonoInst *addr;
6842 MonoInst *callee = NULL;
6844 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
6845 common_call = TRUE; // i.e. skip_ret/push_res/seq_point logic
6846 cmethod = NULL;
6848 gboolean const inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
6849 ? (next_ip < end && next_ip [0] == CEE_RET)
6850 : ((ins_flag & MONO_INST_TAILCALL) != 0));
6851 ins = NULL;
6853 //GSHAREDVT_FAILURE (il_op);
6854 CHECK_STACK (1);
6855 --sp;
6856 addr = *sp;
6857 g_assert (addr);
6858 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
6859 CHECK_CFG_ERROR;
6861 if (method->dynamic && fsig->pinvoke) {
6862 MonoInst *args [3];
6865 * This is a call through a function pointer using a pinvoke
6866 * signature. Have to create a wrapper and call that instead.
6867 * FIXME: This is very slow, need to create a wrapper at JIT time
6868 * instead based on the signature.
6870 EMIT_NEW_IMAGECONST (cfg, args [0], m_class_get_image (method->klass));
6871 EMIT_NEW_PCONST (cfg, args [1], fsig);
6872 args [2] = addr;
6873 // FIXME tailcall?
6874 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6877 n = fsig->param_count + fsig->hasthis;
6879 CHECK_STACK (n);
6881 //g_assert (!virtual_ || fsig->hasthis);
6883 sp -= n;
6885 if (!(cfg->method->wrapper_type && cfg->method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD) && check_call_signature (cfg, fsig, sp)) {
6886 if (break_on_unverified ())
6887 check_call_signature (cfg, fsig, sp); // Again, step through it.
6888 UNVERIFIED;
6891 inline_costs += CALL_COST * MIN(10, num_calls++);
6894 * Making generic calls out of gsharedvt methods.
6895 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
6896 * patching gshared method addresses into a gsharedvt method.
6898 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
6900 * We pass the address to the gsharedvt trampoline in the rgctx reg
6902 callee = addr;
6903 g_assert (addr); // Doubles as boolean after tailcall check.
6906 inst_tailcall && is_supported_tailcall (cfg, ip, method, NULL, fsig,
6907 FALSE/*virtual irrelevant*/, addr != NULL, &tailcall);
6909 if (callee) {
6910 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
6911 /* Not tested */
6912 GSHAREDVT_FAILURE (il_op);
6914 if (cfg->llvm_only)
6915 // FIXME:
6916 GSHAREDVT_FAILURE (il_op);
6918 addr = emit_get_rgctx_sig (cfg, context_used, fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
6919 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, callee, tailcall);
6920 goto calli_end;
6923 /* Prevent inlining of methods with indirect calls */
6924 INLINE_FAILURE ("indirect call");
6926 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
6927 MonoJumpInfoType info_type;
6928 gpointer info_data;
6931 * Instead of emitting an indirect call, emit a direct call
6932 * with the contents of the aotconst as the patch info.
6934 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
6935 info_type = (MonoJumpInfoType)addr->inst_c1;
6936 info_data = addr->inst_p0;
6937 } else {
6938 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
6939 info_data = addr->inst_right->inst_left;
6942 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
6943 tailcall = FALSE;
6944 ins = (MonoInst*)mini_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
6945 NULLIFY_INS (addr);
6946 goto calli_end;
6947 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
6948 tailcall = FALSE;
6949 ins = (MonoInst*)mini_emit_abs_call (cfg, info_type, info_data, fsig, sp);
6950 NULLIFY_INS (addr);
6951 goto calli_end;
6954 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, NULL, NULL, tailcall);
6955 goto calli_end;
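/*
 * Editor's note: call/callvirt proceeds through a cascade of strategies:
 * constrained-call handling, intrinsics, inlining, tail recursion
 * elimination, gsharedvt out-calls, rgctx-based indirect calls for generic
 * sharing, direct icalls and array accessor methods, finally falling
 * through to a plain emitted call at call_end.
 */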
6957 case MONO_CEE_CALL:
6958 case MONO_CEE_CALLVIRT: {
6959 MonoInst *addr; addr = NULL;
6960 int array_rank; array_rank = 0;
6961 gboolean virtual_; virtual_ = il_op == MONO_CEE_CALLVIRT;
6962 gboolean pass_imt_from_rgctx; pass_imt_from_rgctx = FALSE;
6963 MonoInst *imt_arg; imt_arg = NULL;
6964 gboolean pass_vtable; pass_vtable = FALSE;
6965 gboolean pass_mrgctx; pass_mrgctx = FALSE;
6966 MonoInst *vtable_arg; vtable_arg = NULL;
6967 gboolean check_this; check_this = FALSE;
6968 gboolean delegate_invoke; delegate_invoke = FALSE;
6969 gboolean direct_icall; direct_icall = FALSE;
6970 gboolean tailcall_calli; tailcall_calli = FALSE;
6972 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
6973 common_call = FALSE;
6975 // variables to help in assertions
6976 gboolean called_is_supported_tailcall; called_is_supported_tailcall = FALSE;
6977 MonoMethod *tailcall_method; tailcall_method = NULL;
6978 MonoMethod *tailcall_cmethod; tailcall_cmethod = NULL;
6979 MonoMethodSignature *tailcall_fsig; tailcall_fsig = NULL;
6980 gboolean tailcall_virtual; tailcall_virtual = FALSE;
6981 gboolean tailcall_extra_arg; tailcall_extra_arg = FALSE;
6983 gboolean inst_tailcall; inst_tailcall = G_UNLIKELY (debug_tailcall_try_all
6984 ? (next_ip < end && next_ip [0] == CEE_RET)
6985 : ((ins_flag & MONO_INST_TAILCALL) != 0));
6986 ins = NULL;
6988 /* Used to pass arguments to called functions */
6989 HandleCallData cdata;
6990 memset (&cdata, 0, sizeof (HandleCallData));
6992 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6993 CHECK_CFG_ERROR;
6995 MonoMethod *cil_method; cil_method = cmethod;
6997 if (constrained_class) {
6998 gboolean constrained_is_generic_param =
6999 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_VAR ||
7000 m_class_get_byval_arg (constrained_class)->type == MONO_TYPE_MVAR;
7002 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7003 if (cfg->verbose_level > 2)
7004 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
7005 if (!(constrained_is_generic_param &&
7006 cfg->gshared)) {
7007 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
7008 CHECK_CFG_ERROR;
7010 } else {
7011 if (cfg->verbose_level > 2)
7012 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
7014 if (constrained_is_generic_param && cfg->gshared) {
7016 * This is needed since get_method_constrained can't find
7017 * the method in klass representing a type var.
7018 * The type var is guaranteed to be a reference type in this
7019 * case.
7021 if (!mini_is_gsharedvt_klass (constrained_class))
7022 g_assert (!m_class_is_valuetype (cmethod->klass));
7023 } else {
7024 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
7025 CHECK_CFG_ERROR;
7029 if (m_class_is_enumtype (constrained_class) && !strcmp (cmethod->name, "GetHashCode")) {
7030 /* Use the corresponding method from the base type to avoid boxing */
7031 MonoType *base_type = mono_class_enum_basetype_internal (constrained_class);
7032 g_assert (base_type);
7033 constrained_class = mono_class_from_mono_type_internal (base_type);
7034 cmethod = get_method_nofail (constrained_class, cmethod->name, 0, 0);
7035 g_assert (cmethod);
7039 if (!dont_verify && !cfg->skip_visibility) {
7040 MonoMethod *target_method = cil_method;
7041 if (method->is_inflated) {
7042 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
7043 CHECK_CFG_ERROR;
7045 if (!mono_method_can_access_method (method_definition, target_method) &&
7046 !mono_method_can_access_method (method, cil_method))
7047 emit_method_access_failure (cfg, method, cil_method);
7050 if (mono_security_core_clr_enabled ())
7051 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
7053 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7054 /* MS.NET seems to silently convert this to a callvirt */
7055 virtual_ = TRUE;
7059 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7060 * converts to a callvirt.
7062 * tests/bug-515884.il is an example of this behavior
7064 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7065 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7066 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7067 virtual_ = TRUE;
7070 if (!m_class_is_inited (cmethod->klass))
7071 if (!mono_class_init_internal (cmethod->klass))
7072 TYPE_LOAD_ERROR (cmethod->klass);
7074 fsig = mono_method_signature_internal (cmethod);
7075 if (!fsig)
7076 LOAD_ERROR;
7077 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7078 mini_class_is_system_array (cmethod->klass)) {
7079 array_rank = m_class_get_rank (cmethod->klass);
7080 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && direct_icalls_enabled (cfg)) {
7081 direct_icall = TRUE;
7082 } else if (fsig->pinvoke) {
7083 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
7084 fsig = mono_method_signature_internal (wrapper);
7085 } else if (constrained_class) {
7086 } else {
7087 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
7088 CHECK_CFG_ERROR;
7091 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
7092 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
7094 /* See code below */
7095 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
7096 MonoBasicBlock *tbb;
7098 GET_BBLOCK (cfg, tbb, next_ip);
7099 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7101 * We want to extend the try block to cover the call, but we can't do it if the
7102 * call is made directly, since it's followed by an exception check.
7104 direct_icall = FALSE;
7108 mono_save_token_info (cfg, image, token, cil_method);
7110 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
7111 need_seq_point = TRUE;
7113 /* Don't support calls made using type arguments for now */
7115 if (cfg->gsharedvt) {
7116 if (mini_is_gsharedvt_signature (fsig))
7117 GSHAREDVT_FAILURE (il_op);
7121 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7122 g_assert_not_reached ();
7124 n = fsig->param_count + fsig->hasthis;
7126 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
7127 UNVERIFIED;
7129 if (!cfg->gshared)
7130 g_assert (!mono_method_check_context_used (cmethod));
7132 CHECK_STACK (n);
7134 //g_assert (!virtual_ || fsig->hasthis);
7136 sp -= n;
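/*
 * Editor's note: if the receiver's exact class is known (OP_TYPED_OBJREF),
 * resolve the virtual slot at compile time and turn the callvirt into a
 * direct call.
 */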
7138 if (virtual_ && cmethod && sp [0]->opcode == OP_TYPED_OBJREF) {
7139 ERROR_DECL (error);
7141 MonoMethod *new_cmethod = mono_class_get_virtual_method (sp [0]->klass, cmethod, FALSE, error);
7142 mono_error_assert_ok (error);
7143 cmethod = new_cmethod;
7144 virtual_ = FALSE;
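/*
 * Editor's note: calls to corlib's ThrowHelper never return normally; the
 * block is marked out of line, presumably so it is laid out off the hot path.
 */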
7147 if (cmethod && m_class_get_image (cmethod->klass) == mono_defaults.corlib && !strcmp (m_class_get_name (cmethod->klass), "ThrowHelper"))
7148 cfg->cbb->out_of_line = TRUE;
7150 cdata.method = method;
7151 cdata.inst_tailcall = inst_tailcall;
7154 * We have the `constrained.' prefix opcode.
7156 if (constrained_class) {
7157 ins = handle_constrained_call (cfg, cmethod, fsig, constrained_class, sp, &cdata, &cmethod, &virtual_, &emit_widen);
7158 CHECK_CFG_EXCEPTION;
7159 constrained_class = NULL;
7160 if (ins)
7161 goto call_end;
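/* Editor's note: add implicit conversions between the stack values and the declared parameter types */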
7164 for (int i = 0; i < fsig->param_count; ++i)
7165 sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
7167 if (check_call_signature (cfg, fsig, sp)) {
7168 if (break_on_unverified ())
7169 check_call_signature (cfg, fsig, sp); // Again, step through it.
7170 UNVERIFIED;
7173 if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7174 delegate_invoke = TRUE;
7176 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7177 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7178 mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
7179 emit_widen = FALSE;
7182 if (inst_tailcall) // FIXME
7183 mono_tailcall_print ("missed tailcall intrins_sharable %s -> %s\n", method->name, cmethod->name);
7184 goto call_end;
7188 * Implement a workaround for the inherent races involved in locking:
7189 * Monitor.Enter ()
7190 * try {
7191 * } finally {
7192 * Monitor.Exit ()
7194 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7195 * try block, the Exit () won't be executed, see:
7196 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7197 * To work around this, we extend such try blocks to include the last x bytes
7198 * of the Monitor.Enter () call.
7200 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature_internal (cmethod)->param_count == 1) {
7201 MonoBasicBlock *tbb;
7203 GET_BBLOCK (cfg, tbb, next_ip);
7205 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7206 * from Monitor.Enter like ArgumentNullException.
7208 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7209 /* Mark this bblock as needing to be extended */
7210 tbb->extend_try_block = TRUE;
7214 /* Conversion to a JIT intrinsic */
7215 if ((ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7216 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7217 mini_type_to_eval_stack_type ((cfg), fsig->ret, ins);
7218 emit_widen = FALSE;
7220 // FIXME This is only missed if in fact the intrinsic involves a call.
7221 if (inst_tailcall) // FIXME
7222 mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method->name, cmethod->name);
7223 goto call_end;
7225 CHECK_CFG_ERROR;
7228 * If the callee is a shared method, then its static cctor
7229 * might not get called after the call was patched.
7231 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7232 emit_class_init (cfg, cmethod->klass);
7233 CHECK_TYPELOAD (cmethod->klass);
7236 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7238 if (cfg->gshared) {
7239 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7241 context_used = mini_method_check_context_used (cfg, cmethod);
7243 if (context_used && mono_class_is_interface (cmethod->klass)) {
7244 /* Generic method interface
7245 calls are resolved via a
7246 helper function and don't
7247 need an imt. */
7248 if (!cmethod_context || !cmethod_context->method_inst)
7249 pass_imt_from_rgctx = TRUE;
7253 * If a shared method calls another
7254 * shared method then the caller must
7255 * have a generic sharing context
7256 * because the magic trampoline
7257 * requires it. FIXME: We shouldn't
7258 * have to force the vtable/mrgctx
7259 * variable here. Instead there
7260 * should be a flag in the cfg to
7261 * request a generic sharing context.
7263 if (context_used &&
7264 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || m_class_is_valuetype (cfg->method->klass)))
7265 mono_get_vtable_var (cfg);
7268 if (pass_vtable) {
7269 if (context_used) {
7270 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7271 } else {
7272 MonoVTable *vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, &cfg->error);
7273 CHECK_CFG_ERROR;
7275 CHECK_TYPELOAD (cmethod->klass);
7276 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7280 if (pass_mrgctx) {
7281 g_assert (!vtable_arg);
7283 if (!cfg->compile_aot) {
7285 * emit_get_rgctx_method () calls mono_class_vtable () so check
7286 * for type load errors before.
7288 mono_class_setup_vtable (cmethod->klass);
7289 CHECK_TYPELOAD (cmethod->klass);
7292 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7294 /* !marshalbyref is needed to properly handle generic methods + remoting */
7295 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7296 MONO_METHOD_IS_FINAL (cmethod)) &&
7297 !mono_class_is_marshalbyref (cmethod->klass)) {
7298 if (virtual_)
7299 check_this = TRUE;
7300 virtual_ = FALSE;
7304 if (pass_imt_from_rgctx) {
7305 g_assert (!pass_vtable);
7307 imt_arg = emit_get_rgctx_method (cfg, context_used,
7308 cmethod, MONO_RGCTX_INFO_METHOD);
7309 g_assert (imt_arg);
7312 if (check_this)
7313 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7315 /* Calling virtual generic methods */
7317 // These temporaries help detangle "pure" computation of
7318 // inputs to is_supported_tailcall from side effects, so that
7319 // is_supported_tailcall can be computed just once.
7320 gboolean virtual_generic; virtual_generic = FALSE;
7321 gboolean virtual_generic_imt; virtual_generic_imt = FALSE;
7323 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7324 !(MONO_METHOD_IS_FINAL (cmethod) &&
7325 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7326 fsig->generic_param_count &&
7327 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
7328 !cfg->llvm_only) {
7330 g_assert (fsig->is_inflated);
7332 virtual_generic = TRUE;
7334 /* Prevent inlining of methods that contain indirect calls */
7335 INLINE_FAILURE ("virtual generic call");
7337 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7338 GSHAREDVT_FAILURE (il_op);
7340 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
7341 virtual_generic_imt = TRUE;
7342 g_assert (!imt_arg);
7343 if (!context_used)
7344 g_assert (cmethod->is_inflated);
7346 imt_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
7347 g_assert (imt_arg);
7349 virtual_ = TRUE;
7350 vtable_arg = NULL;
7354 // Capture some intent before computing tailcall.
7356 gboolean make_generic_call_out_of_gsharedvt_method;
7357 gboolean will_have_imt_arg;
7359 make_generic_call_out_of_gsharedvt_method = FALSE;
7360 will_have_imt_arg = FALSE;
7363 * Making generic calls out of gsharedvt methods.
7364 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7365 * patching gshared method addresses into a gsharedvt method.
7367 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
7368 !(m_class_get_rank (cmethod->klass) && m_class_get_byval_arg (cmethod->klass)->type != MONO_TYPE_SZARRAY) &&
7369 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
7371 make_generic_call_out_of_gsharedvt_method = TRUE;
7373 if (virtual_) {
7374 if (fsig->generic_param_count) {
7375 will_have_imt_arg = TRUE;
7376 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
7377 will_have_imt_arg = TRUE;
7382 /* Tail prefix / tailcall optimization */
7384 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests.
7385 Inlining and stack traces are not guaranteed however. */
7386 /* FIXME: runtime generic context pointer for jumps? */
7387 /* FIXME: handle this for generic sharing eventually */
7389 // tailcall means "the backend can and will handle it".
7390 // inst_tailcall means the tail. prefix is present.
7391 tailcall_extra_arg = vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass);
7392 tailcall = inst_tailcall && is_supported_tailcall (cfg, ip, method, cmethod, fsig,
7393 virtual_, tailcall_extra_arg, &tailcall_calli);
7394 // Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall).
7395 // Capture values to later assert they don't change.
7396 called_is_supported_tailcall = TRUE;
7397 tailcall_method = method;
7398 tailcall_cmethod = cmethod;
7399 tailcall_fsig = fsig;
7400 tailcall_virtual = virtual_;
7402 if (virtual_generic) {
7403 if (virtual_generic_imt) {
7404 if (tailcall) {
7405 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7406 INLINE_FAILURE ("tailcall");
7408 common_call = TRUE;
7409 goto call_end;
7412 MonoInst *this_temp, *this_arg_temp, *store;
7413 MonoInst *iargs [4];
7415 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7416 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7417 MONO_ADD_INS (cfg->cbb, store);
7419 /* FIXME: This should be a managed pointer */
7420 this_arg_temp = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
7422 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7423 iargs [1] = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
7425 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7426 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
7428 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7430 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
7432 if (inst_tailcall) // FIXME
7433 mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method->name, cmethod->name);
7434 goto call_end;
7436 CHECK_CFG_ERROR;
7438 /* Inlining */
7439 if ((cfg->opt & MONO_OPT_INLINE) &&
7440 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7441 mono_method_check_inlining (cfg, cmethod)) {
7442 int costs;
7443 gboolean always = FALSE;
7445 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7446 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7447 /* Prevent inlining of methods that call wrappers */
7448 INLINE_FAILURE ("wrapper call");
7449 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7450 // Neither pinvoke nor icall is likely to be tailcalled.
7451 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
7452 always = TRUE;
7455 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
7456 if (costs) {
7457 cfg->real_offset += 5;
7459 if (!MONO_TYPE_IS_VOID (fsig->ret))
7460 /* *sp is already set by inline_method */
7461 ins = *sp;
7463 inline_costs += costs;
7464 // FIXME This is missed if the inlinee contains tail calls that
7465 // would work, but not once inlined into caller.
7466 // This matchingness could be a factor in inlining.
7467 // i.e. Do not inline if it hurts tailcall, do inline
7468 // if it helps and/or is neutral, and helps performance
7469 // using usual heuristics.
7470 // Note that inlining will expose multiple tailcall opportunities
7471 // so the tradeoff is not obvious. If we can tailcall anything
7472 // like desktop, then this factor mostly falls away, except
7473 // that inlining can affect tailcall performance due to
7474 // signature match/mismatch.
7475 if (inst_tailcall) // FIXME
7476 mono_tailcall_print ("missed tailcall inline %s -> %s\n", method->name, cmethod->name);
7477 goto call_end;
7481 /* Tail recursion elimination */
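/*
 * Editor's note: a call to the current method immediately followed by ret
 * becomes a loop: the arguments are stored back into the incoming argument
 * slots and we branch to the method's first basic block, skipping the
 * CEE_RET.
 */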
7482 if (((cfg->opt & MONO_OPT_TAILCALL) || inst_tailcall) && il_op == MONO_CEE_CALL && cmethod == method && next_ip < end && next_ip [0] == CEE_RET && !vtable_arg) {
7483 gboolean has_vtargs = FALSE;
7484 int i;
7486 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7487 INLINE_FAILURE ("tailcall");
7489 /* keep it simple */
7490 for (i = fsig->param_count - 1; !has_vtargs && i >= 0; i--)
7491 has_vtargs = MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod)->params [i]);
7493 if (!has_vtargs) {
7494 if (need_seq_point) {
7495 emit_seq_point (cfg, method, ip, FALSE, TRUE);
7496 need_seq_point = FALSE;
7498 for (i = 0; i < n; ++i)
7499 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7501 mini_profiler_emit_tail_call (cfg, cmethod);
7503 MONO_INST_NEW (cfg, ins, OP_BR);
7504 MONO_ADD_INS (cfg->cbb, ins);
7505 tblock = start_bblock->out_bb [0];
7506 link_bblock (cfg, cfg->cbb, tblock);
7507 ins->inst_target_bb = tblock;
7508 start_new_bblock = 1;
7510 /* skip the CEE_RET, too */
7511 if (ip_in_bb (cfg, cfg->cbb, next_ip))
7512 skip_ret = TRUE;
7513 push_res = FALSE;
7514 need_seq_point = FALSE;
7515 goto call_end;
7519 inline_costs += CALL_COST * MIN(10, num_calls++);
7522 * Synchronized wrappers.
7523 * It's hard to determine where to replace a method with its synchronized
7524 * wrapper without causing an infinite recursion. The current solution is
7525 * to add the synchronized wrapper in the trampolines, and to
7526 * change the called method to a dummy wrapper, and resolve that wrapper
7527 * to the real method in mono_jit_compile_method ().
7529 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
7530 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
7531 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig)) {
7532 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7533 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7538 * Making generic calls out of gsharedvt methods.
7539 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7540 * patching gshared method addresses into a gsharedvt method.
7542 if (make_generic_call_out_of_gsharedvt_method) {
7543 if (virtual_) {
7544 //if (mono_class_is_interface (cmethod->klass))
7545 //GSHAREDVT_FAILURE (il_op);
7546 // disable for possible remoting calls
7547 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
7548 GSHAREDVT_FAILURE (il_op);
7549 if (fsig->generic_param_count) {
7550 /* virtual generic call */
7551 g_assert (!imt_arg);
7552 g_assert (will_have_imt_arg);
7553 /* Same as the virtual generic case above */
7554 imt_arg = emit_get_rgctx_method (cfg, context_used,
7555 cmethod, MONO_RGCTX_INFO_METHOD);
7556 g_assert (imt_arg);
7557 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7558 vtable_arg = NULL;
7559 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
7560 /* This can happen when we call a fully instantiated iface method */
7561 g_assert (will_have_imt_arg);
7562 imt_arg = emit_get_rgctx_method (cfg, context_used,
7563 cmethod, MONO_RGCTX_INFO_METHOD);
7564 g_assert (imt_arg);
7565 vtable_arg = NULL;
7569 if ((m_class_get_parent (cmethod->klass) == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
7570 keep_this_alive = sp [0];
7572 MonoRgctxInfoType info_type;
7574 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7575 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
7576 else
7577 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
7578 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
7580 if (cfg->llvm_only) {
7581 // FIXME: Avoid initializing vtable_arg
7582 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
7583 if (inst_tailcall) // FIXME
7584 mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method->name, cmethod->name);
7585 } else {
7586 tailcall = tailcall_calli;
7587 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
7588 tailcall_remove_ret |= tailcall;
7590 goto call_end;
7593 /* Generic sharing */
7596 * Use this if the callee is gsharedvt sharable too, since
7597 * at runtime we might find an instantiation so the call cannot
7598 * be patched (the 'no_patch' code path in mini-trampolines.c).
7600 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
7601 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7602 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7603 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
7604 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7605 INLINE_FAILURE ("gshared");
7607 g_assert (cfg->gshared && cmethod);
7608 g_assert (!addr);
7611 * We are compiling a call to a
7612 * generic method from shared code,
7613 * which means that we have to look up
7614 * the method in the rgctx and do an
7615 * indirect call.
7617 if (fsig->hasthis)
7618 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7620 if (cfg->llvm_only) {
7621 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
7622 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
7623 else
7624 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7625 // FIXME: Avoid initializing imt_arg/vtable_arg
7626 ins = mini_emit_llvmonly_calli (cfg, fsig, sp, addr);
7627 if (inst_tailcall) // FIXME
7628 mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method->name, cmethod->name);
7629 } else {
7630 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7631 if (inst_tailcall)
7632 mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli ? "making" : "missed", method->name, cmethod->name);
7633 tailcall = tailcall_calli;
7634 ins = (MonoInst*)mini_emit_calli_full (cfg, fsig, sp, addr, imt_arg, vtable_arg, tailcall);
7635 tailcall_remove_ret |= tailcall;
7637 goto call_end;
7640 /* Direct calls to icalls */
7641 if (direct_icall) {
7642 MonoMethod *wrapper;
7643 int costs;
7645 /* Inline the wrapper */
7646 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
7648 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
7649 g_assert (costs > 0);
7650 cfg->real_offset += 5;
7652 if (!MONO_TYPE_IS_VOID (fsig->ret))
7653 /* *sp is already set by inline_method */
7654 ins = *sp;
7656 inline_costs += costs;
7658 if (inst_tailcall) // FIXME
7659 mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method->name, cmethod->name);
7660 goto call_end;
7663 /* Array methods */
7664 if (array_rank) {
7665 MonoInst *addr;
7667 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7668 MonoInst *val = sp [fsig->param_count];
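/*
 * Editor's note: storing an object reference into an array requires an
 * array covariance check against the runtime element type; a helper
 * performs it before the store.
 */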
7670 if (val->type == STACK_OBJ) {
7671 MonoInst *iargs [2];
7673 iargs [0] = sp [0];
7674 iargs [1] = val;
7676 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7679 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7680 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7681 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
7682 mini_emit_write_barrier (cfg, addr, val);
7683 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
7684 GSHAREDVT_FAILURE (il_op);
7685 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7686 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7688 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7689 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7690 if (!m_class_is_valuetype (m_class_get_element_class (cmethod->klass)) && !readonly)
7691 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7692 CHECK_TYPELOAD (cmethod->klass);
7694 readonly = FALSE;
7695 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7696 ins = addr;
7697 } else {
7698 g_assert_not_reached ();
7701 emit_widen = FALSE;
7702 if (inst_tailcall) // FIXME
7703 mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method->name, cmethod->name);
7704 goto call_end;
7707 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
7708 if (ins) {
7709 if (inst_tailcall) // FIXME
7710 mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method->name, cmethod->name);
7711 goto call_end;
7714 /* Tail prefix / tailcall optimization */
7716 if (tailcall) {
7717 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7718 INLINE_FAILURE ("tailcall");
7722 * Virtual calls in llvm-only mode.
7724 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
7725 ins = mini_emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
7726 goto call_end;
7729 /* Common call */
7730 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
7731 INLINE_FAILURE ("call");
7732 common_call = TRUE;
7734 call_end:
7735 // Check that the decision to tailcall would not have changed.
7736 g_assert (!called_is_supported_tailcall || tailcall_method == method);
7737 // FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway.
7738 // If this still fails, restructure the code, or call tailcall_supported again and assert no change.
7739 g_assert (!called_is_supported_tailcall || !tailcall || tailcall_cmethod == cmethod);
7740 g_assert (!called_is_supported_tailcall || tailcall_fsig == fsig);
7741 g_assert (!called_is_supported_tailcall || tailcall_virtual == virtual_);
7742 g_assert (!called_is_supported_tailcall || tailcall_extra_arg == (vtable_arg || imt_arg || will_have_imt_arg || mono_class_is_interface (cmethod->klass)));
7744 if (common_call) // FIXME goto call_end && !common_call often skips tailcall processing.
7745 ins = mini_emit_method_call_full (cfg, cmethod, fsig, tailcall, sp, virtual_ ? sp [0] : NULL,
7746 imt_arg, vtable_arg);
7749 * Handle devirt of some A.B.C calls by replacing the result of A.B with a OP_TYPED_OBJREF instruction, so the .C
7750 * call can be devirtualized above.
7752 if (cmethod)
7753 ins = handle_call_res_devirt (cfg, cmethod, ins);
7755 calli_end:
7756 if ((tailcall_remove_ret || (common_call && tailcall)) && !cfg->llvm_only) {
7757 link_bblock (cfg, cfg->cbb, end_bblock);
7758 start_new_bblock = 1;
7760 // FIXME: Eliminate unreachable epilogs
7763 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7764 * only reachable from this call.
7766 GET_BBLOCK (cfg, tblock, next_ip);
7767 if (tblock == cfg->cbb || tblock->in_count == 0)
7768 skip_ret = TRUE;
7769 push_res = FALSE;
7770 need_seq_point = FALSE;
7773 if (ins_flag & MONO_INST_TAILCALL)
7774 mini_test_tailcall (cfg, tailcall);
7776 /* End of call, INS should contain the result of the call, if any */
7778 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
7779 g_assert (ins);
7780 if (emit_widen)
7781 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7782 else
7783 *sp++ = ins;
7786 if (keep_this_alive) {
7787 MonoInst *dummy_use;
7789 /* See mini_emit_method_call_full () */
7790 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
7793 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
7795 * Clang can convert these calls to tailcalls which screw up the stack
7796 * walk. This happens even when the -fno-optimize-sibling-calls
7797 * option is passed to clang.
7798 * Work around this by emitting a dummy call.
7800 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
7803 CHECK_CFG_EXCEPTION;
7805 if (skip_ret) {
7806 // FIXME When not followed by CEE_RET, correct behavior is to raise an exception.
7807 g_assert (next_ip [0] == CEE_RET);
7808 next_ip += 1;
7809 il_op = MonoOpcodeEnum_Invalid; // Call or ret? Unclear.
7811 ins_flag = 0;
7812 constrained_class = NULL;
7813 if (need_seq_point)
7814 emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
7815 break;
7817 case MONO_CEE_RET:
7818 mini_profiler_emit_leave (cfg, sig->ret->type != MONO_TYPE_VOID ? sp [-1] : NULL);
7820 if (cfg->method != method) {
7821 /* return from inlined method */
7823 * If in_count == 0, that means the ret is unreachable due to
7824 * being preceded by a throw. In that case, inline_method () will
7825 * handle setting the return value
7826 * (test case: test_0_inline_throw ()).
7828 if (return_var && cfg->cbb->in_count) {
7829 MonoType *ret_type = mono_method_signature_internal (method)->ret;
7831 MonoInst *store;
7832 CHECK_STACK (1);
7833 --sp;
7834 *sp = convert_value (cfg, ret_type, *sp);
7836 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7837 UNVERIFIED;
7839 //g_assert (returnvar != -1);
7840 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7841 cfg->ret_var_set = TRUE;
7843 } else {
7844 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
7845 emit_pop_lmf (cfg);
7847 if (cfg->ret) {
7848 MonoType *ret_type = mini_get_underlying_type (mono_method_signature_internal (method)->ret);
7850 if (seq_points && !sym_seq_points) {
7852 * Place a seq point here too even though the IL stack is not
7853 * empty, so a step over on
7854 * call <FOO>
7855 * ret
7856 * will work correctly.
7858 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7859 MONO_ADD_INS (cfg->cbb, ins);
7862 g_assert (!return_var);
7863 CHECK_STACK (1);
7864 --sp;
7865 *sp = convert_value (cfg, ret_type, *sp);
7867 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7868 UNVERIFIED;
7870 emit_setret (cfg, *sp);
7873 if (sp != stack_start)
7874 UNVERIFIED;
7875 MONO_INST_NEW (cfg, ins, OP_BR);
7876 ins->inst_target_bb = end_bblock;
7877 MONO_ADD_INS (cfg->cbb, ins);
7878 link_bblock (cfg, cfg->cbb, end_bblock);
7879 start_new_bblock = 1;
7880 break;
7881 case MONO_CEE_BR_S:
7882 MONO_INST_NEW (cfg, ins, OP_BR);
7883 GET_BBLOCK (cfg, tblock, target);
7884 link_bblock (cfg, cfg->cbb, tblock);
7885 ins->inst_target_bb = tblock;
7886 if (sp != stack_start) {
7887 handle_stack_args (cfg, stack_start, sp - stack_start);
7888 sp = stack_start;
7889 CHECK_UNVERIFIABLE (cfg);
7891 MONO_ADD_INS (cfg->cbb, ins);
7892 start_new_bblock = 1;
7893 inline_costs += BRANCH_COST;
7894 break;
7895 case MONO_CEE_BEQ_S:
7896 case MONO_CEE_BGE_S:
7897 case MONO_CEE_BGT_S:
7898 case MONO_CEE_BLE_S:
7899 case MONO_CEE_BLT_S:
7900 case MONO_CEE_BNE_UN_S:
7901 case MONO_CEE_BGE_UN_S:
7902 case MONO_CEE_BGT_UN_S:
7903 case MONO_CEE_BLE_UN_S:
7904 case MONO_CEE_BLT_UN_S:
7905 MONO_INST_NEW (cfg, ins, il_op + BIG_BRANCH_OFFSET);
7907 ADD_BINCOND (NULL);
7909 sp = stack_start;
7910 inline_costs += BRANCH_COST;
7911 break;
7912 case MONO_CEE_BR:
7913 MONO_INST_NEW (cfg, ins, OP_BR);
7915 GET_BBLOCK (cfg, tblock, target);
7916 link_bblock (cfg, cfg->cbb, tblock);
7917 ins->inst_target_bb = tblock;
7918 if (sp != stack_start) {
7919 handle_stack_args (cfg, stack_start, sp - stack_start);
7920 sp = stack_start;
7921 CHECK_UNVERIFIABLE (cfg);
7924 MONO_ADD_INS (cfg->cbb, ins);
7926 start_new_bblock = 1;
7927 inline_costs += BRANCH_COST;
7928 break;
7929 case MONO_CEE_BRFALSE_S:
7930 case MONO_CEE_BRTRUE_S:
7931 case MONO_CEE_BRFALSE:
7932 case MONO_CEE_BRTRUE: {
7933 MonoInst *cmp;
7934 gboolean is_true = il_op == MONO_CEE_BRTRUE_S || il_op == MONO_CEE_BRTRUE;
7936 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7937 UNVERIFIED;
7939 sp--;
7941 GET_BBLOCK (cfg, tblock, target);
7942 link_bblock (cfg, cfg->cbb, tblock);
7943 GET_BBLOCK (cfg, tblock, next_ip);
7944 link_bblock (cfg, cfg->cbb, tblock);
7946 if (sp != stack_start) {
7947 handle_stack_args (cfg, stack_start, sp - stack_start);
7948 CHECK_UNVERIFIABLE (cfg);
7951 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7952 cmp->sreg1 = sp [0]->dreg;
7953 type_from_op (cfg, cmp, sp [0], NULL);
7954 CHECK_TYPE (cmp);
7956 #if SIZEOF_REGISTER == 4
7957 if (cmp->opcode == OP_LCOMPARE_IMM) {
7958 /* Convert it to OP_LCOMPARE */
7959 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7960 ins->type = STACK_I8;
7961 ins->dreg = alloc_dreg (cfg, STACK_I8);
7962 ins->inst_l = 0;
7963 MONO_ADD_INS (cfg->cbb, ins);
7964 cmp->opcode = OP_LCOMPARE;
7965 cmp->sreg2 = ins->dreg;
7967 #endif
7968 MONO_ADD_INS (cfg->cbb, cmp);
7970 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7971 type_from_op (cfg, ins, sp [0], NULL);
7972 MONO_ADD_INS (cfg->cbb, ins);
7973 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
7974 GET_BBLOCK (cfg, tblock, target);
7975 ins->inst_true_bb = tblock;
7976 GET_BBLOCK (cfg, tblock, next_ip);
7977 ins->inst_false_bb = tblock;
7978 start_new_bblock = 2;
7980 sp = stack_start;
7981 inline_costs += BRANCH_COST;
7982 break;
7984 case MONO_CEE_BEQ:
7985 case MONO_CEE_BGE:
7986 case MONO_CEE_BGT:
7987 case MONO_CEE_BLE:
7988 case MONO_CEE_BLT:
7989 case MONO_CEE_BNE_UN:
7990 case MONO_CEE_BGE_UN:
7991 case MONO_CEE_BGT_UN:
7992 case MONO_CEE_BLE_UN:
7993 case MONO_CEE_BLT_UN:
7994 MONO_INST_NEW (cfg, ins, il_op);
7996 ADD_BINCOND (NULL);
7998 sp = stack_start;
7999 inline_costs += BRANCH_COST;
8000 break;
8001 case MONO_CEE_SWITCH: {
8002 MonoInst *src1;
8003 MonoBasicBlock **targets;
8004 MonoBasicBlock *default_bblock;
8005 MonoJumpInfoBBTable *table;
8006 int offset_reg = alloc_preg (cfg);
8007 int target_reg = alloc_preg (cfg);
8008 int table_reg = alloc_preg (cfg);
8009 int sum_reg = alloc_preg (cfg);
8010 gboolean use_op_switch;
8012 n = read32 (ip + 1);
8013 --sp;
8014 src1 = sp [0];
8015 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8016 UNVERIFIED;
8018 ip += 5;
8020 GET_BBLOCK (cfg, default_bblock, next_ip);
8021 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8023 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8024 for (i = 0; i < n; ++i) {
8025 GET_BBLOCK (cfg, tblock, next_ip + (gint32)read32 (ip));
8026 targets [i] = tblock;
8027 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8028 ip += 4;
8031 if (sp != stack_start) {
8033 * Link the current bb with the targets as well, so handle_stack_args
8034 * will set their in_stack correctly.
8036 link_bblock (cfg, cfg->cbb, default_bblock);
8037 for (i = 0; i < n; ++i)
8038 link_bblock (cfg, cfg->cbb, targets [i]);
8040 handle_stack_args (cfg, stack_start, sp - stack_start);
8041 sp = stack_start;
8042 CHECK_UNVERIFIABLE (cfg);
8044 /* Undo the links */
8045 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
8046 for (i = 0; i < n; ++i)
8047 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
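/*
 * Editor's note: jump table lowering: an unsigned compare sends values >= n
 * to the default block, then (unless OP_SWITCH is used) the target address
 * is loaded from the table at the switch value's index and branched to
 * indirectly.
 */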
8050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8051 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8053 for (i = 0; i < n; ++i)
8054 link_bblock (cfg, cfg->cbb, targets [i]);
8056 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8057 table->table = targets;
8058 table->table_size = n;
8060 use_op_switch = FALSE;
8061 #ifdef TARGET_ARM
8062 /* ARM implements SWITCH statements differently */
8063 /* FIXME: Make it use the generic implementation */
8064 if (!cfg->compile_aot)
8065 use_op_switch = TRUE;
8066 #endif
8068 if (COMPILE_LLVM (cfg))
8069 use_op_switch = TRUE;
8071 cfg->cbb->has_jump_table = 1;
8073 if (use_op_switch) {
8074 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8075 ins->sreg1 = src1->dreg;
8076 ins->inst_p0 = table;
8077 ins->inst_many_bb = targets;
8078 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
8079 MONO_ADD_INS (cfg->cbb, ins);
8080 } else {
8081 if (TARGET_SIZEOF_VOID_P == 8)
8082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8083 else
8084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8086 #if SIZEOF_REGISTER == 8
8087 /* The upper word might not be zero, and we add it to a 64 bit address later */
8088 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8089 #endif
8091 if (cfg->compile_aot) {
8092 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8093 } else {
8094 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8095 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8096 ins->inst_p0 = table;
8097 ins->dreg = table_reg;
8098 MONO_ADD_INS (cfg->cbb, ins);
8101 /* FIXME: Use load_memindex */
8102 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8104 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8106 start_new_bblock = 1;
8107 inline_costs += BRANCH_COST * 2;
8108 break;
8110 case MONO_CEE_LDIND_I1:
8111 case MONO_CEE_LDIND_U1:
8112 case MONO_CEE_LDIND_I2:
8113 case MONO_CEE_LDIND_U2:
8114 case MONO_CEE_LDIND_I4:
8115 case MONO_CEE_LDIND_U4:
8116 case MONO_CEE_LDIND_I8:
8117 case MONO_CEE_LDIND_I:
8118 case MONO_CEE_LDIND_R4:
8119 case MONO_CEE_LDIND_R8:
8120 case MONO_CEE_LDIND_REF:
8121 --sp;
8123 ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (ldind_to_type (il_op)), sp [0], 0, ins_flag);
8124 *sp++ = ins;
8125 ins_flag = 0;
8126 break;
8127 case MONO_CEE_STIND_REF:
8128 case MONO_CEE_STIND_I1:
8129 case MONO_CEE_STIND_I2:
8130 case MONO_CEE_STIND_I4:
8131 case MONO_CEE_STIND_I8:
8132 case MONO_CEE_STIND_R4:
8133 case MONO_CEE_STIND_R8:
8134 case MONO_CEE_STIND_I: {
8135 sp -= 2;
8137 if (ins_flag & MONO_INST_VOLATILE) {
8138 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8139 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
8142 if (il_op == MONO_CEE_STIND_R4 && sp [1]->type == STACK_R8)
8143 sp [1] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.single_class), sp [1]);
8144 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (il_op), sp [0]->dreg, 0, sp [1]->dreg);
8145 ins->flags |= ins_flag;
8146 ins_flag = 0;
8148 MONO_ADD_INS (cfg->cbb, ins);
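/*
 * Editor's note: reference stores also need a GC write barrier, except
 * inside the write barrier wrapper itself and when storing a constant null.
 */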
8150 if (il_op == MONO_CEE_STIND_REF) {
8151 /* stind.ref must only be used with object references. */
8152 if (sp [1]->type != STACK_OBJ)
8153 UNVERIFIED;
8154 if (cfg->gen_write_barriers && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
8155 mini_emit_write_barrier (cfg, sp [0], sp [1]);
8158 inline_costs += 1;
8159 break;
8161 case MONO_CEE_MUL:
8162 MONO_INST_NEW (cfg, ins, il_op);
8163 sp -= 2;
8164 ins->sreg1 = sp [0]->dreg;
8165 ins->sreg2 = sp [1]->dreg;
8166 type_from_op (cfg, ins, sp [0], sp [1]);
8167 CHECK_TYPE (ins);
8168 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
8170 /* Use the immediate opcodes if possible */
8171 int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8173 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->inst_c0)) {
8174 if (imm_opcode != -1) {
8175 ins->opcode = imm_opcode;
8176 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8177 ins->sreg2 = -1;
8179 NULLIFY_INS (sp [1]);
8183 MONO_ADD_INS ((cfg)->cbb, (ins));
8185 *sp++ = mono_decompose_opcode (cfg, ins);
8186 break;
8187 case MONO_CEE_ADD:
8188 case MONO_CEE_SUB:
8189 case MONO_CEE_DIV:
8190 case MONO_CEE_DIV_UN:
8191 case MONO_CEE_REM:
8192 case MONO_CEE_REM_UN:
8193 case MONO_CEE_AND:
8194 case MONO_CEE_OR:
8195 case MONO_CEE_XOR:
8196 case MONO_CEE_SHL:
8197 case MONO_CEE_SHR:
8198 case MONO_CEE_SHR_UN: {
8199 MONO_INST_NEW (cfg, ins, il_op);
8200 sp -= 2;
8201 ins->sreg1 = sp [0]->dreg;
8202 ins->sreg2 = sp [1]->dreg;
8203 type_from_op (cfg, ins, sp [0], sp [1]);
8204 CHECK_TYPE (ins);
8205 add_widen_op (cfg, ins, &sp [0], &sp [1]);
8206 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
8208 /* Use the immediate opcodes if possible */
8209 int imm_opcode; imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8211 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) &&
8212 mono_arch_is_inst_imm (ins->opcode, imm_opcode, sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8213 if (imm_opcode != -1) {
8214 ins->opcode = imm_opcode;
8215 if (sp [1]->opcode == OP_I8CONST) {
8216 #if SIZEOF_REGISTER == 8
8217 ins->inst_imm = sp [1]->inst_l;
8218 #else
8219 ins->inst_l = sp [1]->inst_l;
8220 #endif
8221 } else {
8222 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8224 ins->sreg2 = -1;
8226 /* Might be followed by an instruction added by add_widen_op */
8227 if (sp [1]->next == NULL)
8228 NULLIFY_INS (sp [1]);
8231 MONO_ADD_INS ((cfg)->cbb, (ins));
8233 *sp++ = mono_decompose_opcode (cfg, ins);
8234 break;
8236 case MONO_CEE_NEG:
8237 case MONO_CEE_NOT:
8238 case MONO_CEE_CONV_I1:
8239 case MONO_CEE_CONV_I2:
8240 case MONO_CEE_CONV_I4:
8241 case MONO_CEE_CONV_R4:
8242 case MONO_CEE_CONV_R8:
8243 case MONO_CEE_CONV_U4:
8244 case MONO_CEE_CONV_I8:
8245 case MONO_CEE_CONV_U8:
8246 case MONO_CEE_CONV_OVF_I8:
8247 case MONO_CEE_CONV_OVF_U8:
8248 case MONO_CEE_CONV_R_UN:
8249 /* Special case this earlier so we have long constants in the IR */
8250 if ((il_op == MONO_CEE_CONV_I8 || il_op == MONO_CEE_CONV_U8) && (sp [-1]->opcode == OP_ICONST)) {
8251 int data = sp [-1]->inst_c0;
8252 sp [-1]->opcode = OP_I8CONST;
8253 sp [-1]->type = STACK_I8;
8254 #if SIZEOF_REGISTER == 8
8255 if (il_op == MONO_CEE_CONV_U8)
8256 sp [-1]->inst_c0 = (guint32)data;
8257 else
8258 sp [-1]->inst_c0 = data;
8259 #else
8260 if (il_op == MONO_CEE_CONV_U8)
8261 sp [-1]->inst_l = (guint32)data;
8262 else
8263 sp [-1]->inst_l = data;
8264 #endif
8265 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8267 else {
8268 ADD_UNOP (il_op);
8270 break;
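/*
 * Editor's note: checked narrowing conversions with a floating point source
 * are split in two: first a checked conversion to i8, then a second checked
 * narrowing from i8 to the final integer type.
 */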
8271 case MONO_CEE_CONV_OVF_I4:
8272 case MONO_CEE_CONV_OVF_I1:
8273 case MONO_CEE_CONV_OVF_I2:
8274 case MONO_CEE_CONV_OVF_I:
8275 case MONO_CEE_CONV_OVF_U:
8276 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
8277 ADD_UNOP (CEE_CONV_OVF_I8);
8278 ADD_UNOP (il_op);
8279 } else {
8280 ADD_UNOP (il_op);
8282 break;
8283 case MONO_CEE_CONV_OVF_U1:
8284 case MONO_CEE_CONV_OVF_U2:
8285 case MONO_CEE_CONV_OVF_U4:
8286 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
8287 ADD_UNOP (CEE_CONV_OVF_U8);
8288 ADD_UNOP (il_op);
8289 } else {
8290 ADD_UNOP (il_op);
8292 break;
8293 case MONO_CEE_CONV_OVF_I1_UN:
8294 case MONO_CEE_CONV_OVF_I2_UN:
8295 case MONO_CEE_CONV_OVF_I4_UN:
8296 case MONO_CEE_CONV_OVF_I8_UN:
8297 case MONO_CEE_CONV_OVF_U1_UN:
8298 case MONO_CEE_CONV_OVF_U2_UN:
8299 case MONO_CEE_CONV_OVF_U4_UN:
8300 case MONO_CEE_CONV_OVF_U8_UN:
8301 case MONO_CEE_CONV_OVF_I_UN:
8302 case MONO_CEE_CONV_OVF_U_UN:
8303 case MONO_CEE_CONV_U2:
8304 case MONO_CEE_CONV_U1:
8305 case MONO_CEE_CONV_I:
8306 case MONO_CEE_CONV_U:
8307 ADD_UNOP (il_op);
8308 CHECK_CFG_EXCEPTION;
8309 break;
8310 case MONO_CEE_ADD_OVF:
8311 case MONO_CEE_ADD_OVF_UN:
8312 case MONO_CEE_MUL_OVF:
8313 case MONO_CEE_MUL_OVF_UN:
8314 case MONO_CEE_SUB_OVF:
8315 case MONO_CEE_SUB_OVF_UN:
8316 ADD_BINOP (il_op);
8317 break;
8318 case MONO_CEE_CPOBJ:
8319 GSHAREDVT_FAILURE (il_op);
8320 GSHAREDVT_FAILURE (*ip);
8321 klass = mini_get_class (method, token, generic_context);
8322 CHECK_TYPELOAD (klass);
8323 sp -= 2;
8324 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
8325 ins_flag = 0;
8326 break;
8327 case MONO_CEE_LDOBJ: {
8328 int loc_index = -1;
8329 int stloc_len = 0;
8331 --sp;
8332 klass = mini_get_class (method, token, generic_context);
8333 CHECK_TYPELOAD (klass);
8335 /* Optimize the common ldobj+stloc combination */
8336 if (next_ip < end) {
8337 switch (next_ip [0]) {
8338 case MONO_CEE_STLOC_S:
8339 CHECK_OPSIZE (7);
8340 loc_index = next_ip [1];
8341 stloc_len = 2;
8342 break;
8343 case MONO_CEE_STLOC_0:
8344 case MONO_CEE_STLOC_1:
8345 case MONO_CEE_STLOC_2:
8346 case MONO_CEE_STLOC_3:
8347 loc_index = next_ip [0] - CEE_STLOC_0;
8348 stloc_len = 1;
8349 break;
8350 default:
8351 break;
8355 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, next_ip)) {
8356 CHECK_LOCAL (loc_index);
8358 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), sp [0]->dreg, 0);
8359 ins->dreg = cfg->locals [loc_index]->dreg;
8360 ins->flags |= ins_flag;
8361 il_op = (MonoOpcodeEnum)next_ip [0];
8362 next_ip += stloc_len;
8363 if (ins_flag & MONO_INST_VOLATILE) {
8364 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8365 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
8367 ins_flag = 0;
8368 break;
8371 /* Optimize the ldobj+stobj combination */
8372 if (next_ip + 4 < end && next_ip [0] == CEE_STOBJ && ip_in_bb (cfg, cfg->cbb, next_ip) && read32 (next_ip + 1) == token) {
8373 CHECK_STACK (1);
8375 sp --;
8377 mini_emit_memory_copy (cfg, sp [0], sp [1], klass, FALSE, ins_flag);
8379 il_op = (MonoOpcodeEnum)next_ip [0];
8380 next_ip += 5;
8381 ins_flag = 0;
8382 break;
8385 ins = mini_emit_memory_load (cfg, m_class_get_byval_arg (klass), sp [0], 0, ins_flag);
8386 *sp++ = ins;
8388 ins_flag = 0;
8389 inline_costs += 1;
8390 break;
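/*
 * Editor's note: for ldstr, wrapper methods take the string (or its literal)
 * from the wrapper data; otherwise the interned MonoString is embedded
 * directly, with AOT and shared code falling back to a lookup by image and
 * token at run time.
 */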
8392 case MONO_CEE_LDSTR:
8393 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8394 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8395 ins->type = STACK_OBJ;
8396 *sp = ins;
8398 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8399 MonoInst *iargs [1];
8400 char *str = (char *)mono_method_get_wrapper_data (method, n);
8402 if (cfg->compile_aot)
8403 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
8404 else
8405 EMIT_NEW_PCONST (cfg, iargs [0], str);
8406 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper_internal, iargs);
8407 } else {
8408 if (cfg->opt & MONO_OPT_SHARED) {
8409 MonoInst *iargs [3];
8411 if (cfg->compile_aot) {
8412 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8414 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8415 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8416 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8417 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
8418 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
8419 CHECK_CFG_ERROR;
8420 } else {
8421 if (cfg->cbb->out_of_line) {
8422 MonoInst *iargs [2];
8424 if (image == mono_defaults.corlib) {
8426 * Avoid relocations in AOT and save some space by using a
8427 * version of helper_ldstr specialized to mscorlib.
8429 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8430 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8431 } else {
8432 /* Avoid creating the string object */
8433 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8434 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8435 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8438 else
8439 if (cfg->compile_aot) {
8440 NEW_LDSTRCONST (cfg, ins, image, n);
8441 *sp = ins;
8442 MONO_ADD_INS (cfg->cbb, ins);
8444 else {
8445 NEW_PCONST (cfg, ins, NULL);
8446 ins->type = STACK_OBJ;
8447 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
8448 CHECK_CFG_ERROR;
8450 if (!ins->inst_p0)
8451 OUT_OF_MEMORY_FAILURE;
8453 *sp = ins;
8454 MONO_ADD_INS (cfg->cbb, ins);
8459 sp++;
8460 break;
8461 case MONO_CEE_NEWOBJ: {
8462 MonoInst *iargs [2];
8463 MonoMethodSignature *fsig;
8464 MonoInst this_ins;
8465 MonoInst *alloc;
8466 MonoInst *vtable_arg = NULL;
8468 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8469 CHECK_CFG_ERROR;
8471 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8472 CHECK_CFG_ERROR;
8474 mono_save_token_info (cfg, image, token, cmethod);
8476 if (!mono_class_init_internal (cmethod->klass))
8477 TYPE_LOAD_ERROR (cmethod->klass);
8479 context_used = mini_method_check_context_used (cfg, cmethod);
8481 if (!dont_verify && !cfg->skip_visibility) {
8482 MonoMethod *cil_method = cmethod;
8483 MonoMethod *target_method = cil_method;
8485 if (method->is_inflated) {
8486 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8487 CHECK_CFG_ERROR;
8490 if (!mono_method_can_access_method (method_definition, target_method) &&
8491 !mono_method_can_access_method (method, cil_method))
8492 emit_method_access_failure (cfg, method, cil_method);
8495 if (mono_security_core_clr_enabled ())
8496 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
8498 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8499 emit_class_init (cfg, cmethod->klass);
8500 CHECK_TYPELOAD (cmethod->klass);
8504 if (cfg->gsharedvt) {
8505 if (mini_is_gsharedvt_variable_signature (sig))
8506 GSHAREDVT_FAILURE (il_op);
8510 n = fsig->param_count;
8511 CHECK_STACK (n);
8514 /* Generate smaller code for the common newobj <exception> instruction in
8515 * argument checking code. */
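/* Editor's illustration (not in the original source): in a cold (out_of_line)
 * block, "throw new ArgumentNullException ("arg")" becomes a call to
 * mono_create_corlib_exception_1 with the type token and the message string,
 * avoiding the full allocation + ctor sequence for rarely thrown exceptions. */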
8517 if (cfg->cbb->out_of_line && m_class_get_image (cmethod->klass) == mono_defaults.corlib &&
8518 is_exception_class (cmethod->klass) && n <= 2 &&
8519 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8520 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8521 MonoInst *iargs [3];
8523 sp -= n;
8525 EMIT_NEW_ICONST (cfg, iargs [0], m_class_get_type_token (cmethod->klass));
8526 switch (n) {
8527 case 0:
8528 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8529 break;
8530 case 1:
8531 iargs [1] = sp [0];
8532 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8533 break;
8534 case 2:
8535 iargs [1] = sp [0];
8536 iargs [2] = sp [1];
8537 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8538 break;
8539 default:
8540 g_assert_not_reached ();
8543 inline_costs += 5;
8544 break;
8547 /* move the args to allow room for 'this' in the first position */
8548 while (n--) {
8549 --sp;
8550 sp [1] = sp [0];
8553 for (int i = 0; i < fsig->param_count; ++i)
8554 sp [i + fsig->hasthis] = convert_value (cfg, fsig->params [i], sp [i + fsig->hasthis]);
8556 /* check_call_signature () requires sp[0] to be set */
8557 this_ins.type = STACK_OBJ;
8558 sp [0] = &this_ins;
8559 if (check_call_signature (cfg, fsig, sp))
8560 UNVERIFIED;
8562 iargs [0] = NULL;
8564 if (mini_class_is_system_array (cmethod->klass)) {
8565 *sp = emit_get_rgctx_method (cfg, context_used,
8566 cmethod, MONO_RGCTX_INFO_METHOD);
8568 /* Avoid varargs in the common case */
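/* Editor's note: for System.Array ctors the arity is the number of dimension
 * arguments, so e.g. "new int[2,3]" takes the mono_array_new_2 fast path,
 * while higher ranks fall back to the vararg handle_array_new () helper. */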
8569 if (fsig->param_count == 1)
8570 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8571 else if (fsig->param_count == 2)
8572 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8573 else if (fsig->param_count == 3)
8574 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8575 else if (fsig->param_count == 4)
8576 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
8577 else
8578 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8579 } else if (cmethod->string_ctor) {
8580 g_assert (!context_used);
8581 g_assert (!vtable_arg);
8582 /* we simply pass a null pointer */
8583 EMIT_NEW_PCONST (cfg, *sp, NULL);
8584 /* now call the string ctor */
8585 alloc = mini_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
8586 } else {
8587 if (m_class_is_valuetype (cmethod->klass)) {
8588 iargs [0] = mono_compile_create_var (cfg, m_class_get_byval_arg (cmethod->klass), OP_LOCAL);
8589 emit_init_rvar (cfg, iargs [0]->dreg, m_class_get_byval_arg (cmethod->klass));
8590 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8592 alloc = NULL;
8595 /* The code generated by mini_emit_virtual_call () expects
8596 * iargs [0] to be a boxed instance, but luckily the vcall
8597 * will be transformed into a normal call there. */
8599 } else if (context_used) {
8600 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8601 *sp = alloc;
8602 } else {
8603 MonoVTable *vtable = NULL;
8605 if (!cfg->compile_aot)
8606 vtable = mono_class_vtable_checked (cfg->domain, cmethod->klass, &cfg->error);
8607 CHECK_CFG_ERROR;
8608 CHECK_TYPELOAD (cmethod->klass);
8611 /* TypeInitializationExceptions thrown from the mono_runtime_class_init
8612 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8613 * As a workaround, we call class cctors before allocating objects. */
8615 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
8616 emit_class_init (cfg, cmethod->klass);
8617 if (cfg->verbose_level > 2)
8618 printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod->klass), m_class_get_name (cmethod->klass));
8619 class_inits = g_slist_prepend (class_inits, cmethod->klass);
8622 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8623 *sp = alloc;
8625 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8627 if (alloc)
8628 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8630 /* Now call the actual ctor */
8631 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
8632 CHECK_CFG_EXCEPTION;
8635 if (alloc == NULL) {
8636 /* Valuetype */
8637 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8638 mini_type_to_eval_stack_type (cfg, m_class_get_byval_arg (ins->klass), ins);
8639 *sp++= ins;
8640 } else {
8641 *sp++ = alloc;
8644 inline_costs += 5;
8645 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, next_ip - header->code)))
8646 emit_seq_point (cfg, method, next_ip, FALSE, TRUE);
8647 break;
8649 case MONO_CEE_CASTCLASS:
8650 case MONO_CEE_ISINST: {
8651 --sp;
8652 klass = mini_get_class (method, token, generic_context);
8653 CHECK_TYPELOAD (klass);
8654 if (sp [0]->type != STACK_OBJ)
8655 UNVERIFIED;
8657 MONO_INST_NEW (cfg, ins, (il_op == MONO_CEE_ISINST) ? OP_ISINST : OP_CASTCLASS);
8658 ins->dreg = alloc_preg (cfg);
8659 ins->sreg1 = (*sp)->dreg;
8660 ins->klass = klass;
8661 ins->type = STACK_OBJ;
8662 MONO_ADD_INS (cfg->cbb, ins);
8664 CHECK_CFG_EXCEPTION;
8665 *sp++ = ins;
8667 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
8668 break;
8670 case MONO_CEE_UNBOX_ANY: {
8671 MonoInst *res, *addr;
8673 --sp;
8674 klass = mini_get_class (method, token, generic_context);
8675 CHECK_TYPELOAD (klass);
8677 mono_save_token_info (cfg, image, token, klass);
8679 context_used = mini_class_check_context_used (cfg, klass);
8681 if (mini_is_gsharedvt_klass (klass)) {
8682 res = handle_unbox_gsharedvt (cfg, klass, *sp);
8683 inline_costs += 2;
8684 } else if (mini_class_is_reference (klass)) {
8685 if (MONO_INS_IS_PCONST_NULL (*sp)) {
8686 EMIT_NEW_PCONST (cfg, res, NULL);
8687 res->type = STACK_OBJ;
8688 } else {
8689 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
8690 res->dreg = alloc_preg (cfg);
8691 res->sreg1 = (*sp)->dreg;
8692 res->klass = klass;
8693 res->type = STACK_OBJ;
8694 MONO_ADD_INS (cfg->cbb, res);
8695 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
8697 } else if (mono_class_is_nullable (klass)) {
8698 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
8699 } else {
8700 addr = handle_unbox (cfg, klass, sp, context_used);
8701 /* LDOBJ */
8702 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
8703 res = ins;
8704 inline_costs += 2;
8707 *sp ++ = res;
8708 break;
8710 case MONO_CEE_BOX: {
8711 MonoInst *val;
8712 MonoClass *enum_class;
8713 MonoMethod *has_flag;
8715 --sp;
8716 val = *sp;
8717 klass = mini_get_class (method, token, generic_context);
8718 CHECK_TYPELOAD (klass);
8720 mono_save_token_info (cfg, image, token, klass);
8722 context_used = mini_class_check_context_used (cfg, klass);
8724 if (mini_class_is_reference (klass)) {
8725 *sp++ = val;
8726 break;
8729 val = convert_value (cfg, m_class_get_byval_arg (klass), val);
8731 if (klass == mono_defaults.void_class)
8732 UNVERIFIED;
8733 if (target_type_is_incompatible (cfg, m_class_get_byval_arg (klass), val))
8734 UNVERIFIED;
8735 /* frequent check in generic code: box (struct), brtrue */
8738 /* Look for:
8740 * <push int/long ptr>
8741 * <push int/long>
8742 * box MyFlags
8743 * constrained. MyFlags
8744 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
8746 * If we find this sequence and the operand types on box and constrained
8747 * are equal, we can emit a specialized instruction sequence instead of
8748 * the very slow HasFlag () call.
8749 * This code sequence is generated by older mcs/csc; the newer one is handled in
8750 * emit_inst_for_method (). */
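/* Editor's illustration (not in the original source): C# like
 *   [Flags] enum MyFlags { A = 1, B = 2 }
 *   bool b = f.HasFlag (MyFlags.A);
 * compiled by older compilers yields exactly the boxed sequence above;
 * mini_handle_enum_has_flag () reduces it to essentially a bitwise AND plus
 * compare, with no boxing and no virtual call. */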
8752 guint32 constrained_token;
8753 guint32 callvirt_token;
8755 if ((cfg->opt & MONO_OPT_INTRINS) &&
8756 // FIXME ip_in_bb as we go?
8757 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8758 (ip = il_read_constrained (next_ip, end, &constrained_token)) &&
8759 ip_in_bb (cfg, cfg->cbb, ip) &&
8760 (ip = il_read_callvirt (ip, end, &callvirt_token)) &&
8761 ip_in_bb (cfg, cfg->cbb, ip) &&
8762 m_class_is_enumtype (klass) &&
8763 (enum_class = mini_get_class (method, constrained_token, generic_context)) &&
8764 (has_flag = mini_get_method (cfg, method, callvirt_token, NULL, generic_context)) &&
8765 has_flag->klass == mono_defaults.enum_class &&
8766 !strcmp (has_flag->name, "HasFlag") &&
8767 has_flag->signature->hasthis &&
8768 has_flag->signature->param_count == 1) {
8769 CHECK_TYPELOAD (enum_class);
8771 if (enum_class == klass) {
8772 MonoInst *enum_this, *enum_flag;
8774 next_ip = ip;
8775 il_op = MONO_CEE_CALLVIRT;
8776 --sp;
8778 enum_this = sp [0];
8779 enum_flag = sp [1];
8781 *sp++ = mini_handle_enum_has_flag (cfg, klass, enum_this, -1, enum_flag);
8782 break;
8786 gboolean is_true;
8788 // FIXME: LLVM can't handle the inconsistent bb linking
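/* Editor's note: boxing a non-nullable value type can never yield null, so
 * when "box T" is immediately consumed by brtrue/brfalse (e.g. C#
 * "void F<T> (T t) { if (t == null) return; }" in shared code) the branch
 * outcome is known statically and the box itself can be skipped. */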
8789 if (!mono_class_is_nullable (klass) &&
8790 !mini_is_gsharedvt_klass (klass) &&
8791 next_ip < end && ip_in_bb (cfg, cfg->cbb, next_ip) &&
8792 ( (is_true = !!(ip = il_read_brtrue (next_ip, end, &target))) ||
8793 (is_true = !!(ip = il_read_brtrue_s (next_ip, end, &target))) ||
8794 (ip = il_read_brfalse (next_ip, end, &target)) ||
8795 (ip = il_read_brfalse_s (next_ip, end, &target)))) {
8797 int dreg;
8798 MonoBasicBlock *true_bb, *false_bb;
8800 il_op = (MonoOpcodeEnum)next_ip [0];
8801 next_ip = ip;
8803 if (cfg->verbose_level > 3) {
8804 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8805 printf ("<box+brtrue opt>\n");
8809 /* We need to link both bblocks, since it is needed for handling stack
8810 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8811 * Branching to only one of them would lead to inconsistencies, so
8812 * generate an ICONST+BRTRUE, the branch opts will get rid of them. */
8814 GET_BBLOCK (cfg, true_bb, target);
8815 GET_BBLOCK (cfg, false_bb, next_ip);
8817 mono_link_bblock (cfg, cfg->cbb, true_bb);
8818 mono_link_bblock (cfg, cfg->cbb, false_bb);
8820 if (sp != stack_start) {
8821 handle_stack_args (cfg, stack_start, sp - stack_start);
8822 sp = stack_start;
8823 CHECK_UNVERIFIABLE (cfg);
8826 if (COMPILE_LLVM (cfg)) {
8827 dreg = alloc_ireg (cfg);
8828 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8829 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8831 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8832 } else {
8833 /* The JIT can't eliminate the iconst+compare */
8834 MONO_INST_NEW (cfg, ins, OP_BR);
8835 ins->inst_target_bb = is_true ? true_bb : false_bb;
8836 MONO_ADD_INS (cfg->cbb, ins);
8839 start_new_bblock = 1;
8840 break;
8843 if (m_class_is_enumtype (klass) && !(val->type == STACK_I8 && TARGET_SIZEOF_VOID_P == 4)) {
8844 /* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is run after the long decomp pass */
8845 if (val->opcode == OP_ICONST) {
8846 MONO_INST_NEW (cfg, ins, OP_BOX_ICONST);
8847 ins->type = STACK_OBJ;
8848 ins->klass = klass;
8849 ins->inst_c0 = val->inst_c0;
8850 ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
8851 } else {
8852 MONO_INST_NEW (cfg, ins, OP_BOX);
8853 ins->type = STACK_OBJ;
8854 ins->klass = klass;
8855 ins->sreg1 = val->dreg;
8856 ins->dreg = alloc_dreg (cfg, (MonoStackType)val->type);
8858 MONO_ADD_INS (cfg->cbb, ins);
8859 *sp++ = ins;
8860 /* Create domainvar early so it gets initialized earlier than this code */
8861 if (cfg->opt & MONO_OPT_SHARED)
8862 mono_get_domainvar (cfg);
8863 } else {
8864 *sp++ = mini_emit_box (cfg, val, klass, context_used);
8866 CHECK_CFG_EXCEPTION;
8867 inline_costs += 1;
8868 break;
8870 case MONO_CEE_UNBOX: {
8871 --sp;
8872 klass = mini_get_class (method, token, generic_context);
8873 CHECK_TYPELOAD (klass);
8875 mono_save_token_info (cfg, image, token, klass);
8877 context_used = mini_class_check_context_used (cfg, klass);
8879 if (mono_class_is_nullable (klass)) {
8880 MonoInst *val;
8882 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8883 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), m_class_get_byval_arg (val->klass));
8885 *sp++= ins;
8886 } else {
8887 ins = handle_unbox (cfg, klass, sp, context_used);
8888 *sp++ = ins;
8890 inline_costs += 2;
8891 break;
8893 case MONO_CEE_LDFLD:
8894 case MONO_CEE_LDFLDA:
8895 case MONO_CEE_STFLD:
8896 case MONO_CEE_LDSFLD:
8897 case MONO_CEE_LDSFLDA:
8898 case MONO_CEE_STSFLD: {
8899 MonoClassField *field;
8900 #ifndef DISABLE_REMOTING
8901 int costs;
8902 #endif
8903 guint foffset;
8904 gboolean is_instance;
8905 gpointer addr = NULL;
8906 gboolean is_special_static;
8907 MonoType *ftype;
8908 MonoInst *store_val = NULL;
8909 MonoInst *thread_ins;
8911 is_instance = (il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDFLDA || il_op == MONO_CEE_STFLD);
8912 if (is_instance) {
8913 if (il_op == MONO_CEE_STFLD) {
8914 sp -= 2;
8915 store_val = sp [1];
8916 } else {
8917 --sp;
8919 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8920 UNVERIFIED;
8921 if (il_op != MONO_CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8922 UNVERIFIED;
8923 } else {
8924 if (il_op == MONO_CEE_STSFLD) {
8925 sp--;
8926 store_val = sp [0];
8930 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8931 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
8932 klass = field->parent;
8934 else {
8935 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
8936 CHECK_CFG_ERROR;
8938 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8939 FIELD_ACCESS_FAILURE (method, field);
8940 mono_class_init_internal (klass);
8942 /* if the class is Critical then transparent code cannot access its fields */
8943 if (!is_instance && mono_security_core_clr_enabled ())
8944 ensure_method_is_allowed_to_access_field (cfg, method, field);
8946 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8947 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8948 if (mono_security_core_clr_enabled ())
8949 ensure_method_is_allowed_to_access_field (cfg, method, field); */
8952 ftype = mono_field_get_type_internal (field);
8955 /* LDFLD etc. is usable on static fields as well, so convert those cases to
8956 * the static case. */
8958 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
8959 switch (il_op) {
8960 case MONO_CEE_LDFLD:
8961 il_op = MONO_CEE_LDSFLD;
8962 break;
8963 case MONO_CEE_STFLD:
8964 il_op = MONO_CEE_STSFLD;
8965 break;
8966 case MONO_CEE_LDFLDA:
8967 il_op = MONO_CEE_LDSFLDA;
8968 break;
8969 default:
8970 g_assert_not_reached ();
8972 is_instance = FALSE;
8975 context_used = mini_class_check_context_used (cfg, klass);
8977 if (il_op == MONO_CEE_LDSFLD) {
8978 ins = mini_emit_inst_for_field_load (cfg, field);
8979 if (ins) {
8980 *sp++ = ins;
8981 goto field_access_end;
8985 /* INSTANCE CASE */
8987 foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset;
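/* Editor's note: field->offset is measured from the start of the boxed
 * layout, which begins with a MonoObject header; value types are addressed
 * from the start of their raw data, hence the header-size adjustment above. */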
8988 if (il_op == MONO_CEE_STFLD) {
8989 sp [1] = convert_value (cfg, field->type, sp [1]);
8990 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8991 UNVERIFIED;
8992 #ifndef DISABLE_REMOTING
8993 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
8994 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8995 MonoInst *iargs [5];
8997 GSHAREDVT_FAILURE (il_op);
8999 iargs [0] = sp [0];
9000 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9001 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9002 EMIT_NEW_ICONST (cfg, iargs [3], m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) :
9003 field->offset);
9004 iargs [4] = sp [1];
9006 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9007 costs = inline_method (cfg, stfld_wrapper, mono_method_signature_internal (stfld_wrapper),
9008 iargs, ip, cfg->real_offset, TRUE);
9009 CHECK_CFG_EXCEPTION;
9010 g_assert (costs > 0);
9012 cfg->real_offset += 5;
9014 inline_costs += costs;
9015 } else {
9016 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9018 } else
9019 #endif
9021 MonoInst *store, *wbarrier_ptr_ins = NULL;
9023 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
9025 if (ins_flag & MONO_INST_VOLATILE) {
9026 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9027 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9030 if (mini_is_gsharedvt_klass (klass)) {
9031 MonoInst *offset_ins;
9033 context_used = mini_class_check_context_used (cfg, klass);
9035 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9036 /* The value is offset by 1 */
9037 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9038 dreg = alloc_ireg_mp (cfg);
9039 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9040 wbarrier_ptr_ins = ins;
9041 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
9042 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9043 } else {
9044 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9046 if (sp [0]->opcode != OP_LDADDR)
9047 store->flags |= MONO_INST_FAULT;
9049 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
9050 if (mini_is_gsharedvt_klass (klass)) {
9051 g_assert (wbarrier_ptr_ins);
9052 mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
9053 } else {
9054 /* insert call to write barrier */
9055 MonoInst *ptr;
9056 int dreg;
9058 dreg = alloc_ireg_mp (cfg);
9059 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9060 mini_emit_write_barrier (cfg, ptr, sp [1]);
9064 store->flags |= ins_flag;
9066 goto field_access_end;
9069 #ifndef DISABLE_REMOTING
9070 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
9071 MonoMethod *wrapper = (il_op == MONO_CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9072 MonoInst *iargs [4];
9074 GSHAREDVT_FAILURE (il_op);
9076 iargs [0] = sp [0];
9077 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9078 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9079 EMIT_NEW_ICONST (cfg, iargs [3], m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset);
9080 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9081 costs = inline_method (cfg, wrapper, mono_method_signature_internal (wrapper),
9082 iargs, ip, cfg->real_offset, TRUE);
9083 CHECK_CFG_EXCEPTION;
9084 g_assert (costs > 0);
9086 cfg->real_offset += 5;
9088 *sp++ = iargs [0];
9090 inline_costs += costs;
9091 } else {
9092 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9093 *sp++ = ins;
9095 } else
9096 #endif
9097 if (is_instance) {
9098 if (sp [0]->type == STACK_VTYPE) {
9099 MonoInst *var;
9101 /* Have to compute the address of the variable */
9103 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9104 if (!var)
9105 var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (klass), OP_LOCAL, sp [0]->dreg);
9106 else
9107 g_assert (var->klass == klass);
9109 EMIT_NEW_VARLOADA (cfg, ins, var, m_class_get_byval_arg (var->klass));
9110 sp [0] = ins;
9113 if (il_op == MONO_CEE_LDFLDA) {
9114 if (sp [0]->type == STACK_OBJ) {
9115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9116 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9119 dreg = alloc_ireg_mp (cfg);
9121 if (mini_is_gsharedvt_klass (klass)) {
9122 MonoInst *offset_ins;
9124 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9125 /* The value is offset by 1 */
9126 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9127 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9128 } else {
9129 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9131 ins->klass = mono_class_from_mono_type_internal (field->type);
9132 ins->type = STACK_MP;
9133 *sp++ = ins;
9134 } else {
9135 MonoInst *load;
9137 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg, foffset > mono_target_pagesize ());
9139 #ifdef MONO_ARCH_SIMD_INTRINSICS
9140 if (sp [0]->opcode == OP_LDADDR && m_class_is_simd_type (klass) && cfg->opt & MONO_OPT_SIMD) {
9141 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
9142 if (ins) {
9143 *sp++ = ins;
9144 goto field_access_end;
9147 #endif
9149 MonoInst *field_add_inst = sp [0];
9150 if (mini_is_gsharedvt_klass (klass)) {
9151 MonoInst *offset_ins;
9153 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9154 /* The value is offset by 1 */
9155 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9156 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
9157 foffset = 0;
9160 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
9162 if (sp [0]->opcode != OP_LDADDR)
9163 load->flags |= MONO_INST_FAULT;
9164 *sp++ = load;
9168 if (is_instance)
9169 goto field_access_end;
9171 /* STATIC CASE */
9172 context_used = mini_class_check_context_used (cfg, klass);
9174 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
9175 mono_error_set_field_missing (&cfg->error, field->parent, field->name, NULL, "Using static instructions with literal field");
9176 CHECK_CFG_ERROR;
9179 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9180 * to be called here. */
9182 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9183 mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
9184 CHECK_CFG_ERROR;
9185 CHECK_TYPELOAD (klass);
9187 mono_domain_lock (cfg->domain);
9188 if (cfg->domain->special_static_fields)
9189 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9190 mono_domain_unlock (cfg->domain);
9192 is_special_static = mono_class_field_is_special_static (field);
9194 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
9195 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
9196 else
9197 thread_ins = NULL;
9199 /* Generate IR to compute the field address */
9200 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9202 /* Fast access to TLS data
9203 * Inline version of get_thread_static_data () in
9204 * threads.c. */
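/* Editor's sketch (inferred from the masks used below, not from the original
 * comments): here 'addr' is a packed descriptor rather than a raw pointer:
 * bit 31 selects the slow helper path, bits 0-5 index the thread's
 * static_data chunk array, and the remaining bits hold the byte offset
 * inside that chunk, i.e.
 *   field_addr = thread->static_data [offset & 0x3f] + (offset >> 6) */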
9206 guint32 offset;
9207 int idx, static_data_reg, array_reg, dreg;
9209 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
9210 GSHAREDVT_FAILURE (il_op);
9212 static_data_reg = alloc_ireg (cfg);
9213 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
9215 if (cfg->compile_aot) {
9216 int offset_reg, offset2_reg, idx_reg;
9218 /* For TLS variables, this will return the TLS offset */
9219 EMIT_NEW_SFLDACONST (cfg, ins, field);
9220 offset_reg = ins->dreg;
9221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9222 idx_reg = alloc_ireg (cfg);
9223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
9224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, TARGET_SIZEOF_VOID_P == 8 ? 3 : 2);
9225 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9226 array_reg = alloc_ireg (cfg);
9227 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9228 offset2_reg = alloc_ireg (cfg);
9229 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
9230 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
9231 dreg = alloc_ireg (cfg);
9232 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9233 } else {
9234 offset = (gsize)addr & 0x7fffffff;
9235 idx = offset & 0x3f;
9237 array_reg = alloc_ireg (cfg);
9238 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * TARGET_SIZEOF_VOID_P);
9239 dreg = alloc_ireg (cfg);
9240 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
9242 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9243 (cfg->compile_aot && is_special_static) ||
9244 (context_used && is_special_static)) {
9245 MonoInst *iargs [2];
9247 g_assert (field->parent);
9248 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9249 if (context_used) {
9250 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9251 field, MONO_RGCTX_INFO_CLASS_FIELD);
9252 } else {
9253 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9255 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9256 } else if (context_used) {
9257 MonoInst *static_data;
9260 /* g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9261 method->klass->name_space, method->klass->name, method->name,
9262 depth, field->offset); */
9265 if (mono_class_needs_cctor_run (klass, method))
9266 emit_class_init (cfg, klass);
9269 /* The pointer we're computing here is
9271 * super_info.static_data + field->offset */
9273 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
9274 klass, MONO_RGCTX_INFO_STATIC_DATA);
9276 if (mini_is_gsharedvt_klass (klass)) {
9277 MonoInst *offset_ins;
9279 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9280 /* The value is offset by 1 */
9281 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
9282 dreg = alloc_ireg_mp (cfg);
9283 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9284 } else if (field->offset == 0) {
9285 ins = static_data;
9286 } else {
9287 int addr_reg = mono_alloc_preg (cfg);
9288 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9290 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9291 MonoInst *iargs [2];
9293 g_assert (field->parent);
9294 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9295 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9296 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9297 } else {
9298 MonoVTable *vtable = NULL;
9300 if (!cfg->compile_aot)
9301 vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
9302 CHECK_CFG_ERROR;
9303 CHECK_TYPELOAD (klass);
9305 if (!addr) {
9306 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
9307 if (!(g_slist_find (class_inits, klass))) {
9308 emit_class_init (cfg, klass);
9309 if (cfg->verbose_level > 2)
9310 printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass), m_class_get_name (klass), mono_field_get_name (field));
9311 class_inits = g_slist_prepend (class_inits, klass);
9313 } else {
9314 if (cfg->run_cctors) {
9315 /* This makes it so that inlining cannot trigger */
9316 /* .cctors: too many apps depend on them */
9317 /* running with a specific order... */
9318 g_assert (vtable);
9319 if (!vtable->initialized && m_class_has_cctor (vtable->klass))
9320 INLINE_FAILURE ("class init");
9321 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
9322 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
9323 goto exception_exit;
9327 if (cfg->compile_aot)
9328 EMIT_NEW_SFLDACONST (cfg, ins, field);
9329 else {
9330 g_assert (vtable);
9331 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9332 g_assert (addr);
9333 EMIT_NEW_PCONST (cfg, ins, addr);
9335 } else {
9336 MonoInst *iargs [1];
9337 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9338 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9342 /* Generate IR to do the actual load/store operation */
9344 if ((il_op == MONO_CEE_STFLD || il_op == MONO_CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
9345 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9346 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9349 if (il_op == MONO_CEE_LDSFLDA) {
9350 ins->klass = mono_class_from_mono_type_internal (ftype);
9351 ins->type = STACK_PTR;
9352 *sp++ = ins;
9353 } else if (il_op == MONO_CEE_STSFLD) {
9354 MonoInst *store;
9356 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9357 store->flags |= ins_flag;
9358 } else {
9359 gboolean is_const = FALSE;
9360 MonoVTable *vtable = NULL;
9361 gpointer addr = NULL;
9363 if (!context_used) {
9364 vtable = mono_class_vtable_checked (cfg->domain, klass, &cfg->error);
9365 CHECK_CFG_ERROR;
9366 CHECK_TYPELOAD (klass);
9368 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9369 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9370 int ro_type = ftype->type;
9371 if (!addr)
9372 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9373 if (ro_type == MONO_TYPE_VALUETYPE && m_class_is_enumtype (ftype->data.klass)) {
9374 ro_type = mono_class_enum_basetype_internal (ftype->data.klass)->type;
9377 GSHAREDVT_FAILURE (il_op);
9379 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9380 is_const = TRUE;
9381 switch (ro_type) {
9382 case MONO_TYPE_BOOLEAN:
9383 case MONO_TYPE_U1:
9384 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9385 sp++;
9386 break;
9387 case MONO_TYPE_I1:
9388 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9389 sp++;
9390 break;
9391 case MONO_TYPE_CHAR:
9392 case MONO_TYPE_U2:
9393 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9394 sp++;
9395 break;
9396 case MONO_TYPE_I2:
9397 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9398 sp++;
9399 break;
9401 case MONO_TYPE_I4:
9402 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9403 sp++;
9404 break;
9405 case MONO_TYPE_U4:
9406 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9407 sp++;
9408 break;
9409 case MONO_TYPE_I:
9410 case MONO_TYPE_U:
9411 case MONO_TYPE_PTR:
9412 case MONO_TYPE_FNPTR:
9413 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9414 mini_type_to_eval_stack_type ((cfg), field->type, *sp);
9415 sp++;
9416 break;
9417 case MONO_TYPE_STRING:
9418 case MONO_TYPE_OBJECT:
9419 case MONO_TYPE_CLASS:
9420 case MONO_TYPE_SZARRAY:
9421 case MONO_TYPE_ARRAY:
9422 if (!mono_gc_is_moving ()) {
9423 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9424 mini_type_to_eval_stack_type ((cfg), field->type, *sp);
9425 sp++;
9426 } else {
9427 is_const = FALSE;
9429 break;
9430 case MONO_TYPE_I8:
9431 case MONO_TYPE_U8:
9432 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9433 sp++;
9434 break;
9435 case MONO_TYPE_R4:
9436 case MONO_TYPE_R8:
9437 case MONO_TYPE_VALUETYPE:
9438 default:
9439 is_const = FALSE;
9440 break;
9444 if (!is_const) {
9445 MonoInst *load;
9447 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9448 load->flags |= ins_flag;
9449 *sp++ = load;
9453 field_access_end:
9454 if ((il_op == MONO_CEE_LDFLD || il_op == MONO_CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
9455 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9456 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9459 ins_flag = 0;
9460 break;
9462 case MONO_CEE_STOBJ:
9463 sp -= 2;
9464 klass = mini_get_class (method, token, generic_context);
9465 CHECK_TYPELOAD (klass);
9467 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9468 mini_emit_memory_store (cfg, m_class_get_byval_arg (klass), sp [0], sp [1], ins_flag);
9469 ins_flag = 0;
9470 inline_costs += 1;
9471 break;
9474 /* Array opcodes */
9476 case MONO_CEE_NEWARR: {
9477 MonoInst *len_ins;
9478 const char *data_ptr;
9479 int data_size = 0;
9480 guint32 field_token;
9482 --sp;
9484 klass = mini_get_class (method, token, generic_context);
9485 CHECK_TYPELOAD (klass);
9486 if (m_class_get_byval_arg (klass)->type == MONO_TYPE_VOID)
9487 UNVERIFIED;
9489 context_used = mini_class_check_context_used (cfg, klass);
9491 if (sp [0]->type == STACK_I8 || (TARGET_SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9492 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
9493 ins->sreg1 = sp [0]->dreg;
9494 ins->type = STACK_I4;
9495 ins->dreg = alloc_ireg (cfg);
9496 MONO_ADD_INS (cfg->cbb, ins);
9497 *sp = mono_decompose_opcode (cfg, ins);
9500 if (context_used) {
9501 MonoInst *args [3];
9502 MonoClass *array_class = mono_class_create_array (klass, 1);
9503 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
9505 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
9507 /* vtable */
9508 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
9509 array_class, MONO_RGCTX_INFO_VTABLE);
9510 /* array len */
9511 args [1] = sp [0];
9513 if (managed_alloc)
9514 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9515 else
9516 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
9517 } else {
9518 if (cfg->opt & MONO_OPT_SHARED) {
9519 /* Decompose now to avoid problems with references to the domainvar */
9520 MonoInst *iargs [3];
9522 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9523 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9524 iargs [2] = sp [0];
9526 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
9527 } else {
9528 /* Decompose later since it is needed by abcrem */
9529 MonoClass *array_type = mono_class_create_array (klass, 1);
9530 mono_class_vtable_checked (cfg->domain, array_type, &cfg->error);
9531 CHECK_CFG_ERROR;
9532 CHECK_TYPELOAD (array_type);
9534 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9535 ins->dreg = alloc_ireg_ref (cfg);
9536 ins->sreg1 = sp [0]->dreg;
9537 ins->inst_newa_class = klass;
9538 ins->type = STACK_OBJ;
9539 ins->klass = array_type;
9540 MONO_ADD_INS (cfg->cbb, ins);
9541 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
9542 cfg->cbb->needs_decompose = TRUE;
9544 /* Needed so mono_emit_load_get_addr () gets called */
9545 mono_get_got_var (cfg);
9549 len_ins = sp [0];
9550 ip += 5;
9551 *sp++ = ins;
9552 inline_costs += 1;
9555 /* We inline/optimize the initialization sequence if possible.
9556 * We should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing.
9557 * For small sizes, open code the memcpy.
9558 * Ensure the rva field is big enough. */
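/* Editor's illustration (not in the original source): an initializer such as
 *   static readonly int[] T = { 1, 2, 3 };
 * compiles to newarr followed by "dup; ldtoken <RVA field>; call
 * RuntimeHelpers::InitializeArray". initialize_array_data () recognizes that
 * sequence, letting the code below replace it with a plain memcpy from the
 * RVA data (or an AOT relocation to it). */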
9560 if ((cfg->opt & MONO_OPT_INTRINS) && next_ip < end
9561 && ip_in_bb (cfg, cfg->cbb, next_ip)
9562 && (len_ins->opcode == OP_ICONST)
9563 && (data_ptr = initialize_array_data (cfg, method,
9564 cfg->compile_aot, next_ip, end, klass,
9565 len_ins->inst_c0, &data_size, &field_token,
9566 &il_op, &next_ip))) {
9567 MonoMethod *memcpy_method = mini_get_memcpy_method ();
9568 MonoInst *iargs [3];
9569 int add_reg = alloc_ireg_mp (cfg);
9571 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
9572 if (cfg->compile_aot) {
9573 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, m_class_get_image (method->klass), GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9574 } else {
9575 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9577 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9578 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9581 break;
9583 case MONO_CEE_LDLEN:
9584 --sp;
9585 if (sp [0]->type != STACK_OBJ)
9586 UNVERIFIED;
9588 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9589 ins->dreg = alloc_preg (cfg);
9590 ins->sreg1 = sp [0]->dreg;
9591 ins->inst_imm = MONO_STRUCT_OFFSET (MonoArray, max_length);
9592 ins->type = STACK_I4;
9593 /* This flag will be inherited by the decomposition */
9594 ins->flags |= MONO_INST_FAULT | MONO_INST_INVARIANT_LOAD;
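/* Editor's note: an array's length never changes, so the load is marked
 * invariant; later passes (e.g. bounds-check removal) can then hoist or
 * coalesce it, while MONO_INST_FAULT keeps the implicit null check. */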
9595 MONO_ADD_INS (cfg->cbb, ins);
9596 cfg->flags |= MONO_CFG_NEEDS_DECOMPOSE;
9597 cfg->cbb->needs_decompose = TRUE;
9598 *sp++ = ins;
9599 break;
9600 case MONO_CEE_LDELEMA:
9601 sp -= 2;
9602 if (sp [0]->type != STACK_OBJ)
9603 UNVERIFIED;
9605 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9607 klass = mini_get_class (method, token, generic_context);
9608 CHECK_TYPELOAD (klass);
9609 /* we need to make sure that this array is exactly the type it needs
9610 * to be for correctness. the wrappers are lax with their usage
9611 * so we need to ignore them here */
9613 if (!m_class_is_valuetype (klass) && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9614 MonoClass *array_class = mono_class_create_array (klass, 1);
9615 mini_emit_check_array_type (cfg, sp [0], array_class);
9616 CHECK_TYPELOAD (array_class);
9619 readonly = FALSE;
9620 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9621 *sp++ = ins;
9622 break;
9623 case MONO_CEE_LDELEM:
9624 case MONO_CEE_LDELEM_I1:
9625 case MONO_CEE_LDELEM_U1:
9626 case MONO_CEE_LDELEM_I2:
9627 case MONO_CEE_LDELEM_U2:
9628 case MONO_CEE_LDELEM_I4:
9629 case MONO_CEE_LDELEM_U4:
9630 case MONO_CEE_LDELEM_I8:
9631 case MONO_CEE_LDELEM_I:
9632 case MONO_CEE_LDELEM_R4:
9633 case MONO_CEE_LDELEM_R8:
9634 case MONO_CEE_LDELEM_REF: {
9635 MonoInst *addr;
9637 sp -= 2;
9639 if (il_op == MONO_CEE_LDELEM) {
9640 klass = mini_get_class (method, token, generic_context);
9641 CHECK_TYPELOAD (klass);
9642 mono_class_init_internal (klass);
9644 else
9645 klass = array_access_to_klass (il_op);
9647 if (sp [0]->type != STACK_OBJ)
9648 UNVERIFIED;
9650 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9652 if (mini_is_gsharedvt_variable_klass (klass)) {
9653 // FIXME-VT: OP_ICONST optimization
9654 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9655 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
9656 ins->opcode = OP_LOADV_MEMBASE;
9657 } else if (sp [1]->opcode == OP_ICONST) {
9658 int array_reg = sp [0]->dreg;
9659 int index_reg = sp [1]->dreg;
9660 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
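/* Editor's note: with a constant index the element address is simply
 * vector_start + index * element_size, so after the bounds check below the
 * element can be loaded at a fixed offset, skipping the ldelema helper. */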
9662 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
9663 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
9665 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9666 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), array_reg, offset);
9667 } else {
9668 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9669 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (klass), addr->dreg, 0);
9671 *sp++ = ins;
9672 break;
9674 case MONO_CEE_STELEM_I:
9675 case MONO_CEE_STELEM_I1:
9676 case MONO_CEE_STELEM_I2:
9677 case MONO_CEE_STELEM_I4:
9678 case MONO_CEE_STELEM_I8:
9679 case MONO_CEE_STELEM_R4:
9680 case MONO_CEE_STELEM_R8:
9681 case MONO_CEE_STELEM_REF:
9682 case MONO_CEE_STELEM: {
9683 sp -= 3;
9685 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9687 if (il_op == MONO_CEE_STELEM) {
9688 klass = mini_get_class (method, token, generic_context);
9689 CHECK_TYPELOAD (klass);
9690 mono_class_init_internal (klass);
9692 else
9693 klass = array_access_to_klass (il_op);
9695 if (sp [0]->type != STACK_OBJ)
9696 UNVERIFIED;
9698 sp [2] = convert_value (cfg, m_class_get_byval_arg (klass), sp [2]);
9699 mini_emit_array_store (cfg, klass, sp, TRUE);
9701 inline_costs += 1;
9702 break;
9704 case MONO_CEE_CKFINITE: {
9705 --sp;
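/* Editor's note: ckfinite leaves the value on the stack when it is a finite
 * number and raises ArithmeticException for NaN or +/- infinity (ECMA-335);
 * llvm-only mode defers to an icall, other backends emit OP_CKFINITE. */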
9707 if (cfg->llvm_only) {
9708 MonoInst *iargs [1];
9710 iargs [0] = sp [0];
9711 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
9712 } else {
9713 sp [0] = convert_value (cfg, m_class_get_byval_arg (mono_defaults.double_class), sp [0]);
9714 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9715 ins->sreg1 = sp [0]->dreg;
9716 ins->dreg = alloc_freg (cfg);
9717 ins->type = STACK_R8;
9718 MONO_ADD_INS (cfg->cbb, ins);
9720 *sp++ = mono_decompose_opcode (cfg, ins);
9723 break;
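/* Editor's note: refanyval extracts the managed pointer held in a
 * TypedReference, first checking that the stored class matches the token
 * (InvalidCastException otherwise); C# exposes it as __refvalue (tr, T). */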
9725 case MONO_CEE_REFANYVAL: {
9726 MonoInst *src_var, *src;
9728 int klass_reg = alloc_preg (cfg);
9729 int dreg = alloc_preg (cfg);
9731 GSHAREDVT_FAILURE (il_op);
9733 MONO_INST_NEW (cfg, ins, il_op);
9734 --sp;
9735 klass = mini_get_class (method, token, generic_context);
9736 CHECK_TYPELOAD (klass);
9738 context_used = mini_class_check_context_used (cfg, klass);
9740 // FIXME:
9741 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9742 if (!src_var)
9743 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
9744 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
9747 if (context_used) {
9748 MonoInst *klass_ins;
9750 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
9751 klass, MONO_RGCTX_INFO_KLASS);
9753 // FIXME:
9754 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9755 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9756 } else {
9757 mini_emit_class_check (cfg, klass_reg, klass);
9759 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
9760 ins->type = STACK_MP;
9761 ins->klass = klass;
9762 *sp++ = ins;
9763 break;
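/* Editor's note: mkrefany is the inverse operation: it fills in a
 * TypedReference (klass, type, value) from a managed pointer; C# exposes it
 * as __makeref (x). */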
9765 case MONO_CEE_MKREFANY: {
9766 MonoInst *loc, *addr;
9768 GSHAREDVT_FAILURE (il_op);
9770 MONO_INST_NEW (cfg, ins, il_op);
9771 --sp;
9772 klass = mini_get_class (method, token, generic_context);
9773 CHECK_TYPELOAD (klass);
9775 context_used = mini_class_check_context_used (cfg, klass);
9777 loc = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL);
9778 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9780 MonoInst *const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9781 int type_reg = alloc_preg (cfg);
9783 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, m_class_offsetof_byval_arg ());
9785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9787 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9789 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9790 ins->type = STACK_VTYPE;
9791 ins->klass = mono_defaults.typed_reference_class;
9792 *sp++ = ins;
9793 break;
9795 case MONO_CEE_LDTOKEN: {
9796 gpointer handle;
9797 MonoClass *handle_class;
9799 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9800 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9801 handle = mono_method_get_wrapper_data (method, n);
9802 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
9803 if (handle_class == mono_defaults.typehandle_class)
9804 handle = m_class_get_byval_arg ((MonoClass*)handle);
9806 else {
9807 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
9808 CHECK_CFG_ERROR;
9810 if (!handle)
9811 LOAD_ERROR;
9812 mono_class_init_internal (handle_class);
9813 if (cfg->gshared) {
9814 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9815 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9816 /* This case handles ldtoken
9817 of an open type, like for
9818 typeof(Gen<>). */
9819 context_used = 0;
9820 } else if (handle_class == mono_defaults.typehandle_class) {
9821 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type_internal ((MonoType *)handle));
9822 } else if (handle_class == mono_defaults.fieldhandle_class)
9823 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
9824 else if (handle_class == mono_defaults.methodhandle_class)
9825 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
9826 else
9827 g_assert_not_reached ();
9830 if ((cfg->opt & MONO_OPT_SHARED) &&
9831 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9832 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9833 MonoInst *addr, *vtvar, *iargs [3];
9834 int method_context_used;
9836 method_context_used = mini_method_check_context_used (cfg, method);
9838 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
9840 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9841 EMIT_NEW_ICONST (cfg, iargs [1], n);
9842 if (method_context_used) {
9843 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9844 method, MONO_RGCTX_INFO_METHOD);
9845 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9846 } else {
9847 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9848 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9850 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9852 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9854 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9855 } else {
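/* Editor's note: the check below recognizes the "ldtoken <type>; call
 * Type::GetTypeFromHandle" pair that C# emits for typeof (X) and loads the
 * System.Type object directly, skipping the runtime-handle round trip. */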
9856 if ((next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) &&
9857 ((next_ip [0] == CEE_CALL) || (next_ip [0] == CEE_CALLVIRT)) &&
9858 (cmethod = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context)) &&
9859 (cmethod->klass == mono_defaults.systemtype_class) &&
9860 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9861 MonoClass *tclass = mono_class_from_mono_type_internal ((MonoType *)handle);
9863 mono_class_init_internal (tclass);
9864 if (context_used) {
9865 ins = mini_emit_get_rgctx_klass (cfg, context_used,
9866 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9867 } else if (cfg->compile_aot) {
9868 if (method->wrapper_type) {
9869 error_init (error); //got to do it since there are multiple conditionals below
9870 if (mono_class_get_checked (m_class_get_image (tclass), m_class_get_type_token (tclass), error) == tclass && !generic_context) {
9871 /* Special case for static synchronized wrappers */
9872 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, m_class_get_image (tclass), m_class_get_type_token (tclass), generic_context);
9873 } else {
9874 mono_error_cleanup (error); /* FIXME don't swallow the error */
9875 /* FIXME: n is not a normal token */
9876 DISABLE_AOT (cfg);
9877 EMIT_NEW_PCONST (cfg, ins, NULL);
9879 } else {
9880 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9882 } else {
9883 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
9884 CHECK_CFG_ERROR;
9885 EMIT_NEW_PCONST (cfg, ins, rt);
9887 ins->type = STACK_OBJ;
9888 ins->klass = cmethod->klass;
9889 il_op = (MonoOpcodeEnum)next_ip [0];
9890 next_ip += 5;
9891 } else {
9892 MonoInst *addr, *vtvar;
9894 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (handle_class), OP_LOCAL);
9896 if (context_used) {
9897 if (handle_class == mono_defaults.typehandle_class) {
9898 ins = mini_emit_get_rgctx_klass (cfg, context_used,
9899 mono_class_from_mono_type_internal ((MonoType *)handle),
9900 MONO_RGCTX_INFO_TYPE);
9901 } else if (handle_class == mono_defaults.methodhandle_class) {
9902 ins = emit_get_rgctx_method (cfg, context_used,
9903 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
9904 } else if (handle_class == mono_defaults.fieldhandle_class) {
9905 ins = emit_get_rgctx_field (cfg, context_used,
9906 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
9907 } else {
9908 g_assert_not_reached ();
9910 } else if (cfg->compile_aot) {
9911 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
9912 } else {
9913 EMIT_NEW_PCONST (cfg, ins, handle);
9915 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9916 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9917 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9921 *sp++ = ins;
9922 break;
9924 case MONO_CEE_THROW:
9925 if (sp [-1]->type != STACK_OBJ)
9926 UNVERIFIED;
9928 MONO_INST_NEW (cfg, ins, OP_THROW);
9929 --sp;
9930 ins->sreg1 = sp [0]->dreg;
9931 cfg->cbb->out_of_line = TRUE;
9932 MONO_ADD_INS (cfg->cbb, ins);
9933 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9934 MONO_ADD_INS (cfg->cbb, ins);
9935 sp = stack_start;
9937 link_bblock (cfg, cfg->cbb, end_bblock);
9938 start_new_bblock = 1;
9939 /* This can complicate code generation for llvm since the return value might not be defined */
9940 if (COMPILE_LLVM (cfg))
9941 INLINE_FAILURE ("throw");
9942 break;
9943 case MONO_CEE_ENDFINALLY:
9944 if (!ip_in_finally_clause (cfg, ip - header->code))
9945 UNVERIFIED;
9946 /* mono_save_seq_point_info () depends on this */
9947 if (sp != stack_start)
9948 emit_seq_point (cfg, method, ip, FALSE, FALSE);
9949 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9950 MONO_ADD_INS (cfg->cbb, ins);
9951 start_new_bblock = 1;
9954 /* Control will leave the method so empty the stack, otherwise
9955 * the next basic block will start with a nonempty stack. */
9957 while (sp != stack_start) {
9958 sp--;
9960 break;
9961 case MONO_CEE_LEAVE:
9962 case MONO_CEE_LEAVE_S: {
9963 GList *handlers;
9965 /* empty the stack */
9966 g_assert (sp >= stack_start);
9967 sp = stack_start;
9970 /* If this leave statement is in a catch block, check for a
9971 * pending exception, and rethrow it if necessary.
9972 * We avoid doing this in runtime invoke wrappers, since those are called
9973 * by native code which expects the wrapper to catch all exceptions. */
9975 for (i = 0; i < header->num_clauses; ++i) {
9976 MonoExceptionClause *clause = &header->clauses [i];
9979 /* Use <= in the final comparison to handle clauses with multiple
9980 * leave statements, like in bug #78024.
9981 * The ordering of the exception clauses guarantees that we find the
9982 * innermost clause. */
9984 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((il_op == MONO_CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9985 MonoInst *exc_ins;
9986 MonoBasicBlock *dont_throw;
9989 /* MonoInst *load;
9991 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0); */
9994 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9996 NEW_BBLOCK (cfg, dont_throw);
9999 /* Currently, we always rethrow the abort exception, despite the
10000 * fact that this is not correct. See thread6.cs for an example.
10001 * But propagating the abort exception is more important than
10002 * getting the semantics right. */
10004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10006 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10008 MONO_START_BB (cfg, dont_throw);
10012 #ifdef ENABLE_LLVM
10013 cfg->cbb->try_end = (intptr_t)(ip - header->code);
10014 #endif
10016 if ((handlers = mono_find_leave_clauses (cfg, ip, target))) {
10017 GList *tmp;
10019 /* For each finally clause that we exit we need to invoke the finally block.
10020 * After each invocation we need to add try holes for all the clauses that
10021 * we already exited. */
10023 for (tmp = handlers; tmp; tmp = tmp->next) {
10024 MonoLeaveClause *leave = (MonoLeaveClause *) tmp->data;
10025 MonoExceptionClause *clause = leave->clause;
10027 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY)
10028 continue;
10030 MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
10031 MonoBasicBlock *dont_throw;
10034 /* Emit instrumentation code before linking the basic blocks below as this
10035 * will alter cfg->cbb. */
10037 mini_profiler_emit_call_finally (cfg, header, ip, leave->index, clause);
10039 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10040 g_assert (tblock);
10041 link_bblock (cfg, cfg->cbb, tblock);
10043 MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
10045 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10046 ins->inst_target_bb = tblock;
10047 ins->inst_eh_blocks = tmp;
10048 MONO_ADD_INS (cfg->cbb, ins);
10049 cfg->cbb->has_call_handler = 1;
10051 /* Throw exception if exvar is set */
10052 /* FIXME Do we need this for calls from catch/filter ? */
10053 NEW_BBLOCK (cfg, dont_throw);
10054 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
10055 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10056 mono_emit_jit_icall (cfg, ves_icall_thread_finish_async_abort, NULL);
10057 cfg->cbb->clause_holes = tmp;
10059 MONO_START_BB (cfg, dont_throw);
10060 cfg->cbb->clause_holes = tmp;
10062 if (COMPILE_LLVM (cfg)) {
10063 MonoBasicBlock *target_bb;
10066 /* Link the finally bblock with the target, since it will
10067 * conceptually branch there. */
10069 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
10070 GET_BBLOCK (cfg, target_bb, target);
10071 link_bblock (cfg, tblock, target_bb);
10076 MONO_INST_NEW (cfg, ins, OP_BR);
10077 MONO_ADD_INS (cfg->cbb, ins);
10078 GET_BBLOCK (cfg, tblock, target);
10079 link_bblock (cfg, cfg->cbb, tblock);
10080 ins->inst_target_bb = tblock;
10082 start_new_bblock = 1;
10083 break;
10087 /* Mono specific opcodes */
10090 case MONO_CEE_MONO_ICALL: {
10091 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10092 gpointer func;
10093 MonoJitICallInfo *info;
10095 func = mono_method_get_wrapper_data (method, token);
10096 info = mono_find_jit_icall_by_addr (func);
10097 if (!info)
10098 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10099 g_assert (info);
10101 CHECK_STACK (info->sig->param_count);
10102 sp -= info->sig->param_count;
10104 if (!strcmp (info->name, "mono_threads_attach_coop")) {
10105 MonoInst *addr;
10106 MonoBasicBlock *next_bb;
10108 if (cfg->compile_aot) {
10110 /* This is called on unattached threads, so it cannot go through the trampoline
10111 * infrastructure. Use an indirect call through a GOT slot initialized at load time
10112 * instead. */
10114 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, (char*)info->name);
10115 ins = mini_emit_calli (cfg, info->sig, sp, addr, NULL, NULL);
10116 } else {
10117 ins = mono_emit_jit_icall (cfg, info->func, sp);
10121 /* Parts of the initlocals code need to come after this, since it might call methods like memset. */
10123 init_localsbb2 = cfg->cbb;
10124 NEW_BBLOCK (cfg, next_bb);
10125 MONO_START_BB (cfg, next_bb);
10126 } else {
10127 ins = mono_emit_jit_icall (cfg, info->func, sp);
10130 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10131 *sp++ = ins;
10133 inline_costs += CALL_COST * MIN(10, num_calls++);
10134 break;
10137 MonoJumpInfoType ldptr_type;
10139 case MONO_CEE_MONO_LDPTR_CARD_TABLE:
10140 ldptr_type = MONO_PATCH_INFO_GC_CARD_TABLE_ADDR;
10141 goto mono_ldptr;
10142 case MONO_CEE_MONO_LDPTR_NURSERY_START:
10143 ldptr_type = MONO_PATCH_INFO_GC_NURSERY_START;
10144 goto mono_ldptr;
10145 case MONO_CEE_MONO_LDPTR_NURSERY_BITS:
10146 ldptr_type = MONO_PATCH_INFO_GC_NURSERY_BITS;
10147 goto mono_ldptr;
10148 case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG:
10149 ldptr_type = MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG;
10150 goto mono_ldptr;
10151 case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
10152 ldptr_type = MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT;
10153 mono_ldptr:
10154 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10155 ins = mini_emit_runtime_constant (cfg, ldptr_type, NULL);
10156 *sp++ = ins;
10157 inline_costs += CALL_COST * MIN(10, num_calls++);
10158 break;
10160 case MONO_CEE_MONO_LDPTR: {
10161 gpointer ptr;
10163 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10164 ptr = mono_method_get_wrapper_data (method, token);
10165 EMIT_NEW_PCONST (cfg, ins, ptr);
10166 *sp++ = ins;
10167 inline_costs += CALL_COST * MIN(10, num_calls++);
10168 /* Can't embed random pointers into AOT code */
10169 DISABLE_AOT (cfg);
10170 break;
10172 case MONO_CEE_MONO_JIT_ICALL_ADDR: {
10173 MonoJitICallInfo *callinfo;
10174 gpointer ptr;
10176 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10177 ptr = mono_method_get_wrapper_data (method, token);
10178 callinfo = mono_find_jit_icall_by_addr (ptr);
10179 g_assert (callinfo);
10180 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
10181 *sp++ = ins;
10182 inline_costs += CALL_COST * MIN(10, num_calls++);
10183 break;
10185 case MONO_CEE_MONO_ICALL_ADDR: {
10186 MonoMethod *cmethod;
10187 gpointer ptr;
10189 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10191 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
10193 if (cfg->compile_aot) {
10194 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
10196 /* This is generated by emit_native_wrapper () to resolve the pinvoke address
10197 * before the call; it's not needed when using direct pinvoke.
10198 * This is not an optimization, but it's used to avoid looking up pinvokes
10199 * on platforms which don't support dlopen (). */
10201 EMIT_NEW_PCONST (cfg, ins, NULL);
10202 } else {
10203 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10205 } else {
10206 ptr = mono_lookup_internal_call (cmethod);
10207 g_assert (ptr);
10208 EMIT_NEW_PCONST (cfg, ins, ptr);
10210 *sp++ = ins;
10211 break;
10213 case MONO_CEE_MONO_VTADDR: {
10214 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10215 MonoInst *src_var, *src;
10217 --sp;
10219 // FIXME:
10220 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10221 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10222 *sp++ = src;
10223 break;
10225 case MONO_CEE_MONO_NEWOBJ: {
10226 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10227 MonoInst *iargs [2];
10229 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10230 mono_class_init_internal (klass);
10231 NEW_DOMAINCONST (cfg, iargs [0]);
10232 MONO_ADD_INS (cfg->cbb, iargs [0]);
10233 NEW_CLASSCONST (cfg, iargs [1], klass);
10234 MONO_ADD_INS (cfg->cbb, iargs [1]);
10235 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
10236 inline_costs += CALL_COST * MIN(10, num_calls++);
10237 break;
10239 case MONO_CEE_MONO_OBJADDR:
10240 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10241 --sp;
10242 MONO_INST_NEW (cfg, ins, OP_MOVE);
10243 ins->dreg = alloc_ireg_mp (cfg);
10244 ins->sreg1 = sp [0]->dreg;
10245 ins->type = STACK_MP;
10246 MONO_ADD_INS (cfg->cbb, ins);
10247 *sp++ = ins;
10248 break;
10249 case MONO_CEE_MONO_LDNATIVEOBJ:
10251 /* Similar to LDOBJ, but instead load the unmanaged
10252 * representation of the vtype to the stack. */
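/* A summary of how this is done below: the value is copied into a temp
 * whose backend.is_pinvoke flag makes it use the unmanaged layout and
 * size, and that temp is then loaded as the result. */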
10254 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10255 --sp;
10256 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10257 g_assert (m_class_is_valuetype (klass));
10258 mono_class_init_internal (klass);
10261 MonoInst *src, *dest, *temp;
10263 src = sp [0];
10264 temp = mono_compile_create_var (cfg, m_class_get_byval_arg (klass), OP_LOCAL);
10265 temp->backend.is_pinvoke = 1;
10266 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10267 mini_emit_memory_copy (cfg, dest, src, klass, TRUE, 0);
10269 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10270 dest->type = STACK_VTYPE;
10271 dest->klass = klass;
10273 *sp ++ = dest;
10275 break;
10276 case MONO_CEE_MONO_RETOBJ: {
10278 /* Same as RET, but return the native representation of a vtype
10279 * to the caller. */
10281 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10282 g_assert (cfg->ret);
10283 g_assert (mono_method_signature_internal (method)->pinvoke);
10284 --sp;
10286 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10288 if (!cfg->vret_addr) {
10289 g_assert (cfg->ret_var_is_local);
10291 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10292 } else {
10293 EMIT_NEW_RETLOADA (cfg, ins);
10295 mini_emit_memory_copy (cfg, ins, sp [0], klass, TRUE, 0);
10297 if (sp != stack_start)
10298 UNVERIFIED;
10300 mini_profiler_emit_leave (cfg, sp [0]);
10302 MONO_INST_NEW (cfg, ins, OP_BR);
10303 ins->inst_target_bb = end_bblock;
10304 MONO_ADD_INS (cfg->cbb, ins);
10305 link_bblock (cfg, cfg->cbb, end_bblock);
10306 start_new_bblock = 1;
10307 break;
10309 case MONO_CEE_MONO_SAVE_LMF:
10310 case MONO_CEE_MONO_RESTORE_LMF:
10311 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10312 break;
10313 case MONO_CEE_MONO_CLASSCONST:
10314 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10315 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10316 *sp++ = ins;
10317 inline_costs += CALL_COST * MIN(10, num_calls++);
10318 break;
10319 case MONO_CEE_MONO_NOT_TAKEN:
10320 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10321 cfg->cbb->out_of_line = TRUE;
10322 break;
10323 case MONO_CEE_MONO_TLS: {
10324 MonoTlsKey key;
10326 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10327 key = (MonoTlsKey)n;
10328 g_assert (key < TLS_KEY_NUM);
10330 ins = mono_create_tls_get (cfg, key);
10331 g_assert (ins);
10332 ins->type = STACK_PTR;
10333 *sp++ = ins;
10334 break;
10336 case MONO_CEE_MONO_DYN_CALL: {
10337 MonoCallInst *call;
10339 /* It would be easier to call a trampoline, but that would put an
10340 * extra frame on the stack, confusing exception handling. So
10341 * implement it inline using an opcode for now. */
10344 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10345 if (!cfg->dyn_call_var) {
10346 cfg->dyn_call_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
10347 /* prevent it from being register allocated */
10348 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
10351 /* Has to use a call inst since local regalloc expects it */
10352 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10353 ins = (MonoInst*)call;
10354 sp -= 2;
10355 ins->sreg1 = sp [0]->dreg;
10356 ins->sreg2 = sp [1]->dreg;
10357 MONO_ADD_INS (cfg->cbb, ins);
10359 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
10360 /* OP_DYN_CALL might need to allocate a dynamically sized param area */
10361 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10363 inline_costs += CALL_COST * MIN(10, num_calls++);
10364 break;
10366 case MONO_CEE_MONO_MEMORY_BARRIER: {
10367 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10368 mini_emit_memory_barrier (cfg, (int)n);
10369 break;
10371 case MONO_CEE_MONO_ATOMIC_STORE_I4: {
10372 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10373 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
10375 sp -= 2;
10377 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
10378 ins->dreg = sp [0]->dreg;
10379 ins->sreg1 = sp [1]->dreg;
10380 ins->backend.memory_barrier_kind = (int)n;
10381 MONO_ADD_INS (cfg->cbb, ins);
10382 break;
10384 case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR: {
10385 CHECK_STACK (1);
10386 --sp;
10388 dreg = alloc_preg (cfg);
10389 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
10390 *sp++ = ins;
10391 break;
10393 case MONO_CEE_MONO_CALLI_EXTRA_ARG: {
10394 MonoInst *addr;
10395 MonoMethodSignature *fsig;
10396 MonoInst *arg;
10399 /* This is the same as CEE_CALLI, but passes an additional argument
10400 * to the called method in llvmonly mode.
10401 * This is only used by delegate invoke wrappers to call the
10402 * actual delegate method. */
10404 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
10406 ins = NULL;
10408 cmethod = NULL;
10409 CHECK_STACK (1);
10410 --sp;
10411 addr = *sp;
10412 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
10413 CHECK_CFG_ERROR;
10415 if (cfg->llvm_only)
10416 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
10418 n = fsig->param_count + fsig->hasthis + 1;
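/* the + 1 accounts for the extra argument pushed by the wrapper; it is
 * popped together with the call args and picked off as 'arg' below */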
10420 CHECK_STACK (n);
10422 sp -= n;
10423 arg = sp [n - 1];
10425 if (cfg->llvm_only) {
10427 /* The lowest bit of 'arg' determines whether the callee uses the gsharedvt
10428 * cconv. This is set by mono_init_delegate (). */
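/* A sketch of the dispatch emitted below, in plain C rather than IR
 * (dispatch_gsharedvt, dispatch_out_wrapper and make_pair are placeholder
 * names for the mini_emit_extra_arg_calli () calls that follow):
 *
 *   if (((gsize) arg) & 1)
 *       dispatch_gsharedvt (callee, (gsize) arg ^ 1);   // tagged: callee uses the gsharedvt cconv, clear the tag
 *   else
 *       dispatch_out_wrapper (make_pair (callee, arg)); // untagged: route through a gsharedvt-out wrapper
 */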
10430 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
10431 MonoInst *callee = addr;
10432 MonoInst *call, *localloc_ins;
10433 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
10434 int low_bit_reg = alloc_preg (cfg);
10436 NEW_BBLOCK (cfg, is_gsharedvt_bb);
10437 NEW_BBLOCK (cfg, end_bb);
10439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
10440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
10441 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
10443 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
10444 addr = emit_get_rgctx_sig (cfg, context_used,
10445 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
10447 /* ADDR points to a gsharedvt-out wrapper; we have to pass <callee, arg> as an extra arg. */
10449 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
10450 ins->dreg = alloc_preg (cfg);
10451 ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
10452 MONO_ADD_INS (cfg->cbb, ins);
10453 localloc_ins = ins;
10454 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10455 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
10456 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
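/* The two stores above build a two-slot pair (a summary of the layout,
 * not extra state):
 *   ((gpointer*)pair) [0] = callee;  // the original call target
 *   ((gpointer*)pair) [1] = arg;     // the extra arg set up by mono_init_delegate ()
 * and the pair's address becomes the single extra arg passed to the
 * gsharedvt-out wrapper below. */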
10458 call = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
10459 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10461 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
10462 MONO_START_BB (cfg, is_gsharedvt_bb);
10463 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
10464 ins = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
10465 ins->dreg = call->dreg;
10467 MONO_START_BB (cfg, end_bb);
10468 } else {
10469 /* Caller uses a normal calling conv */
10471 MonoInst *callee = addr;
10472 MonoInst *call, *localloc_ins;
10473 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
10474 int low_bit_reg = alloc_preg (cfg);
10476 NEW_BBLOCK (cfg, is_gsharedvt_bb);
10477 NEW_BBLOCK (cfg, end_bb);
10479 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
10480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
10481 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
10483 /* Normal case: callee uses a normal cconv, no conversion is needed */
10484 call = mini_emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
10485 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10486 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
10487 MONO_START_BB (cfg, is_gsharedvt_bb);
10488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
10489 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
10490 MONO_ADD_INS (cfg->cbb, addr);
10492 /* ADDR points to a gsharedvt-in wrapper; we have to pass <callee, arg> as an extra arg. */
10494 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
10495 ins->dreg = alloc_preg (cfg);
10496 ins->inst_imm = 2 * TARGET_SIZEOF_VOID_P;
10497 MONO_ADD_INS (cfg->cbb, ins);
10498 localloc_ins = ins;
10499 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10500 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
10501 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, TARGET_SIZEOF_VOID_P, arg->dreg);
10503 ins = mini_emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
10504 ins->dreg = call->dreg;
10505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10507 MONO_START_BB (cfg, end_bb);
10509 } else {
10510 /* Same as CEE_CALLI */
10511 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
10513 /* We pass the address to the gsharedvt trampoline in the rgctx reg */
10515 MonoInst *callee = addr;
10517 addr = emit_get_rgctx_sig (cfg, context_used,
10518 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
10519 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
10520 } else {
10521 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
10525 if (!MONO_TYPE_IS_VOID (fsig->ret))
10526 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10528 CHECK_CFG_EXCEPTION;
10530 ins_flag = 0;
10531 constrained_class = NULL;
10532 break;
10534 case MONO_CEE_MONO_LDDOMAIN:
10535 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10536 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
10537 *sp++ = ins;
10538 break;
10539 case MONO_CEE_MONO_GET_LAST_ERROR:
10540 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10542 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
10543 ins->dreg = alloc_dreg (cfg, STACK_I4);
10544 ins->type = STACK_I4;
10545 MONO_ADD_INS (cfg->cbb, ins);
10547 *sp++ = ins;
10548 break;
10549 case MONO_CEE_MONO_GET_RGCTX_ARG:
10550 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10552 mono_create_rgctx_var (cfg);
10554 MONO_INST_NEW (cfg, ins, OP_MOVE);
10555 ins->dreg = alloc_dreg (cfg, STACK_PTR);
10556 ins->sreg1 = cfg->rgctx_var->dreg;
10557 ins->type = STACK_PTR;
10558 MONO_ADD_INS (cfg->cbb, ins);
10560 *sp++ = ins;
10561 break;
10563 case MONO_CEE_ARGLIST: {
10564 /* somewhat similar to LDTOKEN */
10565 MonoInst *addr, *vtvar;
10566 vtvar = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.argumenthandle_class), OP_LOCAL);
10568 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10569 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10571 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10572 ins->type = STACK_VTYPE;
10573 ins->klass = mono_defaults.argumenthandle_class;
10574 *sp++ = ins;
10575 break;
10577 case MONO_CEE_CEQ:
10578 case MONO_CEE_CGT:
10579 case MONO_CEE_CGT_UN:
10580 case MONO_CEE_CLT:
10581 case MONO_CEE_CLT_UN: {
10582 MonoInst *cmp, *arg1, *arg2;
10584 sp -= 2;
10585 arg1 = sp [0];
10586 arg2 = sp [1];
10589 /* The following transforms:
10590 * CEE_CEQ into OP_CEQ
10591 * CEE_CGT into OP_CGT
10592 * CEE_CGT_UN into OP_CGT_UN
10593 * CEE_CLT into OP_CLT
10594 * CEE_CLT_UN into OP_CLT_UN */
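/* Both opcode ranges are contiguous and laid out in the same order, so a
 * single offset performs the whole mapping, e.g.
 *   (OP_CEQ - CEE_CEQ) + CEE_CGT_UN == OP_CGT_UN
 * which is why the single expression below handles all five opcodes. */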
10596 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10598 MONO_INST_NEW (cfg, ins, cmp->opcode);
10599 cmp->sreg1 = arg1->dreg;
10600 cmp->sreg2 = arg2->dreg;
10601 type_from_op (cfg, cmp, arg1, arg2);
10602 CHECK_TYPE (cmp);
10603 add_widen_op (cfg, cmp, &arg1, &arg2);
10604 if ((arg1->type == STACK_I8) || ((TARGET_SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
10605 cmp->opcode = OP_LCOMPARE;
10606 else if (arg1->type == STACK_R4)
10607 cmp->opcode = OP_RCOMPARE;
10608 else if (arg1->type == STACK_R8)
10609 cmp->opcode = OP_FCOMPARE;
10610 else
10611 cmp->opcode = OP_ICOMPARE;
10612 MONO_ADD_INS (cfg->cbb, cmp);
10613 ins->type = STACK_I4;
10614 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
10615 type_from_op (cfg, ins, arg1, arg2);
10617 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
10619 /* The backends expect the fceq opcodes to do the
10620 * comparison too. */
10622 ins->sreg1 = cmp->sreg1;
10623 ins->sreg2 = cmp->sreg2;
10624 NULLIFY_INS (cmp);
10626 MONO_ADD_INS (cfg->cbb, ins);
10627 *sp++ = ins;
10628 break;
10630 case MONO_CEE_LDFTN: {
10631 MonoInst *argconst;
10632 MonoMethod *cil_method;
10634 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10635 CHECK_CFG_ERROR;
10637 mono_class_init_internal (cmethod->klass);
10639 mono_save_token_info (cfg, image, n, cmethod);
10641 context_used = mini_method_check_context_used (cfg, cmethod);
10643 cil_method = cmethod;
10644 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10645 emit_method_access_failure (cfg, method, cil_method);
10647 if (mono_security_core_clr_enabled ())
10648 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10651 /* Optimize the common case of ldftn+delegate creation */
10653 if ((sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
10654 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
10655 if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
10656 MonoInst *target_ins, *handle_ins;
10657 MonoMethod *invoke;
10658 int invoke_context_used;
10660 invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
10661 if (!invoke || !mono_method_signature_internal (invoke))
10662 LOAD_ERROR;
10664 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10666 target_ins = sp [-1];
10668 if (mono_security_core_clr_enabled ())
10669 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
10671 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10672 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10673 if (mono_method_signature_internal (invoke)->param_count == mono_method_signature_internal (cmethod)->param_count) {
10674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10675 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10679 if ((invoke_context_used == 0 || !cfg->gsharedvt) || cfg->llvm_only) {
10680 if (cfg->verbose_level > 3)
10681 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
10682 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, FALSE))) {
10683 sp --;
10684 *sp = handle_ins;
10685 CHECK_CFG_EXCEPTION;
10686 sp ++;
10687 next_ip += 5;
10688 il_op = MONO_CEE_NEWOBJ;
10689 break;
10690 } else {
10691 CHECK_CFG_ERROR;
10697 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10698 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10699 *sp++ = ins;
10701 inline_costs += CALL_COST * MIN(10, num_calls++);
10702 break;
10704 case MONO_CEE_LDVIRTFTN: {
10705 MonoInst *args [2];
10707 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10708 CHECK_CFG_ERROR;
10710 mono_class_init_internal (cmethod->klass);
10712 context_used = mini_method_check_context_used (cfg, cmethod);
10714 if (mono_security_core_clr_enabled ())
10715 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10718 /* Optimize the common case of ldvirtftn+delegate creation */
10720 if (previous_il_op == MONO_CEE_DUP && (sp > stack_start) && (next_ip + 4 < end) && ip_in_bb (cfg, cfg->cbb, next_ip) && (next_ip [0] == CEE_NEWOBJ)) {
10721 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (next_ip + 1), NULL, generic_context);
10722 if (ctor_method && (m_class_get_parent (ctor_method->klass) == mono_defaults.multicastdelegate_class)) {
10723 MonoInst *target_ins, *handle_ins;
10724 MonoMethod *invoke;
10725 int invoke_context_used;
10726 const gboolean is_virtual = (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) != 0;
10728 invoke = mono_get_delegate_invoke_internal (ctor_method->klass);
10729 if (!invoke || !mono_method_signature_internal (invoke))
10730 LOAD_ERROR;
10732 invoke_context_used = mini_method_check_context_used (cfg, invoke);
10734 target_ins = sp [-1];
10736 if (mono_security_core_clr_enabled ())
10737 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
10739 if (invoke_context_used == 0 || !cfg->gsharedvt || cfg->llvm_only) {
10740 if (cfg->verbose_level > 3)
10741 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip + 6, NULL));
10742 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, invoke_context_used, is_virtual))) {
10743 sp -= 2;
10744 *sp = handle_ins;
10745 CHECK_CFG_EXCEPTION;
10746 next_ip += 5;
10747 previous_il_op = MONO_CEE_NEWOBJ;
10748 sp ++;
10749 break;
10750 } else {
10751 CHECK_CFG_ERROR;
10757 --sp;
10758 args [0] = *sp;
10760 args [1] = emit_get_rgctx_method (cfg, context_used,
10761 cmethod, MONO_RGCTX_INFO_METHOD);
10763 if (context_used)
10764 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10765 else
10766 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10768 inline_costs += CALL_COST * MIN(10, num_calls++);
10769 break;
10771 case MONO_CEE_LOCALLOC: {
10772 MonoBasicBlock *non_zero_bb, *end_bb;
10773 int alloc_ptr = alloc_preg (cfg);
10774 --sp;
10775 if (sp != stack_start)
10776 UNVERIFIED;
10777 if (cfg->method != method)
10779 /* Inlining this into a loop in a parent could lead to
10780 * stack overflows, which is different behavior from the
10781 * non-inlined case, so disable inlining in this case. */
10783 INLINE_FAILURE("localloc");
10785 NEW_BBLOCK (cfg, non_zero_bb);
10786 NEW_BBLOCK (cfg, end_bb);
10788 /* if size != zero */
10789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10790 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
10792 // size is zero, so the result is NULL
10793 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
10794 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
10796 MONO_START_BB (cfg, non_zero_bb);
10797 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10798 ins->dreg = alloc_ptr;
10799 ins->sreg1 = sp [0]->dreg;
10800 ins->type = STACK_PTR;
10801 MONO_ADD_INS (cfg->cbb, ins);
10803 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10804 if (init_locals)
10805 ins->flags |= MONO_INST_INIT;
10807 MONO_START_BB (cfg, end_bb);
10808 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
10809 ins->type = STACK_PTR;
10811 *sp++ = ins;
10812 break;
10814 case MONO_CEE_ENDFILTER: {
10815 MonoExceptionClause *clause, *nearest;
10816 int cc;
10818 --sp;
10819 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10820 UNVERIFIED;
10821 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10822 ins->sreg1 = (*sp)->dreg;
10823 MONO_ADD_INS (cfg->cbb, ins);
10824 start_new_bblock = 1;
10826 nearest = NULL;
10827 for (cc = 0; cc < header->num_clauses; ++cc) {
10828 clause = &header->clauses [cc];
10829 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10830 ((next_ip - header->code) > clause->data.filter_offset && (next_ip - header->code) <= clause->handler_offset) &&
10831 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
10832 nearest = clause;
10834 g_assert (nearest);
10835 if ((next_ip - header->code) != nearest->handler_offset)
10836 UNVERIFIED;
10838 break;
10840 case MONO_CEE_UNALIGNED_:
10841 ins_flag |= MONO_INST_UNALIGNED;
10842 /* FIXME: record alignment? we can assume 1 for now */
10843 break;
10844 case MONO_CEE_VOLATILE_:
10845 ins_flag |= MONO_INST_VOLATILE;
10846 break;
10847 case MONO_CEE_TAIL_:
10848 ins_flag |= MONO_INST_TAILCALL;
10849 cfg->flags |= MONO_CFG_HAS_TAILCALL;
10850 /* Can't inline tailcalls at this time */
10851 inline_costs += 100000;
10852 break;
10853 case MONO_CEE_INITOBJ:
10854 --sp;
10855 klass = mini_get_class (method, token, generic_context);
10856 CHECK_TYPELOAD (klass);
10857 if (mini_class_is_reference (klass))
10858 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10859 else
10860 mini_emit_initobj (cfg, *sp, NULL, klass);
10861 inline_costs += 1;
10862 break;
10863 case MONO_CEE_CONSTRAINED_:
10864 constrained_class = mini_get_class (method, token, generic_context);
10865 CHECK_TYPELOAD (constrained_class);
10866 break;
10867 case MONO_CEE_CPBLK:
10868 sp -= 3;
10869 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
10870 ins_flag = 0;
10871 inline_costs += 1;
10872 break;
10873 case MONO_CEE_INITBLK:
10874 sp -= 3;
10875 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
10876 ins_flag = 0;
10877 inline_costs += 1;
10878 break;
10879 case MONO_CEE_NO_:
10880 if (ip [2] & 1)
10881 ins_flag |= MONO_INST_NOTYPECHECK;
10882 if (ip [2] & 2)
10883 ins_flag |= MONO_INST_NORANGECHECK;
10884 /* we ignore the no-nullcheck for now since we
10885 * really do it explicitly only when doing callvirt->call */
10887 break;
10888 case MONO_CEE_RETHROW: {
10889 MonoInst *load;
10890 int handler_offset = -1;
10892 for (i = 0; i < header->num_clauses; ++i) {
10893 MonoExceptionClause *clause = &header->clauses [i];
10894 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10895 handler_offset = clause->handler_offset;
10896 break;
10900 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
10902 if (handler_offset == -1)
10903 UNVERIFIED;
10905 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10906 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10907 ins->sreg1 = load->dreg;
10908 MONO_ADD_INS (cfg->cbb, ins);
10910 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10911 MONO_ADD_INS (cfg->cbb, ins);
10913 sp = stack_start;
10914 link_bblock (cfg, cfg->cbb, end_bblock);
10915 start_new_bblock = 1;
10916 break;
10918 case MONO_CEE_MONO_RETHROW: {
10919 if (sp [-1]->type != STACK_OBJ)
10920 UNVERIFIED;
10922 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10923 --sp;
10924 ins->sreg1 = sp [0]->dreg;
10925 cfg->cbb->out_of_line = TRUE;
10926 MONO_ADD_INS (cfg->cbb, ins);
10927 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10928 MONO_ADD_INS (cfg->cbb, ins);
10929 sp = stack_start;
10931 link_bblock (cfg, cfg->cbb, end_bblock);
10932 start_new_bblock = 1;
10933 /* This can complicate code generation for llvm since the return value might not be defined */
10934 if (COMPILE_LLVM (cfg))
10935 INLINE_FAILURE ("mono_rethrow");
10936 break;
10938 case MONO_CEE_SIZEOF: {
10939 guint32 val;
10940 int ialign;
10942 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (m_class_get_image (method->klass)) && !generic_context) {
10943 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
10944 CHECK_CFG_ERROR;
10946 val = mono_type_size (type, &ialign);
10947 } else {
10948 MonoClass *klass = mini_get_class (method, token, generic_context);
10949 CHECK_TYPELOAD (klass);
10951 val = mono_type_size (m_class_get_byval_arg (klass), &ialign);
10953 if (mini_is_gsharedvt_klass (klass))
10954 GSHAREDVT_FAILURE (il_op);
10956 EMIT_NEW_ICONST (cfg, ins, val);
10957 *sp++ = ins;
10958 break;
10960 case MONO_CEE_REFANYTYPE: {
10961 MonoInst *src_var, *src;
10963 GSHAREDVT_FAILURE (il_op);
10965 --sp;
10967 // FIXME:
10968 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10969 if (!src_var)
10970 src_var = mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.typed_reference_class), OP_LOCAL, sp [0]->dreg);
10971 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10972 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, m_class_get_byval_arg (mono_defaults.typehandle_class), src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
10973 *sp++ = ins;
10974 break;
10976 case MONO_CEE_READONLY_:
10977 readonly = TRUE;
10978 break;
10980 case MONO_CEE_UNUSED56:
10981 case MONO_CEE_UNUSED57:
10982 case MONO_CEE_UNUSED70:
10983 case MONO_CEE_UNUSED:
10984 case MONO_CEE_UNUSED99:
10985 case MONO_CEE_UNUSED58:
10986 case MONO_CEE_UNUSED1:
10987 UNVERIFIED;
10989 default:
10990 g_warning ("opcode 0x%02x not handled", il_op);
10991 UNVERIFIED;
10994 if (start_new_bblock != 1)
10995 UNVERIFIED;
10997 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
10998 if (cfg->cbb->next_bb) {
10999 /* This could already be set because of inlining, #693905 */
11000 MonoBasicBlock *bb = cfg->cbb;
11002 while (bb->next_bb)
11003 bb = bb->next_bb;
11004 bb->next_bb = end_bblock;
11005 } else {
11006 cfg->cbb->next_bb = end_bblock;
11009 if (cfg->method == method && cfg->domainvar) {
11010 MonoInst *store;
11011 MonoInst *get_domain;
11013 cfg->cbb = init_localsbb;
11015 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11016 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11017 MONO_ADD_INS (cfg->cbb, store);
11018 cfg->domainvar_inited = TRUE;
11021 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11022 if (cfg->compile_aot)
11023 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11024 mono_get_got_var (cfg);
11025 #endif
11027 if (cfg->method == method && cfg->got_var)
11028 mono_emit_load_got_addr (cfg);
11030 if (init_localsbb) {
11031 cfg->cbb = init_localsbb;
11032 cfg->ip = NULL;
11033 for (i = 0; i < header->num_locals; ++i) {
11035 /* Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (),
11036 * which need the trampoline code to work. */
11038 if (MONO_TYPE_ISSTRUCT (header->locals [i]))
11039 cfg->cbb = init_localsbb2;
11040 else
11041 cfg->cbb = init_localsbb;
11042 emit_init_local (cfg, i, header->locals [i], init_locals);
11046 if (cfg->init_ref_vars && cfg->method == method) {
11047 /* Emit initialization for ref vars */
11048 // FIXME: Avoid duplicate initialization for IL locals.
11049 for (i = 0; i < cfg->num_varinfo; ++i) {
11050 MonoInst *ins = cfg->varinfo [i];
11052 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11053 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11057 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
11058 cfg->cbb = init_localsbb;
11059 emit_push_lmf (cfg);
11062 cfg->cbb = init_localsbb;
11063 mini_profiler_emit_enter (cfg);
11065 if (seq_points) {
11066 MonoBasicBlock *bb;
11069 /* Make seq points at backward branch targets interruptible. */
11071 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11072 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11073 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11076 /* Add a sequence point for method entry/exit events */
11077 if (seq_points && cfg->gen_sdb_seq_points) {
11078 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11079 MONO_ADD_INS (init_localsbb, ins);
11080 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11081 MONO_ADD_INS (cfg->bb_exit, ins);
11085 /* Add seq points for IL offsets which have line number info but for which no seq point was
11086 * generated during JITting because the code they refer to was dead (#11880). */
11088 if (sym_seq_points) {
11089 for (i = 0; i < header->code_size; ++i) {
11090 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11091 MonoInst *ins;
11093 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11094 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11099 cfg->ip = NULL;
11101 if (cfg->method == method) {
11102 compute_bb_regions (cfg);
11103 } else {
11104 MonoBasicBlock *bb;
11105 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
11106 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
11107 bb->real_offset = inline_offset;
11111 if (inline_costs < 0) {
11112 char *mname;
11114 /* Method is too large */
11115 mname = mono_method_full_name (method, TRUE);
11116 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
11117 g_free (mname);
11120 if ((cfg->verbose_level > 2) && (cfg->method == method))
11121 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11123 goto cleanup;
11125 mono_error_exit:
11126 if (cfg->verbose_level > 3)
11127 g_print ("exiting due to error");
11129 g_assert (!mono_error_ok (&cfg->error));
11130 goto cleanup;
11132 exception_exit:
11133 if (cfg->verbose_level > 3)
11134 g_print ("exiting due to exception");
11136 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11137 goto cleanup;
11139 unverified:
11140 if (cfg->verbose_level > 3)
11141 g_print ("exiting due to invalid il");
11143 set_exception_type_from_invalid_il (cfg, method, ip);
11144 goto cleanup;
11146 cleanup:
11147 g_slist_free (class_inits);
11148 mono_basic_block_free (original_bb);
11149 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
11150 if (cfg->exception_type)
11151 return -1;
11152 else
11153 return inline_costs;
11156 static int
11157 store_membase_reg_to_store_membase_imm (int opcode)
11159 switch (opcode) {
11160 case OP_STORE_MEMBASE_REG:
11161 return OP_STORE_MEMBASE_IMM;
11162 case OP_STOREI1_MEMBASE_REG:
11163 return OP_STOREI1_MEMBASE_IMM;
11164 case OP_STOREI2_MEMBASE_REG:
11165 return OP_STOREI2_MEMBASE_IMM;
11166 case OP_STOREI4_MEMBASE_REG:
11167 return OP_STOREI4_MEMBASE_IMM;
11168 case OP_STOREI8_MEMBASE_REG:
11169 return OP_STOREI8_MEMBASE_IMM;
11170 default:
11171 g_assert_not_reached ();
11174 return -1;
11178 mono_op_to_op_imm (int opcode)
11180 switch (opcode) {
11181 case OP_IADD:
11182 return OP_IADD_IMM;
11183 case OP_ISUB:
11184 return OP_ISUB_IMM;
11185 case OP_IDIV:
11186 return OP_IDIV_IMM;
11187 case OP_IDIV_UN:
11188 return OP_IDIV_UN_IMM;
11189 case OP_IREM:
11190 return OP_IREM_IMM;
11191 case OP_IREM_UN:
11192 return OP_IREM_UN_IMM;
11193 case OP_IMUL:
11194 return OP_IMUL_IMM;
11195 case OP_IAND:
11196 return OP_IAND_IMM;
11197 case OP_IOR:
11198 return OP_IOR_IMM;
11199 case OP_IXOR:
11200 return OP_IXOR_IMM;
11201 case OP_ISHL:
11202 return OP_ISHL_IMM;
11203 case OP_ISHR:
11204 return OP_ISHR_IMM;
11205 case OP_ISHR_UN:
11206 return OP_ISHR_UN_IMM;
11208 case OP_LADD:
11209 return OP_LADD_IMM;
11210 case OP_LSUB:
11211 return OP_LSUB_IMM;
11212 case OP_LAND:
11213 return OP_LAND_IMM;
11214 case OP_LOR:
11215 return OP_LOR_IMM;
11216 case OP_LXOR:
11217 return OP_LXOR_IMM;
11218 case OP_LSHL:
11219 return OP_LSHL_IMM;
11220 case OP_LSHR:
11221 return OP_LSHR_IMM;
11222 case OP_LSHR_UN:
11223 return OP_LSHR_UN_IMM;
11224 #if SIZEOF_REGISTER == 8
11225 case OP_LMUL:
11226 return OP_LMUL_IMM;
11227 case OP_LREM:
11228 return OP_LREM_IMM;
11229 #endif
11231 case OP_COMPARE:
11232 return OP_COMPARE_IMM;
11233 case OP_ICOMPARE:
11234 return OP_ICOMPARE_IMM;
11235 case OP_LCOMPARE:
11236 return OP_LCOMPARE_IMM;
11238 case OP_STORE_MEMBASE_REG:
11239 return OP_STORE_MEMBASE_IMM;
11240 case OP_STOREI1_MEMBASE_REG:
11241 return OP_STOREI1_MEMBASE_IMM;
11242 case OP_STOREI2_MEMBASE_REG:
11243 return OP_STOREI2_MEMBASE_IMM;
11244 case OP_STOREI4_MEMBASE_REG:
11245 return OP_STOREI4_MEMBASE_IMM;
11247 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11248 case OP_X86_PUSH:
11249 return OP_X86_PUSH_IMM;
11250 case OP_X86_COMPARE_MEMBASE_REG:
11251 return OP_X86_COMPARE_MEMBASE_IMM;
11252 #endif
11253 #if defined(TARGET_AMD64)
11254 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11255 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11256 #endif
11257 case OP_VOIDCALL_REG:
11258 return OP_VOIDCALL;
11259 case OP_CALL_REG:
11260 return OP_CALL;
11261 case OP_LCALL_REG:
11262 return OP_LCALL;
11263 case OP_FCALL_REG:
11264 return OP_FCALL;
11265 case OP_LOCALLOC:
11266 return OP_LOCALLOC_IMM;
11269 return -1;
11272 static int
11273 stind_to_store_membase (int opcode)
11275 switch (opcode) {
11276 case MONO_CEE_STIND_I1:
11277 return OP_STOREI1_MEMBASE_REG;
11278 case MONO_CEE_STIND_I2:
11279 return OP_STOREI2_MEMBASE_REG;
11280 case MONO_CEE_STIND_I4:
11281 return OP_STOREI4_MEMBASE_REG;
11282 case MONO_CEE_STIND_I:
11283 case MONO_CEE_STIND_REF:
11284 return OP_STORE_MEMBASE_REG;
11285 case MONO_CEE_STIND_I8:
11286 return OP_STOREI8_MEMBASE_REG;
11287 case MONO_CEE_STIND_R4:
11288 return OP_STORER4_MEMBASE_REG;
11289 case MONO_CEE_STIND_R8:
11290 return OP_STORER8_MEMBASE_REG;
11291 default:
11292 g_assert_not_reached ();
11295 return -1;
11299 mono_load_membase_to_load_mem (int opcode)
11301 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11302 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11303 switch (opcode) {
11304 case OP_LOAD_MEMBASE:
11305 return OP_LOAD_MEM;
11306 case OP_LOADU1_MEMBASE:
11307 return OP_LOADU1_MEM;
11308 case OP_LOADU2_MEMBASE:
11309 return OP_LOADU2_MEM;
11310 case OP_LOADI4_MEMBASE:
11311 return OP_LOADI4_MEM;
11312 case OP_LOADU4_MEMBASE:
11313 return OP_LOADU4_MEM;
11314 #if SIZEOF_REGISTER == 8
11315 case OP_LOADI8_MEMBASE:
11316 return OP_LOADI8_MEM;
11317 #endif
11319 #endif
11321 return -1;
11324 static inline int
11325 op_to_op_dest_membase (int store_opcode, int opcode)
11327 #if defined(TARGET_X86)
11328 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
11329 return -1;
11331 switch (opcode) {
11332 case OP_IADD:
11333 return OP_X86_ADD_MEMBASE_REG;
11334 case OP_ISUB:
11335 return OP_X86_SUB_MEMBASE_REG;
11336 case OP_IAND:
11337 return OP_X86_AND_MEMBASE_REG;
11338 case OP_IOR:
11339 return OP_X86_OR_MEMBASE_REG;
11340 case OP_IXOR:
11341 return OP_X86_XOR_MEMBASE_REG;
11342 case OP_ADD_IMM:
11343 case OP_IADD_IMM:
11344 return OP_X86_ADD_MEMBASE_IMM;
11345 case OP_SUB_IMM:
11346 case OP_ISUB_IMM:
11347 return OP_X86_SUB_MEMBASE_IMM;
11348 case OP_AND_IMM:
11349 case OP_IAND_IMM:
11350 return OP_X86_AND_MEMBASE_IMM;
11351 case OP_OR_IMM:
11352 case OP_IOR_IMM:
11353 return OP_X86_OR_MEMBASE_IMM;
11354 case OP_XOR_IMM:
11355 case OP_IXOR_IMM:
11356 return OP_X86_XOR_MEMBASE_IMM;
11357 case OP_MOVE:
11358 return OP_NOP;
11360 #endif
11362 #if defined(TARGET_AMD64)
11363 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
11364 return -1;
11366 switch (opcode) {
11367 case OP_IADD:
11368 return OP_X86_ADD_MEMBASE_REG;
11369 case OP_ISUB:
11370 return OP_X86_SUB_MEMBASE_REG;
11371 case OP_IAND:
11372 return OP_X86_AND_MEMBASE_REG;
11373 case OP_IOR:
11374 return OP_X86_OR_MEMBASE_REG;
11375 case OP_IXOR:
11376 return OP_X86_XOR_MEMBASE_REG;
11377 case OP_IADD_IMM:
11378 return OP_X86_ADD_MEMBASE_IMM;
11379 case OP_ISUB_IMM:
11380 return OP_X86_SUB_MEMBASE_IMM;
11381 case OP_IAND_IMM:
11382 return OP_X86_AND_MEMBASE_IMM;
11383 case OP_IOR_IMM:
11384 return OP_X86_OR_MEMBASE_IMM;
11385 case OP_IXOR_IMM:
11386 return OP_X86_XOR_MEMBASE_IMM;
11387 case OP_LADD:
11388 return OP_AMD64_ADD_MEMBASE_REG;
11389 case OP_LSUB:
11390 return OP_AMD64_SUB_MEMBASE_REG;
11391 case OP_LAND:
11392 return OP_AMD64_AND_MEMBASE_REG;
11393 case OP_LOR:
11394 return OP_AMD64_OR_MEMBASE_REG;
11395 case OP_LXOR:
11396 return OP_AMD64_XOR_MEMBASE_REG;
11397 case OP_ADD_IMM:
11398 case OP_LADD_IMM:
11399 return OP_AMD64_ADD_MEMBASE_IMM;
11400 case OP_SUB_IMM:
11401 case OP_LSUB_IMM:
11402 return OP_AMD64_SUB_MEMBASE_IMM;
11403 case OP_AND_IMM:
11404 case OP_LAND_IMM:
11405 return OP_AMD64_AND_MEMBASE_IMM;
11406 case OP_OR_IMM:
11407 case OP_LOR_IMM:
11408 return OP_AMD64_OR_MEMBASE_IMM;
11409 case OP_XOR_IMM:
11410 case OP_LXOR_IMM:
11411 return OP_AMD64_XOR_MEMBASE_IMM;
11412 case OP_MOVE:
11413 return OP_NOP;
11415 #endif
11417 return -1;
11420 static inline int
11421 op_to_op_store_membase (int store_opcode, int opcode)
11423 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11424 switch (opcode) {
11425 case OP_ICEQ:
11426 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11427 return OP_X86_SETEQ_MEMBASE;
11428 case OP_CNE:
11429 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11430 return OP_X86_SETNE_MEMBASE;
11432 #endif
11434 return -1;
11437 static inline int
11438 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
11440 #ifdef TARGET_X86
11441 /* FIXME: This has sign extension issues */
11443 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11444 return OP_X86_COMPARE_MEMBASE8_IMM;
11447 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11448 return -1;
11450 switch (opcode) {
11451 case OP_X86_PUSH:
11452 return OP_X86_PUSH_MEMBASE;
11453 case OP_COMPARE_IMM:
11454 case OP_ICOMPARE_IMM:
11455 return OP_X86_COMPARE_MEMBASE_IMM;
11456 case OP_COMPARE:
11457 case OP_ICOMPARE:
11458 return OP_X86_COMPARE_MEMBASE_REG;
11460 #endif
11462 #ifdef TARGET_AMD64
11463 /* FIXME: This has sign extension issues */
11465 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11466 return OP_X86_COMPARE_MEMBASE8_IMM;
11469 switch (opcode) {
11470 case OP_X86_PUSH:
11471 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
11472 return OP_X86_PUSH_MEMBASE;
11473 break;
11474 /* FIXME: This only works for 32 bit immediates
11475 case OP_COMPARE_IMM:
11476 case OP_LCOMPARE_IMM:
11477 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11478 return OP_AMD64_COMPARE_MEMBASE_IMM;
11479 */
11480 case OP_ICOMPARE_IMM:
11481 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11482 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11483 break;
11484 case OP_COMPARE:
11485 case OP_LCOMPARE:
11486 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
11487 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11488 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
11489 return OP_AMD64_COMPARE_MEMBASE_REG;
11490 break;
11491 case OP_ICOMPARE:
11492 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11493 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11494 break;
11496 #endif
11498 return -1;
11501 static inline int
11502 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
11504 #ifdef TARGET_X86
11505 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11506 return -1;
11508 switch (opcode) {
11509 case OP_COMPARE:
11510 case OP_ICOMPARE:
11511 return OP_X86_COMPARE_REG_MEMBASE;
11512 case OP_IADD:
11513 return OP_X86_ADD_REG_MEMBASE;
11514 case OP_ISUB:
11515 return OP_X86_SUB_REG_MEMBASE;
11516 case OP_IAND:
11517 return OP_X86_AND_REG_MEMBASE;
11518 case OP_IOR:
11519 return OP_X86_OR_REG_MEMBASE;
11520 case OP_IXOR:
11521 return OP_X86_XOR_REG_MEMBASE;
11523 #endif
11525 #ifdef TARGET_AMD64
11526 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
11527 switch (opcode) {
11528 case OP_ICOMPARE:
11529 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11530 case OP_IADD:
11531 return OP_X86_ADD_REG_MEMBASE;
11532 case OP_ISUB:
11533 return OP_X86_SUB_REG_MEMBASE;
11534 case OP_IAND:
11535 return OP_X86_AND_REG_MEMBASE;
11536 case OP_IOR:
11537 return OP_X86_OR_REG_MEMBASE;
11538 case OP_IXOR:
11539 return OP_X86_XOR_REG_MEMBASE;
11541 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
11542 switch (opcode) {
11543 case OP_COMPARE:
11544 case OP_LCOMPARE:
11545 return OP_AMD64_COMPARE_REG_MEMBASE;
11546 case OP_LADD:
11547 return OP_AMD64_ADD_REG_MEMBASE;
11548 case OP_LSUB:
11549 return OP_AMD64_SUB_REG_MEMBASE;
11550 case OP_LAND:
11551 return OP_AMD64_AND_REG_MEMBASE;
11552 case OP_LOR:
11553 return OP_AMD64_OR_REG_MEMBASE;
11554 case OP_LXOR:
11555 return OP_AMD64_XOR_REG_MEMBASE;
11558 #endif
11560 return -1;
11564 mono_op_to_op_imm_noemul (int opcode)
11566 switch (opcode) {
11567 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11568 case OP_LSHR:
11569 case OP_LSHL:
11570 case OP_LSHR_UN:
11571 return -1;
11572 #endif
11573 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11574 case OP_IDIV:
11575 case OP_IDIV_UN:
11576 case OP_IREM:
11577 case OP_IREM_UN:
11578 return -1;
11579 #endif
11580 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
11581 case OP_IMUL:
11582 return -1;
11583 #endif
11584 default:
11585 return mono_op_to_op_imm (opcode);
11590 /** mono_handle_global_vregs:
11592 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11593 * for them. */
11595 void
11596 mono_handle_global_vregs (MonoCompile *cfg)
11598 gint32 *vreg_to_bb;
11599 MonoBasicBlock *bb;
11600 int i, pos;
11602 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
11604 #ifdef MONO_ARCH_SIMD_INTRINSICS
11605 if (cfg->uses_simd_intrinsics & MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION)
11606 mono_simd_simplify_indirection (cfg);
11607 #endif
11609 /* Find local vregs used in more than one bb */
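/* The scan below uses vreg_to_bb [vreg] as a small state machine
 * (a summary of the code that follows, not extra state):
 *   0              -> vreg not seen yet
 *   block_num + 1  -> so far only seen in the bblock with that number
 *   -1             -> seen in more than one bblock, i.e. global */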
11610 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11611 MonoInst *ins = bb->code;
11612 int block_num = bb->block_num;
11614 if (cfg->verbose_level > 2)
11615 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11617 cfg->cbb = bb;
11618 for (; ins; ins = ins->next) {
11619 const char *spec = INS_INFO (ins->opcode);
11620 int regtype = 0, regindex;
11621 gint32 prev_bb;
11623 if (G_UNLIKELY (cfg->verbose_level > 2))
11624 mono_print_ins (ins);
11626 g_assert (ins->opcode >= MONO_CEE_LAST);
11628 for (regindex = 0; regindex < 4; regindex ++) {
11629 int vreg = 0;
11631 if (regindex == 0) {
11632 regtype = spec [MONO_INST_DEST];
11633 if (regtype == ' ')
11634 continue;
11635 vreg = ins->dreg;
11636 } else if (regindex == 1) {
11637 regtype = spec [MONO_INST_SRC1];
11638 if (regtype == ' ')
11639 continue;
11640 vreg = ins->sreg1;
11641 } else if (regindex == 2) {
11642 regtype = spec [MONO_INST_SRC2];
11643 if (regtype == ' ')
11644 continue;
11645 vreg = ins->sreg2;
11646 } else if (regindex == 3) {
11647 regtype = spec [MONO_INST_SRC3];
11648 if (regtype == ' ')
11649 continue;
11650 vreg = ins->sreg3;
11653 #if SIZEOF_REGISTER == 4
11654 /* In the LLVM case, the long opcodes are not decomposed */
11655 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11657 /* Since some instructions reference the original long vreg,
11658 * and some reference the two component vregs, it is quite hard
11659 * to determine when it needs to be global. So be conservative. */
11661 if (!get_vreg_to_inst (cfg, vreg)) {
11662 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
11664 if (cfg->verbose_level > 2)
11665 printf ("LONG VREG R%d made global.\n", vreg);
11669 /* Make the component vregs volatile since the optimizations can
11670 * get confused otherwise. */
11672 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
11673 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
11675 #endif
11677 g_assert (vreg != -1);
11679 prev_bb = vreg_to_bb [vreg];
11680 if (prev_bb == 0) {
11681 /* 0 is a valid block num */
11682 vreg_to_bb [vreg] = block_num + 1;
11683 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11684 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11685 continue;
11687 if (!get_vreg_to_inst (cfg, vreg)) {
11688 if (G_UNLIKELY (cfg->verbose_level > 2))
11689 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg] - 1, block_num);
11691 switch (regtype) {
11692 case 'i':
11693 if (vreg_is_ref (cfg, vreg))
11694 mono_compile_create_var_for_vreg (cfg, mono_get_object_type (), OP_LOCAL, vreg);
11695 else
11696 mono_compile_create_var_for_vreg (cfg, mono_get_int_type (), OP_LOCAL, vreg);
11697 break;
11698 case 'l':
11699 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.int64_class), OP_LOCAL, vreg);
11700 break;
11701 case 'f':
11702 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (mono_defaults.double_class), OP_LOCAL, vreg);
11703 break;
11704 case 'v':
11705 case 'x':
11706 mono_compile_create_var_for_vreg (cfg, m_class_get_byval_arg (ins->klass), OP_LOCAL, vreg);
11707 break;
11708 default:
11709 g_assert_not_reached ();
11713 /* Flag as having been used in more than one bb */
11714 vreg_to_bb [vreg] = -1;
11720 /* If a variable is used in only one bblock, convert it into a local vreg */
11721 for (i = 0; i < cfg->num_varinfo; i++) {
11722 MonoInst *var = cfg->varinfo [i];
11723 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11725 switch (var->type) {
11726 case STACK_I4:
11727 case STACK_OBJ:
11728 case STACK_PTR:
11729 case STACK_MP:
11730 case STACK_VTYPE:
11731 #if SIZEOF_REGISTER == 8
11732 case STACK_I8:
11733 #endif
11734 #if !defined(TARGET_X86)
11735 /* Enabling this screws up the fp stack on x86 */
11736 case STACK_R8:
11737 #endif
11738 if (mono_arch_is_soft_float ())
11739 break;
11742 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
11743 break;
11746 /* Arguments are implicitly global */
11747 /* Putting R4 vars into registers doesn't work currently */
11748 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
11749 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (m_class_get_byval_arg (var->klass)->type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
11751 /* Make sure the variable's liveness interval doesn't contain a call, since
11752 * that would cause the lvreg to be spilled, making the whole optimization
11753 * useless. */
11755 /* This is too slow for JIT compilation */
11756 #if 0
11757 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11758 MonoInst *ins;
11759 int def_index, call_index, ins_index;
11760 gboolean spilled = FALSE;
11762 def_index = -1;
11763 call_index = -1;
11764 ins_index = 0;
11765 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11766 const char *spec = INS_INFO (ins->opcode);
11768 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11769 def_index = ins_index;
11771 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11772 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
11773 if (call_index > def_index) {
11774 spilled = TRUE;
11775 break;
11779 if (MONO_IS_CALL (ins))
11780 call_index = ins_index;
11782 ins_index ++;
11785 if (spilled)
11786 break;
11788 #endif
11790 if (G_UNLIKELY (cfg->verbose_level > 2))
11791 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11792 var->flags |= MONO_INST_IS_DEAD;
11793 cfg->vreg_to_inst [var->dreg] = NULL;
11795 break;
11800 /* Compress the varinfo and vars tables so the liveness computation is faster and
11801 * takes up less space. */
11803 pos = 0;
11804 for (i = 0; i < cfg->num_varinfo; ++i) {
11805 MonoInst *var = cfg->varinfo [i];
11806 if (pos < i && cfg->locals_start == i)
11807 cfg->locals_start = pos;
11808 if (!(var->flags & MONO_INST_IS_DEAD)) {
11809 if (pos < i) {
11810 cfg->varinfo [pos] = cfg->varinfo [i];
11811 cfg->varinfo [pos]->inst_c0 = pos;
11812 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11813 cfg->vars [pos].idx = pos;
11814 #if SIZEOF_REGISTER == 4
11815 if (cfg->varinfo [pos]->type == STACK_I8) {
11816 /* Modify the two component vars too */
11817 MonoInst *var1;
11819 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
11820 var1->inst_c0 = pos;
11821 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
11822 var1->inst_c0 = pos;
11824 #endif
11826 pos ++;
11829 cfg->num_varinfo = pos;
11830 if (cfg->locals_start > cfg->num_varinfo)
11831 cfg->locals_start = cfg->num_varinfo;
11835 /** mono_allocate_gsharedvt_vars:
11837 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
11838 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes. */
11840 void
11841 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
11843 int i;
11845 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
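/* Encoding used for cfg->gsharedvt_vreg_to_idx [vreg] by the loop below:
 *   0        -> not a gsharedvt variable
 *   idx + 1  -> a local, allocated to entries [idx]
 *   -1       -> an argument, accessed through a register offset */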
11847 for (i = 0; i < cfg->num_varinfo; ++i) {
11848 MonoInst *ins = cfg->varinfo [i];
11849 int idx;
11851 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
11852 if (i >= cfg->locals_start) {
11853 /* Local */
11854 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
11855 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
11856 ins->opcode = OP_GSHAREDVT_LOCAL;
11857 ins->inst_imm = idx;
11858 } else {
11859 /* Arg */
11860 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
11861 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
11868 /** mono_spill_global_vars:
11870 * Generate spill code for variables which are not allocated to registers,
11871 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11872 * code is generated which could be optimized by the local optimization passes. */
11874 void
11875 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11877 MonoBasicBlock *bb;
11878 char spec2 [16];
11879 int orig_next_vreg;
11880 guint32 *vreg_to_lvreg;
11881 guint32 *lvregs;
11882 guint32 i, lvregs_len, lvregs_size;
11883 gboolean dest_has_lvreg = FALSE;
11884 MonoStackType stacktypes [128];
11885 MonoInst **live_range_start, **live_range_end;
11886 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11888 *need_local_opts = FALSE;
11890 memset (spec2, 0, sizeof (spec2));
11892 /* FIXME: Move this function to mini.c */
11893 stacktypes [(int)'i'] = STACK_PTR;
11894 stacktypes [(int)'l'] = STACK_I8;
11895 stacktypes [(int)'f'] = STACK_R8;
11896 #ifdef MONO_ARCH_SIMD_INTRINSICS
11897 stacktypes [(int)'x'] = STACK_VTYPE;
11898 #endif
11900 #if SIZEOF_REGISTER == 4
11901 /* Create MonoInsts for longs */
11902 for (i = 0; i < cfg->num_varinfo; i++) {
11903 MonoInst *ins = cfg->varinfo [i];
11905 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11906 switch (ins->type) {
11907 case STACK_R8:
11908 case STACK_I8: {
11909 MonoInst *tree;
11911 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11912 break;
11914 g_assert (ins->opcode == OP_REGOFFSET);
11916 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
11917 g_assert (tree);
11918 tree->opcode = OP_REGOFFSET;
11919 tree->inst_basereg = ins->inst_basereg;
11920 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11922 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
11923 g_assert (tree);
11924 tree->opcode = OP_REGOFFSET;
11925 tree->inst_basereg = ins->inst_basereg;
11926 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11927 break;
11929 default:
11930 break;
11934 #endif
11936 if (cfg->compute_gc_maps) {
11937 /* registers need liveness info even for non-ref vars */
11938 for (i = 0; i < cfg->num_varinfo; i++) {
11939 MonoInst *ins = cfg->varinfo [i];
11941 if (ins->opcode == OP_REGVAR)
11942 ins->flags |= MONO_INST_GC_TRACK;
11946 /* FIXME: widening and truncation */
11949 /* As an optimization, when a variable allocated to the stack is first loaded into
11950 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11951 * the variable again. */
11953 orig_next_vreg = cfg->next_vreg;
11954 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11955 lvregs_size = 1024;
11956 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
11957 lvregs_len = 0;
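/* A note on the cache bookkeeping below: vreg_to_lvreg [vreg] holds the
 * lvreg currently caching 'vreg' (0 means none), while 'lvregs' records
 * which entries are in use so that only those need to be reset at each
 * bblock boundary (see the clearing loop at the top of the walk below). */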
11960 /* These arrays contain the first and last instructions accessing a given
11961 * variable.
11962 * Since we emit bblocks in the same order we process them here, and we
11963 * don't split live ranges, these will precisely describe the live range of
11964 * the variable, i.e. the instruction range where a valid value can be found
11965 * in the variable's location.
11966 * The live range is computed using the liveness info computed by the liveness pass.
11967 * We can't use vmv->range, since that is an abstract live range, and we need
11968 * one which is instruction precise.
11969 * FIXME: Variables used in out-of-line bblocks have a hole in their live range. */
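/* Concretely (a restatement of the declarations below): for each vreg v,
 * live_range_start [v] and live_range_end [v] bracket the first and last
 * instruction touching v, and live_range_start_bb [v]/live_range_end_bb [v]
 * record which bblocks those instructions belong to. */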
11971 /* FIXME: Only do this if debugging info is requested */
11972 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11973 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11974 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11975 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11977 /* Add spill loads/stores */
11978 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11979 MonoInst *ins;
11981 if (cfg->verbose_level > 2)
11982 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11984 /* Clear vreg_to_lvreg array */
11985 for (i = 0; i < lvregs_len; i++)
11986 vreg_to_lvreg [lvregs [i]] = 0;
11987 lvregs_len = 0;
11989 cfg->cbb = bb;
11990 MONO_BB_FOR_EACH_INS (bb, ins) {
11991 const char *spec = INS_INFO (ins->opcode);
11992 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11993 gboolean store, no_lvreg;
11994 int sregs [MONO_MAX_SRC_REGS];
11996 if (G_UNLIKELY (cfg->verbose_level > 2))
11997 mono_print_ins (ins);
11999 if (ins->opcode == OP_NOP)
12000 continue;
12003 /* We handle LDADDR here as well, since it can only be decomposed
12004 * when variable addresses are known. */
12006 if (ins->opcode == OP_LDADDR) {
12007 MonoInst *var = (MonoInst *)ins->inst_p0;
12009 if (var->opcode == OP_VTARG_ADDR) {
12010 /* Happens on SPARC/S390 where vtypes are passed by reference */
12011 MonoInst *vtaddr = var->inst_left;
12012 if (vtaddr->opcode == OP_REGVAR) {
12013 ins->opcode = OP_MOVE;
12014 ins->sreg1 = vtaddr->dreg;
12016 else if (vtaddr->opcode == OP_REGOFFSET) {
12017 ins->opcode = OP_LOAD_MEMBASE;
12018 ins->inst_basereg = vtaddr->inst_basereg;
12019 ins->inst_offset = vtaddr->inst_offset;
12020 } else
12021 NOT_IMPLEMENTED;
12022 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
12023 /* gsharedvt arg passed by ref */
12024 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12026 ins->opcode = OP_LOAD_MEMBASE;
12027 ins->inst_basereg = var->inst_basereg;
12028 ins->inst_offset = var->inst_offset;
12029 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
12030 MonoInst *load, *load2, *load3;
12031 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
12032 int reg1, reg2, reg3;
12033 MonoInst *info_var = cfg->gsharedvt_info_var;
12034 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12037 * gsharedvt local.
12038 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
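					/*
					 * Schematically, with hypothetical registers R1-R3, the
					 * emitted sequence is:
					 *   load   R1 <- [info_var]                       ; MonoGSharedVtMethodRuntimeInfo *
					 *   loadi4 R2 <- [R1 + entries + idx * ptr-size]  ; offset of this local
					 *   load   R3 <- [locals_var]                     ; base of the locals area
					 *   padd   dreg <- R3 + R2
					 */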
					g_assert (var->opcode == OP_GSHAREDVT_LOCAL);

					g_assert (info_var);
					g_assert (locals_var);

					/* Mark the instruction used to compute the locals var as used */
					cfg->gsharedvt_locals_var_ins = NULL;

					/* Load the offset */
					if (info_var->opcode == OP_REGOFFSET) {
						reg1 = alloc_ireg (cfg);
						NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
					} else if (info_var->opcode == OP_REGVAR) {
						load = NULL;
						reg1 = info_var->dreg;
					} else {
						g_assert_not_reached ();
					}
					reg2 = alloc_ireg (cfg);
					NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * TARGET_SIZEOF_VOID_P));
					/* Load the locals area address */
					reg3 = alloc_ireg (cfg);
					if (locals_var->opcode == OP_REGOFFSET) {
						NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
					} else if (locals_var->opcode == OP_REGVAR) {
						NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
					} else {
						g_assert_not_reached ();
					}
					/* Compute the address */
					ins->opcode = OP_PADD;
					ins->sreg1 = reg3;
					ins->sreg2 = reg2;

					mono_bblock_insert_before_ins (bb, ins, load3);
					mono_bblock_insert_before_ins (bb, load3, load2);
					if (load)
						mono_bblock_insert_before_ins (bb, load2, load);
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;
				}

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);
			}

			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();
			}

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is a
			 * src register.
			 * FIXME:
			 */
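			/*
			 * E.g. for OP_STOREI4_MEMBASE_REG the base register lives in
			 * ins->dreg even though it is really a source. Swapping dreg and
			 * sreg2 here (and back after processing, see the 'if (store)'
			 * block below) lets the sreg handling below treat the base
			 * register as an ordinary source; spec2 is adjusted to match.
			 */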
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < num_sregs; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");
			}

			/***************/
			/*    DREG     */
			/***************/
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;
			int dreg_using_dest_to_membase_op = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
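					/*
					 * E.g. (schematic), on targets providing such opcodes
					 * (x86/amd64), with V allocated to [fp + 0x10]:
					 *   iadd V <- V R6   =>   add_membase [fp + 0x10] <- R6
					 * saving the load/store pair around the add.
					 */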
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						dreg_using_dest_to_membase_op = ins->dreg;
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;
					}
					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;
					}

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

#if SIZEOF_REGISTER != 8
					if (regtype == 'l') {
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					}
					else
#endif
					{
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
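						/*
						 * E.g. (schematic): OP_ICONST R10 <- 5 with R10
						 * allocated to [fp + 0x10] becomes
						 *   OP_STOREI4_MEMBASE_IMM [fp + 0x10] <- 5
						 * so no scratch register is needed for the constant.
						 */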
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;
						}
					}
				}

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;
				}

				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
					MonoInst *tmp;

					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
					tmp->inst_c1 = dreg;
					mono_bblock_insert_after_ins (bb, def_ins, tmp);
				}
			}
			/************/
			/*  SREGS   */
			/************/
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;

						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
							MonoInst *tmp;

							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
							/* var->dreg is a hreg */
							tmp->inst_c1 = sreg;
							mono_bblock_insert_after_ins (bb, ins, tmp);
						}

						continue;
					}

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;
					}

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;
								}
								g_assert (sreg != -1);
								if (var->dreg == dreg_using_dest_to_membase_op) {
									if (cfg->verbose_level > 2)
										printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var->dreg);
								} else {
									vreg_to_lvreg [var->dreg] = sreg;

									if (lvregs_len >= lvregs_size) {
										guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
										memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
										lvregs = new_lvregs;
										lvregs_size *= 2;
									}
									lvregs [lvregs_len ++] = var->dreg;
								}
							}

							sregs [srcindex] = sreg;
							//mono_inst_set_src_registers (ins, sregs);

#if SIZEOF_REGISTER != 8
							if (regtype == 'l') {
								NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
								mono_bblock_insert_before_ins (bb, ins, load_ins);
								NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
								mono_bblock_insert_before_ins (bb, ins, load_ins);
								use_ins = load_ins;
							}
							else
#endif
							{
#if SIZEOF_REGISTER == 4
								g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
								NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
								mono_bblock_insert_before_ins (bb, ins, load_ins);
								use_ins = load_ins;
							}
						}
					}

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;
					}

					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
						MonoInst *tmp;

						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
						tmp->inst_c1 = var->dreg;
						mono_bblock_insert_after_ins (bb, ins, tmp);
					}
				}
			}
			mono_inst_set_src_registers (ins, sregs);
			if (dest_has_lvreg) {
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				if (lvregs_len >= lvregs_size) {
					guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
					memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
					lvregs = new_lvregs;
					lvregs_size *= 2;
				}
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;
			}

			if (store) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
			}

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);
			}

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);
		}
		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;
				}

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;
				}
			}
		}
	}

	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
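	/*
	 * Schematically, for variable index i with vreg R5, an
	 *   OP_LIVERANGE_START inst_c0=i inst_c1=R5
	 * is inserted right after the defining instruction, and the backend lowers
	 * it to MONO_VARINFO (cfg, i)->live_range_start = <current native offset>
	 * (likewise for OP_LIVERANGE_END at the last use).
	 */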
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			}
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
			}
		}
	}

	if (cfg->gsharedvt_locals_var_ins) {
		/* Nullify if unused */
		cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
		cfg->gsharedvt_locals_var_ins->inst_imm = 0;
	}

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
}
/*
 * FIXME:
 * - use 'iadd' instead of 'int_add'
 * - handling ovf opcodes: decompose in method_to_ir.
 * - unify iregs/fregs
 *   -> partly done, the missing parts are:
 *   - a more complete unification would involve unifying the hregs as well, so
 *     code wouldn't need if (fp) all over the place. but that would mean the hregs
 *     would no longer map to the machine hregs, so the code generators would need to
 *     be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
 *     wouldn't work any more. Duplicating the code in mono_local_regalloc () into
 *     fp/non-fp branches speeds it up by about 15%.
 * - use sext/zext opcodes instead of shifts
 * - add OP_ICALL
 * - get rid of TEMPLOADs if possible and use vregs instead
 * - clean up usage of OP_P/OP_ opcodes
 * - cleanup usage of DUMMY_USE
 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
 *   stack
 * - set the stack type and allocate a dreg in the EMIT_NEW macros
 * - get rid of all the <foo>2 stuff when the new JIT is ready.
 * - make sure handle_stack_args () is called before the branch is emitted
 * - when the new IR is done, get rid of all unused stuff
 * - COMPARE/BEQ as separate instructions or unify them?
 *   - keeping them separate allows specialized compare instructions like
 *     compare_imm, compare_membase
 *   - most back ends unify fp compare+branch, fp compare+ceq
 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
 * - handle long shift opts on 32 bit platforms somehow: they require
 *   3 sregs (2 for arg1 and 1 for arg2)
 * - make byref a 'normal' type.
 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
 *   variable if needed.
 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
 *   like inline_method.
 * - remove inlining restrictions
 * - fix LNEG and enable cfold of INEG
 * - generalize x86 optimizations like ldelema as a peephole optimization
 * - add store_mem_imm for amd64
 * - optimize the loading of the interruption flag in the managed->native wrappers
 * - avoid special handling of OP_NOP in passes
 * - move code inserting instructions into one function/macro.
 * - try a coalescing phase after liveness analysis
 * - add float -> vreg conversion + local optimizations on !x86
 * - figure out how to handle decomposed branches during optimizations, i.e.
 *   compare+branch, op_jump_table+op_br etc.
 * - promote RuntimeXHandles to vregs
 * - vtype cleanups:
 *   - add a NEW_VARLOADA_VREG macro
 *   - the vtype optimizations are blocked by the LDADDR opcodes generated for
 *     accessing vtype fields.
 * - get rid of I8CONST on 64 bit platforms
 * - dealing with the increase in code size due to branches created during opcode
 *   decomposition:
 *   - use extended basic blocks
 *     - all parts of the JIT
 *     - handle_global_vregs () && local regalloc
 *   - avoid introducing global vregs during decomposition, like 'vtable' in isinst
 *   - sources of increase in code size:
 *     - vtypes
 *     - long compares
 *     - isinst and castclass
 *     - lvregs not allocated to global registers even if used multiple times
 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
 *   meaningful.
 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
 * - add all micro optimizations from the old JIT
 * - put tree optimizations into the deadce pass
 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
 *   specific function.
 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
 *   fcompare + branchCC.
 * - create a helper function for allocating a stack slot, taking into account
 *   MONO_CFG_HAS_SPILLUP.
 * - merge r68207.
 * - optimize mono_regstate2_alloc_int/float.
 * - fix the pessimistic handling of variables accessed in exception handler blocks.
 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
 *   parts of the tree could be separated by other instructions, killing the tree
 *   arguments, or stores killing loads etc. Also, should we fold loads into other
 *   instructions if the result of the load is used multiple times?
 * - make the REM_IMM optimization in mini-x86.c arch-independent.
 * - LAST MERGE: 108395.
 * - when returning vtypes in registers, generate IR and append it to the end of the
 *   last bb instead of doing it in the epilog.
 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
 */
/*
NOTES
-----

- When to decompose opcodes:
  - earlier: this makes some optimizations hard to implement, since the low level IR
    no longer contains the necessary information. But it is easier to do.
  - later: harder to implement, enables more optimizations.
- Branches inside bblocks:
  - created when decomposing complex opcodes.
    - branches to another bblock: harmless, but not tracked by the branch
      optimizations, so need to branch to a label at the start of the bblock.
    - branches to inside the same bblock: very problematic, trips up the local
      reg allocator. Can be fixed by splitting the current bblock, but that is a
      complex operation, since some local vregs can become global vregs etc.
- Local/global vregs:
  - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
    local register allocator.
  - global vregs: used in more than one bblock. Have an associated MonoMethodVar
    structure, created by mono_create_var (). Assigned to hregs or the stack by
    the global register allocator.
- When to do optimizations like alu->alu_imm:
  - earlier -> saves work later on since the IR will be smaller/simpler
  - later -> can work on more instructions
- Handling of valuetypes:
  - When a vtype is pushed on the stack, a new temporary is created, an
    instruction computing its address (LDADDR) is emitted and pushed on
    the stack. Need to optimize cases when the vtype is used immediately as in
    argument passing, stloc etc.
- Instead of the to_end stuff in the old JIT, simply call the function handling
  the values on the stack before emitting the last instruction of the bb.
*/

#else /* !DISABLE_JIT */

MONO_EMPTY_SOURCE_FILE (method_to_ir);
#endif /* !DISABLE_JIT */