[llvmonly] Use right IMT slow path for lazy array interfaces (#4199)
[mono-project.git] / mono / mini / method-to-ir.c
blobb6aa5b538cb5e1e2dbd24ec52b75c8557c8f563f
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include <config.h>
15 #include <mono/utils/mono-compiler.h>
17 #ifndef DISABLE_JIT
19 #include <signal.h>
21 #ifdef HAVE_UNISTD_H
22 #include <unistd.h>
23 #endif
25 #include <math.h>
26 #include <string.h>
27 #include <ctype.h>
29 #ifdef HAVE_SYS_TIME_H
30 #include <sys/time.h>
31 #endif
33 #ifdef HAVE_ALLOCA_H
34 #include <alloca.h>
35 #endif
37 #include <mono/utils/memcheck.h>
38 #include "mini.h"
39 #include <mono/metadata/abi-details.h>
40 #include <mono/metadata/assembly.h>
41 #include <mono/metadata/attrdefs.h>
42 #include <mono/metadata/loader.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/class.h>
45 #include <mono/metadata/object.h>
46 #include <mono/metadata/exception.h>
47 #include <mono/metadata/opcodes.h>
48 #include <mono/metadata/mono-endian.h>
49 #include <mono/metadata/tokentype.h>
50 #include <mono/metadata/tabledefs.h>
51 #include <mono/metadata/marshal.h>
52 #include <mono/metadata/debug-helpers.h>
53 #include <mono/metadata/mono-debug.h>
54 #include <mono/metadata/mono-debug-debugger.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/metadata/debug-mono-symfile.h>
63 #include <mono/utils/mono-compiler.h>
64 #include <mono/utils/mono-memory-model.h>
65 #include <mono/utils/mono-error-internals.h>
66 #include <mono/metadata/mono-basic-block.h>
67 #include <mono/metadata/reflection-internals.h>
68 #include <mono/utils/mono-threads-coop.h>
70 #include "trace.h"
72 #include "ir-emit.h"
74 #include "jit-icalls.h"
75 #include "jit.h"
76 #include "debugger-agent.h"
77 #include "seq-points.h"
78 #include "aot-compiler.h"
79 #include "mini-llvm.h"
81 #define BRANCH_COST 10
82 #define INLINE_LENGTH_LIMIT 20
84 /* These have 'cfg' as an implicit argument */
85 #define INLINE_FAILURE(msg) do { \
86 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
87 inline_failure (cfg, msg); \
88 goto exception_exit; \
89 } \
90 } while (0)
91 #define CHECK_CFG_EXCEPTION do {\
92 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
93 goto exception_exit; \
94 } while (0)
95 #define FIELD_ACCESS_FAILURE(method, field) do { \
96 field_access_failure ((cfg), (method), (field)); \
97 goto exception_exit; \
98 } while (0)
99 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 if (cfg->gshared) { \
101 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
102 goto exception_exit; \
104 } while (0)
105 #define GSHAREDVT_FAILURE(opcode) do { \
106 if (cfg->gsharedvt) { \
107 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
108 goto exception_exit; \
110 } while (0)
111 #define OUT_OF_MEMORY_FAILURE do { \
112 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
113 mono_error_set_out_of_memory (&cfg->error, ""); \
114 goto exception_exit; \
115 } while (0)
116 #define DISABLE_AOT(cfg) do { \
117 if ((cfg)->verbose_level >= 2) \
118 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
119 (cfg)->disable_aot = TRUE; \
120 } while (0)
121 #define LOAD_ERROR do { \
122 break_on_unverified (); \
123 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
124 goto exception_exit; \
125 } while (0)
127 #define TYPE_LOAD_ERROR(klass) do { \
128 cfg->exception_ptr = klass; \
129 LOAD_ERROR; \
130 } while (0)
132 #define CHECK_CFG_ERROR do {\
133 if (!mono_error_ok (&cfg->error)) { \
134 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
135 goto mono_error_exit; \
137 } while (0)
139 /* Determine whether 'ins' represents a load of the 'this' argument */
140 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
142 static int ldind_to_load_membase (int opcode);
143 static int stind_to_store_membase (int opcode);
145 int mono_op_to_op_imm (int opcode);
146 int mono_op_to_op_imm_noemul (int opcode);
148 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
150 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
151 guchar *ip, guint real_offset, gboolean inline_always);
152 static MonoInst*
153 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
155 inline static MonoInst*
156 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
158 /* helper methods signatures */
159 static MonoMethodSignature *helper_sig_domain_get;
160 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
161 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
162 static MonoMethodSignature *helper_sig_jit_thread_attach;
163 static MonoMethodSignature *helper_sig_get_tls_tramp;
164 static MonoMethodSignature *helper_sig_set_tls_tramp;
166 /* type loading helpers */
167 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
168 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
171 * Instruction metadata
173 #ifdef MINI_OP
174 #undef MINI_OP
175 #endif
176 #ifdef MINI_OP3
177 #undef MINI_OP3
178 #endif
179 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
180 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
181 #define NONE ' '
182 #define IREG 'i'
183 #define FREG 'f'
184 #define VREG 'v'
185 #define XREG 'x'
186 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
187 #define LREG IREG
188 #else
189 #define LREG 'l'
190 #endif
191 /* keep in sync with the enum in mini.h */
192 const char
193 ins_info[] = {
194 #include "mini-ops.h"
196 #undef MINI_OP
197 #undef MINI_OP3
199 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
200 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
202 * This should contain the index of the last sreg + 1. This is not the same
203 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
205 const gint8 ins_sreg_counts[] = {
206 #include "mini-ops.h"
208 #undef MINI_OP
209 #undef MINI_OP3
211 #define MONO_INIT_VARINFO(vi,id) do { \
212 (vi)->range.first_use.pos.bid = 0xffff; \
213 (vi)->reg = -1; \
214 (vi)->idx = (id); \
215 } while (0)
217 guint32
218 mono_alloc_ireg (MonoCompile *cfg)
220 return alloc_ireg (cfg);
223 guint32
224 mono_alloc_lreg (MonoCompile *cfg)
226 return alloc_lreg (cfg);
229 guint32
230 mono_alloc_freg (MonoCompile *cfg)
232 return alloc_freg (cfg);
235 guint32
236 mono_alloc_preg (MonoCompile *cfg)
238 return alloc_preg (cfg);
241 guint32
242 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
244 return alloc_dreg (cfg, stack_type);
248 * mono_alloc_ireg_ref:
250 * Allocate an IREG, and mark it as holding a GC ref.
252 guint32
253 mono_alloc_ireg_ref (MonoCompile *cfg)
255 return alloc_ireg_ref (cfg);
259 * mono_alloc_ireg_mp:
261 * Allocate an IREG, and mark it as holding a managed pointer.
263 guint32
264 mono_alloc_ireg_mp (MonoCompile *cfg)
266 return alloc_ireg_mp (cfg);
270 * mono_alloc_ireg_copy:
272 * Allocate an IREG with the same GC type as VREG.
274 guint32
275 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
277 if (vreg_is_ref (cfg, vreg))
278 return alloc_ireg_ref (cfg);
279 else if (vreg_is_mp (cfg, vreg))
280 return alloc_ireg_mp (cfg);
281 else
282 return alloc_ireg (cfg);
285 guint
286 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
288 if (type->byref)
289 return OP_MOVE;
291 type = mini_get_underlying_type (type);
292 handle_enum:
293 switch (type->type) {
294 case MONO_TYPE_I1:
295 case MONO_TYPE_U1:
296 return OP_MOVE;
297 case MONO_TYPE_I2:
298 case MONO_TYPE_U2:
299 return OP_MOVE;
300 case MONO_TYPE_I4:
301 case MONO_TYPE_U4:
302 return OP_MOVE;
303 case MONO_TYPE_I:
304 case MONO_TYPE_U:
305 case MONO_TYPE_PTR:
306 case MONO_TYPE_FNPTR:
307 return OP_MOVE;
308 case MONO_TYPE_CLASS:
309 case MONO_TYPE_STRING:
310 case MONO_TYPE_OBJECT:
311 case MONO_TYPE_SZARRAY:
312 case MONO_TYPE_ARRAY:
313 return OP_MOVE;
314 case MONO_TYPE_I8:
315 case MONO_TYPE_U8:
316 #if SIZEOF_REGISTER == 8
317 return OP_MOVE;
318 #else
319 return OP_LMOVE;
320 #endif
321 case MONO_TYPE_R4:
322 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
323 case MONO_TYPE_R8:
324 return OP_FMOVE;
325 case MONO_TYPE_VALUETYPE:
326 if (type->data.klass->enumtype) {
327 type = mono_class_enum_basetype (type->data.klass);
328 goto handle_enum;
330 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
331 return OP_XMOVE;
332 return OP_VMOVE;
333 case MONO_TYPE_TYPEDBYREF:
334 return OP_VMOVE;
335 case MONO_TYPE_GENERICINST:
336 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
337 return OP_XMOVE;
338 type = &type->data.generic_class->container_class->byval_arg;
339 goto handle_enum;
340 case MONO_TYPE_VAR:
341 case MONO_TYPE_MVAR:
342 g_assert (cfg->gshared);
343 if (mini_type_var_is_vt (type))
344 return OP_VMOVE;
345 else
346 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
347 default:
348 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
350 return -1;
353 void
354 mono_print_bb (MonoBasicBlock *bb, const char *msg)
356 int i;
357 MonoInst *tree;
359 printf ("\n%s %d: [IN: ", msg, bb->block_num);
360 for (i = 0; i < bb->in_count; ++i)
361 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
362 printf (", OUT: ");
363 for (i = 0; i < bb->out_count; ++i)
364 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
365 printf (" ]\n");
366 for (tree = bb->code; tree; tree = tree->next)
367 mono_print_ins_index (-1, tree);
370 void
371 mono_create_helper_signatures (void)
373 helper_sig_domain_get = mono_create_icall_signature ("ptr");
374 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
375 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
376 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
377 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
378 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
381 static MONO_NEVER_INLINE void
382 break_on_unverified (void)
384 if (mini_get_debug_options ()->break_on_unverified)
385 G_BREAKPOINT ();
388 static MONO_NEVER_INLINE void
389 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
391 char *method_fname = mono_method_full_name (method, TRUE);
392 char *field_fname = mono_field_full_name (field);
393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
394 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
395 g_free (method_fname);
396 g_free (field_fname);
399 static MONO_NEVER_INLINE void
400 inline_failure (MonoCompile *cfg, const char *msg)
402 if (cfg->verbose_level >= 2)
403 printf ("inline failed: %s\n", msg);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
407 static MONO_NEVER_INLINE void
408 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 if (cfg->verbose_level > 2) \
411 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
412 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
415 static MONO_NEVER_INLINE void
416 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
418 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
419 if (cfg->verbose_level >= 2)
420 printf ("%s\n", cfg->exception_message);
421 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
425 * When using gsharedvt, some instantiations might be verifiable, and some might not be, e.g.
426 * foo<T> (int i) { ldarg.0; box T; }
428 #define UNVERIFIED do { \
429 if (cfg->gsharedvt) { \
430 if (cfg->verbose_level > 2) \
431 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
432 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
433 goto exception_exit; \
435 break_on_unverified (); \
436 goto unverified; \
437 } while (0)
439 #define GET_BBLOCK(cfg,tblock,ip) do { \
440 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
441 if (!(tblock)) { \
442 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
443 NEW_BBLOCK (cfg, (tblock)); \
444 (tblock)->cil_code = (ip); \
445 ADD_BBLOCK (cfg, (tblock)); \
447 } while (0)
449 #if defined(TARGET_X86) || defined(TARGET_AMD64)
450 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
451 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
452 (dest)->dreg = alloc_ireg_mp ((cfg)); \
453 (dest)->sreg1 = (sr1); \
454 (dest)->sreg2 = (sr2); \
455 (dest)->inst_imm = (imm); \
456 (dest)->backend.shift_amount = (shift); \
457 MONO_ADD_INS ((cfg)->cbb, (dest)); \
458 } while (0)
459 #endif
461 /* Emit conversions so both operands of a binary opcode are of the same type */
462 static void
463 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
465 MonoInst *arg1 = *arg1_ref;
466 MonoInst *arg2 = *arg2_ref;
468 if (cfg->r4fp &&
469 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
470 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
471 MonoInst *conv;
473 /* Mixing r4/r8 is allowed by the spec */
474 if (arg1->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
478 conv->type = STACK_R8;
479 ins->sreg1 = dreg;
480 *arg1_ref = conv;
482 if (arg2->type == STACK_R4) {
483 int dreg = alloc_freg (cfg);
485 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
486 conv->type = STACK_R8;
487 ins->sreg2 = dreg;
488 *arg2_ref = conv;
492 #if SIZEOF_REGISTER == 8
493 /* FIXME: Need to add many more cases */
494 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
495 MonoInst *widen;
497 int dr = alloc_preg (cfg);
498 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
499 (ins)->sreg2 = widen->dreg;
501 #endif
504 #define ADD_BINOP(op) do { \
505 MONO_INST_NEW (cfg, ins, (op)); \
506 sp -= 2; \
507 ins->sreg1 = sp [0]->dreg; \
508 ins->sreg2 = sp [1]->dreg; \
509 type_from_op (cfg, ins, sp [0], sp [1]); \
510 CHECK_TYPE (ins); \
511 /* Have to insert a widening op */ \
512 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
513 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
514 MONO_ADD_INS ((cfg)->cbb, (ins)); \
515 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
516 } while (0)
518 #define ADD_UNOP(op) do { \
519 MONO_INST_NEW (cfg, ins, (op)); \
520 sp--; \
521 ins->sreg1 = sp [0]->dreg; \
522 type_from_op (cfg, ins, sp [0], NULL); \
523 CHECK_TYPE (ins); \
524 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
525 MONO_ADD_INS ((cfg)->cbb, (ins)); \
526 *sp++ = mono_decompose_opcode (cfg, ins); \
527 } while (0)
529 #define ADD_BINCOND(next_block) do { \
530 MonoInst *cmp; \
531 sp -= 2; \
532 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
533 cmp->sreg1 = sp [0]->dreg; \
534 cmp->sreg2 = sp [1]->dreg; \
535 type_from_op (cfg, cmp, sp [0], sp [1]); \
536 CHECK_TYPE (cmp); \
537 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
538 type_from_op (cfg, ins, sp [0], sp [1]); \
539 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
540 GET_BBLOCK (cfg, tblock, target); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_true_bb = tblock; \
543 if ((next_block)) { \
544 link_bblock (cfg, cfg->cbb, (next_block)); \
545 ins->inst_false_bb = (next_block); \
546 start_new_bblock = 1; \
547 } else { \
548 GET_BBLOCK (cfg, tblock, ip); \
549 link_bblock (cfg, cfg->cbb, tblock); \
550 ins->inst_false_bb = tblock; \
551 start_new_bblock = 2; \
553 if (sp != stack_start) { \
554 handle_stack_args (cfg, stack_start, sp - stack_start); \
555 CHECK_UNVERIFIABLE (cfg); \
557 MONO_ADD_INS (cfg->cbb, cmp); \
558 MONO_ADD_INS (cfg->cbb, ins); \
559 } while (0)
561 /* *
562 * link_bblock: Links two basic blocks
564 * Links two basic blocks in the control flow graph: the 'from'
565 * argument is the starting block and the 'to' argument is the block
566 * that control flow reaches after 'from'.
568 static void
569 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
571 MonoBasicBlock **newa;
572 int i, found;
574 #if 0
575 if (from->cil_code) {
576 if (to->cil_code)
577 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
578 else
579 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
580 } else {
581 if (to->cil_code)
582 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
583 else
584 printf ("edge from entry to exit\n");
586 #endif
588 found = FALSE;
589 for (i = 0; i < from->out_count; ++i) {
590 if (to == from->out_bb [i]) {
591 found = TRUE;
592 break;
595 if (!found) {
596 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
597 for (i = 0; i < from->out_count; ++i) {
598 newa [i] = from->out_bb [i];
600 newa [i] = to;
601 from->out_count++;
602 from->out_bb = newa;
605 found = FALSE;
606 for (i = 0; i < to->in_count; ++i) {
607 if (from == to->in_bb [i]) {
608 found = TRUE;
609 break;
612 if (!found) {
613 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
614 for (i = 0; i < to->in_count; ++i) {
615 newa [i] = to->in_bb [i];
617 newa [i] = from;
618 to->in_count++;
619 to->in_bb = newa;
623 void
624 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
626 link_bblock (cfg, from, to);
630 * mono_find_block_region:
632 * We mark each basic block with a region ID. We use that to avoid BB
633 * optimizations when blocks are in different regions.
635 * Returns:
636 * A region token that encodes where this region is, and information
637 * about the clause owner for this block.
639 * The region encodes the try/catch/filter clause that owns this block
640 * as well as the type. -1 is a special value that represents a block
641 * that is in none of try/catch/filter.
643 static int
644 mono_find_block_region (MonoCompile *cfg, int offset)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
648 int i;
650 for (i = 0; i < header->num_clauses; ++i) {
651 clause = &header->clauses [i];
652 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
653 (offset < (clause->handler_offset)))
654 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
656 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
657 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
658 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
659 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
660 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
661 else
662 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
665 for (i = 0; i < header->num_clauses; ++i) {
666 clause = &header->clauses [i];
668 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
669 return ((i + 1) << 8) | clause->flags;
672 return -1;
675 static gboolean
676 ip_in_finally_clause (MonoCompile *cfg, int offset)
678 MonoMethodHeader *header = cfg->header;
679 MonoExceptionClause *clause;
680 int i;
682 for (i = 0; i < header->num_clauses; ++i) {
683 clause = &header->clauses [i];
684 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
685 continue;
687 if (MONO_OFFSET_IN_HANDLER (clause, offset))
688 return TRUE;
690 return FALSE;
693 static GList*
694 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
696 MonoMethodHeader *header = cfg->header;
697 MonoExceptionClause *clause;
698 int i;
699 GList *res = NULL;
701 for (i = 0; i < header->num_clauses; ++i) {
702 clause = &header->clauses [i];
703 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
704 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
705 if (clause->flags == type)
706 res = g_list_append (res, clause);
709 return res;
712 static void
713 mono_create_spvar_for_region (MonoCompile *cfg, int region)
715 MonoInst *var;
717 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
718 if (var)
719 return;
721 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
722 /* prevent it from being register allocated */
723 var->flags |= MONO_INST_VOLATILE;
725 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
728 MonoInst *
729 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
731 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
734 static MonoInst*
735 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
737 MonoInst *var;
739 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
740 if (var)
741 return var;
743 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
744 /* prevent it from being register allocated */
745 var->flags |= MONO_INST_VOLATILE;
747 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
749 return var;
753 * Returns the type used in the eval stack when @type is loaded.
754 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
756 void
757 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
759 MonoClass *klass;
761 type = mini_get_underlying_type (type);
762 inst->klass = klass = mono_class_from_mono_type (type);
763 if (type->byref) {
764 inst->type = STACK_MP;
765 return;
768 handle_enum:
769 switch (type->type) {
770 case MONO_TYPE_VOID:
771 inst->type = STACK_INV;
772 return;
773 case MONO_TYPE_I1:
774 case MONO_TYPE_U1:
775 case MONO_TYPE_I2:
776 case MONO_TYPE_U2:
777 case MONO_TYPE_I4:
778 case MONO_TYPE_U4:
779 inst->type = STACK_I4;
780 return;
781 case MONO_TYPE_I:
782 case MONO_TYPE_U:
783 case MONO_TYPE_PTR:
784 case MONO_TYPE_FNPTR:
785 inst->type = STACK_PTR;
786 return;
787 case MONO_TYPE_CLASS:
788 case MONO_TYPE_STRING:
789 case MONO_TYPE_OBJECT:
790 case MONO_TYPE_SZARRAY:
791 case MONO_TYPE_ARRAY:
792 inst->type = STACK_OBJ;
793 return;
794 case MONO_TYPE_I8:
795 case MONO_TYPE_U8:
796 inst->type = STACK_I8;
797 return;
798 case MONO_TYPE_R4:
799 inst->type = cfg->r4_stack_type;
800 break;
801 case MONO_TYPE_R8:
802 inst->type = STACK_R8;
803 return;
804 case MONO_TYPE_VALUETYPE:
805 if (type->data.klass->enumtype) {
806 type = mono_class_enum_basetype (type->data.klass);
807 goto handle_enum;
808 } else {
809 inst->klass = klass;
810 inst->type = STACK_VTYPE;
811 return;
813 case MONO_TYPE_TYPEDBYREF:
814 inst->klass = mono_defaults.typed_reference_class;
815 inst->type = STACK_VTYPE;
816 return;
817 case MONO_TYPE_GENERICINST:
818 type = &type->data.generic_class->container_class->byval_arg;
819 goto handle_enum;
820 case MONO_TYPE_VAR:
821 case MONO_TYPE_MVAR:
822 g_assert (cfg->gshared);
823 if (mini_is_gsharedvt_type (type)) {
824 g_assert (cfg->gsharedvt);
825 inst->type = STACK_VTYPE;
826 } else {
827 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
829 return;
830 default:
831 g_error ("unknown type 0x%02x in eval stack type", type->type);
836 * The following tables are used to quickly validate the IL code in type_from_op ().
838 static const char
839 bin_num_table [STACK_MAX] [STACK_MAX] = {
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
845 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
851 static const char
852 neg_table [] = {
853 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
856 /* reduce the size of this table */
857 static const char
858 bin_int_table [STACK_MAX] [STACK_MAX] = {
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
864 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
865 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
866 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
869 static const char
870 bin_comp_table [STACK_MAX] [STACK_MAX] = {
871 /* Inv i L p F & O vt r4 */
872 {0},
873 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
874 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
875 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
876 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
877 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
878 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
879 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
880 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
883 /* reduce the size of this table */
884 static const char
885 shift_table [STACK_MAX] [STACK_MAX] = {
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
888 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
889 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
890 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
891 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
892 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
893 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
897 * Tables to map from the non-specific opcode to the matching
898 * type-specific opcode.
900 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
901 static const guint16
902 binops_op_map [STACK_MAX] = {
903 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
906 /* handles from CEE_NEG to CEE_CONV_U8 */
907 static const guint16
908 unops_op_map [STACK_MAX] = {
909 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
912 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
913 static const guint16
914 ovfops_op_map [STACK_MAX] = {
915 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
918 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
919 static const guint16
920 ovf2ops_op_map [STACK_MAX] = {
921 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
924 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
925 static const guint16
926 ovf3ops_op_map [STACK_MAX] = {
927 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
930 /* handles from CEE_BEQ to CEE_BLT_UN */
931 static const guint16
932 beqops_op_map [STACK_MAX] = {
933 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
936 /* handles from CEE_CEQ to CEE_CLT_UN */
937 static const guint16
938 ceqops_op_map [STACK_MAX] = {
939 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
943 * Sets ins->type (the type on the eval stack) according to the
944 * type of the opcode and the arguments to it.
945 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
947 * FIXME: this function sets ins->type unconditionally in some cases, but
948 * it should set it to invalid for some types (a conv.x on an object)
950 static void
951 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
953 switch (ins->opcode) {
954 /* binops */
955 case CEE_ADD:
956 case CEE_SUB:
957 case CEE_MUL:
958 case CEE_DIV:
959 case CEE_REM:
960 /* FIXME: check unverifiable args for STACK_MP */
961 ins->type = bin_num_table [src1->type] [src2->type];
962 ins->opcode += binops_op_map [ins->type];
963 break;
964 case CEE_DIV_UN:
965 case CEE_REM_UN:
966 case CEE_AND:
967 case CEE_OR:
968 case CEE_XOR:
969 ins->type = bin_int_table [src1->type] [src2->type];
970 ins->opcode += binops_op_map [ins->type];
971 break;
972 case CEE_SHL:
973 case CEE_SHR:
974 case CEE_SHR_UN:
975 ins->type = shift_table [src1->type] [src2->type];
976 ins->opcode += binops_op_map [ins->type];
977 break;
978 case OP_COMPARE:
979 case OP_LCOMPARE:
980 case OP_ICOMPARE:
981 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
982 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
983 ins->opcode = OP_LCOMPARE;
984 else if (src1->type == STACK_R4)
985 ins->opcode = OP_RCOMPARE;
986 else if (src1->type == STACK_R8)
987 ins->opcode = OP_FCOMPARE;
988 else
989 ins->opcode = OP_ICOMPARE;
990 break;
991 case OP_ICOMPARE_IMM:
992 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
993 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
994 ins->opcode = OP_LCOMPARE_IMM;
995 break;
996 case CEE_BEQ:
997 case CEE_BGE:
998 case CEE_BGT:
999 case CEE_BLE:
1000 case CEE_BLT:
1001 case CEE_BNE_UN:
1002 case CEE_BGE_UN:
1003 case CEE_BGT_UN:
1004 case CEE_BLE_UN:
1005 case CEE_BLT_UN:
1006 ins->opcode += beqops_op_map [src1->type];
1007 break;
1008 case OP_CEQ:
1009 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1010 ins->opcode += ceqops_op_map [src1->type];
1011 break;
1012 case OP_CGT:
1013 case OP_CGT_UN:
1014 case OP_CLT:
1015 case OP_CLT_UN:
1016 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1017 ins->opcode += ceqops_op_map [src1->type];
1018 break;
1019 /* unops */
1020 case CEE_NEG:
1021 ins->type = neg_table [src1->type];
1022 ins->opcode += unops_op_map [ins->type];
1023 break;
1024 case CEE_NOT:
1025 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1026 ins->type = src1->type;
1027 else
1028 ins->type = STACK_INV;
1029 ins->opcode += unops_op_map [ins->type];
1030 break;
1031 case CEE_CONV_I1:
1032 case CEE_CONV_I2:
1033 case CEE_CONV_I4:
1034 case CEE_CONV_U4:
1035 ins->type = STACK_I4;
1036 ins->opcode += unops_op_map [src1->type];
1037 break;
1038 case CEE_CONV_R_UN:
1039 ins->type = STACK_R8;
1040 switch (src1->type) {
1041 case STACK_I4:
1042 case STACK_PTR:
1043 ins->opcode = OP_ICONV_TO_R_UN;
1044 break;
1045 case STACK_I8:
1046 ins->opcode = OP_LCONV_TO_R_UN;
1047 break;
1049 break;
1050 case CEE_CONV_OVF_I1:
1051 case CEE_CONV_OVF_U1:
1052 case CEE_CONV_OVF_I2:
1053 case CEE_CONV_OVF_U2:
1054 case CEE_CONV_OVF_I4:
1055 case CEE_CONV_OVF_U4:
1056 ins->type = STACK_I4;
1057 ins->opcode += ovf3ops_op_map [src1->type];
1058 break;
1059 case CEE_CONV_OVF_I_UN:
1060 case CEE_CONV_OVF_U_UN:
1061 ins->type = STACK_PTR;
1062 ins->opcode += ovf2ops_op_map [src1->type];
1063 break;
1064 case CEE_CONV_OVF_I1_UN:
1065 case CEE_CONV_OVF_I2_UN:
1066 case CEE_CONV_OVF_I4_UN:
1067 case CEE_CONV_OVF_U1_UN:
1068 case CEE_CONV_OVF_U2_UN:
1069 case CEE_CONV_OVF_U4_UN:
1070 ins->type = STACK_I4;
1071 ins->opcode += ovf2ops_op_map [src1->type];
1072 break;
1073 case CEE_CONV_U:
1074 ins->type = STACK_PTR;
1075 switch (src1->type) {
1076 case STACK_I4:
1077 ins->opcode = OP_ICONV_TO_U;
1078 break;
1079 case STACK_PTR:
1080 case STACK_MP:
1081 #if SIZEOF_VOID_P == 8
1082 ins->opcode = OP_LCONV_TO_U;
1083 #else
1084 ins->opcode = OP_MOVE;
1085 #endif
1086 break;
1087 case STACK_I8:
1088 ins->opcode = OP_LCONV_TO_U;
1089 break;
1090 case STACK_R8:
1091 ins->opcode = OP_FCONV_TO_U;
1092 break;
1094 break;
1095 case CEE_CONV_I8:
1096 case CEE_CONV_U8:
1097 ins->type = STACK_I8;
1098 ins->opcode += unops_op_map [src1->type];
1099 break;
1100 case CEE_CONV_OVF_I8:
1101 case CEE_CONV_OVF_U8:
1102 ins->type = STACK_I8;
1103 ins->opcode += ovf3ops_op_map [src1->type];
1104 break;
1105 case CEE_CONV_OVF_U8_UN:
1106 case CEE_CONV_OVF_I8_UN:
1107 ins->type = STACK_I8;
1108 ins->opcode += ovf2ops_op_map [src1->type];
1109 break;
1110 case CEE_CONV_R4:
1111 ins->type = cfg->r4_stack_type;
1112 ins->opcode += unops_op_map [src1->type];
1113 break;
1114 case CEE_CONV_R8:
1115 ins->type = STACK_R8;
1116 ins->opcode += unops_op_map [src1->type];
1117 break;
1118 case OP_CKFINITE:
1119 ins->type = STACK_R8;
1120 break;
1121 case CEE_CONV_U2:
1122 case CEE_CONV_U1:
1123 ins->type = STACK_I4;
1124 ins->opcode += ovfops_op_map [src1->type];
1125 break;
1126 case CEE_CONV_I:
1127 case CEE_CONV_OVF_I:
1128 case CEE_CONV_OVF_U:
1129 ins->type = STACK_PTR;
1130 ins->opcode += ovfops_op_map [src1->type];
1131 break;
1132 case CEE_ADD_OVF:
1133 case CEE_ADD_OVF_UN:
1134 case CEE_MUL_OVF:
1135 case CEE_MUL_OVF_UN:
1136 case CEE_SUB_OVF:
1137 case CEE_SUB_OVF_UN:
1138 ins->type = bin_num_table [src1->type] [src2->type];
1139 ins->opcode += ovfops_op_map [src1->type];
1140 if (ins->type == STACK_R8)
1141 ins->type = STACK_INV;
1142 break;
1143 case OP_LOAD_MEMBASE:
1144 ins->type = STACK_PTR;
1145 break;
1146 case OP_LOADI1_MEMBASE:
1147 case OP_LOADU1_MEMBASE:
1148 case OP_LOADI2_MEMBASE:
1149 case OP_LOADU2_MEMBASE:
1150 case OP_LOADI4_MEMBASE:
1151 case OP_LOADU4_MEMBASE:
1152 ins->type = STACK_PTR;
1153 break;
1154 case OP_LOADI8_MEMBASE:
1155 ins->type = STACK_I8;
1156 break;
1157 case OP_LOADR4_MEMBASE:
1158 ins->type = cfg->r4_stack_type;
1159 break;
1160 case OP_LOADR8_MEMBASE:
1161 ins->type = STACK_R8;
1162 break;
1163 default:
1164 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1165 break;
1168 if (ins->type == STACK_MP)
1169 ins->klass = mono_defaults.object_class;
1172 static const char
1173 ldind_type [] = {
1174 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0
/* Dead code, kept under #if 0 for reference. */

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/*
 * check_values_to_signature:
 *
 * Return 1 if the stack items in ARGS are compatible with the parameter
 * types of SIG (including the implicit 'this'), 0 otherwise.
 */
static int
check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
{
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			/* these can never be a valid 'this' */
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1242 * When we need a pointer to the current domain many times in a method, we
1243 * call mono_domain_get() once and we store the result in a local variable.
1244 * This function returns the variable that represents the MonoDomain*.
1246 inline static MonoInst *
1247 mono_get_domainvar (MonoCompile *cfg)
1249 if (!cfg->domainvar)
1250 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1251 return cfg->domainvar;
1255 * The got_var contains the address of the Global Offset Table when AOT
1256 * compiling.
1258 MonoInst *
1259 mono_get_got_var (MonoCompile *cfg)
1261 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1262 return NULL;
1263 if (!cfg->got_var) {
1264 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1266 return cfg->got_var;
1269 static MonoInst *
1270 mono_get_vtable_var (MonoCompile *cfg)
1272 g_assert (cfg->gshared);
1274 if (!cfg->rgctx_var) {
1275 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1276 /* force the var to be stack allocated */
1277 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1280 return cfg->rgctx_var;
1283 static MonoType*
1284 type_from_stack_type (MonoInst *ins) {
1285 switch (ins->type) {
1286 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1287 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1288 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1289 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1290 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1291 case STACK_MP:
1292 return &ins->klass->this_arg;
1293 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1294 case STACK_VTYPE: return &ins->klass->byval_arg;
1295 default:
1296 g_error ("stack type %d to monotype not handled\n", ins->type);
1298 return NULL;
1301 static G_GNUC_UNUSED int
1302 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1304 t = mono_type_get_underlying_type (t);
1305 switch (t->type) {
1306 case MONO_TYPE_I1:
1307 case MONO_TYPE_U1:
1308 case MONO_TYPE_I2:
1309 case MONO_TYPE_U2:
1310 case MONO_TYPE_I4:
1311 case MONO_TYPE_U4:
1312 return STACK_I4;
1313 case MONO_TYPE_I:
1314 case MONO_TYPE_U:
1315 case MONO_TYPE_PTR:
1316 case MONO_TYPE_FNPTR:
1317 return STACK_PTR;
1318 case MONO_TYPE_CLASS:
1319 case MONO_TYPE_STRING:
1320 case MONO_TYPE_OBJECT:
1321 case MONO_TYPE_SZARRAY:
1322 case MONO_TYPE_ARRAY:
1323 return STACK_OBJ;
1324 case MONO_TYPE_I8:
1325 case MONO_TYPE_U8:
1326 return STACK_I8;
1327 case MONO_TYPE_R4:
1328 return cfg->r4_stack_type;
1329 case MONO_TYPE_R8:
1330 return STACK_R8;
1331 case MONO_TYPE_VALUETYPE:
1332 case MONO_TYPE_TYPEDBYREF:
1333 return STACK_VTYPE;
1334 case MONO_TYPE_GENERICINST:
1335 if (mono_type_generic_inst_is_valuetype (t))
1336 return STACK_VTYPE;
1337 else
1338 return STACK_OBJ;
1339 break;
1340 default:
1341 g_assert_not_reached ();
1344 return -1;
1347 static MonoClass*
1348 array_access_to_klass (int opcode)
1350 switch (opcode) {
1351 case CEE_LDELEM_U1:
1352 return mono_defaults.byte_class;
1353 case CEE_LDELEM_U2:
1354 return mono_defaults.uint16_class;
1355 case CEE_LDELEM_I:
1356 case CEE_STELEM_I:
1357 return mono_defaults.int_class;
1358 case CEE_LDELEM_I1:
1359 case CEE_STELEM_I1:
1360 return mono_defaults.sbyte_class;
1361 case CEE_LDELEM_I2:
1362 case CEE_STELEM_I2:
1363 return mono_defaults.int16_class;
1364 case CEE_LDELEM_I4:
1365 case CEE_STELEM_I4:
1366 return mono_defaults.int32_class;
1367 case CEE_LDELEM_U4:
1368 return mono_defaults.uint32_class;
1369 case CEE_LDELEM_I8:
1370 case CEE_STELEM_I8:
1371 return mono_defaults.int64_class;
1372 case CEE_LDELEM_R4:
1373 case CEE_STELEM_R4:
1374 return mono_defaults.single_class;
1375 case CEE_LDELEM_R8:
1376 case CEE_STELEM_R8:
1377 return mono_defaults.double_class;
1378 case CEE_LDELEM_REF:
1379 case CEE_STELEM_REF:
1380 return mono_defaults.object_class;
1381 default:
1382 g_assert_not_reached ();
1384 return NULL;
1388 * We try to share variables when possible
1390 static MonoInst *
1391 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1393 MonoInst *res;
1394 int pos, vnum;
1396 /* inlining can result in deeper stacks */
1397 if (slot >= cfg->header->max_stack)
1398 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1400 pos = ins->type - 1 + slot * STACK_MAX;
1402 switch (ins->type) {
1403 case STACK_I4:
1404 case STACK_I8:
1405 case STACK_R8:
1406 case STACK_PTR:
1407 case STACK_MP:
1408 case STACK_OBJ:
1409 if ((vnum = cfg->intvars [pos]))
1410 return cfg->varinfo [vnum];
1411 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1412 cfg->intvars [pos] = res->inst_c0;
1413 break;
1414 default:
1415 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1417 return res;
1420 static void
1421 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1424 * Don't use this if a generic_context is set, since that means AOT can't
1425 * look up the method using just the image+token.
1426 * table == 0 means this is a reference made from a wrapper.
1428 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1429 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1430 jump_info_token->image = image;
1431 jump_info_token->token = token;
1432 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1437 * This function is called to handle items that are left on the evaluation stack
1438 * at basic block boundaries. What happens is that we save the values to local variables
1439 * and we reload them later when first entering the target basic block (with the
1440 * handle_loaded_temps () function).
1441 * A single joint point will use the same variables (stored in the array bb->out_stack or
1442 * bb->in_stack, if the basic block is before or after the joint point).
1444 * This function needs to be called _before_ emitting the last instruction of
1445 * the bb (i.e. before emitting a branch).
1446 * If the stack merge fails at a join point, cfg->unverifiable is set.
1448 static void
1449 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1451 int i, bindex;
1452 MonoBasicBlock *bb = cfg->cbb;
1453 MonoBasicBlock *outb;
1454 MonoInst *inst, **locals;
1455 gboolean found;
1457 if (!count)
1458 return;
1459 if (cfg->verbose_level > 3)
1460 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1461 if (!bb->out_scount) {
1462 bb->out_scount = count;
1463 //printf ("bblock %d has out:", bb->block_num);
1464 found = FALSE;
1465 for (i = 0; i < bb->out_count; ++i) {
1466 outb = bb->out_bb [i];
1467 /* exception handlers are linked, but they should not be considered for stack args */
1468 if (outb->flags & BB_EXCEPTION_HANDLER)
1469 continue;
1470 //printf (" %d", outb->block_num);
1471 if (outb->in_stack) {
1472 found = TRUE;
1473 bb->out_stack = outb->in_stack;
1474 break;
1477 //printf ("\n");
1478 if (!found) {
1479 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1480 for (i = 0; i < count; ++i) {
1482 * try to reuse temps already allocated for this purpouse, if they occupy the same
1483 * stack slot and if they are of the same type.
1484 * This won't cause conflicts since if 'local' is used to
1485 * store one of the values in the in_stack of a bblock, then
1486 * the same variable will be used for the same outgoing stack
1487 * slot as well.
1488 * This doesn't work when inlining methods, since the bblocks
1489 * in the inlined methods do not inherit their in_stack from
1490 * the bblock they are inlined to. See bug #58863 for an
1491 * example.
1493 if (cfg->inlined_method)
1494 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1495 else
1496 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1501 for (i = 0; i < bb->out_count; ++i) {
1502 outb = bb->out_bb [i];
1503 /* exception handlers are linked, but they should not be considered for stack args */
1504 if (outb->flags & BB_EXCEPTION_HANDLER)
1505 continue;
1506 if (outb->in_scount) {
1507 if (outb->in_scount != bb->out_scount) {
1508 cfg->unverifiable = TRUE;
1509 return;
1511 continue; /* check they are the same locals */
1513 outb->in_scount = count;
1514 outb->in_stack = bb->out_stack;
1517 locals = bb->out_stack;
1518 cfg->cbb = bb;
1519 for (i = 0; i < count; ++i) {
1520 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1521 inst->cil_code = sp [i]->cil_code;
1522 sp [i] = locals [i];
1523 if (cfg->verbose_level > 3)
1524 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1528 * It is possible that the out bblocks already have in_stack assigned, and
1529 * the in_stacks differ. In this case, we will store to all the different
1530 * in_stacks.
1533 found = TRUE;
1534 bindex = 0;
1535 while (found) {
1536 /* Find a bblock which has a different in_stack */
1537 found = FALSE;
1538 while (bindex < bb->out_count) {
1539 outb = bb->out_bb [bindex];
1540 /* exception handlers are linked, but they should not be considered for stack args */
1541 if (outb->flags & BB_EXCEPTION_HANDLER) {
1542 bindex++;
1543 continue;
1545 if (outb->in_stack != locals) {
1546 for (i = 0; i < count; ++i) {
1547 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1548 inst->cil_code = sp [i]->cil_code;
1549 sp [i] = locals [i];
1550 if (cfg->verbose_level > 3)
1551 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1553 locals = outb->in_stack;
1554 found = TRUE;
1555 break;
1557 bindex ++;
1562 static MonoInst*
1563 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1565 MonoInst *ins;
1567 if (cfg->compile_aot) {
1568 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1569 } else {
1570 MonoJumpInfo ji;
1571 gpointer target;
1572 MonoError error;
1574 ji.type = patch_type;
1575 ji.data.target = data;
1576 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1577 mono_error_assert_ok (&error);
1579 EMIT_NEW_PCONST (cfg, ins, target);
1581 return ins;
1584 MonoInst*
1585 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1587 return emit_runtime_constant (cfg, patch_type, data);
1590 static void
1591 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1593 int val_reg;
1595 g_assert (val == 0);
1597 if (align == 0)
1598 align = 4;
1600 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1601 switch (size) {
1602 case 1:
1603 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1604 return;
1605 case 2:
1606 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1607 return;
1608 case 4:
1609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1610 return;
1611 #if SIZEOF_REGISTER == 8
1612 case 8:
1613 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1614 return;
1615 #endif
1619 val_reg = alloc_preg (cfg);
1621 if (SIZEOF_REGISTER == 8)
1622 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1623 else
1624 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1626 if (align < 4) {
1627 /* This could be optimized further if neccesary */
1628 while (size >= 1) {
1629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1630 offset += 1;
1631 size -= 1;
1633 return;
1636 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1637 if (offset % 8) {
1638 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1639 offset += 4;
1640 size -= 4;
1642 while (size >= 8) {
1643 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1644 offset += 8;
1645 size -= 8;
1649 while (size >= 4) {
1650 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1651 offset += 4;
1652 size -= 4;
1654 while (size >= 2) {
1655 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1656 offset += 2;
1657 size -= 2;
1659 while (size >= 1) {
1660 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1661 offset += 1;
1662 size -= 1;
1666 void
1667 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1669 int cur_reg;
1671 if (align == 0)
1672 align = 4;
1674 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1675 g_assert (size < 10000);
1677 if (align < 4) {
1678 /* This could be optimized further if neccesary */
1679 while (size >= 1) {
1680 cur_reg = alloc_preg (cfg);
1681 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1683 doffset += 1;
1684 soffset += 1;
1685 size -= 1;
1689 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1690 while (size >= 8) {
1691 cur_reg = alloc_preg (cfg);
1692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1694 doffset += 8;
1695 soffset += 8;
1696 size -= 8;
1700 while (size >= 4) {
1701 cur_reg = alloc_preg (cfg);
1702 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1703 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1704 doffset += 4;
1705 soffset += 4;
1706 size -= 4;
1708 while (size >= 2) {
1709 cur_reg = alloc_preg (cfg);
1710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1712 doffset += 2;
1713 soffset += 2;
1714 size -= 2;
1716 while (size >= 1) {
1717 cur_reg = alloc_preg (cfg);
1718 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1720 doffset += 1;
1721 soffset += 1;
1722 size -= 1;
1726 static MonoInst*
1727 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1729 int tls_offset = mono_tls_get_tls_offset (key);
1731 if (cfg->compile_aot)
1732 return NULL;
1734 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1735 MonoInst *ins;
1736 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1737 ins->dreg = mono_alloc_preg (cfg);
1738 ins->inst_offset = tls_offset;
1739 return ins;
1741 return NULL;
1744 static MonoInst*
1745 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1747 int tls_offset = mono_tls_get_tls_offset (key);
1749 if (cfg->compile_aot)
1750 return NULL;
1752 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1753 MonoInst *ins;
1754 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1755 ins->sreg1 = value->dreg;
1756 ins->inst_offset = tls_offset;
1757 return ins;
1759 return NULL;
1763 MonoInst*
1764 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1766 MonoInst *fast_tls = NULL;
1768 if (!mini_get_debug_options ()->use_fallback_tls)
1769 fast_tls = mono_create_fast_tls_getter (cfg, key);
1771 if (fast_tls) {
1772 MONO_ADD_INS (cfg->cbb, fast_tls);
1773 return fast_tls;
1776 if (cfg->compile_aot) {
1777 MonoInst *addr;
1779 * tls getters are critical pieces of code and we don't want to resolve them
1780 * through the standard plt/tramp mechanism since we might expose ourselves
1781 * to crashes and infinite recursions.
1783 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1784 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1785 } else {
1786 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1787 return mono_emit_jit_icall (cfg, getter, NULL);
1791 static MonoInst*
1792 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1794 MonoInst *fast_tls = NULL;
1796 if (!mini_get_debug_options ()->use_fallback_tls)
1797 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1799 if (fast_tls) {
1800 MONO_ADD_INS (cfg->cbb, fast_tls);
1801 return fast_tls;
1804 if (cfg->compile_aot) {
1805 MonoInst *addr;
1806 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1807 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1808 } else {
1809 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1810 return mono_emit_jit_icall (cfg, setter, &value);
1815 * emit_push_lmf:
1817 * Emit IR to push the current LMF onto the LMF stack.
1819 static void
1820 emit_push_lmf (MonoCompile *cfg)
1823 * Emit IR to push the LMF:
1824 * lmf_addr = <lmf_addr from tls>
1825 * lmf->lmf_addr = lmf_addr
1826 * lmf->prev_lmf = *lmf_addr
1827 * *lmf_addr = lmf
1829 MonoInst *ins, *lmf_ins;
1831 if (!cfg->lmf_ir)
1832 return;
1834 if (cfg->lmf_ir_mono_lmf) {
1835 MonoInst *lmf_vara_ins, *lmf_ins;
1836 /* Load current lmf */
1837 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF);
1838 g_assert (lmf_ins);
1839 EMIT_NEW_VARLOADA (cfg, lmf_vara_ins, cfg->lmf_var, NULL);
1840 /* Save previous_lmf */
1841 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_vara_ins->dreg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1842 /* Set new LMF */
1843 mono_create_tls_set (cfg, lmf_vara_ins, TLS_KEY_LMF);
1844 } else {
1845 int lmf_reg, prev_lmf_reg;
1847 * Store lmf_addr in a variable, so it can be allocated to a global register.
1849 if (!cfg->lmf_addr_var)
1850 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1852 #ifdef HOST_WIN32
1853 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1854 g_assert (ins);
1855 int jit_tls_dreg = ins->dreg;
1857 lmf_reg = alloc_preg (cfg);
1858 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1859 #else
1860 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1861 g_assert (lmf_ins);
1862 #endif
1863 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1865 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1866 lmf_reg = ins->dreg;
1868 prev_lmf_reg = alloc_preg (cfg);
1869 /* Save previous_lmf */
1870 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1871 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
1872 /* Set new lmf */
1873 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1878 * emit_pop_lmf:
1880 * Emit IR to pop the current LMF from the LMF stack.
1882 static void
1883 emit_pop_lmf (MonoCompile *cfg)
1885 int lmf_reg, lmf_addr_reg;
1886 MonoInst *ins;
1888 if (!cfg->lmf_ir)
1889 return;
1891 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1892 lmf_reg = ins->dreg;
1894 if (cfg->lmf_ir_mono_lmf) {
1895 /* Load previous_lmf */
1896 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, alloc_preg (cfg), lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1897 /* Set new LMF */
1898 mono_create_tls_set (cfg, ins, TLS_KEY_LMF);
1899 } else {
1900 int prev_lmf_reg;
1902 * Emit IR to pop the LMF:
1903 * *(lmf->lmf_addr) = lmf->prev_lmf
1905 /* This could be called before emit_push_lmf () */
1906 if (!cfg->lmf_addr_var)
1907 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1908 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1910 prev_lmf_reg = alloc_preg (cfg);
1911 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1912 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
1916 static void
1917 emit_instrumentation_call (MonoCompile *cfg, void *func)
1919 MonoInst *iargs [1];
1922 * Avoid instrumenting inlined methods since it can
1923 * distort profiling results.
1925 if (cfg->method != cfg->current_method)
1926 return;
1928 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1929 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1930 mono_emit_jit_icall (cfg, func, iargs);
1934 static int
1935 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1937 handle_enum:
1938 type = mini_get_underlying_type (type);
1939 switch (type->type) {
1940 case MONO_TYPE_VOID:
1941 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1942 case MONO_TYPE_I1:
1943 case MONO_TYPE_U1:
1944 case MONO_TYPE_I2:
1945 case MONO_TYPE_U2:
1946 case MONO_TYPE_I4:
1947 case MONO_TYPE_U4:
1948 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1949 case MONO_TYPE_I:
1950 case MONO_TYPE_U:
1951 case MONO_TYPE_PTR:
1952 case MONO_TYPE_FNPTR:
1953 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1954 case MONO_TYPE_CLASS:
1955 case MONO_TYPE_STRING:
1956 case MONO_TYPE_OBJECT:
1957 case MONO_TYPE_SZARRAY:
1958 case MONO_TYPE_ARRAY:
1959 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1960 case MONO_TYPE_I8:
1961 case MONO_TYPE_U8:
1962 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1963 case MONO_TYPE_R4:
1964 if (cfg->r4fp)
1965 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1966 else
1967 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1968 case MONO_TYPE_R8:
1969 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1970 case MONO_TYPE_VALUETYPE:
1971 if (type->data.klass->enumtype) {
1972 type = mono_class_enum_basetype (type->data.klass);
1973 goto handle_enum;
1974 } else
1975 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1976 case MONO_TYPE_TYPEDBYREF:
1977 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1978 case MONO_TYPE_GENERICINST:
1979 type = &type->data.generic_class->container_class->byval_arg;
1980 goto handle_enum;
1981 case MONO_TYPE_VAR:
1982 case MONO_TYPE_MVAR:
1983 /* gsharedvt */
1984 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1985 default:
1986 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1988 return -1;
/* TRUE for primitive scalar types (boolean..u8 and native int/uint). */
//XXX this ignores if t is byref
#define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1995 * target_type_is_incompatible:
1996 * @cfg: MonoCompile context
1998 * Check that the item @arg on the evaluation stack can be stored
1999 * in the target type (can be a local, or field, etc).
2000 * The cfg arg can be used to check if we need verification or just
2001 * validity checks.
2003 * Returns: non-0 value if arg can't be stored on a target.
2005 static int
2006 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2008 MonoType *simple_type;
2009 MonoClass *klass;
2011 if (target->byref) {
2012 /* FIXME: check that the pointed to types match */
2013 if (arg->type == STACK_MP) {
2014 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2015 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2016 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2018 /* if the target is native int& or same type */
2019 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2020 return 0;
2022 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2023 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2024 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2025 return 0;
2026 return 1;
2028 if (arg->type == STACK_PTR)
2029 return 0;
2030 return 1;
2033 simple_type = mini_get_underlying_type (target);
2034 switch (simple_type->type) {
2035 case MONO_TYPE_VOID:
2036 return 1;
2037 case MONO_TYPE_I1:
2038 case MONO_TYPE_U1:
2039 case MONO_TYPE_I2:
2040 case MONO_TYPE_U2:
2041 case MONO_TYPE_I4:
2042 case MONO_TYPE_U4:
2043 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2044 return 1;
2045 return 0;
2046 case MONO_TYPE_PTR:
2047 /* STACK_MP is needed when setting pinned locals */
2048 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2049 return 1;
2050 return 0;
2051 case MONO_TYPE_I:
2052 case MONO_TYPE_U:
2053 case MONO_TYPE_FNPTR:
2055 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2056 * in native int. (#688008).
2058 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2059 return 1;
2060 return 0;
2061 case MONO_TYPE_CLASS:
2062 case MONO_TYPE_STRING:
2063 case MONO_TYPE_OBJECT:
2064 case MONO_TYPE_SZARRAY:
2065 case MONO_TYPE_ARRAY:
2066 if (arg->type != STACK_OBJ)
2067 return 1;
2068 /* FIXME: check type compatibility */
2069 return 0;
2070 case MONO_TYPE_I8:
2071 case MONO_TYPE_U8:
2072 if (arg->type != STACK_I8)
2073 return 1;
2074 return 0;
2075 case MONO_TYPE_R4:
2076 if (arg->type != cfg->r4_stack_type)
2077 return 1;
2078 return 0;
2079 case MONO_TYPE_R8:
2080 if (arg->type != STACK_R8)
2081 return 1;
2082 return 0;
2083 case MONO_TYPE_VALUETYPE:
2084 if (arg->type != STACK_VTYPE)
2085 return 1;
2086 klass = mono_class_from_mono_type (simple_type);
2087 if (klass != arg->klass)
2088 return 1;
2089 return 0;
2090 case MONO_TYPE_TYPEDBYREF:
2091 if (arg->type != STACK_VTYPE)
2092 return 1;
2093 klass = mono_class_from_mono_type (simple_type);
2094 if (klass != arg->klass)
2095 return 1;
2096 return 0;
2097 case MONO_TYPE_GENERICINST:
2098 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2099 MonoClass *target_class;
2100 if (arg->type != STACK_VTYPE)
2101 return 1;
2102 klass = mono_class_from_mono_type (simple_type);
2103 target_class = mono_class_from_mono_type (target);
2104 /* The second cases is needed when doing partial sharing */
2105 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2106 return 1;
2107 return 0;
2108 } else {
2109 if (arg->type != STACK_OBJ)
2110 return 1;
2111 /* FIXME: check type compatibility */
2112 return 0;
2114 case MONO_TYPE_VAR:
2115 case MONO_TYPE_MVAR:
2116 g_assert (cfg->gshared);
2117 if (mini_type_var_is_vt (simple_type)) {
2118 if (arg->type != STACK_VTYPE)
2119 return 1;
2120 } else {
2121 if (arg->type != STACK_OBJ)
2122 return 1;
2124 return 0;
2125 default:
2126 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2128 return 1;
2132 * Prepare arguments for passing to a function call.
2133 * Return a non-zero value if the arguments can't be passed to the given
2134 * signature.
2135 * The type checks are not yet complete and some conversions may need
2136 * casts on 32 or 64 bit architectures.
2138 * FIXME: implement this using target_type_is_incompatible ()
2140 static int
2141 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2143 MonoType *simple_type;
2144 int i;
2146 if (sig->hasthis) {
2147 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2148 return 1;
2149 args++;
2151 for (i = 0; i < sig->param_count; ++i) {
2152 if (sig->params [i]->byref) {
2153 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2154 return 1;
2155 continue;
2157 simple_type = mini_get_underlying_type (sig->params [i]);
2158 handle_enum:
2159 switch (simple_type->type) {
2160 case MONO_TYPE_VOID:
2161 return 1;
2162 continue;
2163 case MONO_TYPE_I1:
2164 case MONO_TYPE_U1:
2165 case MONO_TYPE_I2:
2166 case MONO_TYPE_U2:
2167 case MONO_TYPE_I4:
2168 case MONO_TYPE_U4:
2169 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2170 return 1;
2171 continue;
2172 case MONO_TYPE_I:
2173 case MONO_TYPE_U:
2174 case MONO_TYPE_PTR:
2175 case MONO_TYPE_FNPTR:
2176 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2177 return 1;
2178 continue;
2179 case MONO_TYPE_CLASS:
2180 case MONO_TYPE_STRING:
2181 case MONO_TYPE_OBJECT:
2182 case MONO_TYPE_SZARRAY:
2183 case MONO_TYPE_ARRAY:
2184 if (args [i]->type != STACK_OBJ)
2185 return 1;
2186 continue;
2187 case MONO_TYPE_I8:
2188 case MONO_TYPE_U8:
2189 if (args [i]->type != STACK_I8)
2190 return 1;
2191 continue;
2192 case MONO_TYPE_R4:
2193 if (args [i]->type != cfg->r4_stack_type)
2194 return 1;
2195 continue;
2196 case MONO_TYPE_R8:
2197 if (args [i]->type != STACK_R8)
2198 return 1;
2199 continue;
2200 case MONO_TYPE_VALUETYPE:
2201 if (simple_type->data.klass->enumtype) {
2202 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2203 goto handle_enum;
2205 if (args [i]->type != STACK_VTYPE)
2206 return 1;
2207 continue;
2208 case MONO_TYPE_TYPEDBYREF:
2209 if (args [i]->type != STACK_VTYPE)
2210 return 1;
2211 continue;
2212 case MONO_TYPE_GENERICINST:
2213 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2214 goto handle_enum;
2215 case MONO_TYPE_VAR:
2216 case MONO_TYPE_MVAR:
2217 /* gsharedvt */
2218 if (args [i]->type != STACK_VTYPE)
2219 return 1;
2220 continue;
2221 default:
2222 g_error ("unknown type 0x%02x in check_call_signature",
2223 simple_type->type);
2226 return 0;
2229 static int
2230 callvirt_to_call (int opcode)
2232 switch (opcode) {
2233 case OP_CALL_MEMBASE:
2234 return OP_CALL;
2235 case OP_VOIDCALL_MEMBASE:
2236 return OP_VOIDCALL;
2237 case OP_FCALL_MEMBASE:
2238 return OP_FCALL;
2239 case OP_RCALL_MEMBASE:
2240 return OP_RCALL;
2241 case OP_VCALL_MEMBASE:
2242 return OP_VCALL;
2243 case OP_LCALL_MEMBASE:
2244 return OP_LCALL;
2245 default:
2246 g_assert_not_reached ();
2249 return -1;
2252 static int
2253 callvirt_to_call_reg (int opcode)
2255 switch (opcode) {
2256 case OP_CALL_MEMBASE:
2257 return OP_CALL_REG;
2258 case OP_VOIDCALL_MEMBASE:
2259 return OP_VOIDCALL_REG;
2260 case OP_FCALL_MEMBASE:
2261 return OP_FCALL_REG;
2262 case OP_RCALL_MEMBASE:
2263 return OP_RCALL_REG;
2264 case OP_VCALL_MEMBASE:
2265 return OP_VCALL_REG;
2266 case OP_LCALL_MEMBASE:
2267 return OP_LCALL_REG;
2268 default:
2269 g_assert_not_reached ();
2272 return -1;
2275 /* Either METHOD or IMT_ARG needs to be set */
2276 static void
2277 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2279 int method_reg;
2281 if (COMPILE_LLVM (cfg)) {
2282 if (imt_arg) {
2283 method_reg = alloc_preg (cfg);
2284 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2285 } else {
2286 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2287 method_reg = ins->dreg;
2290 #ifdef ENABLE_LLVM
2291 call->imt_arg_reg = method_reg;
2292 #endif
2293 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2294 return;
2297 if (imt_arg) {
2298 method_reg = alloc_preg (cfg);
2299 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2300 } else {
2301 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2302 method_reg = ins->dreg;
2305 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch record from MP and initialize it with the
 * given IL offset, patch TYPE and TARGET. Memory is owned by the mempool, so
 * there is no matching free.
 */
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
{
	MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));

	ji->ip.i = ip;
	ji->type = type;
	ji->data.target = target;

	return ji;
}
2320 static int
2321 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2323 if (cfg->gshared)
2324 return mono_class_check_context_used (klass);
2325 else
2326 return 0;
2329 static int
2330 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2332 if (cfg->gshared)
2333 return mono_method_check_context_used (method);
2334 else
2335 return 0;
 * check_method_sharing:
 *
 *   Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
static void
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
{
	gboolean pass_vtable = FALSE;
	gboolean pass_mrgctx = FALSE;

	/* Static or valuetype methods of generic classes may need their vtable passed */
	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
		(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
		gboolean sharable = FALSE;

		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
			sharable = TRUE;

		/*
		 * Pass vtable iff target method might
		 * be shared, which means that sharing
		 * is enabled for its class and its
		 * context is sharable (and it's not a
		 * generic method).
		 */
		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
			pass_vtable = TRUE;
	}

	/* Generic methods (with a method_inst) take an mrgctx instead, never both */
	if (mini_method_get_context (cmethod) &&
		mini_method_get_context (cmethod)->method_inst) {
		g_assert (!pass_vtable);

		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
			pass_mrgctx = TRUE;
		} else {
			/* gsharedvt signatures also need the mrgctx even if not fully sharable */
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
				pass_mrgctx = TRUE;
		}
	}

	/* Both out-params are optional */
	if (out_pass_vtable)
		*out_pass_vtable = pass_vtable;
	if (out_pass_mrgctx)
		*out_pass_mrgctx = pass_mrgctx;
}
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * set up its return value handling, run the per-arch outgoing-argument
 * lowering, and return it. The instruction is NOT added to the current bblock;
 * callers do that. CALLI/VIRTUAL_/TAIL/RGCTX/UNBOX_TRAMPOLINE select the call
 * flavor.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
{
	MonoType *sig_ret;
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	int i;
#endif

	/* Tail calls are not supported in llvm-only mode */
	if (cfg->llvm_only)
		tail = FALSE;

	if (tail) {
		/* The method-leave profiler event must fire before control leaves the frame */
		emit_instrumentation_call (cfg, mono_profiler_method_leave);

		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	} else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));

	call->args = args;
	call->signature = sig;
	call->rgctx_reg = rgctx;
	sig_ret = mini_get_underlying_type (sig->ret);

	type_to_eval_stack_type ((cfg), sig_ret, &call->inst);

	if (tail) {
		if (mini_type_is_vtype (sig_ret)) {
			/* Tail calls returning a vtype reuse the caller's vret address */
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (mini_type_is_vtype (sig_ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig_ret))
		call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

	call->need_unbox_trampoline = unbox_trampoline;

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Reserve enough outgoing-parameter stack space for the largest call seen */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
2492 static void
2493 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2495 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2496 cfg->uses_rgctx_reg = TRUE;
2497 call->rgctx_reg = TRUE;
2498 #ifdef ENABLE_LLVM
2499 call->rgctx_arg_reg = rgctx_reg;
2500 #endif
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG and RGCTX_ARG are optional extra hidden arguments. For pinvoke
 * wrappers with callconv checking enabled, the stack pointer is saved before
 * and compared after the call, throwing ExecutionEngineException on mismatch.
 */
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
	MonoCallInst *call;
	MonoInst *ins;
	int rgctx_reg = -1;
	gboolean check_sp = FALSE;

	/* Only verify the SP for the pinvoke subtype of managed-to-native wrappers */
	if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);

		if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
			check_sp = TRUE;
	}

	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}

	if (check_sp) {
		/* Snapshot SP before the call so it can be compared afterwards */
		if (!cfg->stack_inbalance_var)
			cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);

		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);

	/* sreg1 holds the call target for indirect calls */
	call->inst.sreg1 = addr->dreg;

	if (imt_arg)
		emit_imt_argument (cfg, call, NULL, imt_arg);

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	if (check_sp) {
		int sp_reg;

		sp_reg = mono_alloc_preg (cfg);

		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = sp_reg;
		MONO_ADD_INS (cfg->cbb, ins);

		/* Restore the stack so we don't crash when throwing the exception */
		MONO_INST_NEW (cfg, ins, OP_SET_SP);
		ins->sreg1 = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
	}

	if (rgctx_arg)
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);

	return (MonoInst*)call;
}
2565 static MonoInst*
2566 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2568 static MonoInst*
2569 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2571 static MonoInst*
2572 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2573 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2575 #ifndef DISABLE_REMOTING
2576 gboolean might_be_remote = FALSE;
2577 #endif
2578 gboolean virtual_ = this_ins != NULL;
2579 gboolean enable_for_aot = TRUE;
2580 int context_used;
2581 MonoCallInst *call;
2582 MonoInst *call_target = NULL;
2583 int rgctx_reg = 0;
2584 gboolean need_unbox_trampoline;
2586 if (!sig)
2587 sig = mono_method_signature (method);
2589 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2590 g_assert_not_reached ();
2592 if (rgctx_arg) {
2593 rgctx_reg = mono_alloc_preg (cfg);
2594 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2597 if (method->string_ctor) {
2598 /* Create the real signature */
2599 /* FIXME: Cache these */
2600 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2601 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2603 sig = ctor_sig;
2606 context_used = mini_method_check_context_used (cfg, method);
2608 #ifndef DISABLE_REMOTING
2609 might_be_remote = this_ins && sig->hasthis &&
2610 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2611 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2613 if (might_be_remote && context_used) {
2614 MonoInst *addr;
2616 g_assert (cfg->gshared);
2618 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2620 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2622 #endif
2624 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2625 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2627 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2629 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2631 #ifndef DISABLE_REMOTING
2632 if (might_be_remote)
2633 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2634 else
2635 #endif
2636 call->method = method;
2637 call->inst.flags |= MONO_INST_HAS_METHOD;
2638 call->inst.inst_left = this_ins;
2639 call->tail_call = tail;
2641 if (virtual_) {
2642 int vtable_reg, slot_reg, this_reg;
2643 int offset;
2645 this_reg = this_ins->dreg;
2647 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2648 MonoInst *dummy_use;
2650 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2652 /* Make a call to delegate->invoke_impl */
2653 call->inst.inst_basereg = this_reg;
2654 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2655 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2657 /* We must emit a dummy use here because the delegate trampoline will
2658 replace the 'this' argument with the delegate target making this activation
2659 no longer a root for the delegate.
2660 This is an issue for delegates that target collectible code such as dynamic
2661 methods of GC'able assemblies.
2663 For a test case look into #667921.
2665 FIXME: a dummy use is not the best way to do it as the local register allocator
2666 will put it on a caller save register and spil it around the call.
2667 Ideally, we would either put it on a callee save register or only do the store part.
2669 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2671 return (MonoInst*)call;
2674 if ((!cfg->compile_aot || enable_for_aot) &&
2675 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2676 (MONO_METHOD_IS_FINAL (method) &&
2677 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2678 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2680 * the method is not virtual, we just need to ensure this is not null
2681 * and then we can call the method directly.
2683 #ifndef DISABLE_REMOTING
2684 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2686 * The check above ensures method is not gshared, this is needed since
2687 * gshared methods can't have wrappers.
2689 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2691 #endif
2693 if (!method->string_ctor)
2694 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2696 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2697 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2699 * the method is virtual, but we can statically dispatch since either
2700 * it's class or the method itself are sealed.
2701 * But first we need to ensure it's not a null reference.
2703 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2705 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2706 } else if (call_target) {
2707 vtable_reg = alloc_preg (cfg);
2708 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2710 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2711 call->inst.sreg1 = call_target->dreg;
2712 call->inst.flags &= !MONO_INST_HAS_METHOD;
2713 } else {
2714 vtable_reg = alloc_preg (cfg);
2715 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2716 if (mono_class_is_interface (method->klass)) {
2717 guint32 imt_slot = mono_method_get_imt_slot (method);
2718 emit_imt_argument (cfg, call, call->method, imt_arg);
2719 slot_reg = vtable_reg;
2720 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2721 } else {
2722 slot_reg = vtable_reg;
2723 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2724 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2725 if (imt_arg) {
2726 g_assert (mono_method_signature (method)->generic_param_count);
2727 emit_imt_argument (cfg, call, call->method, imt_arg);
2731 call->inst.sreg1 = slot_reg;
2732 call->inst.inst_offset = offset;
2733 call->is_virtual = TRUE;
2737 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2739 if (rgctx_arg)
2740 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2742 return (MonoInst*)call;
2745 MonoInst*
2746 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2748 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2751 MonoInst*
2752 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2753 MonoInst **args)
2755 MonoCallInst *call;
2757 g_assert (sig);
2759 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2760 call->fptr = func;
2762 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2764 return (MonoInst*)call;
2767 MonoInst*
2768 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2770 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2772 g_assert (info);
2774 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2778 * mono_emit_abs_call:
2780 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2782 inline static MonoInst*
2783 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2784 MonoMethodSignature *sig, MonoInst **args)
2786 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2787 MonoInst *ins;
2790 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2791 * handle it.
2793 if (cfg->abs_patches == NULL)
2794 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2795 g_hash_table_insert (cfg->abs_patches, ji, ji);
2796 ins = mono_emit_native_call (cfg, ji, sig, args);
2797 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2798 return ins;
2801 static MonoMethodSignature*
2802 sig_to_rgctx_sig (MonoMethodSignature *sig)
2804 // FIXME: memory allocation
2805 MonoMethodSignature *res;
2806 int i;
2808 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2809 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2810 res->param_count = sig->param_count + 1;
2811 for (i = 0; i < sig->param_count; ++i)
2812 res->params [i] = sig->params [i];
2813 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2814 return res;
2817 /* Make an indirect call to FSIG passing an additional argument */
2818 static MonoInst*
2819 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2821 MonoMethodSignature *csig;
2822 MonoInst *args_buf [16];
2823 MonoInst **args;
2824 int i, pindex, tmp_reg;
2826 /* Make a call with an rgctx/extra arg */
2827 if (fsig->param_count + 2 < 16)
2828 args = args_buf;
2829 else
2830 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2831 pindex = 0;
2832 if (fsig->hasthis)
2833 args [pindex ++] = orig_args [0];
2834 for (i = 0; i < fsig->param_count; ++i)
2835 args [pindex ++] = orig_args [fsig->hasthis + i];
2836 tmp_reg = alloc_preg (cfg);
2837 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2838 csig = sig_to_rgctx_sig (fsig);
2839 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2842 /* Emit an indirect call to the function descriptor ADDR */
2843 static MonoInst*
2844 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2846 int addr_reg, arg_reg;
2847 MonoInst *call_target;
2849 g_assert (cfg->llvm_only);
2852 * addr points to a <addr, arg> pair, load both of them, and
2853 * make a call to addr, passing arg as an extra arg.
2855 addr_reg = alloc_preg (cfg);
2856 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2857 arg_reg = alloc_preg (cfg);
2858 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2860 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
2863 static gboolean
2864 direct_icalls_enabled (MonoCompile *cfg)
2866 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2867 #ifdef TARGET_AMD64
2868 if (cfg->compile_llvm && !cfg->llvm_only)
2869 return FALSE;
2870 #endif
2871 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
2872 return FALSE;
2873 return TRUE;
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO, inlining its wrapper when the
 * icall cannot raise and direct icalls are enabled; otherwise call through the
 * wrapper. Returns the instruction producing the call's result.
 */
MonoInst*
mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
{
	/*
	 * Call the jit icall without a wrapper if possible.
	 * The wrapper is needed for the following reasons:
	 * - to handle exceptions thrown using mono_raise_exceptions () from the
	 *   icall function. The EH code needs the lmf frame pushed by the
	 *   wrapper to be able to unwind back to managed code.
	 * - to be able to do stack walks for asynchronously suspended
	 *   threads when debugging.
	 */
	if (info->no_raise && direct_icalls_enabled (cfg)) {
		char *name;
		int costs;

		if (!info->wrapper_method) {
			/* Lazily create the wrapper; the barrier publishes the fully-constructed method */
			name = g_strdup_printf ("__icall_wrapper_%s", info->name);
			info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
			g_free (name);
			mono_memory_barrier ();
		}

		/*
		 * Inline the wrapper method, which is basically a call to the C icall, and
		 * an exception check.
		 */
		costs = inline_method (cfg, info->wrapper_method, NULL,
							   args, NULL, il_offset, TRUE);
		g_assert (costs > 0);
		g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));

		/* inline_method stores the inlined return value back into args [0] */
		return args [0];
	} else {
		return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
	}
}
2914 static MonoInst*
2915 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2917 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2918 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2919 int widen_op = -1;
2922 * Native code might return non register sized integers
2923 * without initializing the upper bits.
2925 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2926 case OP_LOADI1_MEMBASE:
2927 widen_op = OP_ICONV_TO_I1;
2928 break;
2929 case OP_LOADU1_MEMBASE:
2930 widen_op = OP_ICONV_TO_U1;
2931 break;
2932 case OP_LOADI2_MEMBASE:
2933 widen_op = OP_ICONV_TO_I2;
2934 break;
2935 case OP_LOADU2_MEMBASE:
2936 widen_op = OP_ICONV_TO_U2;
2937 break;
2938 default:
2939 break;
2942 if (widen_op != -1) {
2943 int dreg = alloc_preg (cfg);
2944 MonoInst *widen;
2946 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2947 widen->type = ins->type;
2948 ins = widen;
2953 return ins;
2957 static void
2958 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
2960 MonoInst *args [16];
2962 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
2963 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
2965 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2968 static MonoMethod*
2969 get_memcpy_method (void)
2971 static MonoMethod *memcpy_method = NULL;
2972 if (!memcpy_method) {
2973 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2974 if (!memcpy_method)
2975 g_error ("Old corlib found. Install a new one");
2977 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Compute, into WB_BITMAP, a bitmap with one bit per pointer-sized slot of
 * KLASS (starting at byte OFFSET) marking which slots hold object references
 * and therefore need a write barrier. Recurses into embedded valuetype fields
 * that contain references.
 */
static void
create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
{
	MonoClassField *field;
	gpointer iter = NULL;

	while ((field = mono_class_get_fields (klass, &iter))) {
		int foffset;

		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		/* For valuetypes, field offsets include the (absent) object header; strip it */
		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
		if (mini_type_is_reference (mono_field_get_type (field))) {
			/* Reference fields must be pointer-aligned for the per-slot bitmap to work */
			g_assert ((foffset % SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
		} else {
			MonoClass *field_class = mono_class_from_mono_type (field->type);
			if (field_class->has_references)
				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
		}
	}
}
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE at the address in PTR.
 * Picks the fastest available form: an arch-specific card-table opcode, an
 * inline card-table mark, or a call to the generic write-barrier method.
 * No-op when write barriers are disabled.
 */
static void
emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
	int card_table_shift_bits;
	gpointer card_table_mask;
	guint8 *card_table;
	MonoInst *dummy_use;
	int nursery_shift_bits;
	size_t nursery_size;

	if (!cfg->gen_write_barriers)
		return;

	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);

	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);

	if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
		/* Fast path: dedicated backend opcode performing the card mark */
		MonoInst *wbarrier;

		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
		wbarrier->sreg1 = ptr->dreg;
		wbarrier->sreg2 = value->dreg;
		MONO_ADD_INS (cfg->cbb, wbarrier);
	} else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
		/* Inline card mark: card_table [ptr >> shift] = 1 */
		int offset_reg = alloc_preg (cfg);
		int card_reg;
		MonoInst *ins;

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
		if (card_table_mask)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);

		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
		 * IMM's larger than 32bits.
		 */
		ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
		card_reg = ins->dreg;

		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
	} else {
		/* Slow path: call the generic managed write barrier */
		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	}

	/* Keep VALUE alive across the barrier for the register allocator */
	EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled copy of a valuetype of KLASS (SIZE bytes, ALIGN
 * alignment) from iargs[1] to iargs[0], emitting write barriers for the
 * reference-holding slots identified by create_write_barrier_bitmap ().
 * Returns FALSE when the copy cannot be handled here (bad alignment or too
 * large a bitmap); returns TRUE when code was emitted, possibly as a call to
 * the mono_gc_wbarrier_value_copy_bitmap icall for larger copies.
 */
static gboolean
mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
{
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	unsigned need_wb = 0;

	if (align == 0)
		align = 4;

	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < SIZEOF_VOID_P)
		return FALSE;

	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
	if (size > 32 * SIZEOF_VOID_P)
		return FALSE;

	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);

	/* We don't unroll more than 5 stores to avoid code bloat. */
	if (size > 5 * SIZEOF_VOID_P) {
		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
		size += (SIZEOF_VOID_P - 1);
		size &= ~(SIZEOF_VOID_P - 1);

		EMIT_NEW_ICONST (cfg, iargs [2], size);
		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
		return TRUE;
	}

	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;
	offset = 0;

	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);

	/*tmp = dreg*/
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);

	/* Copy pointer-sized chunks, barriering each slot flagged in need_wb */
	while (size >= SIZEOF_VOID_P) {
		MonoInst *load_inst;
		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
		load_inst->dreg = tmp_reg;
		load_inst->inst_basereg = srcreg;
		load_inst->inst_offset = offset;
		MONO_ADD_INS (cfg->cbb, load_inst);

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);

		if (need_wb & 0x1)
			emit_write_barrier (cfg, iargs [0], load_inst);

		offset += SIZEOF_VOID_P;
		size -= SIZEOF_VOID_P;
		need_wb >>= 1;

		/*tmp += sizeof (void*)*/
		if (size >= SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);
		}
	}

	/* Those cannot be references since size < sizeof (void*) */
	while (size >= 4) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 4;
		size -= 4;
	}

	while (size >= 2) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 2;
		size -= 2;
	}

	while (size >= 1) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 1;
		size -= 1;
	}

	return TRUE;
}
/*
 * mini_emit_stobj:
 *
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 * If @native is TRUE the native (marshalled) size/layout is used; native
 * structs are assumed to contain no GC references, so no write barriers
 * are emitted for them.
 */
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
	MonoInst *iargs [4];
	int n;
	guint32 align = 0;
	MonoMethod *memcpy_method;
	/* Non-NULL only for gsharedvt classes: runtime-computed size / memcpy helper. */
	MonoInst *size_ins = NULL;
	MonoInst *memcpy_ins = NULL;

	g_assert (klass);
	if (cfg->gshared)
		/* Normalize to the underlying type so shared code sees a stable class. */
		klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));

	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (mini_is_gsharedvt_klass (klass)) {
		/* Size is not known at compile time; fetch it (and a memcpy helper) from the rgctx. */
		g_assert (!native);
		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
	}

	if (native)
		n = mono_class_native_size (klass, &align);
	else
		n = mono_class_value_size (klass, &align);

	/* if native is true there should be no references in the struct */
	if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {
			int context_used;

			iargs [0] = dest;
			iargs [1] = src;

			context_used = mini_class_check_context_used (cfg, klass);

			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
				/* Inlined copy with interleaved barriers; nothing more to do. */
				return;
			} else if (context_used) {
				iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
			} else {
				iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
				/* The value-copy icall needs the GC descriptor; precompute it when JITting. */
				if (!cfg->compile_aot)
					mono_class_compute_gc_descriptor (klass);
			}

			if (size_ins)
				mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
			else
				mono_emit_jit_icall (cfg, mono_value_copy, iargs);
			return;
		}
	}

	/* No barriers needed: pick an inline memcpy for small fixed sizes, a call otherwise. */
	if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
	} else {
		iargs [0] = dest;
		iargs [1] = src;
		if (size_ins)
			iargs [2] = size_ins;
		else
			EMIT_NEW_ICONST (cfg, iargs [2], n);

		memcpy_method = get_memcpy_method ();
		if (memcpy_ins)
			/* gsharedvt: call the rgctx-provided memcpy through an indirect call. */
			mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
		else
			mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
	}
}
3226 static MonoMethod*
3227 get_memset_method (void)
3229 static MonoMethod *memset_method = NULL;
3230 if (!memset_method) {
3231 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3232 if (!memset_method)
3233 g_error ("Old corlib found. Install a new one");
3235 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit IR to zero-initialize the valuetype of type @klass whose address is
 * in @dest->dreg (the CIL `initobj` semantics). @ip is unused here beyond
 * matching the caller's convention — TODO confirm.
 */
void
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
	MonoInst *iargs [3];
	int n;
	guint32 align;
	MonoMethod *memset_method;
	MonoInst *size_ins = NULL;
	MonoInst *bzero_ins = NULL;
	static MonoMethod *bzero_method;

	/* FIXME: Optimize this for the case when dest is an LDADDR */
	mono_class_init (klass);
	if (mini_is_gsharedvt_klass (klass)) {
		/* Size is only known at runtime: fetch it and a bzero helper from the rgctx. */
		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
		if (!bzero_method)
			bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
		g_assert (bzero_method);
		iargs [0] = dest;
		iargs [1] = size_ins;
		/* Indirect call through the rgctx-provided bzero implementation. */
		mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
		return;
	}

	klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));

	n = mono_class_value_size (klass, &align);

	/* Small structs: inline stores; large ones: call the managed memset helper. */
	if (n <= sizeof (gpointer) * 8) {
		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
	}
	else {
		memset_method = get_memset_method ();
		iargs [0] = dest;
		EMIT_NEW_ICONST (cfg, iargs [1], 0);
		EMIT_NEW_ICONST (cfg, iargs [2], n);
		mono_emit_method_call (cfg, memset_method, iargs, NULL);
	}
}
/*
 * emit_get_rgctx:
 *
 * Emit IR to return either the this pointer for instance method,
 * or the mrgctx for static methods.
 * The result is the instruction whose dreg holds the rgctx source:
 * - generic methods (MONO_GENERIC_CONTEXT_USED_METHOD): the MRGCTX var
 * - static / valuetype methods: the vtable var (dereferenced once when the
 *   method is inflated with a method context, since the var then holds an mrgctx)
 * - other instance methods: the receiver's vtable loaded from `this`
 */
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
{
	MonoInst *this_ins = NULL;

	g_assert (cfg->gshared);

	/* Only plain instance methods on reference types read `this` here. */
	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
			!method->klass->valuetype)
		EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);

	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		MonoInst *mrgctx_loc, *mrgctx_var;

		g_assert (!this_ins);
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);

		/* The method rgctx was stored in the vtable var by the prolog. */
		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

		return mrgctx_var;
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		MonoInst *vtable_loc, *vtable_var;

		g_assert (!this_ins);

		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);

		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* The var actually holds an mrgctx; load the class vtable out of it. */
			MonoInst *mrgctx_var = vtable_var;
			int vtable_reg;

			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		}

		return vtable_var;
	} else {
		MonoInst *ins;
		int vtable_reg;

		/* Instance method: the rgctx source is the receiver's vtable. */
		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		return ins;
	}
}
3335 static MonoJumpInfoRgctxEntry *
3336 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3338 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3339 res->method = method;
3340 res->in_mrgctx = in_mrgctx;
3341 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3342 res->data->type = patch_type;
3343 res->data->data.target = patch_data;
3344 res->info_type = info_type;
3346 return res;
/*
 * emit_rgctx_fetch_inline:
 *
 * Emit an inline (trampoline-free) fetch of rgctx entry ENTRY from RGCTX,
 * used on llvm-only targets. Currently always takes the slow path through
 * the fill icalls; the hand-written fastpath below is disabled (#if 0).
 */
static inline MonoInst*
emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
	MonoInst *args [16];
	MonoInst *call;

	// FIXME: No fastpath since the slot is not a compile time constant
	args [0] = rgctx;
	/* The slot index is resolved at patch time via an AOT constant. */
	EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
	if (entry->in_mrgctx)
		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
	else
		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
	return call;
#if 0
	/*
	 * FIXME: This can be called during decompose, which is a problem since it creates
	 * new bblocks.
	 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
	 */
	int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
	gboolean mrgctx;
	MonoBasicBlock *is_null_bb, *end_bb;
	MonoInst *res, *ins, *call;
	MonoInst *args[16];

	slot = mini_get_rgctx_entry_slot (entry);

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, is_null_bb);

	if (mrgctx) {
		rgctx_reg = rgctx->dreg;
	} else {
		rgctx_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
		// FIXME: Avoid this check by allocating the table when the vtable is created etc.
		NEW_BBLOCK (cfg, is_null_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
	}

	for (i = 0; i < depth; ++i) {
		int array_reg = alloc_preg (cfg);

		/* load ptr to next array */
		if (mrgctx && i == 0)
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
		else
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
		rgctx_reg = array_reg;
		/* is the ptr null? */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
		/* if yes, jump to actual trampoline */
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
	}

	/* fetch slot */
	val_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
	/* is the slot null? */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
	/* if yes, jump to actual trampoline */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	/* Fastpath */
	res_reg = alloc_preg (cfg);
	MONO_INST_NEW (cfg, ins, OP_MOVE);
	ins->dreg = res_reg;
	ins->sreg1 = val_reg;
	MONO_ADD_INS (cfg->cbb, ins);
	res = ins;
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Slowpath */
	MONO_START_BB (cfg, is_null_bb);
	args [0] = rgctx;
	EMIT_NEW_ICONST (cfg, args [1], index);
	if (mrgctx)
		call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
	else
		call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
	MONO_INST_NEW (cfg, ins, OP_MOVE);
	ins->dreg = res_reg;
	ins->sreg1 = call->dreg;
	MONO_ADD_INS (cfg->cbb, ins);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, end_bb);

	return res;
#endif
}
3458 * emit_rgctx_fetch:
3460 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3461 * given by RGCTX.
3463 static inline MonoInst*
3464 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3466 if (cfg->llvm_only)
3467 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3468 else
3469 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3472 MonoInst*
3473 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3474 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3476 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3477 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3479 return emit_rgctx_fetch (cfg, rgctx, entry);
3482 static MonoInst*
3483 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3484 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3486 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3487 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3489 return emit_rgctx_fetch (cfg, rgctx, entry);
3492 static MonoInst*
3493 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3494 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3496 MonoJumpInfoGSharedVtCall *call_info;
3497 MonoJumpInfoRgctxEntry *entry;
3498 MonoInst *rgctx;
3500 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3501 call_info->sig = sig;
3502 call_info->method = cmethod;
3504 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3505 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3507 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_virt_method:
 *
 *   Return data for method VIRT_METHOD for a receiver of type KLASS.
 * The (KLASS, VIRT_METHOD) pair forms the patch data; RGCTX_TYPE selects
 * which piece of information about the resolved method is fetched.
 */
static MonoInst*
emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
							MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
{
	MonoJumpInfoVirtMethod *info;
	MonoJumpInfoRgctxEntry *entry;
	MonoInst *rgctx;

	info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
	info->klass = klass;
	info->method = virt_method;

	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
	rgctx = emit_get_rgctx (cfg, cfg->method, context_used);

	return emit_rgctx_fetch (cfg, rgctx, entry);
}
3533 static MonoInst*
3534 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3535 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3537 MonoJumpInfoRgctxEntry *entry;
3538 MonoInst *rgctx;
3540 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3541 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3543 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
 * normal constants, else emit a load from the rgctx.
 */
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
					   MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
{
	if (!context_used) {
		/* Non-shared code: the method is fully known, emit it as a constant. */
		MonoInst *ins;

		switch (rgctx_type) {
		case MONO_RGCTX_INFO_METHOD:
			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
			return ins;
		case MONO_RGCTX_INFO_METHOD_RGCTX:
			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
			return ins;
		default:
			/* Only the two constant-representable kinds are supported here. */
			g_assert_not_reached ();
		}
	} else {
		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);

		return emit_rgctx_fetch (cfg, rgctx, entry);
	}
}
3577 static MonoInst*
3578 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3579 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3581 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3582 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3584 return emit_rgctx_fetch (cfg, rgctx, entry);
3587 static int
3588 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3590 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3591 MonoRuntimeGenericContextInfoTemplate *template_;
3592 int i, idx;
3594 g_assert (info);
3596 for (i = 0; i < info->num_entries; ++i) {
3597 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3599 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3600 return i;
3603 if (info->num_entries == info->count_entries) {
3604 MonoRuntimeGenericContextInfoTemplate *new_entries;
3605 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3607 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3609 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3610 info->entries = new_entries;
3611 info->count_entries = new_count_entries;
3614 idx = info->num_entries;
3615 template_ = &info->entries [idx];
3616 template_->info_type = rgctx_type;
3617 template_->data = data;
3619 info->num_entries ++;
3621 return idx;
/*
 * emit_get_gsharedvt_info:
 *
 *   This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
 */
static MonoInst*
emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
{
	MonoInst *ins;
	int idx, dreg;

	/* Reserve (or find) the slot, then emit a single indexed load from the info var. */
	idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
	/* Load info->entries [idx] */
	dreg = alloc_preg (cfg);
	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));

	return ins;
}
3643 static MonoInst*
3644 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3646 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 * Emit IR to run the class initializer (cctor) for KLASS if it has not run yet.
 * On return the caller must check @klass for load errors.
 */
static void
emit_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	int context_used;

	context_used = mini_class_check_context_used (cfg, klass);

	if (context_used) {
		/* Shared code: the vtable must come from the rgctx. */
		vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
												klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		/* Load failure; the caller is expected to notice via klass. */
		if (!vtable)
			return;
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
		MonoInst *ins;

		/*
		 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
		 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
		 */
		MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
		ins->sreg1 = vtable_arg->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	} else {
		/* Open-coded fast path: test vtable->initialized, call the icall only when clear. */
		int inited_reg;
		MonoBasicBlock *inited_bb;
		MonoInst *args [16];

		inited_reg = alloc_ireg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));

		NEW_BBLOCK (cfg, inited_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);

		args [0] = vtable_arg;
		mono_emit_jit_icall (cfg, mono_generic_class_init, args);

		MONO_START_BB (cfg, inited_bb);
	}
}
3702 static void
3703 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3705 MonoInst *ins;
3707 if (cfg->gen_seq_points && cfg->method == method) {
3708 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3709 if (nonempty_stack)
3710 ins->flags |= MONO_INST_NONEMPTY_STACK;
3711 MONO_ADD_INS (cfg->cbb, ins);
3715 void
3716 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3718 if (mini_get_debug_options ()->better_cast_details) {
3719 int vtable_reg = alloc_preg (cfg);
3720 int klass_reg = alloc_preg (cfg);
3721 MonoBasicBlock *is_null_bb = NULL;
3722 MonoInst *tls_get;
3723 int to_klass_reg, context_used;
3725 if (null_check) {
3726 NEW_BBLOCK (cfg, is_null_bb);
3728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3732 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3733 if (!tls_get) {
3734 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3735 exit (1);
3738 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3743 context_used = mini_class_check_context_used (cfg, klass);
3744 if (context_used) {
3745 MonoInst *class_ins;
3747 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3748 to_klass_reg = class_ins->dreg;
3749 } else {
3750 to_klass_reg = alloc_preg (cfg);
3751 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3755 if (null_check)
3756 MONO_START_BB (cfg, is_null_bb);
3760 void
3761 mini_reset_cast_details (MonoCompile *cfg)
3763 /* Reset the variables holding the cast details */
3764 if (mini_get_debug_options ()->better_cast_details) {
3765 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3766 /* It is enough to reset the from field */
3767 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 * Emit IR that throws ArrayTypeMismatchException unless OBJ is exactly an
 * instance of ARRAY_CLASS (compared by klass or vtable, depending on the
 * compilation mode). Also faults on a null OBJ via the vtable load.
 * On return the caller must check @array_class for load errors.
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used;

	context_used = mini_class_check_context_used (cfg, array_class);

	mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);

	/* Faulting load: also acts as the null check on obj. */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code: compare klass pointers via a runtime constant. */
		int class_reg = alloc_preg (cfg);
		MonoInst *ins;

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
	} else if (context_used) {
		/* Generic-shared code: fetch the expected vtable from the rgctx. */
		MonoInst *vtable_ins;

		vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			/* AOT: the vtable address is patched in, so compare registers. */
			int vt_reg;
			MonoVTable *vtable;

			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			/* JIT: the vtable pointer can be embedded as an immediate. */
			MonoVTable *vtable;
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");

	mini_reset_cast_details (cfg);
}
/*
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 * The unboxing is delegated to the managed Nullable `Unbox` method; VAL is
 * the boxed object on the IL stack.
 */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared?  We might not
		   have to get the address of the method from the
		   RGCTX. */
		addr = emit_get_rgctx_method (cfg, context_used, method,
									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		if (cfg->llvm_only) {
			/* Record the signature so the AOT compiler can emit the calli wrapper. */
			cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
			return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
		} else {
			rgctx = emit_get_rgctx (cfg, cfg->method, context_used);

			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
		}
	} else {
		gboolean pass_vtable, pass_mrgctx;
		MonoInst *rgctx_arg = NULL;

		/* The Unbox method may itself need a vtable argument when shared. */
		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
		g_assert (!pass_mrgctx);

		if (pass_vtable) {
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);

			g_assert (vtable);
			EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
		}

		return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
	}
}
/*
 * handle_unbox:
 *
 * Emit IR for the `unbox` opcode: verify that SP[0] is a box of KLASS
 * (throwing InvalidCastException otherwise) and return an instruction
 * whose dreg is the address of the unboxed payload (object + header size).
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* Faulting load doubles as the null check. */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	/* Compare by element_class so e.g. enums match their underlying type. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		element_class = mini_emit_get_rgctx_klass (cfg, context_used,
				klass, MONO_RGCTX_INFO_ELEMENT_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		mini_reset_cast_details (cfg);
	}

	/* Result: pointer just past the object header. */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
/*
 * handle_unbox_gsharedvt:
 *
 * Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete kind
 * (reference, valuetype or Nullable) is only known at runtime. The runtime
 * box type is read from the rgctx and dispatched over three basic blocks;
 * all paths leave the address of the value in addr_reg, from which the
 * final LDOBJ load is emitted.
 */
static MonoInst*
handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
{
	MonoInst *addr, *klass_inst, *is_ref, *args[16];
	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
	MonoInst *ins;
	int dreg, addr_reg;

	klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);

	/* obj */
	args [0] = obj;

	/* klass */
	args [1] = klass_inst;

	/* CASTCLASS */
	obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);

	NEW_BBLOCK (cfg, is_ref_bb);
	NEW_BBLOCK (cfg, is_nullable_bb);
	NEW_BBLOCK (cfg, end_bb);
	/* Runtime discriminator: ref / vtype / nullable. */
	is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);

	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
	addr_reg = alloc_dreg (cfg, STACK_MP);

	/* Non-ref case */
	/* UNBOX */
	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, addr);

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Ref case */
	MONO_START_BB (cfg, is_ref_bb);

	/* Save the ref to a temporary */
	dreg = alloc_ireg (cfg);
	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
	addr->dreg = addr_reg;
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Nullable case */
	MONO_START_BB (cfg, is_nullable_bb);

	{
		/* Call the rgctx-provided Nullable unbox helper through an indirect call. */
		MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
		MonoInst *unbox_call;
		MonoMethodSignature *unbox_sig;

		unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
		unbox_sig->ret = &klass->byval_arg;
		unbox_sig->param_count = 1;
		unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;

		if (cfg->llvm_only)
			unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
		else
			unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);

		/* The helper returns the value; take its address into addr_reg. */
		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
		addr->dreg = addr_reg;
	}

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* End */
	MONO_START_BB (cfg, end_bb);

	/* LDOBJ */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);

	return ins;
}
/*
 * handle_alloc:
 *
 * Emit IR to allocate an instance of KLASS (FOR_BOX distinguishes box
 * allocations for the GC's managed allocators), choosing between managed
 * allocators, specialized helpers and generic icalls depending on the
 * sharing/AOT configuration.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
	MonoInst *iargs [2];
	void *alloc_ftn;

	if (context_used) {
		/* Generic-shared path: the klass/vtable must come from the rgctx. */
		MonoInst *data;
		MonoRgctxInfoType rgctx_info;
		MonoInst *iargs [2];
		gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);

		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);

		if (cfg->opt & MONO_OPT_SHARED)
			rgctx_info = MONO_RGCTX_INFO_KLASS;
		else
			rgctx_info = MONO_RGCTX_INFO_VTABLE;
		data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);

		if (cfg->opt & MONO_OPT_SHARED) {
			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
			iargs [1] = data;
			alloc_ftn = ves_icall_object_new;
		} else {
			iargs [0] = data;
			alloc_ftn = ves_icall_object_new_specific;
		}

		if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
			if (known_instance_size) {
				int size = mono_class_instance_size (klass);
				if (size < sizeof (MonoObject))
					g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));

				EMIT_NEW_ICONST (cfg, iargs [1], size);
			}
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}

		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
	}

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code: generic icall taking (domain, klass). */
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = ves_icall_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = NULL;
		gboolean pass_lw;

		if (!vtable) {
			/* Class failed to load: report via the cfg exception. */
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
			cfg->exception_ptr = klass;
			return NULL;
		}

		managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);

		if (managed_alloc) {
			int size = mono_class_instance_size (klass);
			if (size < sizeof (MonoObject))
				g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));

			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			EMIT_NEW_ICONST (cfg, iargs [1], size);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		if (pass_lw) {
			/* Helper wants the instance size in pointer-sized words as the first arg. */
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
		}
		else {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
		}
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
4089 * Returns NULL and sets the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL, an instance of the value type KLASS, into a newly
 *   allocated object.  CONTEXT_USED is nonzero when KLASS depends on the
 *   generic sharing context.  Returns the instruction holding the boxed object,
 *   or NULL on error with the cfg exception set by the failing callee
 *   (e.g. handle_alloc ()).
 */
4091 static MonoInst*
4092 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4094 MonoInst *alloc, *ins;
/* Nullable<T> is boxed by calling Nullable<T>:Box () instead of allocating here */
4096 if (mono_class_is_nullable (klass)) {
4097 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4099 if (context_used) {
4100 if (cfg->llvm_only && cfg->gsharedvt) {
4101 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4102 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4103 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4104 } else {
4105 /* FIXME: What if the class is shared? We might not
4106 have to get the method address from the RGCTX. */
4107 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4108 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4109 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4111 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4113 } else {
4114 gboolean pass_vtable, pass_mrgctx;
4115 MonoInst *rgctx_arg = NULL;
4117 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4118 g_assert (!pass_mrgctx);
4120 if (pass_vtable) {
4121 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4123 g_assert (vtable);
4124 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4127 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * gsharedvt: the runtime representation of KLASS (vtype, reference or
 * nullable) is only known at run time, so branch on the
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE rgctx entry and emit all three cases.
 */
4131 if (mini_is_gsharedvt_klass (klass)) {
4132 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4133 MonoInst *res, *is_ref, *src_var, *addr;
4134 int dreg;
4136 dreg = alloc_ireg (cfg);
4138 NEW_BBLOCK (cfg, is_ref_bb);
4139 NEW_BBLOCK (cfg, is_nullable_bb);
4140 NEW_BBLOCK (cfg, end_bb);
4141 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4143 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4148 /* Non-ref case */
/* Allocate an object and copy the vtype value just past the MonoObject header */
4149 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4150 if (!alloc)
4151 return NULL;
4152 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4153 ins->opcode = OP_STOREV_MEMBASE;
4155 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4156 res->type = STACK_OBJ;
4157 res->klass = klass;
4158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4160 /* Ref case */
/* Boxing a reference is the identity operation: just load the pointer itself */
4161 MONO_START_BB (cfg, is_ref_bb);
4163 /* val is a vtype, so has to load the value manually */
4164 src_var = get_vreg_to_inst (cfg, val->dreg);
4165 if (!src_var)
4166 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4167 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4171 /* Nullable case */
4172 MONO_START_BB (cfg, is_nullable_bb);
4175 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4176 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4177 MonoInst *box_call;
4178 MonoMethodSignature *box_sig;
4181 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4182 * construct that method at JIT time, so have to do things by hand.
4184 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4185 box_sig->ret = &mono_defaults.object_class->byval_arg;
4186 box_sig->param_count = 1;
4187 box_sig->params [0] = &klass->byval_arg;
4189 if (cfg->llvm_only)
4190 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4191 else
4192 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4193 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4194 res->type = STACK_OBJ;
4195 res->klass = klass;
4198 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4200 MONO_START_BB (cfg, end_bb);
4202 return res;
4203 } else {
/* Plain vtype: allocate and copy the value just past the object header */
4204 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4205 if (!alloc)
4206 return NULL;
4208 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4209 return alloc;
/* Lazily-built set of corlib class names whose icalls may be called directly */
4213 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether the icall CMETHOD can be called directly, bypassing its
 *   icall wrapper.  Only icalls which cannot end up raising a managed
 *   exception are eligible; a small whitelist of corlib classes is used.
 */
4215 static gboolean
4216 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4218 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4219 if (!direct_icalls_enabled (cfg))
4220 return FALSE;
4223 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4224 * Whitelist a few icalls for now.
4226 if (!direct_icall_type_hash) {
4227 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4229 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4230 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4231 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4232 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully built table before the pointer becomes visible to readers */
4233 mono_memory_barrier ();
4234 direct_icall_type_hash = h;
/* NOTE(review): two threads racing through the init above would each build a
 * table and one would leak; presumably acceptable since the contents are
 * identical and the loser is simply overwritten — confirm. */
4237 if (cmethod->klass == mono_defaults.math_class)
4238 return TRUE;
4239 /* No locking needed */
4240 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
4241 return TRUE;
4242 return FALSE;
4245 static gboolean
4246 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4248 if (cmethod->klass == mono_defaults.systemtype_class) {
4249 if (!strcmp (cmethod->name, "GetType"))
4250 return TRUE;
4252 return FALSE;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inlined implementation of Enum:HasFlag () for the enum type KLASS:
 *   load the enum value pointed to by ENUM_THIS, AND it with ENUM_FLAG and
 *   compare the result with ENUM_FLAG for equality.  Returns the instruction
 *   holding the boolean (STACK_I4) result.
 */
4255 static G_GNUC_UNUSED MonoInst*
4256 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4258 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4259 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4260 gboolean is_i4;
/* Pick 32 bit or 64 bit opcodes based on the underlying enum type's width */
4262 switch (enum_type->type) {
4263 case MONO_TYPE_I8:
4264 case MONO_TYPE_U8:
4265 #if SIZEOF_REGISTER == 8
4266 case MONO_TYPE_I:
4267 case MONO_TYPE_U:
4268 #endif
4269 is_i4 = FALSE;
4270 break;
4271 default:
4272 is_i4 = TRUE;
4273 break;
4277 MonoInst *load, *and_, *cmp, *ceq;
4278 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4279 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4280 int dest_reg = alloc_ireg (cfg);
/* result = ((*enum_this & enum_flag) == enum_flag) */
4282 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4283 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4284 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4285 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4287 ceq->type = STACK_I4;
/* NOTE(review): 64 bit opcodes are decomposed eagerly here, presumably for
 * backends which cannot handle them directly — confirm. */
4289 if (!is_i4) {
4290 load = mono_decompose_opcode (cfg, load);
4291 and_ = mono_decompose_opcode (cfg, and_);
4292 cmp = mono_decompose_opcode (cfg, cmp);
4293 ceq = mono_decompose_opcode (cfg, ceq);
4296 return ceq;
4301 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR implementing the delegate constructor for delegate type KLASS:
 *   allocate the delegate object and initialize its target/method/
 *   invoke_impl/method_ptr fields for TARGET and METHOD, inlining the work
 *   normally done by mono_delegate_ctor ().  VIRTUAL_ selects the virtual
 *   delegate variant.  Returns the delegate object instruction, or NULL on
 *   error (with the cfg exception set by the failing callee).
 */
4303 static G_GNUC_UNUSED MonoInst*
4304 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4306 MonoInst *ptr;
4307 int dreg;
4308 gpointer trampoline;
4309 MonoInst *obj, *method_ins, *tramp_ins;
4310 MonoDomain *domain;
4311 guint8 **code_slot;
/* Bail out early if there is no virtual invoke thunk for this signature */
4313 if (virtual_ && !cfg->llvm_only) {
4314 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4315 g_assert (invoke);
4317 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4318 return NULL;
4321 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4322 if (!obj)
4323 return NULL;
4325 /* Inline the contents of mono_delegate_ctor */
4327 /* Set target field */
4328 /* Optimize away setting of NULL target */
4329 if (!MONO_INS_IS_PCONST_NULL (target)) {
4330 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4331 if (cfg->gen_write_barriers) {
4332 dreg = alloc_preg (cfg);
4333 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4334 emit_write_barrier (cfg, ptr, target);
4338 /* Set method field */
4339 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4340 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4343 * To avoid looking up the compiled code belonging to the target method
4344 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4345 * store it, and we fill it after the method has been compiled.
4347 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4348 MonoInst *code_slot_ins;
4350 if (context_used) {
4351 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4352 } else {
4353 domain = mono_domain_get ();
4354 mono_domain_lock (domain);
4355 if (!domain_jit_info (domain)->method_code_hash)
4356 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4357 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4358 if (!code_slot) {
4359 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4360 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4362 mono_domain_unlock (domain);
4364 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4366 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode uses init icalls instead of delegate trampolines */
4369 if (cfg->llvm_only) {
4370 MonoInst *args [16];
4372 if (virtual_) {
4373 args [0] = obj;
4374 args [1] = target;
4375 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4376 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4377 } else {
4378 args [0] = obj;
4379 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4382 return obj;
/* JIT/AOT: point invoke_impl at a delegate trampoline (or AOT constant) */
4385 if (cfg->compile_aot) {
4386 MonoDelegateClassMethodPair *del_tramp;
4388 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4389 del_tramp->klass = klass;
4390 del_tramp->method = context_used ? NULL : method;
4391 del_tramp->is_virtual = virtual_;
4392 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4393 } else {
4394 if (virtual_)
4395 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4396 else
4397 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4398 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4401 /* Set invoke_impl field */
4402 if (virtual_) {
4403 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4404 } else {
/* Non-virtual: tramp_ins is a MonoDelegateTrampInfo*, copy both fields out of it */
4405 dreg = alloc_preg (cfg);
4406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4409 dreg = alloc_preg (cfg);
4410 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4411 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4414 dreg = alloc_preg (cfg);
4415 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4416 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4418 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
4420 return obj;
4423 static MonoInst*
4424 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4426 MonoJitICallInfo *info;
4428 /* Need to register the icall so it gets an icall wrapper */
4429 info = mono_get_array_new_va_icall (rank);
4431 cfg->flags |= MONO_CFG_HAS_VARARGS;
4433 /* mono_array_new_va () needs a vararg calling convention */
4434 cfg->exception_message = g_strdup ("array-new");
4435 cfg->disable_llvm = TRUE;
4437 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4438 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4442 * handle_constrained_gsharedvt_call:
4444 * Handle constrained calls where the receiver is a gsharedvt type.
4445 * Return the instruction representing the call. Set the cfg exception on failure.
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Handle a constrained call whose receiver is a gsharedvt type by packing
 *   the arguments and dispatching through the mono_gsharedvt_constrained_call
 *   icall.  Returns the call instruction, or NULL with the cfg exception set.
 *   *REF_EMIT_WIDEN is cleared when the icall path is taken, since the icall
 *   returns an object which is unboxed below rather than a narrow primitive.
 */
4447 static MonoInst*
4448 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4449 gboolean *ref_emit_widen)
4451 MonoInst *ins = NULL;
4452 gboolean emit_widen = *ref_emit_widen;
4455 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
4456 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4457 * pack the arguments into an array, and do the rest of the work in an icall.
4459 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4460 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
4461 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4462 MonoInst *args [16];
4465 * This case handles calls to
4466 * - object:ToString()/Equals()/GetHashCode(),
4467 * - System.IComparable<T>:CompareTo()
4468 * - System.IEquatable<T>:Equals ()
4469 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args: [0] receiver, [1] method, [2] constrained class, [3] deref flag, [4] packed args */
4472 args [0] = sp [0];
4473 if (mono_method_check_context_used (cmethod))
4474 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4475 else
4476 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4477 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4479 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4480 if (fsig->hasthis && fsig->param_count) {
4481 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4482 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4483 ins->dreg = alloc_preg (cfg);
4484 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4485 MONO_ADD_INS (cfg->cbb, ins);
4486 args [4] = ins;
4488 if (mini_is_gsharedvt_type (fsig->params [0])) {
4489 int addr_reg, deref_arg_reg;
4491 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4492 deref_arg_reg = alloc_preg (cfg);
4493 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4494 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4496 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4497 addr_reg = ins->dreg;
4498 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4499 } else {
4500 EMIT_NEW_ICONST (cfg, args [3], 0);
4501 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4503 } else {
4504 EMIT_NEW_ICONST (cfg, args [3], 0);
4505 EMIT_NEW_ICONST (cfg, args [4], 0);
4507 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
4508 emit_widen = FALSE;
/* The icall returns a boxed object; unbox it to match the declared return type */
4510 if (mini_is_gsharedvt_type (fsig->ret)) {
4511 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4512 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
4513 MonoInst *add;
4515 /* Unbox */
4516 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4517 MONO_ADD_INS (cfg->cbb, add);
4518 /* Load value */
4519 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4520 MONO_ADD_INS (cfg->cbb, ins);
4521 /* ins represents the call result */
4523 } else {
4524 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4527 *ref_emit_widen = emit_widen;
4529 return ins;
4531 exception_exit:
4532 return NULL;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR instruction filling cfg->got_var at the very
 *   start of the method, if a GOT var exists and has not been allocated yet.
 *   Also adds a dummy use in the exit bblock so the variable stays live for
 *   the whole method.
 */
4535 static void
4536 mono_emit_load_got_addr (MonoCompile *cfg)
4538 MonoInst *getaddr, *dummy_use;
4540 if (!cfg->got_var || cfg->got_var_allocated)
4541 return;
4543 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4544 getaddr->cil_code = cfg->header->code;
4545 getaddr->dreg = cfg->got_var->dreg;
4547 /* Add it to the start of the first bblock */
/* Splice manually to prepend; MONO_ADD_INS would append */
4548 if (cfg->bb_entry->code) {
4549 getaddr->next = cfg->bb_entry->code;
4550 cfg->bb_entry->code = getaddr;
4552 else
4553 MONO_ADD_INS (cfg->bb_entry, getaddr);
4555 cfg->got_var_allocated = TRUE;
4558 * Add a dummy use to keep the got_var alive, since real uses might
4559 * only be generated by the back ends.
4560 * Add it to end_bblock, so the variable's lifetime covers the whole
4561 * method.
4562 * It would be better to make the usage of the got var explicit in all
4563 * cases when the backend needs it (i.e. calls, throw etc.), so this
4564 * wouldn't be needed.
4566 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4567 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size threshold: MONO_INLINELIMIT env var, or INLINE_LENGTH_LIMIT */
4570 static int inline_limit;
4571 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Return whether METHOD may be inlined into the method being compiled.
 *   Rejects methods that are too large, synchronized, marked noinline, have
 *   exception clauses, are marshal-by-ref, or whose class initializer could
 *   not be proven to run (or be runnable) before the inlined body executes.
 */
4573 static gboolean
4574 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4576 MonoMethodHeaderSummary header;
4577 MonoVTable *vtable;
4578 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4579 MonoMethodSignature *sig = mono_method_signature (method);
4580 int i;
4581 #endif
4583 if (cfg->disable_inline)
4584 return FALSE;
4585 if (cfg->gsharedvt)
4586 return FALSE;
/* Bound the inlining recursion depth */
4588 if (cfg->inline_depth > 10)
4589 return FALSE;
4591 if (!mono_method_get_header_summary (method, &header))
4592 return FALSE;
4594 /*runtime, icall and pinvoke are checked by summary call*/
4595 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4596 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4597 (mono_class_is_marshalbyref (method->klass)) ||
4598 header.has_clauses)
4599 return FALSE;
4601 /* also consider num_locals? */
4602 /* Do the size check early to avoid creating vtables */
4603 if (!inline_limit_inited) {
4604 if (g_getenv ("MONO_INLINELIMIT"))
4605 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4606 else
4607 inline_limit = INLINE_LENGTH_LIMIT;
4608 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit */
4610 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4611 return FALSE;
4614 * if we can initialize the class of the method right away, we do,
4615 * otherwise we don't allow inlining if the class needs initialization,
4616 * since it would mean inserting a call to mono_runtime_class_init()
4617 * inside the inlined code
4619 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4620 return FALSE;
4622 if (!(cfg->opt & MONO_OPT_SHARED)) {
4623 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4624 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4625 if (method->klass->has_cctor) {
4626 vtable = mono_class_vtable (cfg->domain, method->klass);
4627 if (!vtable)
4628 return FALSE;
4629 if (!cfg->compile_aot) {
4630 MonoError error;
4631 if (!mono_runtime_class_init_full (vtable, &error)) {
4632 mono_error_cleanup (&error);
4633 return FALSE;
4637 } else if (mono_class_is_before_field_init (method->klass)) {
4638 if (cfg->run_cctors && method->klass->has_cctor) {
4639 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4640 if (!method->klass->runtime_info)
4641 /* No vtable created yet */
4642 return FALSE;
4643 vtable = mono_class_vtable (cfg->domain, method->klass);
4644 if (!vtable)
4645 return FALSE;
4646 /* This makes so that inline cannot trigger */
4647 /* .cctors: too many apps depend on them */
4648 /* running with a specific order... */
4649 if (! vtable->initialized)
4650 return FALSE;
4651 MonoError error;
4652 if (!mono_runtime_class_init_full (vtable, &error)) {
4653 mono_error_cleanup (&error);
4654 return FALSE;
4657 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4658 if (!method->klass->runtime_info)
4659 /* No vtable created yet */
4660 return FALSE;
4661 vtable = mono_class_vtable (cfg->domain, method->klass);
4662 if (!vtable)
4663 return FALSE;
4664 if (!vtable->initialized)
4665 return FALSE;
4667 } else {
4669 * If we're compiling for shared code
4670 * the cctor will need to be run at aot method load time, for example,
4671 * or at the end of the compilation of the inlining method.
4673 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4674 return FALSE;
/* Soft-float backends cannot inline methods taking or returning R4 */
4677 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4678 if (mono_arch_is_soft_float ()) {
4679 /* FIXME: */
4680 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4681 return FALSE;
4682 for (i = 0; i < sig->param_count; ++i)
4683 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4684 return FALSE;
4686 #endif
4688 if (g_list_find (cfg->dont_inline, method))
4689 return FALSE;
4691 return TRUE;
4694 static gboolean
4695 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4697 if (!cfg->compile_aot) {
4698 g_assert (vtable);
4699 if (vtable->initialized)
4700 return FALSE;
4703 if (mono_class_is_before_field_init (klass)) {
4704 if (cfg->method == method)
4705 return FALSE;
4708 if (!mono_class_needs_cctor_run (klass, method))
4709 return FALSE;
4711 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4712 /* The initialization is already done before the method is called */
4713 return FALSE;
4715 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the rank-1 array ARR
 *   with element type KLASS, emitting a bounds check when BCHECK is set.
 *   For gsharedvt variable-size element types the element size is fetched
 *   from the rgctx at run time.  Returns the address instruction (STACK_MP).
 */
4718 MonoInst*
4719 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4721 MonoInst *ins;
4722 guint32 size;
4723 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4724 int context_used;
/* size == -1 marks a gsharedvt element whose size is only known at run time */
4726 if (mini_is_gsharedvt_variable_klass (klass)) {
4727 size = -1;
4728 } else {
4729 mono_class_init (klass);
4730 size = mono_class_array_element_size (klass);
4733 mult_reg = alloc_preg (cfg);
4734 array_reg = arr->dreg;
4735 index_reg = index->dreg;
4737 #if SIZEOF_REGISTER == 8
4738 /* The array reg is 64 bits but the index reg is only 32 */
4739 if (COMPILE_LLVM (cfg)) {
4740 /* Not needed */
4741 index2_reg = index_reg;
4742 } else {
4743 index2_reg = alloc_preg (cfg);
4744 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4746 #else
4747 if (index->type == STACK_I8) {
4748 index2_reg = alloc_preg (cfg);
4749 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4750 } else {
4751 index2_reg = index_reg;
4753 #endif
4755 if (bcheck)
4756 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: fold the scale and offset into a single LEA on x86/amd64 */
4758 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4759 if (size == 1 || size == 2 || size == 4 || size == 8) {
4760 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4762 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4763 ins->klass = mono_class_get_element_class (klass);
4764 ins->type = STACK_MP;
4766 return ins;
4768 #endif
4770 add_reg = alloc_ireg_mp (cfg);
4772 if (size == -1) {
4773 MonoInst *rgctx_ins;
4775 /* gsharedvt */
4776 g_assert (cfg->gshared);
4777 context_used = mini_class_check_context_used (cfg, klass);
4778 g_assert (context_used);
4779 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4780 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4781 } else {
4782 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = arr + index * size + offsetof (MonoArray, vector) */
4784 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4785 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4786 ins->klass = mono_class_get_element_class (klass);
4787 ins->type = STACK_MP;
4788 MONO_ADD_INS (cfg->cbb, ins);
4790 return ins;
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 *   rank-2 array ARR with element type KLASS, range-checking both indexes
 *   against the per-dimension lower bound and length stored in the array's
 *   bounds.  Returns the address instruction (STACK_MP).
 */
4793 static MonoInst*
4794 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4796 int bounds_reg = alloc_preg (cfg);
4797 int add_reg = alloc_ireg_mp (cfg);
4798 int mult_reg = alloc_preg (cfg);
4799 int mult2_reg = alloc_preg (cfg);
4800 int low1_reg = alloc_preg (cfg);
4801 int low2_reg = alloc_preg (cfg);
4802 int high1_reg = alloc_preg (cfg);
4803 int high2_reg = alloc_preg (cfg);
4804 int realidx1_reg = alloc_preg (cfg);
4805 int realidx2_reg = alloc_preg (cfg);
4806 int sum_reg = alloc_preg (cfg);
4807 int index1, index2, tmpreg;
4808 MonoInst *ins;
4809 guint32 size;
4811 mono_class_init (klass);
4812 size = mono_class_array_element_size (klass);
4814 index1 = index_ins1->dreg;
4815 index2 = index_ins2->dreg;
4817 #if SIZEOF_REGISTER == 8
4818 /* The array reg is 64 bits but the index reg is only 32 */
4819 if (COMPILE_LLVM (cfg)) {
4820 /* Not needed */
4821 } else {
4822 tmpreg = alloc_preg (cfg);
4823 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4824 index1 = tmpreg;
4825 tmpreg = alloc_preg (cfg);
4826 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4827 index2 = tmpreg;
4829 #else
4830 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4831 tmpreg = -1;
4832 #endif
4834 /* range checking */
/* realidx = index - lower_bound; unsigned compare against length catches both bounds */
4835 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4836 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4838 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4839 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4840 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4841 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4842 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4843 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4844 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0] */
4846 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4847 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4848 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4849 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4850 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4851 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4852 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2 + realidx2) * size) + offsetof (MonoArray, vector) */
4854 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4855 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4856 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4857 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4858 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4860 ins->type = STACK_MP;
4861 ins->klass = klass;
4862 MONO_ADD_INS (cfg->cbb, ins);
4864 return ins;
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of an array element for the array Address/
 *   Get/Set method CMETHOD, with the array and index arguments in SP.
 *   Rank 1 (and rank 2 when the backend supports OP_LMUL) are inlined;
 *   other ranks go through the managed array-address wrapper.
 */
4867 static MonoInst*
4868 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4870 int rank;
4871 MonoInst *addr;
4872 MonoMethod *addr_method;
4873 int element_size;
4874 MonoClass *eclass = cmethod->klass->element_class;
/* A setter carries the value as its last parameter; it is not an index */
4876 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4878 if (rank == 1)
4879 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4881 /* emit_ldelema_2 depends on OP_LMUL */
4882 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4883 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* Generic fallback: call the marshalled array-address helper */
4886 if (mini_is_gsharedvt_variable_klass (eclass))
4887 element_size = 0;
4888 else
4889 element_size = mono_class_array_element_size (eclass);
4890 addr_method = mono_marshal_get_array_address (rank, element_size);
4891 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4893 return addr;
4896 static MonoBreakPolicy
4897 always_insert_breakpoint (MonoMethod *method)
4899 return MONO_BREAK_POLICY_ALWAYS;
/* Active break policy callback; defaults to honoring every breakpoint */
4902 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4905 * mono_set_break_policy:
4906 * policy_callback: the new callback function
4908 * Allow embedders to decide whether to actually obey breakpoint instructions
4909 * (both break IL instructions and Debugger.Break () method calls), for example
4910 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4911 * untrusted or semi-trusted code.
4913 * @policy_callback will be called every time a break point instruction needs to
4914 * be inserted with the method argument being the method that calls Debugger.Break()
4915 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4916 * if it wants the breakpoint to not be effective in the given method.
4917 * #MONO_BREAK_POLICY_ALWAYS is the default.
4919 void
4920 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-break policy */
4922 if (policy_callback)
4923 break_policy_func = policy_callback;
4924 else
4925 break_policy_func = always_insert_breakpoint;
4928 static gboolean
4929 should_insert_brekpoint (MonoMethod *method) {
4930 switch (break_policy_func (method)) {
4931 case MONO_BREAK_POLICY_ALWAYS:
4932 return TRUE;
4933 case MONO_BREAK_POLICY_NEVER:
4934 return FALSE;
4935 case MONO_BREAK_POLICY_ON_DBG:
4936 g_warning ("mdb no longer supported");
4937 return FALSE;
4938 default:
4939 g_warning ("Incorrect value returned from break policy callback");
4940 return FALSE;
4944 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inlined array element load or store for the generic value
 *   accessor icalls: args [0] is the array, args [1] the index, args [2]
 *   the value (IS_SET) or the destination address (!IS_SET).  The element
 *   type comes from fsig->params [2].  Returns the store instruction.
 */
4945 static MonoInst*
4946 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4948 MonoInst *addr, *store, *load;
4949 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4951 /* the bounds check is already done by the callers */
4952 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4953 if (is_set) {
4954 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4955 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Storing a reference into the array needs a GC write barrier */
4956 if (mini_type_is_reference (&eklass->byval_arg))
4957 emit_write_barrier (cfg, addr, load);
4958 } else {
4959 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4960 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4962 return store;
/* Return whether KLASS is treated as a reference type by the JIT; cfg is unused */
4966 static gboolean
4967 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4969 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing an element into an array: sp [0] is the array,
 *   sp [1] the index, sp [2] the value.  When SAFETY_CHECKS is set, bounds
 *   checks are emitted and reference stores go through the virtual
 *   stelemref helper so the array covariance check happens too.
 *   Returns the store (or helper call) instruction, or NULL if the stack
 *   types are invalid.
 */
4972 static MonoInst*
4973 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Non-null reference store with checks: use the virtual stelemref helper */
4975 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4976 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4977 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4978 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4979 MonoInst *iargs [3];
4981 if (!helper->slot)
4982 mono_class_setup_vtable (obj_array);
4983 g_assert (helper->slot);
4985 if (sp [0]->type != STACK_OBJ)
4986 return NULL;
4987 if (sp [2]->type != STACK_OBJ)
4988 return NULL;
4990 iargs [2] = sp [2];
4991 iargs [1] = sp [1];
4992 iargs [0] = sp [0];
4994 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4995 } else {
4996 MonoInst *ins;
4998 if (mini_is_gsharedvt_variable_klass (klass)) {
4999 MonoInst *addr;
5001 // FIXME-VT: OP_ICONST optimization
5002 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5003 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5004 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset at compile time */
5005 } else if (sp [1]->opcode == OP_ICONST) {
5006 int array_reg = sp [0]->dreg;
5007 int index_reg = sp [1]->dreg;
5008 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5010 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5011 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5013 if (safety_checks)
5014 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5015 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5016 } else {
5017 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5018 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
/* Reference stores into the GC heap need a write barrier */
5019 if (generic_class_is_reference_type (cfg, klass))
5020 emit_write_barrier (cfg, addr, sp [2]);
5022 return ins;
5026 static MonoInst*
5027 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5029 MonoClass *eklass;
5031 if (is_set)
5032 eklass = mono_class_from_mono_type (fsig->params [2]);
5033 else
5034 eklass = mono_class_from_mono_type (fsig->ret);
5036 if (is_set) {
5037 return emit_array_store (cfg, eklass, args, FALSE);
5038 } else {
5039 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5040 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5041 return ins;
5045 static gboolean
5046 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5048 uint32_t align;
5049 int param_size, return_size;
5051 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5052 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5054 if (cfg->verbose_level > 3)
5055 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5057 //Don't allow mixing reference types with value types
5058 if (param_klass->valuetype != return_klass->valuetype) {
5059 if (cfg->verbose_level > 3)
5060 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5061 return FALSE;
5064 if (!param_klass->valuetype) {
5065 if (cfg->verbose_level > 3)
5066 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5067 return TRUE;
5070 //That are blitable
5071 if (param_klass->has_references || return_klass->has_references)
5072 return FALSE;
5074 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5075 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5076 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5077 if (cfg->verbose_level > 3)
5078 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5079 return FALSE;
5082 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5083 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5084 if (cfg->verbose_level > 3)
5085 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5086 return FALSE;
5089 param_size = mono_class_value_size (param_klass, &align);
5090 return_size = mono_class_value_size (return_klass, &align);
5092 //We can do it if sizes match
5093 if (param_size == return_size) {
5094 if (cfg->verbose_level > 3)
5095 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5096 return TRUE;
5099 //No simple way to handle struct if sizes don't match
5100 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5101 if (cfg->verbose_level > 3)
5102 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5103 return FALSE;
5107 * Same reg size category.
5108 * A quick note on why we don't require widening here.
5109 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5111 * Since the source value comes from a function argument, the JIT will already have
5112 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5114 if (param_size <= 4 && return_size <= 4) {
5115 if (cfg->verbose_level > 3)
5116 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
5117 return TRUE;
5120 return FALSE;
/*
 * emit_array_unsafe_mov:
 *
 *   Implement the Array.UnsafeMov<S,R> intrinsic as a no-op reinterpreting
 *   move: when the parameter and return types are register-compatible (see
 *   is_unsafe_mov_compatible), the argument itself is returned unchanged.
 *   Returns NULL when the intrinsic cannot be applied and the normal call
 *   must be emitted instead.
 */
5123 static MonoInst*
5124 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5126 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5127 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* gsharedvt return types have no fixed representation; bail out */
5129 if (mini_is_gsharedvt_variable_type (fsig->ret))
5130 return NULL;
5132 //Valuetypes that are semantically equivalent or numbers that can be widened to
5133 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5134 return args [0];
5136 //Arrays of valuetypes that are semantically equivalent
5137 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5138 return args [0];
5140 return NULL;
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR.  SIMD intrinsics
 *   are attempted first (only when the arch supports them and MONO_OPT_SIMD
 *   is enabled), then the native-types intrinsics.  Returns the emitted
 *   instruction or NULL if no intrinsic applies.
 */
5143 static MonoInst*
5144 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5146 #ifdef MONO_ARCH_SIMD_INTRINSICS
5147 MonoInst *ins = NULL;
5149 if (cfg->opt & MONO_OPT_SIMD) {
5150 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5151 if (ins)
5152 return ins;
5154 #endif
5156 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND (one of the
 *   MONO_MEMORY_BARRIER_* constants) to the current basic block and return
 *   it.
 */
5159 static MonoInst*
5160 emit_memory_barrier (MonoCompile *cfg, int kind)
5162 MonoInst *ins = NULL;
5163 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5164 MONO_ADD_INS (cfg->cbb, ins);
5165 ins->backend.memory_barrier_kind = kind;
5167 return ins;
5170 static MonoInst*
5171 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5173 MonoInst *ins = NULL;
5174 int opcode = 0;
5176 /* The LLVM backend supports these intrinsics */
5177 if (cmethod->klass == mono_defaults.math_class) {
5178 if (strcmp (cmethod->name, "Sin") == 0) {
5179 opcode = OP_SIN;
5180 } else if (strcmp (cmethod->name, "Cos") == 0) {
5181 opcode = OP_COS;
5182 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5183 opcode = OP_SQRT;
5184 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5185 opcode = OP_ABS;
5188 if (opcode && fsig->param_count == 1) {
5189 MONO_INST_NEW (cfg, ins, opcode);
5190 ins->type = STACK_R8;
5191 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5192 ins->sreg1 = args [0]->dreg;
5193 MONO_ADD_INS (cfg->cbb, ins);
5196 opcode = 0;
5197 if (cfg->opt & MONO_OPT_CMOV) {
5198 if (strcmp (cmethod->name, "Min") == 0) {
5199 if (fsig->params [0]->type == MONO_TYPE_I4)
5200 opcode = OP_IMIN;
5201 if (fsig->params [0]->type == MONO_TYPE_U4)
5202 opcode = OP_IMIN_UN;
5203 else if (fsig->params [0]->type == MONO_TYPE_I8)
5204 opcode = OP_LMIN;
5205 else if (fsig->params [0]->type == MONO_TYPE_U8)
5206 opcode = OP_LMIN_UN;
5207 } else if (strcmp (cmethod->name, "Max") == 0) {
5208 if (fsig->params [0]->type == MONO_TYPE_I4)
5209 opcode = OP_IMAX;
5210 if (fsig->params [0]->type == MONO_TYPE_U4)
5211 opcode = OP_IMAX_UN;
5212 else if (fsig->params [0]->type == MONO_TYPE_I8)
5213 opcode = OP_LMAX;
5214 else if (fsig->params [0]->type == MONO_TYPE_U8)
5215 opcode = OP_LMAX_UN;
5219 if (opcode && fsig->param_count == 2) {
5220 MONO_INST_NEW (cfg, ins, opcode);
5221 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5222 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5223 ins->sreg1 = args [0]->dreg;
5224 ins->sreg2 = args [1]->dreg;
5225 MONO_ADD_INS (cfg->cbb, ins);
5229 return ins;
5232 static MonoInst*
5233 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5235 if (cmethod->klass == mono_defaults.array_class) {
5236 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5237 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5238 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5239 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5240 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5241 return emit_array_unsafe_mov (cfg, fsig, args);
5244 return NULL;
5247 static MonoInst*
5248 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5250 MonoInst *ins = NULL;
5252 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5254 if (cmethod->klass == mono_defaults.string_class) {
5255 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5256 int dreg = alloc_ireg (cfg);
5257 int index_reg = alloc_preg (cfg);
5258 int add_reg = alloc_preg (cfg);
5260 #if SIZEOF_REGISTER == 8
5261 if (COMPILE_LLVM (cfg)) {
5262 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5263 } else {
5264 /* The array reg is 64 bits but the index reg is only 32 */
5265 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5267 #else
5268 index_reg = args [1]->dreg;
5269 #endif
5270 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5272 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5273 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5274 add_reg = ins->dreg;
5275 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5276 add_reg, 0);
5277 #else
5278 int mult_reg = alloc_preg (cfg);
5279 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5280 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5281 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5282 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5283 #endif
5284 type_from_op (cfg, ins, NULL, NULL);
5285 return ins;
5286 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5287 int dreg = alloc_ireg (cfg);
5288 /* Decompose later to allow more optimizations */
5289 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5290 ins->type = STACK_I4;
5291 ins->flags |= MONO_INST_FAULT;
5292 cfg->cbb->has_array_access = TRUE;
5293 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5295 return ins;
5296 } else
5297 return NULL;
5298 } else if (cmethod->klass == mono_defaults.object_class) {
5299 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5300 int dreg = alloc_ireg_ref (cfg);
5301 int vt_reg = alloc_preg (cfg);
5302 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5303 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5304 type_from_op (cfg, ins, NULL, NULL);
5306 return ins;
5307 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5308 int dreg = alloc_ireg (cfg);
5309 int t1 = alloc_ireg (cfg);
5311 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5312 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5313 ins->type = STACK_I4;
5315 return ins;
5316 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5317 MONO_INST_NEW (cfg, ins, OP_NOP);
5318 MONO_ADD_INS (cfg->cbb, ins);
5319 return ins;
5320 } else
5321 return NULL;
5322 } else if (cmethod->klass == mono_defaults.array_class) {
5323 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5324 return emit_array_generic_access (cfg, fsig, args, FALSE);
5325 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5326 return emit_array_generic_access (cfg, fsig, args, TRUE);
5328 #ifndef MONO_BIG_ARRAYS
5330 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5331 * Array methods.
5333 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5334 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5335 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5336 int dreg = alloc_ireg (cfg);
5337 int bounds_reg = alloc_ireg_mp (cfg);
5338 MonoBasicBlock *end_bb, *szarray_bb;
5339 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5341 NEW_BBLOCK (cfg, end_bb);
5342 NEW_BBLOCK (cfg, szarray_bb);
5344 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5345 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5348 /* Non-szarray case */
5349 if (get_length)
5350 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5351 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5352 else
5353 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5354 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5355 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5356 MONO_START_BB (cfg, szarray_bb);
5357 /* Szarray case */
5358 if (get_length)
5359 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5360 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5361 else
5362 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5363 MONO_START_BB (cfg, end_bb);
5365 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5366 ins->type = STACK_I4;
5368 return ins;
5370 #endif
5372 if (cmethod->name [0] != 'g')
5373 return NULL;
5375 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5376 int dreg = alloc_ireg (cfg);
5377 int vtable_reg = alloc_preg (cfg);
5378 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5379 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5380 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5381 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5382 type_from_op (cfg, ins, NULL, NULL);
5384 return ins;
5385 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5386 int dreg = alloc_ireg (cfg);
5388 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5389 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5390 type_from_op (cfg, ins, NULL, NULL);
5392 return ins;
5393 } else
5394 return NULL;
5395 } else if (cmethod->klass == runtime_helpers_class) {
5396 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5397 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5398 return ins;
5399 } else
5400 return NULL;
5401 } else if (cmethod->klass == mono_defaults.monitor_class) {
5402 gboolean is_enter = FALSE;
5403 gboolean is_v4 = FALSE;
5405 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5406 is_enter = TRUE;
5407 is_v4 = TRUE;
5409 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5410 is_enter = TRUE;
5412 if (is_enter) {
5414 * To make async stack traces work, icalls which can block should have a wrapper.
5415 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5417 MonoBasicBlock *end_bb;
5419 NEW_BBLOCK (cfg, end_bb);
5421 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5423 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5424 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5425 MONO_START_BB (cfg, end_bb);
5426 return ins;
5428 } else if (cmethod->klass == mono_defaults.thread_class) {
5429 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5430 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5431 MONO_ADD_INS (cfg->cbb, ins);
5432 return ins;
5433 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5434 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5435 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5436 guint32 opcode = 0;
5437 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5439 if (fsig->params [0]->type == MONO_TYPE_I1)
5440 opcode = OP_LOADI1_MEMBASE;
5441 else if (fsig->params [0]->type == MONO_TYPE_U1)
5442 opcode = OP_LOADU1_MEMBASE;
5443 else if (fsig->params [0]->type == MONO_TYPE_I2)
5444 opcode = OP_LOADI2_MEMBASE;
5445 else if (fsig->params [0]->type == MONO_TYPE_U2)
5446 opcode = OP_LOADU2_MEMBASE;
5447 else if (fsig->params [0]->type == MONO_TYPE_I4)
5448 opcode = OP_LOADI4_MEMBASE;
5449 else if (fsig->params [0]->type == MONO_TYPE_U4)
5450 opcode = OP_LOADU4_MEMBASE;
5451 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5452 opcode = OP_LOADI8_MEMBASE;
5453 else if (fsig->params [0]->type == MONO_TYPE_R4)
5454 opcode = OP_LOADR4_MEMBASE;
5455 else if (fsig->params [0]->type == MONO_TYPE_R8)
5456 opcode = OP_LOADR8_MEMBASE;
5457 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5458 opcode = OP_LOAD_MEMBASE;
5460 if (opcode) {
5461 MONO_INST_NEW (cfg, ins, opcode);
5462 ins->inst_basereg = args [0]->dreg;
5463 ins->inst_offset = 0;
5464 MONO_ADD_INS (cfg->cbb, ins);
5466 switch (fsig->params [0]->type) {
5467 case MONO_TYPE_I1:
5468 case MONO_TYPE_U1:
5469 case MONO_TYPE_I2:
5470 case MONO_TYPE_U2:
5471 case MONO_TYPE_I4:
5472 case MONO_TYPE_U4:
5473 ins->dreg = mono_alloc_ireg (cfg);
5474 ins->type = STACK_I4;
5475 break;
5476 case MONO_TYPE_I8:
5477 case MONO_TYPE_U8:
5478 ins->dreg = mono_alloc_lreg (cfg);
5479 ins->type = STACK_I8;
5480 break;
5481 case MONO_TYPE_I:
5482 case MONO_TYPE_U:
5483 ins->dreg = mono_alloc_ireg (cfg);
5484 #if SIZEOF_REGISTER == 8
5485 ins->type = STACK_I8;
5486 #else
5487 ins->type = STACK_I4;
5488 #endif
5489 break;
5490 case MONO_TYPE_R4:
5491 case MONO_TYPE_R8:
5492 ins->dreg = mono_alloc_freg (cfg);
5493 ins->type = STACK_R8;
5494 break;
5495 default:
5496 g_assert (mini_type_is_reference (fsig->params [0]));
5497 ins->dreg = mono_alloc_ireg_ref (cfg);
5498 ins->type = STACK_OBJ;
5499 break;
5502 if (opcode == OP_LOADI8_MEMBASE)
5503 ins = mono_decompose_opcode (cfg, ins);
5505 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5507 return ins;
5509 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5510 guint32 opcode = 0;
5511 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5513 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5514 opcode = OP_STOREI1_MEMBASE_REG;
5515 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5516 opcode = OP_STOREI2_MEMBASE_REG;
5517 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5518 opcode = OP_STOREI4_MEMBASE_REG;
5519 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5520 opcode = OP_STOREI8_MEMBASE_REG;
5521 else if (fsig->params [0]->type == MONO_TYPE_R4)
5522 opcode = OP_STORER4_MEMBASE_REG;
5523 else if (fsig->params [0]->type == MONO_TYPE_R8)
5524 opcode = OP_STORER8_MEMBASE_REG;
5525 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5526 opcode = OP_STORE_MEMBASE_REG;
5528 if (opcode) {
5529 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5531 MONO_INST_NEW (cfg, ins, opcode);
5532 ins->sreg1 = args [1]->dreg;
5533 ins->inst_destbasereg = args [0]->dreg;
5534 ins->inst_offset = 0;
5535 MONO_ADD_INS (cfg->cbb, ins);
5537 if (opcode == OP_STOREI8_MEMBASE_REG)
5538 ins = mono_decompose_opcode (cfg, ins);
5540 return ins;
5543 } else if (cmethod->klass->image == mono_defaults.corlib &&
5544 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5545 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5546 ins = NULL;
5548 #if SIZEOF_REGISTER == 8
5549 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5550 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5551 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5552 ins->dreg = mono_alloc_preg (cfg);
5553 ins->sreg1 = args [0]->dreg;
5554 ins->type = STACK_I8;
5555 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5556 MONO_ADD_INS (cfg->cbb, ins);
5557 } else {
5558 MonoInst *load_ins;
5560 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5562 /* 64 bit reads are already atomic */
5563 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5564 load_ins->dreg = mono_alloc_preg (cfg);
5565 load_ins->inst_basereg = args [0]->dreg;
5566 load_ins->inst_offset = 0;
5567 load_ins->type = STACK_I8;
5568 MONO_ADD_INS (cfg->cbb, load_ins);
5570 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5572 ins = load_ins;
5575 #endif
5577 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5578 MonoInst *ins_iconst;
5579 guint32 opcode = 0;
5581 if (fsig->params [0]->type == MONO_TYPE_I4) {
5582 opcode = OP_ATOMIC_ADD_I4;
5583 cfg->has_atomic_add_i4 = TRUE;
5585 #if SIZEOF_REGISTER == 8
5586 else if (fsig->params [0]->type == MONO_TYPE_I8)
5587 opcode = OP_ATOMIC_ADD_I8;
5588 #endif
5589 if (opcode) {
5590 if (!mono_arch_opcode_supported (opcode))
5591 return NULL;
5592 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5593 ins_iconst->inst_c0 = 1;
5594 ins_iconst->dreg = mono_alloc_ireg (cfg);
5595 MONO_ADD_INS (cfg->cbb, ins_iconst);
5597 MONO_INST_NEW (cfg, ins, opcode);
5598 ins->dreg = mono_alloc_ireg (cfg);
5599 ins->inst_basereg = args [0]->dreg;
5600 ins->inst_offset = 0;
5601 ins->sreg2 = ins_iconst->dreg;
5602 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5603 MONO_ADD_INS (cfg->cbb, ins);
5605 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5606 MonoInst *ins_iconst;
5607 guint32 opcode = 0;
5609 if (fsig->params [0]->type == MONO_TYPE_I4) {
5610 opcode = OP_ATOMIC_ADD_I4;
5611 cfg->has_atomic_add_i4 = TRUE;
5613 #if SIZEOF_REGISTER == 8
5614 else if (fsig->params [0]->type == MONO_TYPE_I8)
5615 opcode = OP_ATOMIC_ADD_I8;
5616 #endif
5617 if (opcode) {
5618 if (!mono_arch_opcode_supported (opcode))
5619 return NULL;
5620 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5621 ins_iconst->inst_c0 = -1;
5622 ins_iconst->dreg = mono_alloc_ireg (cfg);
5623 MONO_ADD_INS (cfg->cbb, ins_iconst);
5625 MONO_INST_NEW (cfg, ins, opcode);
5626 ins->dreg = mono_alloc_ireg (cfg);
5627 ins->inst_basereg = args [0]->dreg;
5628 ins->inst_offset = 0;
5629 ins->sreg2 = ins_iconst->dreg;
5630 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5631 MONO_ADD_INS (cfg->cbb, ins);
5633 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5634 guint32 opcode = 0;
5636 if (fsig->params [0]->type == MONO_TYPE_I4) {
5637 opcode = OP_ATOMIC_ADD_I4;
5638 cfg->has_atomic_add_i4 = TRUE;
5640 #if SIZEOF_REGISTER == 8
5641 else if (fsig->params [0]->type == MONO_TYPE_I8)
5642 opcode = OP_ATOMIC_ADD_I8;
5643 #endif
5644 if (opcode) {
5645 if (!mono_arch_opcode_supported (opcode))
5646 return NULL;
5647 MONO_INST_NEW (cfg, ins, opcode);
5648 ins->dreg = mono_alloc_ireg (cfg);
5649 ins->inst_basereg = args [0]->dreg;
5650 ins->inst_offset = 0;
5651 ins->sreg2 = args [1]->dreg;
5652 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5653 MONO_ADD_INS (cfg->cbb, ins);
5656 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5657 MonoInst *f2i = NULL, *i2f;
5658 guint32 opcode, f2i_opcode, i2f_opcode;
5659 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5660 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5662 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5663 fsig->params [0]->type == MONO_TYPE_R4) {
5664 opcode = OP_ATOMIC_EXCHANGE_I4;
5665 f2i_opcode = OP_MOVE_F_TO_I4;
5666 i2f_opcode = OP_MOVE_I4_TO_F;
5667 cfg->has_atomic_exchange_i4 = TRUE;
5669 #if SIZEOF_REGISTER == 8
5670 else if (is_ref ||
5671 fsig->params [0]->type == MONO_TYPE_I8 ||
5672 fsig->params [0]->type == MONO_TYPE_R8 ||
5673 fsig->params [0]->type == MONO_TYPE_I) {
5674 opcode = OP_ATOMIC_EXCHANGE_I8;
5675 f2i_opcode = OP_MOVE_F_TO_I8;
5676 i2f_opcode = OP_MOVE_I8_TO_F;
5678 #else
5679 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5680 opcode = OP_ATOMIC_EXCHANGE_I4;
5681 cfg->has_atomic_exchange_i4 = TRUE;
5683 #endif
5684 else
5685 return NULL;
5687 if (!mono_arch_opcode_supported (opcode))
5688 return NULL;
5690 if (is_float) {
5691 /* TODO: Decompose these opcodes instead of bailing here. */
5692 if (COMPILE_SOFT_FLOAT (cfg))
5693 return NULL;
5695 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5696 f2i->dreg = mono_alloc_ireg (cfg);
5697 f2i->sreg1 = args [1]->dreg;
5698 if (f2i_opcode == OP_MOVE_F_TO_I4)
5699 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5700 MONO_ADD_INS (cfg->cbb, f2i);
5703 MONO_INST_NEW (cfg, ins, opcode);
5704 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5705 ins->inst_basereg = args [0]->dreg;
5706 ins->inst_offset = 0;
5707 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5708 MONO_ADD_INS (cfg->cbb, ins);
5710 switch (fsig->params [0]->type) {
5711 case MONO_TYPE_I4:
5712 ins->type = STACK_I4;
5713 break;
5714 case MONO_TYPE_I8:
5715 ins->type = STACK_I8;
5716 break;
5717 case MONO_TYPE_I:
5718 #if SIZEOF_REGISTER == 8
5719 ins->type = STACK_I8;
5720 #else
5721 ins->type = STACK_I4;
5722 #endif
5723 break;
5724 case MONO_TYPE_R4:
5725 case MONO_TYPE_R8:
5726 ins->type = STACK_R8;
5727 break;
5728 default:
5729 g_assert (mini_type_is_reference (fsig->params [0]));
5730 ins->type = STACK_OBJ;
5731 break;
5734 if (is_float) {
5735 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5736 i2f->dreg = mono_alloc_freg (cfg);
5737 i2f->sreg1 = ins->dreg;
5738 i2f->type = STACK_R8;
5739 if (i2f_opcode == OP_MOVE_I4_TO_F)
5740 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5741 MONO_ADD_INS (cfg->cbb, i2f);
5743 ins = i2f;
5746 if (cfg->gen_write_barriers && is_ref)
5747 emit_write_barrier (cfg, args [0], args [1]);
5749 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5750 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5751 guint32 opcode, f2i_opcode, i2f_opcode;
5752 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5753 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5755 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5756 fsig->params [1]->type == MONO_TYPE_R4) {
5757 opcode = OP_ATOMIC_CAS_I4;
5758 f2i_opcode = OP_MOVE_F_TO_I4;
5759 i2f_opcode = OP_MOVE_I4_TO_F;
5760 cfg->has_atomic_cas_i4 = TRUE;
5762 #if SIZEOF_REGISTER == 8
5763 else if (is_ref ||
5764 fsig->params [1]->type == MONO_TYPE_I8 ||
5765 fsig->params [1]->type == MONO_TYPE_R8 ||
5766 fsig->params [1]->type == MONO_TYPE_I) {
5767 opcode = OP_ATOMIC_CAS_I8;
5768 f2i_opcode = OP_MOVE_F_TO_I8;
5769 i2f_opcode = OP_MOVE_I8_TO_F;
5771 #else
5772 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5773 opcode = OP_ATOMIC_CAS_I4;
5774 cfg->has_atomic_cas_i4 = TRUE;
5776 #endif
5777 else
5778 return NULL;
5780 if (!mono_arch_opcode_supported (opcode))
5781 return NULL;
5783 if (is_float) {
5784 /* TODO: Decompose these opcodes instead of bailing here. */
5785 if (COMPILE_SOFT_FLOAT (cfg))
5786 return NULL;
5788 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5789 f2i_new->dreg = mono_alloc_ireg (cfg);
5790 f2i_new->sreg1 = args [1]->dreg;
5791 if (f2i_opcode == OP_MOVE_F_TO_I4)
5792 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5793 MONO_ADD_INS (cfg->cbb, f2i_new);
5795 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5796 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5797 f2i_cmp->sreg1 = args [2]->dreg;
5798 if (f2i_opcode == OP_MOVE_F_TO_I4)
5799 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5800 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5803 MONO_INST_NEW (cfg, ins, opcode);
5804 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5805 ins->sreg1 = args [0]->dreg;
5806 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5807 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5808 MONO_ADD_INS (cfg->cbb, ins);
5810 switch (fsig->params [1]->type) {
5811 case MONO_TYPE_I4:
5812 ins->type = STACK_I4;
5813 break;
5814 case MONO_TYPE_I8:
5815 ins->type = STACK_I8;
5816 break;
5817 case MONO_TYPE_I:
5818 #if SIZEOF_REGISTER == 8
5819 ins->type = STACK_I8;
5820 #else
5821 ins->type = STACK_I4;
5822 #endif
5823 break;
5824 case MONO_TYPE_R4:
5825 ins->type = cfg->r4_stack_type;
5826 break;
5827 case MONO_TYPE_R8:
5828 ins->type = STACK_R8;
5829 break;
5830 default:
5831 g_assert (mini_type_is_reference (fsig->params [1]));
5832 ins->type = STACK_OBJ;
5833 break;
5836 if (is_float) {
5837 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5838 i2f->dreg = mono_alloc_freg (cfg);
5839 i2f->sreg1 = ins->dreg;
5840 i2f->type = STACK_R8;
5841 if (i2f_opcode == OP_MOVE_I4_TO_F)
5842 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5843 MONO_ADD_INS (cfg->cbb, i2f);
5845 ins = i2f;
5848 if (cfg->gen_write_barriers && is_ref)
5849 emit_write_barrier (cfg, args [0], args [1]);
5851 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5852 fsig->params [1]->type == MONO_TYPE_I4) {
5853 MonoInst *cmp, *ceq;
5855 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5856 return NULL;
5858 /* int32 r = CAS (location, value, comparand); */
5859 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5860 ins->dreg = alloc_ireg (cfg);
5861 ins->sreg1 = args [0]->dreg;
5862 ins->sreg2 = args [1]->dreg;
5863 ins->sreg3 = args [2]->dreg;
5864 ins->type = STACK_I4;
5865 MONO_ADD_INS (cfg->cbb, ins);
5867 /* bool result = r == comparand; */
5868 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5869 cmp->sreg1 = ins->dreg;
5870 cmp->sreg2 = args [2]->dreg;
5871 cmp->type = STACK_I4;
5872 MONO_ADD_INS (cfg->cbb, cmp);
5874 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5875 ceq->dreg = alloc_ireg (cfg);
5876 ceq->type = STACK_I4;
5877 MONO_ADD_INS (cfg->cbb, ceq);
5879 /* *success = result; */
5880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5882 cfg->has_atomic_cas_i4 = TRUE;
5884 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5885 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5887 if (ins)
5888 return ins;
5889 } else if (cmethod->klass->image == mono_defaults.corlib &&
5890 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5891 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5892 ins = NULL;
5894 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5895 guint32 opcode = 0;
5896 MonoType *t = fsig->params [0];
5897 gboolean is_ref;
5898 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5900 g_assert (t->byref);
5901 /* t is a byref type, so the reference check is more complicated */
5902 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5903 if (t->type == MONO_TYPE_I1)
5904 opcode = OP_ATOMIC_LOAD_I1;
5905 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5906 opcode = OP_ATOMIC_LOAD_U1;
5907 else if (t->type == MONO_TYPE_I2)
5908 opcode = OP_ATOMIC_LOAD_I2;
5909 else if (t->type == MONO_TYPE_U2)
5910 opcode = OP_ATOMIC_LOAD_U2;
5911 else if (t->type == MONO_TYPE_I4)
5912 opcode = OP_ATOMIC_LOAD_I4;
5913 else if (t->type == MONO_TYPE_U4)
5914 opcode = OP_ATOMIC_LOAD_U4;
5915 else if (t->type == MONO_TYPE_R4)
5916 opcode = OP_ATOMIC_LOAD_R4;
5917 else if (t->type == MONO_TYPE_R8)
5918 opcode = OP_ATOMIC_LOAD_R8;
5919 #if SIZEOF_REGISTER == 8
5920 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5921 opcode = OP_ATOMIC_LOAD_I8;
5922 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5923 opcode = OP_ATOMIC_LOAD_U8;
5924 #else
5925 else if (t->type == MONO_TYPE_I)
5926 opcode = OP_ATOMIC_LOAD_I4;
5927 else if (is_ref || t->type == MONO_TYPE_U)
5928 opcode = OP_ATOMIC_LOAD_U4;
5929 #endif
5931 if (opcode) {
5932 if (!mono_arch_opcode_supported (opcode))
5933 return NULL;
5935 MONO_INST_NEW (cfg, ins, opcode);
5936 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5937 ins->sreg1 = args [0]->dreg;
5938 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5939 MONO_ADD_INS (cfg->cbb, ins);
5941 switch (t->type) {
5942 case MONO_TYPE_BOOLEAN:
5943 case MONO_TYPE_I1:
5944 case MONO_TYPE_U1:
5945 case MONO_TYPE_I2:
5946 case MONO_TYPE_U2:
5947 case MONO_TYPE_I4:
5948 case MONO_TYPE_U4:
5949 ins->type = STACK_I4;
5950 break;
5951 case MONO_TYPE_I8:
5952 case MONO_TYPE_U8:
5953 ins->type = STACK_I8;
5954 break;
5955 case MONO_TYPE_I:
5956 case MONO_TYPE_U:
5957 #if SIZEOF_REGISTER == 8
5958 ins->type = STACK_I8;
5959 #else
5960 ins->type = STACK_I4;
5961 #endif
5962 break;
5963 case MONO_TYPE_R4:
5964 ins->type = cfg->r4_stack_type;
5965 break;
5966 case MONO_TYPE_R8:
5967 ins->type = STACK_R8;
5968 break;
5969 default:
5970 g_assert (is_ref);
5971 ins->type = STACK_OBJ;
5972 break;
5977 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5978 guint32 opcode = 0;
5979 MonoType *t = fsig->params [0];
5980 gboolean is_ref;
5982 g_assert (t->byref);
5983 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5984 if (t->type == MONO_TYPE_I1)
5985 opcode = OP_ATOMIC_STORE_I1;
5986 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5987 opcode = OP_ATOMIC_STORE_U1;
5988 else if (t->type == MONO_TYPE_I2)
5989 opcode = OP_ATOMIC_STORE_I2;
5990 else if (t->type == MONO_TYPE_U2)
5991 opcode = OP_ATOMIC_STORE_U2;
5992 else if (t->type == MONO_TYPE_I4)
5993 opcode = OP_ATOMIC_STORE_I4;
5994 else if (t->type == MONO_TYPE_U4)
5995 opcode = OP_ATOMIC_STORE_U4;
5996 else if (t->type == MONO_TYPE_R4)
5997 opcode = OP_ATOMIC_STORE_R4;
5998 else if (t->type == MONO_TYPE_R8)
5999 opcode = OP_ATOMIC_STORE_R8;
6000 #if SIZEOF_REGISTER == 8
6001 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6002 opcode = OP_ATOMIC_STORE_I8;
6003 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6004 opcode = OP_ATOMIC_STORE_U8;
6005 #else
6006 else if (t->type == MONO_TYPE_I)
6007 opcode = OP_ATOMIC_STORE_I4;
6008 else if (is_ref || t->type == MONO_TYPE_U)
6009 opcode = OP_ATOMIC_STORE_U4;
6010 #endif
6012 if (opcode) {
6013 if (!mono_arch_opcode_supported (opcode))
6014 return NULL;
6016 MONO_INST_NEW (cfg, ins, opcode);
6017 ins->dreg = args [0]->dreg;
6018 ins->sreg1 = args [1]->dreg;
6019 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6020 MONO_ADD_INS (cfg->cbb, ins);
6022 if (cfg->gen_write_barriers && is_ref)
6023 emit_write_barrier (cfg, args [0], args [1]);
6027 if (ins)
6028 return ins;
6029 } else if (cmethod->klass->image == mono_defaults.corlib &&
6030 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6031 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6032 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6033 if (should_insert_brekpoint (cfg->method)) {
6034 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6035 } else {
6036 MONO_INST_NEW (cfg, ins, OP_NOP);
6037 MONO_ADD_INS (cfg->cbb, ins);
6039 return ins;
6041 } else if (cmethod->klass->image == mono_defaults.corlib &&
6042 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6043 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6044 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6045 #ifdef TARGET_WIN32
6046 EMIT_NEW_ICONST (cfg, ins, 1);
6047 #else
6048 EMIT_NEW_ICONST (cfg, ins, 0);
6049 #endif
6051 } else if (cmethod->klass->image == mono_defaults.corlib &&
6052 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6053 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6054 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6055 /* No stack walks are currently available, so implement this as an intrinsic */
6056 MonoInst *assembly_ins;
6058 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6059 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6060 return ins;
6062 } else if (cmethod->klass->image == mono_defaults.corlib &&
6063 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6064 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6065 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6066 /* No stack walks are currently available, so implement this as an intrinsic */
6067 MonoInst *method_ins;
6068 MonoMethod *declaring = cfg->method;
6070 /* This returns the declaring generic method */
6071 if (declaring->is_inflated)
6072 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6073 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6074 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6075 cfg->no_inline = TRUE;
6076 if (cfg->method != cfg->current_method)
6077 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6078 return ins;
6080 } else if (cmethod->klass == mono_defaults.math_class) {
6082 * There is general branchless code for Min/Max, but it does not work for
6083 * all inputs:
6084 * http://everything2.com/?node_id=1051618
6086 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6087 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6088 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6089 ins->dreg = alloc_preg (cfg);
6090 ins->type = STACK_I4;
6091 MONO_ADD_INS (cfg->cbb, ins);
6092 return ins;
6093 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6094 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6095 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6096 !strcmp (cmethod->klass->name, "Selector")) ||
6097 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6098 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6099 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6100 !strcmp (cmethod->klass->name, "Selector"))
6102 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6103 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6104 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6105 cfg->compile_aot) {
6106 MonoInst *pi;
6107 MonoJumpInfoToken *ji;
6108 char *s;
6110 if (args [0]->opcode == OP_GOT_ENTRY) {
6111 pi = (MonoInst *)args [0]->inst_p1;
6112 g_assert (pi->opcode == OP_PATCH_INFO);
6113 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6114 ji = (MonoJumpInfoToken *)pi->inst_p0;
6115 } else {
6116 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6117 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6120 NULLIFY_INS (args [0]);
6122 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6123 return_val_if_nok (&cfg->error, NULL);
6125 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6126 ins->dreg = mono_alloc_ireg (cfg);
6127 // FIXME: Leaks
6128 ins->inst_p0 = s;
6129 MONO_ADD_INS (cfg->cbb, ins);
6130 return ins;
6134 #ifdef MONO_ARCH_SIMD_INTRINSICS
6135 if (cfg->opt & MONO_OPT_SIMD) {
6136 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6137 if (ins)
6138 return ins;
6140 #endif
6142 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6143 if (ins)
6144 return ins;
6146 if (COMPILE_LLVM (cfg)) {
6147 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6148 if (ins)
6149 return ins;
6152 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6156 * This entry point could be used later for arbitrary method
6157 * redirection.
/*
 * mini_redirect_call:
 *
 *   Give the JIT a chance to redirect a call to METHOD to a different
 * implementation.  Returns the emitted call instruction, or NULL when no
 * redirection applies.  Currently only String.InternalAllocateStr is
 * redirected, to the GC's managed allocator, and only when allocation
 * profiling and shared (generic) code are both off.
 */
6159 inline static MonoInst*
6160 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6161 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6163 if (method->klass == mono_defaults.string_class) {
6164 /* managed string allocation support */
6165 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6166 MonoInst *iargs [2];
6167 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6168 MonoMethod *managed_alloc = NULL;
6170 g_assert (vtable); /* Should not fail since it is System.String */
6171 #ifndef MONO_CROSS_COMPILE
6172 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6173 #endif
/* No managed allocator available for this config: leave the original call alone */
6174 if (!managed_alloc)
6175 return NULL;
/* Call the managed allocator as alloc (vtable, length) */
6176 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6177 iargs [1] = args [0];
6178 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
6181 return NULL;
/*
 * mono_save_args:
 *
 *   When inlining, create an OP_LOCAL variable for each argument (including
 * the implicit 'this') of the callee, record it in cfg->args [i], and emit a
 * store of the corresponding evaluation-stack value from SP into it.
 */
6184 static void
6185 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6187 MonoInst *store, *temp;
6188 int i;
6190 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params, so derive its type from the stack value */
6191 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6194 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6195 * would be different than the MonoInst's used to represent arguments, and
6196 * the ldelema implementation can't deal with that.
6197 * Solution: When ldelema is used on an inline argument, create a var for
6198 * it, emit ldelema on that var, and emit the saving code below in
6199 * inline_method () if needed.
6201 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6202 cfg->args [i] = temp;
6203 /* This uses cfg->args [i] which is set by the preceding line */
6204 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6205 store->cil_code = sp [0]->cil_code;
6206 sp++;
/* Compile-time switches for the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT
 * environment variables below, which restrict inlining to methods whose full
 * name starts with a given prefix (a debugging aid). */
6210 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6211 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6213 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return TRUE if CALLED_METHOD may be inlined according to the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var, interpreted as a prefix of
 * the method's full name.  The variable is read once and cached; an
 * empty/unset value means no restriction.
 */
6214 static gboolean
6215 check_inline_called_method_name_limit (MonoMethod *called_method)
6217 int strncmp_result;
6218 static const char *limit = NULL;
6220 if (limit == NULL) {
6221 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6223 if (limit_string != NULL)
6224 limit = limit_string;
6225 else
6226 limit = "";
6229 if (limit [0] != '\0') {
6230 char *called_method_name = mono_method_full_name (called_method, TRUE);
6232 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6233 g_free (called_method_name);
6235 //return (strncmp_result <= 0);
6236 return (strncmp_result == 0);
6237 } else {
6238 return TRUE;
6241 #endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Return TRUE if inlining into CALLER_METHOD is permitted by the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var, interpreted as a prefix of
 * the caller's full name.  The variable is read once and cached; an
 * empty/unset value means no restriction.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static const char *limit = NULL;
	char *full_name;
	int res;

	/* Look the env var up only on the first call */
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : "";
	}

	/* An empty limit allows every caller */
	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	res = strncmp (full_name, limit, strlen (limit));
	g_free (full_name);

	return res == 0;
}
#endif
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes DREG to the zero value of type RTYPE: NULL for
 * byrefs and reference/pointer types, 0 for integer types, 0.0 for R4/R8,
 * and a VZERO for value types (including generic instances and type vars
 * known to be valuetypes).
 */
6273 static void
6274 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6276 static double r8_0 = 0.0;
6277 static float r4_0 = 0.0;
6278 MonoInst *ins;
6279 int t;
6281 rtype = mini_get_underlying_type (rtype);
6282 t = rtype->type;
6284 if (rtype->byref) {
6285 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6286 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6287 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6288 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6289 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With cfg->r4fp, R4 values get a true single-precision constant instead of an R8 one */
6290 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6291 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6292 ins->type = STACK_R4;
6293 ins->inst_p0 = (void*)&r4_0;
6294 ins->dreg = dreg;
6295 MONO_ADD_INS (cfg->cbb, ins);
6296 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6297 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6298 ins->type = STACK_R8;
6299 ins->inst_p0 = (void*)&r8_0;
6300 ins->dreg = dreg;
6301 MONO_ADD_INS (cfg->cbb, ins);
6302 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6303 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6304 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6305 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6306 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6307 } else {
/* Everything else (object references, pointers) is zeroed as a pointer-sized NULL */
6308 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* initializations, which keep
 * the IR well-formed without the cost of a real initialization.  Falls back
 * to emit_init_rvar () for the remaining (reference/pointer) cases.
 */
6312 static void
6313 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6315 int t;
6317 rtype = mini_get_underlying_type (rtype);
6318 t = rtype->type;
6320 if (rtype->byref) {
6321 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6322 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6323 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6324 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6325 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6326 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6327 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6328 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6329 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6330 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6331 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6332 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6333 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6334 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6335 } else {
6336 emit_init_rvar (cfg, dreg, rtype);
/* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE: a real zero-init when INIT
 * is TRUE, otherwise a cheap dummy init that only keeps the IR valid.
 */
6341 static void
6342 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6344 MonoInst *var = cfg->locals [local];
6345 if (COMPILE_SOFT_FLOAT (cfg)) {
/* Soft-float: init into a fresh register, then store that value into the local */
6346 MonoInst *store;
6347 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6348 emit_init_rvar (cfg, reg, type);
6349 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6350 } else {
6351 if (init)
6352 emit_init_rvar (cfg, var->dreg, type);
6353 else
6354 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * mini_inline_method:
 *
 *   Public wrapper over inline_method () below.  Returns the inline cost,
 * or 0 when CMETHOD was not inlined.
 */
6359 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6361 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6365 * inline_method:
6367 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Emit the IR of CMETHOD directly into the current method in place of a
 * call, with SP pointing at the call's arguments on the evaluation stack.
 * Returns the positive inline cost on success, 0 when inlining was not
 * performed or was aborted.
 */
6369 static int
6370 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6371 guchar *ip, guint real_offset, gboolean inline_always)
6373 MonoError error;
6374 MonoInst *ins, *rvar = NULL;
6375 MonoMethodHeader *cheader;
6376 MonoBasicBlock *ebblock, *sbblock;
6377 int i, costs;
6378 MonoMethod *prev_inlined_method;
6379 MonoInst **prev_locals, **prev_args;
6380 MonoType **prev_arg_types;
6381 guint prev_real_offset;
6382 GHashTable *prev_cbb_hash;
6383 MonoBasicBlock **prev_cil_offset_to_bb;
6384 MonoBasicBlock *prev_cbb;
6385 const unsigned char *prev_ip;
6386 unsigned char *prev_cil_start;
6387 guint32 prev_cil_offset_to_bb_len;
6388 MonoMethod *prev_current_method;
6389 MonoGenericContext *prev_generic_context;
6390 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6392 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based debugging filters; inline_always bypasses them */
6394 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6395 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6396 return 0;
6397 #endif
6398 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6399 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6400 return 0;
6401 #endif
6403 if (!fsig)
6404 fsig = mono_method_signature (cmethod);
6406 if (cfg->verbose_level > 2)
6407 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6409 if (!cmethod->inline_info) {
6410 cfg->stat_inlineable_methods++;
6411 cmethod->inline_info = 1;
6414 /* allocate local variables */
6415 cheader = mono_method_get_header_checked (cmethod, &error);
6416 if (!cheader) {
6417 if (inline_always) {
/* Propagate the header load failure as a compile error */
6418 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6419 mono_error_move (&cfg->error, &error);
6420 } else {
6421 mono_error_cleanup (&error);
6423 return 0;
6426 /*Must verify before creating locals as it can cause the JIT to assert.*/
6427 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6428 mono_metadata_free_mh (cheader);
6429 return 0;
6432 /* allocate space to store the return value */
6433 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6434 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6437 prev_locals = cfg->locals;
6438 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6439 for (i = 0; i < cheader->num_locals; ++i)
6440 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6442 /* allocate start and end blocks */
6443 /* This is needed so if the inline is aborted, we can clean up */
6444 NEW_BBLOCK (cfg, sbblock);
6445 sbblock->real_offset = real_offset;
6447 NEW_BBLOCK (cfg, ebblock);
6448 ebblock->block_num = cfg->num_bblocks++;
6449 ebblock->real_offset = real_offset;
/* Save the parts of the compile state that mono_method_to_ir () overwrites */
6451 prev_args = cfg->args;
6452 prev_arg_types = cfg->arg_types;
6453 prev_inlined_method = cfg->inlined_method;
6454 cfg->inlined_method = cmethod;
6455 cfg->ret_var_set = FALSE;
6456 cfg->inline_depth ++;
6457 prev_real_offset = cfg->real_offset;
6458 prev_cbb_hash = cfg->cbb_hash;
6459 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6460 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6461 prev_cil_start = cfg->cil_start;
6462 prev_ip = cfg->ip;
6463 prev_cbb = cfg->cbb;
6464 prev_current_method = cfg->current_method;
6465 prev_generic_context = cfg->generic_context;
6466 prev_ret_var_set = cfg->ret_var_set;
6467 prev_disable_inline = cfg->disable_inline;
6469 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6470 virtual_ = TRUE;
/* Convert the callee's IL; negative cost means the inline was aborted */
6472 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6474 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state */
6476 cfg->inlined_method = prev_inlined_method;
6477 cfg->real_offset = prev_real_offset;
6478 cfg->cbb_hash = prev_cbb_hash;
6479 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6480 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6481 cfg->cil_start = prev_cil_start;
6482 cfg->ip = prev_ip;
6483 cfg->locals = prev_locals;
6484 cfg->args = prev_args;
6485 cfg->arg_types = prev_arg_types;
6486 cfg->current_method = prev_current_method;
6487 cfg->generic_context = prev_generic_context;
6488 cfg->ret_var_set = prev_ret_var_set;
6489 cfg->disable_inline = prev_disable_inline;
6490 cfg->inline_depth --;
/* 60 is the cost budget; inline_always and AggressiveInlining override it */
6492 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6493 if (cfg->verbose_level > 2)
6494 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6496 cfg->stat_inlined_methods++;
6498 /* always add some code to avoid block split failures */
6499 MONO_INST_NEW (cfg, ins, OP_NOP);
6500 MONO_ADD_INS (prev_cbb, ins);
6502 prev_cbb->next_bb = sbblock;
6503 link_bblock (cfg, prev_cbb, sbblock);
6506 * Get rid of the begin and end bblocks if possible to aid local
6507 * optimizations.
6509 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6511 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6512 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6514 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6515 MonoBasicBlock *prev = ebblock->in_bb [0];
6517 if (prev->next_bb == ebblock) {
6518 mono_merge_basic_blocks (cfg, prev, ebblock);
6519 cfg->cbb = prev;
6520 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6521 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6522 cfg->cbb = prev_cbb;
6524 } else {
6525 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6526 cfg->cbb = ebblock;
6528 } else {
6530 * It's possible that the rvar is set in some prev bblock, but not in others.
6531 * (#1835).
6533 if (rvar) {
6534 MonoBasicBlock *bb;
6536 for (i = 0; i < ebblock->in_count; ++i) {
6537 bb = ebblock->in_bb [i];
6539 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6540 cfg->cbb = bb;
6542 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6547 cfg->cbb = ebblock;
6550 if (rvar) {
6552 * If the inlined method contains only a throw, then the ret var is not
6553 * set, so set it to a dummy value.
6555 if (!ret_var_set)
6556 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the callee's return value onto the caller's evaluation stack */
6558 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6559 *sp++ = ins;
6561 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6562 return costs + 1;
6563 } else {
6564 if (cfg->verbose_level > 2)
6565 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6566 cfg->exception_type = MONO_EXCEPTION_NONE;
6568 /* This gets rid of the newly added bblocks */
6569 cfg->cbb = prev_cbb;
6571 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6572 return 0;
6576 * Some of these comments may well be out-of-date.
6577 * Design decisions: we do a single pass over the IL code (and we do bblock
6578 * splitting/merging in the few cases when it's required: a back jump to an IL
6579 * address that was not already seen as bblock starting point).
6580 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6581 * Complex operations are decomposed in simpler ones right away. We need to let the
6582 * arch-specific code peek and poke inside this process somehow (except when the
6583 * optimizations can take advantage of the full semantic info of coarse opcodes).
6584 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6585 * MonoInst->opcode initially is the IL opcode or some simplification of that
6586 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6587 * opcode with value bigger than OP_LAST.
6588 * At this point the IR can be handed over to an interpreter, a dumb code generator
6589 * or to the optimizing code generator that will translate it to SSA form.
6591 * Profiling directed optimizations.
6592 * We may compile by default with few or no optimizations and instrument the code
6593 * or the user may indicate what methods to optimize the most either in a config file
6594 * or through repeated runs where the compiler applies offline the optimizations to
6595 * each method and then decides if it was worth it.
/* IL sanity checks used while converting a method: each expands to
 * UNVERIFIED (or TYPE_LOAD_ERROR) to abort compilation of malformed IL. */
6598 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6599 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6600 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6601 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6602 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6603 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6604 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6605 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6607 /* offset from br.s -> br like opcodes */
6608 #define BIG_BRANCH_OFFSET 13
6610 static gboolean
6611 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6613 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6615 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL in [START, END) and create (via GET_BBLOCK) a basic block at
 * every branch/switch target and at the instruction following each branch.
 * Blocks containing a 'throw' are marked out_of_line.  Returns 0 on success,
 * 1 on malformed IL, in which case *POS points at the offending ip.
 */
6618 static int
6619 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6621 unsigned char *ip = start;
6622 unsigned char *target;
6623 int i;
6624 guint cli_addr;
6625 MonoBasicBlock *bblock;
6626 const MonoOpcode *opcode;
6628 while (ip < end) {
6629 cli_addr = ip - start;
6630 i = mono_opcode_value ((const guint8 **)&ip, end);
6631 if (i < 0)
6632 UNVERIFIED;
6633 opcode = &mono_opcodes [i];
/* Advance ip past the operand; only branch-like operands create bblocks */
6634 switch (opcode->argument) {
6635 case MonoInlineNone:
6636 ip++;
6637 break;
6638 case MonoInlineString:
6639 case MonoInlineType:
6640 case MonoInlineField:
6641 case MonoInlineMethod:
6642 case MonoInlineTok:
6643 case MonoInlineSig:
6644 case MonoShortInlineR:
6645 case MonoInlineI:
6646 ip += 5;
6647 break;
6648 case MonoInlineVar:
6649 ip += 3;
6650 break;
6651 case MonoShortInlineVar:
6652 case MonoShortInlineI:
6653 ip += 2;
6654 break;
/* Branches: both the target and the fall-through start a new bblock */
6655 case MonoShortInlineBrTarget:
6656 target = start + cli_addr + 2 + (signed char)ip [1];
6657 GET_BBLOCK (cfg, bblock, target);
6658 ip += 2;
6659 if (ip < end)
6660 GET_BBLOCK (cfg, bblock, ip);
6661 break;
6662 case MonoInlineBrTarget:
6663 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6664 GET_BBLOCK (cfg, bblock, target);
6665 ip += 5;
6666 if (ip < end)
6667 GET_BBLOCK (cfg, bblock, ip);
6668 break;
6669 case MonoInlineSwitch: {
6670 guint32 n = read32 (ip + 1);
6671 guint32 j;
6672 ip += 5;
/* Switch targets are relative to the end of the whole switch instruction */
6673 cli_addr += 5 + 4 * n;
6674 target = start + cli_addr;
6675 GET_BBLOCK (cfg, bblock, target);
6677 for (j = 0; j < n; ++j) {
6678 target = start + cli_addr + (gint32)read32 (ip);
6679 GET_BBLOCK (cfg, bblock, target);
6680 ip += 4;
6682 break;
6684 case MonoInlineR:
6685 case MonoInlineI8:
6686 ip += 9;
6687 break;
6688 default:
6689 g_assert_not_reached ();
6692 if (i == CEE_THROW) {
6693 unsigned char *bb_start = ip - 1;
6695 /* Find the start of the bblock containing the throw */
6696 bblock = NULL;
6697 while ((bb_start >= start) && !bblock) {
6698 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6699 bb_start --;
6701 if (bblock)
6702 bblock->out_of_line = 1;
6705 return 0;
/* Reached through the UNVERIFIED/exception macros on malformed IL */
6706 unverified:
6707 exception_exit:
6708 *pos = ip;
6709 return 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the scope of M, inflating with CONTEXT
 * if given.  For wrapper methods the method comes from the wrapper data
 * instead of metadata.  Unlike mini_get_method (), methods on open
 * constructed types are allowed through.  On failure returns NULL with
 * *ERROR set.
 */
6712 static inline MonoMethod *
6713 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6715 MonoMethod *method;
6717 mono_error_init (error);
6719 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6720 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6721 if (context) {
6722 method = mono_class_inflate_generic_method_checked (method, context, error);
6724 } else {
6725 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
6728 return method;
/*
 * mini_get_method:
 *
 *   Resolve TOKEN like mini_get_method_allow_open (), but reject methods on
 * open constructed types when not compiling gshared code (setting a
 * bad-image error on CFG).  When CFG is non-NULL, errors are recorded in
 * cfg->error; otherwise they are swallowed (FIXME below).
 */
6731 static inline MonoMethod *
6732 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6734 MonoError error;
6735 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6737 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6738 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6739 method = NULL;
6742 if (!method && !cfg)
6743 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6745 return method;
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the scope of METHOD, inflating with
 * CONTEXT if given; wrappers resolve through their wrapper data.  The class
 * is initialized before being returned.  Returns NULL on failure (errors are
 * currently swallowed, see the FIXMEs).
 */
6748 static inline MonoClass*
6749 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6751 MonoError error;
6752 MonoClass *klass;
6754 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6755 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6756 if (context) {
6757 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6758 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6760 } else {
6761 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6762 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6764 if (klass)
6765 mono_class_init (klass);
6766 return klass;
6769 static inline MonoMethodSignature*
6770 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6772 MonoMethodSignature *fsig;
6774 mono_error_init (error);
6775 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6776 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6777 } else {
6778 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6779 return_val_if_nok (error, NULL);
6781 if (context) {
6782 fsig = mono_inflate_generic_signature(fsig, context, error);
6784 return fsig;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching) the managed
 * SecurityManager.ThrowException (1 arg) method used to raise CoreCLR
 * security exceptions.
 * NOTE(review): the static cache is written without synchronization —
 * presumably only reached while holding the JIT lock; confirm.
 */
6787 static MonoMethod*
6788 throw_exception (void)
6790 static MonoMethod *method = NULL;
6792 if (!method) {
6793 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6794 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
6796 g_assert (method);
6797 return method;
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX), raising the given
 * exception object at run time.
 */
6800 static void
6801 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6803 MonoMethod *thrower = throw_exception ();
6804 MonoInst *args [1];
6806 EMIT_NEW_PCONST (cfg, args [0], ex);
6807 mono_emit_method_call (cfg, thrower, args, NULL);
6811 * Return the original method is a wrapper is specified. We can only access
6812 * the custom attributes from the original method.
6814 static MonoMethod*
6815 get_original_method (MonoMethod *method)
6817 if (method->wrapper_type == MONO_WRAPPER_NONE)
6818 return method;
6820 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6821 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6822 return NULL;
6824 /* in other cases we need to find the original method */
6825 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not access FIELD, emit code that throws the returned exception.
 */
6828 static void
6829 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6831 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6832 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6833 if (ex)
6834 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not call CALLEE, emit code that throws the returned exception.
 */
6837 static void
6838 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6840 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6841 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6842 if (ex)
6843 emit_throw_exception (cfg, ex);
6847 * Check that the IL instructions at ip are the array initialization
6848 * sequence and return the pointer to the data and the size.
/*
 * On success, returns the raw initializer blob (or, for AOT, the field RVA
 * cast to a pointer — looked up for real at load time), sets *out_size to
 * the number of bytes to copy and *out_field_token to the fielddef token.
 * Returns NULL when the IL at ip does not match the pattern below.
 */
6850 static const char*
6851 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6854 * newarr[System.Int32]
6855 * dup
6856 * ldtoken field valuetype ...
6857 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] is the high byte of the ldtoken operand: 0x4 == FieldDef token table */
6859 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6860 MonoError error;
/* token = operand of the call, field_token = operand of the ldtoken */
6861 guint32 token = read32 (ip + 7);
6862 guint32 field_token = read32 (ip + 2);
6863 guint32 field_index = field_token & 0xffffff;
6864 guint32 rva;
6865 const char *data_ptr;
6866 int size = 0;
6867 MonoMethod *cmethod;
6868 MonoClass *dummy_class;
6869 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6870 int dummy_align;
6872 if (!field) {
6873 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6874 return NULL;
6877 *out_field_token = field_token;
/* The call target must really be corlib's RuntimeHelpers::InitializeArray () */
6879 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6880 if (!cmethod)
6881 return NULL;
6882 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6883 return NULL;
/* Element size of the array's element type; unsupported types bail out */
6884 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6885 case MONO_TYPE_BOOLEAN:
6886 case MONO_TYPE_I1:
6887 case MONO_TYPE_U1:
6888 size = 1; break;
6889 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6890 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6891 case MONO_TYPE_CHAR:
6892 case MONO_TYPE_I2:
6893 case MONO_TYPE_U2:
6894 size = 2; break;
6895 case MONO_TYPE_I4:
6896 case MONO_TYPE_U4:
6897 case MONO_TYPE_R4:
6898 size = 4; break;
6899 case MONO_TYPE_R8:
6900 case MONO_TYPE_I8:
6901 case MONO_TYPE_U8:
6902 size = 8; break;
6903 #endif
6904 default:
6905 return NULL;
/* Never read past the end of the initializer field's data */
6907 size *= len;
6908 if (size > mono_type_size (field->type, &dummy_align))
6909 return NULL;
6910 *out_size = size;
6911 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6912 if (!image_is_dynamic (method->klass->image)) {
/* Map the field's RVA to the raw data inside the image */
6913 field_index = read32 (ip + 2) & 0xffffff;
6914 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6915 data_ptr = mono_image_rva_map (method->klass->image, rva);
6916 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6917 /* for aot code we do the lookup on load */
6918 if (aot && data_ptr)
6919 return (const char *)GUINT_TO_POINTER (rva);
6920 } else {
6921 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6922 g_assert (!aot);
/* Dynamic (SRE) images keep the data in memory already */
6923 data_ptr = mono_field_get_data (field);
6925 return data_ptr;
6927 return NULL;
6930 static void
6931 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6933 MonoError error;
6934 char *method_fname = mono_method_full_name (method, TRUE);
6935 char *method_code;
6936 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6938 if (!header) {
6939 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6940 mono_error_cleanup (&error);
6941 } else if (header->code_size == 0)
6942 method_code = g_strdup ("method body is empty.");
6943 else
6944 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6945 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6946 g_free (method_fname);
6947 g_free (method_code);
6948 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit IR storing the value on top of the stack (*sp) into local N.
 * When the value is a constant emitted as the very last ins of the current
 * bblock, avoid the extra move by retargeting the constant's dreg.
 */
6951 static void
6952 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6954 MonoInst *ins;
6955 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
/* Only safe when sp [0] really is the last emitted ins, see below */
6956 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6957 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6958 /* Optimize reg-reg moves away */
6960 * Can't optimize other opcodes, since sp[0] might point to
6961 * the last ins of a decomposed opcode.
6963 sp [0]->dreg = (cfg)->locals [n]->dreg;
6964 } else {
6965 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6970 * ldloca inhibits many optimizations so try to get rid of it in common
6971 * cases.
/*
 * SIZE selects the ldloca encoding: 1 for the short form (1 byte local
 * index), otherwise the long form (2 byte index). If the ldloca is
 * immediately followed by an initobj in the same bblock, emit the local
 * initialization directly and return the ip just past the initobj;
 * return NULL when the pattern does not match or the type fails to load
 * (CHECK_TYPELOAD branches to exception_exit below).
 */
6973 static inline unsigned char *
6974 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6976 int local, token;
6977 MonoClass *klass;
6978 MonoType *type;
6980 if (size == 1) {
6981 local = ip [1];
6982 ip += 2;
6983 } else {
6984 local = read16 (ip + 2);
6985 ip += 4;
6988 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6989 /* From the INITOBJ case */
6990 token = read32 (ip + 2);
6991 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6992 CHECK_TYPELOAD (klass);
6993 type = mini_get_underlying_type (&klass->byval_arg);
/* Zero-initialize the local in place, making the ldloca unnecessary */
6994 emit_init_local (cfg, local, type, TRUE);
6995 return ip + 6;
6997 exception_exit:
6998 return NULL;
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD on sp [0] in llvm-only
 * mode, where vtable/IMT slots hold function descriptors (address + arg
 * pairs) instead of code addresses/trampolines. Returns the call ins.
 */
7001 static MonoInst*
7002 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7004 MonoInst *icall_args [16];
7005 MonoInst *call_target, *ins, *vtable_ins;
7006 int arg_reg, this_reg, vtable_reg;
7007 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7008 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7009 gboolean variant_iface = FALSE;
7010 guint32 slot;
7011 int offset;
/* Interfaces implemented lazily by arrays: their IMT entries may be
 * uninitialized, so they must take the path below which has a slowpath
 * fallback, not the plain interface fastpath. */
7012 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7015 * In llvm-only mode, vtables contain function descriptors instead of
7016 * method addresses/trampolines.
7018 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interface methods are dispatched through IMT slots, others by vtable index */
7020 if (is_iface)
7021 slot = mono_method_get_imt_slot (cmethod);
7022 else
7023 slot = mono_method_get_vtable_index (cmethod);
7025 this_reg = sp [0]->dreg;
7027 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7028 variant_iface = TRUE;
/* Case 1: plain virtual call through the vtable */
7030 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7032 * The simplest case, a normal virtual call.
7034 int slot_reg = alloc_preg (cfg);
7035 int addr_reg = alloc_preg (cfg);
7036 int arg_reg = alloc_preg (cfg);
7037 MonoBasicBlock *non_null_bb;
7039 vtable_reg = alloc_preg (cfg);
7040 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7041 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7043 /* Load the vtable slot, which contains a function descriptor. */
7044 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7046 NEW_BBLOCK (cfg, non_null_bb);
/* NULL slot means the method hasn't been compiled/initialized yet */
7048 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7049 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7050 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7052 /* Slow path */
7053 // FIXME: Make the wrapper use the preserveall cconv
7054 // FIXME: Use one icall per slot for small slot numbers ?
7055 icall_args [0] = vtable_ins;
7056 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7057 /* Make the icall return the vtable slot value to save some code space */
7058 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7059 ins->dreg = slot_reg;
7060 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7062 /* Fastpath */
7063 MONO_START_BB (cfg, non_null_bb);
7064 /* Load the address + arg from the vtable slot */
7065 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7068 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: simple (non-variant, non-lazy, non-generic) interface call */
7071 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7073 * A simple interface call
7075 * We make a call through an imt slot to obtain the function descriptor we need to call.
7076 * The imt slot contains a function descriptor for a runtime function + arg.
7078 int slot_reg = alloc_preg (cfg);
7079 int addr_reg = alloc_preg (cfg);
7080 int arg_reg = alloc_preg (cfg);
7081 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7083 vtable_reg = alloc_preg (cfg);
7084 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable */
7085 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7088 * The slot is already initialized when the vtable is created so there is no need
7089 * to check it here.
7092 /* Load the imt slot, which contains a function descriptor. */
7093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7095 /* Load the address + arg of the imt thunk from the imt slot */
7096 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7097 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7099 * IMT thunks in llvm-only mode are C functions which take an info argument
7100 * plus the imt method and return the ftndesc to call.
7102 icall_args [0] = thunk_arg_ins;
7103 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7104 cmethod, MONO_RGCTX_INFO_METHOD);
7105 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7107 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual / variant iface / lazy array iface: IMT thunk
 * with a runtime slowpath, since the thunk may not know this method yet */
7110 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7112 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7113 * dynamically extended as more instantiations are discovered.
7114 * This handles generic virtual methods both on classes and interfaces.
7116 int slot_reg = alloc_preg (cfg);
7117 int addr_reg = alloc_preg (cfg);
7118 int arg_reg = alloc_preg (cfg);
7119 int ftndesc_reg = alloc_preg (cfg);
7120 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7121 MonoBasicBlock *slowpath_bb, *end_bb;
7123 NEW_BBLOCK (cfg, slowpath_bb);
7124 NEW_BBLOCK (cfg, end_bb);
7126 vtable_reg = alloc_preg (cfg);
7127 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7128 if (is_iface)
7129 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7130 else
7131 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7133 /* Load the slot, which contains a function descriptor. */
7134 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7136 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7137 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7139 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7141 /* Fastpath */
7142 /* Same as with iface calls */
7143 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7144 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7145 icall_args [0] = thunk_arg_ins;
7146 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7147 cmethod, MONO_RGCTX_INFO_METHOD);
7148 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7149 ftndesc_ins->dreg = ftndesc_reg;
7151 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7152 * they don't know about yet. Fall back to the slowpath in that case.
7154 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7157 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7159 /* Slowpath */
7160 MONO_START_BB (cfg, slowpath_bb);
7161 icall_args [0] = vtable_ins;
7162 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7163 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7164 cmethod, MONO_RGCTX_INFO_METHOD);
7165 if (is_iface)
7166 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7167 else
7168 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7169 ftndesc_ins->dreg = ftndesc_reg;
7170 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7172 /* Common case */
7173 MONO_START_BB (cfg, end_bb);
7174 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 4: gsharedvt — resolve the target entirely through a runtime icall */
7178 * Non-optimized cases
7180 icall_args [0] = sp [0];
7181 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7183 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7184 cmethod, MONO_RGCTX_INFO_METHOD);
/* Pass the address of a var initialized to NULL; the resolver stores the
 * extra (ftndesc) argument there, read back via arg_reg below */
7186 arg_reg = alloc_preg (cfg);
7187 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7188 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7190 g_assert (is_gsharedvt);
7191 if (is_iface)
7192 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7193 else
7194 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7197 * Pass the extra argument even if the callee doesn't receive it, most
7198 * calling conventions allow this.
7200 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7203 static gboolean
7204 is_exception_class (MonoClass *klass)
7206 while (klass) {
7207 if (klass == mono_defaults.exception_class)
7208 return TRUE;
7209 klass = klass->parent;
7211 return FALSE;
7215 * is_jit_optimizer_disabled:
7217 * Determine whenever M's assembly has a DebuggableAttribute with the
7218 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached on the MonoAssembly; the memory barriers below order
 * the cached value before the 'inited' flag for lock-free readers.
 */
7220 static gboolean
7221 is_jit_optimizer_disabled (MonoMethod *m)
7223 MonoError error;
7224 MonoAssembly *ass = m->klass->image->assembly;
7225 MonoCustomAttrInfo* attrs;
7226 MonoClass *klass;
7227 int i;
7228 gboolean val = FALSE;
7230 g_assert (ass);
/* Fast path: already computed for this assembly */
7231 if (ass->jit_optimizer_disabled_inited)
7232 return ass->jit_optimizer_disabled;
7234 klass = mono_class_try_get_debuggable_attribute_class ();
7236 if (!klass) {
7237 /* Linked away */
7238 ass->jit_optimizer_disabled = FALSE;
7239 mono_memory_barrier ();
7240 ass->jit_optimizer_disabled_inited = TRUE;
7241 return FALSE;
7244 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7245 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7246 if (attrs) {
7247 for (i = 0; i < attrs->num_attrs; ++i) {
7248 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7249 const gchar *p;
7250 MonoMethodSignature *sig;
7252 if (!attr->ctor || attr->ctor->klass != klass)
7253 continue;
7254 /* Decode the attribute. See reflection.c */
7255 p = (const char*)attr->data;
/* Custom attribute blobs start with the 2 byte prolog 0x0001 */
7256 g_assert (read16 (p) == 0x0001);
7257 p += 2;
7259 // FIXME: Support named parameters
/* Only handle the DebuggableAttribute (bool, bool) ctor */
7260 sig = mono_method_signature (attr->ctor);
7261 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7262 continue;
7263 /* Two boolean arguments */
7264 p ++;
/* Read the second bool, which is IsJITOptimizerDisabled */
7265 val = *p;
7267 mono_custom_attrs_free (attrs);
/* Publish the value before the inited flag */
7270 ass->jit_optimizer_disabled = val;
7271 mono_memory_barrier ();
7272 ass->jit_optimizer_disabled_inited = TRUE;
7274 return val;
7277 static gboolean
7278 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7280 gboolean supported_tail_call;
7281 int i;
7283 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7285 for (i = 0; i < fsig->param_count; ++i) {
7286 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7287 /* These can point to the current method's stack */
7288 supported_tail_call = FALSE;
7290 if (fsig->hasthis && cmethod->klass->valuetype)
7291 /* this might point to the current method's stack */
7292 supported_tail_call = FALSE;
7293 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7294 supported_tail_call = FALSE;
7295 if (cfg->method->save_lmf)
7296 supported_tail_call = FALSE;
7297 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7298 supported_tail_call = FALSE;
7299 if (call_opcode != CEE_CALL)
7300 supported_tail_call = FALSE;
7302 /* Debugging support */
7303 #if 0
7304 if (supported_tail_call) {
7305 if (!mono_debug_count ())
7306 supported_tail_call = FALSE;
7308 #endif
7310 return supported_tail_call;
7314 * handle_ctor_call:
7316 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * SP points at the call arguments. On type load failure the
 * CHECK_TYPELOAD () macros record the failure in CFG and branch to the
 * exception_exit label at the bottom.
 */
7318 static void
7319 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7320 MonoInst **sp, guint8 *ip, int *inline_costs)
7322 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an extra rgctx/vtable argument */
7324 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7325 mono_method_is_generic_sharable (cmethod, TRUE)) {
7326 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* Ensure the vtable exists; its creation can fail */
7327 mono_class_vtable (cfg->domain, cmethod->klass);
7328 CHECK_TYPELOAD (cmethod->klass);
7330 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7331 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7332 } else {
7333 if (context_used) {
7334 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7335 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7336 } else {
7337 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7339 CHECK_TYPELOAD (cmethod->klass);
7340 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7345 /* Avoid virtual calls to ctors if possible */
7346 if (mono_class_is_marshalbyref (cmethod->klass))
7347 callvirt_this_arg = sp [0];
/* Prefer: intrinsic impl > inlining > gsharedvt indirect call >
 * rgctx indirect call > plain direct call */
7349 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7350 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7351 CHECK_CFG_EXCEPTION;
7352 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7353 mono_method_check_inlining (cfg, cmethod) &&
7354 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7355 int costs;
7357 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* 5 == size of the IL call instruction (opcode + token) being replaced */
7358 cfg->real_offset += 5;
7360 *inline_costs += costs - 5;
7361 } else {
7362 INLINE_FAILURE ("inline failure");
7363 // FIXME-VT: Clean this up
7364 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7365 GSHAREDVT_FAILURE(*ip);
7366 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7368 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7369 MonoInst *addr;
7371 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7373 if (cfg->llvm_only) {
7374 // FIXME: Avoid initializing vtable_arg
7375 emit_llvmonly_calli (cfg, fsig, sp, addr);
7376 } else {
7377 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7379 } else if (context_used &&
7380 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7381 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7382 MonoInst *cmethod_addr;
7384 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7386 if (cfg->llvm_only) {
7387 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7388 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7389 emit_llvmonly_calli (cfg, fsig, sp, addr);
7390 } else {
7391 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7392 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7394 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7396 } else {
7397 INLINE_FAILURE ("ctor call");
7398 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7399 callvirt_this_arg, NULL, vtable_arg);
7401 exception_exit:
7402 return;
/*
 * emit_setret:
 *
 *   Emit IR storing VAL as the return value of cfg->method.
 */
7405 static void
7406 emit_setret (MonoCompile *cfg, MonoInst *val)
7408 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7409 MonoInst *ins;
/* CEE_STOBJ here means a valuetype returned by copy rather than in a reg */
7411 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7412 MonoInst *ret_addr;
7414 if (!cfg->vret_addr) {
/* No hidden return-address argument: store into the return variable */
7415 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7416 } else {
/* Store through the hidden valuetype-return address argument */
7417 EMIT_NEW_RETLOADA (cfg, ret_addr);
7419 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7420 ins->klass = mono_class_from_mono_type (ret_type);
7422 } else {
7423 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets convert r4 values through an icall before the
 * arch-specific setret — see MONO_ARCH_SOFT_FLOAT_FALLBACK handling */
7424 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7425 MonoInst *iargs [1];
7426 MonoInst *conv;
7428 iargs [0] = val;
7429 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7430 mono_arch_emit_setret (cfg, cfg->method, conv);
7431 } else {
7432 mono_arch_emit_setret (cfg, cfg->method, val);
7434 #else
7435 mono_arch_emit_setret (cfg, cfg->method, val);
7436 #endif
7441 * mono_method_to_ir:
7443 * Translate the .net IL into linear IR.
7445 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7446 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7447 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7448 * @inline_args: if not NULL, contains the arguments to the inline call
7449 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7450 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7452 * This method is used to turn ECMA IL into Mono's internal Linear IR
7453 * reprensetation. It is used both for entire methods, as well as
7454 * inlining existing methods. In the former case, the @start_bblock,
7455 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7456 * inline_offset is set to zero.
7458 * Returns: the inline cost, or -1 if there was an error processing this method.
7461 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7462 MonoInst *return_var, MonoInst **inline_args,
7463 guint inline_offset, gboolean is_virtual_call)
7465 MonoError error;
7466 MonoInst *ins, **sp, **stack_start;
7467 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7468 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7469 MonoMethod *cmethod, *method_definition;
7470 MonoInst **arg_array;
7471 MonoMethodHeader *header;
7472 MonoImage *image;
7473 guint32 token, ins_flag;
7474 MonoClass *klass;
7475 MonoClass *constrained_class = NULL;
7476 unsigned char *ip, *end, *target, *err_pos;
7477 MonoMethodSignature *sig;
7478 MonoGenericContext *generic_context = NULL;
7479 MonoGenericContainer *generic_container = NULL;
7480 MonoType **param_types;
7481 int i, n, start_new_bblock, dreg;
7482 int num_calls = 0, inline_costs = 0;
7483 int breakpoint_id = 0;
7484 guint num_args;
7485 GSList *class_inits = NULL;
7486 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7487 int context_used;
7488 gboolean init_locals, seq_points, skip_dead_blocks;
7489 gboolean sym_seq_points = FALSE;
7490 MonoDebugMethodInfo *minfo;
7491 MonoBitSet *seq_point_locs = NULL;
7492 MonoBitSet *seq_point_set_locs = NULL;
7494 cfg->disable_inline = is_jit_optimizer_disabled (method);
7496 /* serialization and xdomain stuff may need access to private fields and methods */
7497 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7498 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7499 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7500 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7501 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7502 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7504 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7505 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7506 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7507 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7508 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7510 image = method->klass->image;
7511 header = mono_method_get_header_checked (method, &cfg->error);
7512 if (!header) {
7513 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7514 goto exception_exit;
7515 } else {
7516 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7519 generic_container = mono_method_get_generic_container (method);
7520 sig = mono_method_signature (method);
7521 num_args = sig->hasthis + sig->param_count;
7522 ip = (unsigned char*)header->code;
7523 cfg->cil_start = ip;
7524 end = ip + header->code_size;
7525 cfg->stat_cil_code_size += header->code_size;
7527 seq_points = cfg->gen_seq_points && cfg->method == method;
7529 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7530 /* We could hit a seq point before attaching to the JIT (#8338) */
7531 seq_points = FALSE;
7534 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7535 minfo = mono_debug_lookup_method (method);
7536 if (minfo) {
7537 MonoSymSeqPoint *sps;
7538 int i, n_il_offsets;
7540 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7541 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7542 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7543 sym_seq_points = TRUE;
7544 for (i = 0; i < n_il_offsets; ++i) {
7545 if (sps [i].il_offset < header->code_size)
7546 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7548 g_free (sps);
7549 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7550 /* Methods without line number info like auto-generated property accessors */
7551 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7552 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7553 sym_seq_points = TRUE;
7558 * Methods without init_locals set could cause asserts in various passes
7559 * (#497220). To work around this, we emit dummy initialization opcodes
7560 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7561 * on some platforms.
7563 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7564 init_locals = header->init_locals;
7565 else
7566 init_locals = TRUE;
7568 method_definition = method;
7569 while (method_definition->is_inflated) {
7570 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7571 method_definition = imethod->declaring;
7574 /* SkipVerification is not allowed if core-clr is enabled */
7575 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7576 dont_verify = TRUE;
7577 dont_verify_stloc = TRUE;
7580 if (sig->is_inflated)
7581 generic_context = mono_method_get_context (method);
7582 else if (generic_container)
7583 generic_context = &generic_container->context;
7584 cfg->generic_context = generic_context;
7586 if (!cfg->gshared)
7587 g_assert (!sig->has_type_parameters);
7589 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7590 g_assert (method->is_inflated);
7591 g_assert (mono_method_get_context (method)->method_inst);
7593 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7594 g_assert (sig->generic_param_count);
7596 if (cfg->method == method) {
7597 cfg->real_offset = 0;
7598 } else {
7599 cfg->real_offset = inline_offset;
7602 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7603 cfg->cil_offset_to_bb_len = header->code_size;
7605 cfg->current_method = method;
7607 if (cfg->verbose_level > 2)
7608 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7610 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7611 if (sig->hasthis)
7612 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7613 for (n = 0; n < sig->param_count; ++n)
7614 param_types [n + sig->hasthis] = sig->params [n];
7615 cfg->arg_types = param_types;
7617 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7618 if (cfg->method == method) {
7620 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7621 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7623 /* ENTRY BLOCK */
7624 NEW_BBLOCK (cfg, start_bblock);
7625 cfg->bb_entry = start_bblock;
7626 start_bblock->cil_code = NULL;
7627 start_bblock->cil_length = 0;
7629 /* EXIT BLOCK */
7630 NEW_BBLOCK (cfg, end_bblock);
7631 cfg->bb_exit = end_bblock;
7632 end_bblock->cil_code = NULL;
7633 end_bblock->cil_length = 0;
7634 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7635 g_assert (cfg->num_bblocks == 2);
7637 arg_array = cfg->args;
7639 if (header->num_clauses) {
7640 cfg->spvars = g_hash_table_new (NULL, NULL);
7641 cfg->exvars = g_hash_table_new (NULL, NULL);
7643 /* handle exception clauses */
7644 for (i = 0; i < header->num_clauses; ++i) {
7645 MonoBasicBlock *try_bb;
7646 MonoExceptionClause *clause = &header->clauses [i];
7647 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7649 try_bb->real_offset = clause->try_offset;
7650 try_bb->try_start = TRUE;
7651 try_bb->region = ((i + 1) << 8) | clause->flags;
7652 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
/*
 * NOTE(review): collapsed blob view of the interior of mono_method_to_ir ().
 * The leading number on each line is the upstream file's own line number;
 * gaps in that numbering are lines elided by the viewer (mostly closing
 * braces and comment delimiters) — do not infer structure from what is
 * missing here.
 *
 * This region finishes per-clause setup of the exception-handling basic
 * blocks: handler flags, OP_START_HANDLER markers, seq points, and the
 * exception-object input slot shared by catch/filter blocks.
 */
7653 tblock->real_offset = clause->handler_offset;
7654 tblock->flags |= BB_EXCEPTION_HANDLER;
7657 * Linking the try block with the EH block hinders inlining as we won't be able to
7658 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7660 if (COMPILE_LLVM (cfg))
7661 link_bblock (cfg, try_bb, tblock);
/* A handler whose first opcode pops the exception object never uses it,
 * which is what BB_EXCEPTION_DEAD_OBJ appears to record. */
7663 if (*(ip + clause->handler_offset) == CEE_POP)
7664 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
/* finally/filter/fault handlers begin with an explicit OP_START_HANDLER. */
7666 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7667 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7668 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7669 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7670 MONO_ADD_INS (tblock, ins);
7672 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7673 /* finally clauses already have a seq point */
7674 /* seq points for filter clauses are emitted below */
7675 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7676 MONO_ADD_INS (tblock, ins);
7679 /* todo: is a fault block unsafe to optimize? */
7680 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7681 tblock->flags |= BB_EXCEPTION_UNSAFE;
7684 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7685 while (p < end) {
7686 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7688 /* catch and filter blocks get the exception object on the stack */
7689 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7690 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7692 /* mostly like handle_stack_args (), but just sets the input args */
7693 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7694 tblock->in_scount = 1;
7695 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7696 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
/* Subsequent instructions for this handler are emitted into its bblock. */
7698 cfg->cbb = tblock;
7700 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7701 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7702 if (!cfg->compile_llvm) {
7703 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7704 ins->dreg = tblock->in_stack [0]->dreg;
7705 MONO_ADD_INS (tblock, ins);
7707 #else
7708 MonoInst *dummy_use;
7711 * Add a dummy use for the exvar so its liveness info will be
7712 * correct.
7714 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7715 #endif
/* Filter clauses get their seq point here (finally already has one above). */
7717 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7718 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7719 MONO_ADD_INS (tblock, ins);
/* The filter expression runs in its own bblock at data.filter_offset. */
7722 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7723 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7724 tblock->flags |= BB_EXCEPTION_HANDLER;
7725 tblock->real_offset = clause->data.filter_offset;
7726 tblock->in_scount = 1;
7727 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7728 /* The filter block shares the exvar with the handler block */
7729 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7730 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7731 MONO_ADD_INS (tblock, ins);
7735 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7736 clause->data.catch_class &&
7737 cfg->gshared &&
7738 mono_class_check_context_used (clause->data.catch_class)) {
7740 * In shared generic code with catch
7741 * clauses containing type variables
7742 * the exception handling code has to
7743 * be able to get to the rgctx.
7744 * Therefore we have to make sure that
7745 * the vtable/mrgctx argument (for
7746 * static or generic methods) or the
7747 * "this" argument (for non-static
7748 * methods) are live.
7750 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7751 mini_method_get_context (method)->method_inst ||
7752 method->klass->valuetype) {
7753 mono_get_vtable_var (cfg);
7754 } else {
7755 MonoInst *dummy_use;
7757 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
/* else branch: this compilation is an inline — arguments come from the
 * caller-supplied inline_args rather than real incoming arguments. */
7761 } else {
7762 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7763 cfg->cbb = start_bblock;
7764 cfg->args = arg_array;
7765 mono_save_args (cfg, sig, inline_args);
/*
 * Create the first real code bblock, wire in the separate locals-
 * initialization bblock, and — for gsharedvt methods — allocate the
 * runtime-sized locals area with OP_LOCALLOC.
 */
7768 /* FIRST CODE BLOCK */
7769 NEW_BBLOCK (cfg, tblock);
7770 tblock->cil_code = ip;
7771 cfg->cbb = tblock;
7772 cfg->ip = ip;
7774 ADD_BBLOCK (cfg, tblock);
7776 if (cfg->method == method) {
7777 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7778 if (breakpoint_id) {
7779 MONO_INST_NEW (cfg, ins, OP_BREAK);
7780 MONO_ADD_INS (cfg->cbb, ins);
7784 /* we use a separate basic block for the initialization code */
7785 NEW_BBLOCK (cfg, init_localsbb);
7786 if (cfg->method == method)
7787 cfg->bb_init = init_localsbb;
7788 init_localsbb->real_offset = cfg->real_offset;
7789 start_bblock->next_bb = init_localsbb;
7790 init_localsbb->next_bb = cfg->cbb;
7791 link_bblock (cfg, start_bblock, init_localsbb);
7792 link_bblock (cfg, init_localsbb, cfg->cbb);
7794 cfg->cbb = init_localsbb;
/* gsharedvt: fetch the per-instantiation method info through the rgctx and
 * carve out a stack area for the variable-sized locals. */
7796 if (cfg->gsharedvt && cfg->method == method) {
7797 MonoGSharedVtMethodInfo *info;
7798 MonoInst *var, *locals_var;
7799 int dreg;
7801 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7802 info->method = cfg->method;
7803 info->count_entries = 16;
7804 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7805 cfg->gsharedvt_info = info;
7807 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7808 /* prevent it from being register allocated */
7809 //var->flags |= MONO_INST_VOLATILE;
7810 cfg->gsharedvt_info_var = var;
7812 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7813 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7815 /* Allocate locals */
7816 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7817 /* prevent it from being register allocated */
7818 //locals_var->flags |= MONO_INST_VOLATILE;
7819 cfg->gsharedvt_locals_var = locals_var;
/* The locals size is only known at runtime: load it from the
 * MonoGSharedVtMethodRuntimeInfo blob pointed to by the info var. */
7821 dreg = alloc_ireg (cfg);
7822 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7824 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7825 ins->dreg = locals_var->dreg;
7826 ins->sreg1 = dreg;
7827 MONO_ADD_INS (cfg->cbb, ins);
7828 cfg->gsharedvt_locals_var_ins = ins;
/* The method now contains an alloca; zero the area if locals must be
 * zero-initialized. */
7830 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7832 if (init_locals)
7833 ins->flags |= MONO_INST_INIT;
/*
 * Pre-loop checks: CoreCLR security for native wrappers, IL sanity
 * checks, basic-block formation, and liveness setup for the rgctx /
 * "this" used by shared generic code.
 */
/* CoreCLR: icall/pinvoke wrappers may only be JITted from platform images;
 * otherwise throw a Security/MethodAccess exception at runtime. */
7837 if (mono_security_core_clr_enabled ()) {
7838 /* check if this is native code, e.g. an icall or a p/invoke */
7839 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7840 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7841 if (wrapped) {
7842 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7843 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7845 /* if this ia a native call then it can only be JITted from platform code */
7846 if ((icall || pinvk) && method->klass && method->klass->image) {
7847 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7848 MonoException *ex = icall ? mono_get_exception_security () :
7849 mono_get_exception_method_access ();
7850 emit_throw_exception (cfg, ex);
7857 CHECK_CFG_EXCEPTION;
/* A method with an empty body is unverifiable. */
7859 if (header->code_size == 0)
7860 UNVERIFIED;
/* Discover the basic-block boundaries; on failure err_pos points at the
 * offending IL. */
7862 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7863 ip = err_pos;
7864 UNVERIFIED;
7867 if (cfg->method == method)
7868 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
/* void-typed locals are only valid as byrefs. */
7870 for (n = 0; n < header->num_locals; ++n) {
7871 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7872 UNVERIFIED;
7874 class_inits = NULL;
7876 /* We force the vtable variable here for all shared methods
7877 for the possibility that they might show up in a stack
7878 trace where their exact instantiation is needed. */
7879 if (cfg->gshared && method == cfg->method) {
7880 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7881 mini_method_get_context (method)->method_inst ||
7882 method->klass->valuetype) {
7883 mono_get_vtable_var (cfg);
7884 } else {
7885 /* FIXME: Is there a better way to do this?
7886 We need the variable live for the duration
7887 of the whole method. */
7888 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7892 /* add a check for this != NULL to inlined methods */
7893 if (is_virtual_call) {
7894 MonoInst *arg_ins;
7896 NEW_ARGLOAD (cfg, arg_ins, 0);
7897 MONO_ADD_INS (cfg->cbb, arg_ins);
7898 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
/* Pre-split the IL into live/dead regions so dead ops can be skipped in
 * the main loop below. */
7901 skip_dead_blocks = !dont_verify;
7902 if (skip_dead_blocks) {
7903 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7904 CHECK_CFG_ERROR;
7905 g_assert (bb);
7908 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7909 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
/*
 * Main IL decode loop head. Each iteration: switch bblocks when a new one
 * starts at ip, skip ops in dead bblocks, emit seq points and coverage
 * instrumentation, then fall into the big opcode switch.
 */
7911 ins_flag = 0;
7912 start_new_bblock = 0;
7913 while (ip < end) {
7914 if (cfg->method == method)
7915 cfg->real_offset = ip - header->code;
7916 else
7917 cfg->real_offset = inline_offset;
7918 cfg->ip = ip;
7920 context_used = 0;
/* A previous opcode ended its bblock: close it and switch to the bblock
 * starting at ip, reloading the evaluation stack from its in_stack temps. */
7922 if (start_new_bblock) {
7923 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7924 if (start_new_bblock == 2) {
7925 g_assert (ip == tblock->cil_code);
7926 } else {
7927 GET_BBLOCK (cfg, tblock, ip);
7929 cfg->cbb->next_bb = tblock;
7930 cfg->cbb = tblock;
7931 start_new_bblock = 0;
7932 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7933 if (cfg->verbose_level > 3)
7934 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7935 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7936 *sp++ = ins;
7938 if (class_inits)
7939 g_slist_free (class_inits);
7940 class_inits = NULL;
7941 } else {
/* Falling through into an already-created bblock: flush the stack into
 * temps, link the blocks, and reload the stack. */
7942 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7943 link_bblock (cfg, cfg->cbb, tblock);
7944 if (sp != stack_start) {
7945 handle_stack_args (cfg, stack_start, sp - stack_start);
7946 sp = stack_start;
7947 CHECK_UNVERIFIABLE (cfg);
7949 cfg->cbb->next_bb = tblock;
7950 cfg->cbb = tblock;
7951 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7952 if (cfg->verbose_level > 3)
7953 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7954 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7955 *sp++ = ins;
7957 g_slist_free (class_inits);
7958 class_inits = NULL;
/* Skip opcodes that mono_basic_block_split () proved dead; a nop keeps the
 * bblock non-empty when the dead op ends it. */
7962 if (skip_dead_blocks) {
7963 int ip_offset = ip - header->code;
7965 if (ip_offset == bb->end)
7966 bb = bb->next;
7968 if (bb->dead) {
7969 int op_size = mono_opcode_size (ip, end);
7970 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7972 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7974 if (ip_offset + op_size == bb->end) {
7975 MONO_INST_NEW (cfg, ins, OP_NOP);
7976 MONO_ADD_INS (cfg->cbb, ins);
7977 start_new_bblock = 1;
7980 ip += op_size;
7981 continue;
7985 * Sequence points are points where the debugger can place a breakpoint.
7986 * Currently, we generate these automatically at points where the IL
7987 * stack is empty.
7989 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7991 * Make methods interruptable at the beginning, and at the targets of
7992 * backward branches.
7993 * Also, do this at the start of every bblock in methods with clauses too,
7994 * to be able to handle instructions with inprecise control flow like
7995 * throw/endfinally.
7996 * Backward branches are handled at the end of method-to-ir ().
7998 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7999 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8001 /* Avoid sequence points on empty IL like .volatile */
8002 // FIXME: Enable this
8003 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8004 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8005 if ((sp != stack_start) && !sym_seq_point)
8006 ins->flags |= MONO_INST_NONEMPTY_STACK;
8007 MONO_ADD_INS (cfg->cbb, ins);
8009 if (sym_seq_points)
8010 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8013 cfg->cbb->real_offset = cfg->real_offset;
/* Code coverage: bump the per-IL-offset hit counter (store-imm on x86,
 * pconst + store elsewhere). */
8015 if ((cfg->method == method) && cfg->coverage_info) {
8016 guint32 cil_offset = ip - header->code;
8017 cfg->coverage_info->data [cil_offset].cil_code = ip;
8019 /* TODO: Use an increment here */
8020 #if defined(TARGET_X86)
8021 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8022 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8023 ins->inst_imm = 1;
8024 MONO_ADD_INS (cfg->cbb, ins);
8025 #else
8026 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8027 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8028 #endif
8031 if (cfg->verbose_level > 3)
8032 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
/*
 * Opcode dispatch: one case per CIL opcode. This stretch covers the
 * trivial stack/arg/local opcodes (nop, break, ldarg/ldloc/stloc and
 * their short forms).
 */
8034 switch (*ip) {
8035 case CEE_NOP:
8036 if (seq_points && !sym_seq_points && sp != stack_start) {
8038 * The C# compiler uses these nops to notify the JIT that it should
8039 * insert seq points.
8041 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8042 MONO_ADD_INS (cfg->cbb, ins);
8044 if (cfg->keep_cil_nops)
8045 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8046 else
8047 MONO_INST_NEW (cfg, ins, OP_NOP);
8048 ip++;
8049 MONO_ADD_INS (cfg->cbb, ins);
8050 break;
/* CEE_BREAK: a debugger-agent user break when a breakpoint should be
 * inserted, otherwise just a nop. */
8051 case CEE_BREAK:
8052 if (should_insert_brekpoint (cfg->method)) {
8053 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8054 } else {
8055 MONO_INST_NEW (cfg, ins, OP_NOP);
8057 ip++;
8058 MONO_ADD_INS (cfg->cbb, ins);
8059 break;
/* Short-form argument/local loads and stores: the index is encoded in the
 * opcode itself (0-3) or in a one-byte operand (_S forms below). */
8060 case CEE_LDARG_0:
8061 case CEE_LDARG_1:
8062 case CEE_LDARG_2:
8063 case CEE_LDARG_3:
8064 CHECK_STACK_OVF (1);
8065 n = (*ip)-CEE_LDARG_0;
8066 CHECK_ARG (n);
8067 EMIT_NEW_ARGLOAD (cfg, ins, n);
8068 ip++;
8069 *sp++ = ins;
8070 break;
8071 case CEE_LDLOC_0:
8072 case CEE_LDLOC_1:
8073 case CEE_LDLOC_2:
8074 case CEE_LDLOC_3:
8075 CHECK_STACK_OVF (1);
8076 n = (*ip)-CEE_LDLOC_0;
8077 CHECK_LOCAL (n);
8078 EMIT_NEW_LOCLOAD (cfg, ins, n);
8079 ip++;
8080 *sp++ = ins;
8081 break;
8082 case CEE_STLOC_0:
8083 case CEE_STLOC_1:
8084 case CEE_STLOC_2:
8085 case CEE_STLOC_3: {
8086 CHECK_STACK (1);
8087 n = (*ip)-CEE_STLOC_0;
8088 CHECK_LOCAL (n);
8089 --sp;
8090 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8091 UNVERIFIED;
8092 emit_stloc_ir (cfg, sp, header, n);
8093 ++ip;
8094 inline_costs += 1;
8095 break;
8097 case CEE_LDARG_S:
8098 CHECK_OPSIZE (2);
8099 CHECK_STACK_OVF (1);
8100 n = ip [1];
8101 CHECK_ARG (n);
8102 EMIT_NEW_ARGLOAD (cfg, ins, n);
8103 *sp++ = ins;
8104 ip += 2;
8105 break;
8106 case CEE_LDARGA_S:
8107 CHECK_OPSIZE (2);
8108 CHECK_STACK_OVF (1);
8109 n = ip [1];
8110 CHECK_ARG (n);
8111 NEW_ARGLOADA (cfg, ins, n);
8112 MONO_ADD_INS (cfg->cbb, ins);
8113 *sp++ = ins;
8114 ip += 2;
8115 break;
8116 case CEE_STARG_S:
8117 CHECK_OPSIZE (2);
8118 CHECK_STACK (1);
8119 --sp;
8120 n = ip [1];
8121 CHECK_ARG (n);
8122 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8123 UNVERIFIED;
8124 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8125 ip += 2;
8126 break;
8127 case CEE_LDLOC_S:
8128 CHECK_OPSIZE (2);
8129 CHECK_STACK_OVF (1);
8130 n = ip [1];
8131 CHECK_LOCAL (n);
8132 EMIT_NEW_LOCLOAD (cfg, ins, n);
8133 *sp++ = ins;
8134 ip += 2;
8135 break;
/* LDLOCA_S: try the optimized ldloca pattern first — on success it may
 * consume more than one opcode (ip jumps to tmp_ip). */
8136 case CEE_LDLOCA_S: {
8137 unsigned char *tmp_ip;
8138 CHECK_OPSIZE (2);
8139 CHECK_STACK_OVF (1);
8140 CHECK_LOCAL (ip [1]);
8142 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8143 ip = tmp_ip;
8144 inline_costs += 1;
8145 break;
8148 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8149 *sp++ = ins;
8150 ip += 2;
8151 break;
8153 case CEE_STLOC_S:
8154 CHECK_OPSIZE (2);
8155 CHECK_STACK (1);
8156 --sp;
8157 CHECK_LOCAL (ip [1]);
8158 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8159 UNVERIFIED;
8160 emit_stloc_ir (cfg, sp, header, ip [1]);
8161 ip += 2;
8162 inline_costs += 1;
8163 break;
/*
 * Constant-loading opcodes: ldnull, the ldc.i4 family, ldc.i8 and the
 * floating-point constants.
 */
8164 case CEE_LDNULL:
8165 CHECK_STACK_OVF (1);
8166 EMIT_NEW_PCONST (cfg, ins, NULL);
8167 ins->type = STACK_OBJ;
8168 ++ip;
8169 *sp++ = ins;
8170 break;
8171 case CEE_LDC_I4_M1:
8172 CHECK_STACK_OVF (1);
8173 EMIT_NEW_ICONST (cfg, ins, -1);
8174 ++ip;
8175 *sp++ = ins;
8176 break;
8177 case CEE_LDC_I4_0:
8178 case CEE_LDC_I4_1:
8179 case CEE_LDC_I4_2:
8180 case CEE_LDC_I4_3:
8181 case CEE_LDC_I4_4:
8182 case CEE_LDC_I4_5:
8183 case CEE_LDC_I4_6:
8184 case CEE_LDC_I4_7:
8185 case CEE_LDC_I4_8:
8186 CHECK_STACK_OVF (1);
8187 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8188 ++ip;
8189 *sp++ = ins;
8190 break;
8191 case CEE_LDC_I4_S:
8192 CHECK_OPSIZE (2);
8193 CHECK_STACK_OVF (1);
8194 ++ip;
8195 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8196 ++ip;
8197 *sp++ = ins;
8198 break;
8199 case CEE_LDC_I4:
8200 CHECK_OPSIZE (5);
8201 CHECK_STACK_OVF (1);
8202 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8203 ip += 5;
8204 *sp++ = ins;
8205 break;
8206 case CEE_LDC_I8:
8207 CHECK_OPSIZE (9);
8208 CHECK_STACK_OVF (1);
8209 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8210 ins->type = STACK_I8;
8211 ins->dreg = alloc_dreg (cfg, STACK_I8);
8212 ++ip;
8213 ins->inst_l = (gint64)read64 (ip);
8214 MONO_ADD_INS (cfg->cbb, ins);
8215 ip += 8;
8216 *sp++ = ins;
8217 break;
/* R4/R8 constants live in domain-allocated memory; with AOT on PowerPC
 * they are instead loaded indirectly through an aotconst. */
8218 case CEE_LDC_R4: {
8219 float *f;
8220 gboolean use_aotconst = FALSE;
8222 #ifdef TARGET_POWERPC
8223 /* FIXME: Clean this up */
8224 if (cfg->compile_aot)
8225 use_aotconst = TRUE;
8226 #endif
8228 /* FIXME: we should really allocate this only late in the compilation process */
8229 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8230 CHECK_OPSIZE (5);
8231 CHECK_STACK_OVF (1);
8233 if (use_aotconst) {
8234 MonoInst *cons;
8235 int dreg;
8237 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8239 dreg = alloc_freg (cfg);
8240 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8241 ins->type = cfg->r4_stack_type;
8242 } else {
8243 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8244 ins->type = cfg->r4_stack_type;
8245 ins->dreg = alloc_dreg (cfg, STACK_R8);
8246 ins->inst_p0 = f;
8247 MONO_ADD_INS (cfg->cbb, ins);
8249 ++ip;
8250 readr4 (ip, f);
8251 ip += 4;
8252 *sp++ = ins;
8253 break;
/* Same scheme as LDC_R4, for doubles. */
8255 case CEE_LDC_R8: {
8256 double *d;
8257 gboolean use_aotconst = FALSE;
8259 #ifdef TARGET_POWERPC
8260 /* FIXME: Clean this up */
8261 if (cfg->compile_aot)
8262 use_aotconst = TRUE;
8263 #endif
8265 /* FIXME: we should really allocate this only late in the compilation process */
8266 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8267 CHECK_OPSIZE (9);
8268 CHECK_STACK_OVF (1);
8270 if (use_aotconst) {
8271 MonoInst *cons;
8272 int dreg;
8274 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8276 dreg = alloc_freg (cfg);
8277 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8278 ins->type = STACK_R8;
8279 } else {
8280 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8281 ins->type = STACK_R8;
8282 ins->dreg = alloc_dreg (cfg, STACK_R8);
8283 ins->inst_p0 = d;
8284 MONO_ADD_INS (cfg->cbb, ins);
8286 ++ip;
8287 readr8 (ip, d);
8288 ip += 8;
8289 *sp++ = ins;
8290 break;
/*
 * Stack manipulation (dup/pop) and CEE_JMP (tail-transfer to another
 * method with the same signature).
 */
8292 case CEE_DUP: {
8293 MonoInst *temp, *store;
8294 CHECK_STACK (1);
8295 CHECK_STACK_OVF (1);
8296 sp--;
8297 ins = *sp;
8299 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8300 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8302 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8303 *sp++ = ins;
8305 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8306 *sp++ = ins;
8308 ++ip;
8309 inline_costs += 2;
8310 break;
/* POP: on x86 an R8 value additionally needs an explicit FP-stack pop. */
8312 case CEE_POP:
8313 CHECK_STACK (1);
8314 ip++;
8315 --sp;
8317 #ifdef TARGET_X86
8318 if (sp [0]->type == STACK_R8)
8319 /* we need to pop the value from the x86 FP stack */
8320 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8321 #endif
8322 break;
/* CEE_JMP: three strategies — llvm-only emits a call followed by a normal
 * return, backends with tail-call support emit OP_TAILCALL, otherwise a
 * plain OP_JMP is used with all arguments kept alive. */
8323 case CEE_JMP: {
8324 MonoCallInst *call;
8325 MonoMethodSignature *fsig;
8326 int i, n;
8328 INLINE_FAILURE ("jmp");
8329 GSHAREDVT_FAILURE (*ip);
8331 CHECK_OPSIZE (5);
8332 if (stack_start != sp)
8333 UNVERIFIED;
8334 token = read32 (ip + 1);
8335 /* FIXME: check the signature matches */
8336 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8337 CHECK_CFG_ERROR;
8339 if (cfg->gshared && mono_method_check_context_used (cmethod))
8340 GENERIC_SHARING_FAILURE (CEE_JMP);
8342 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8344 fsig = mono_method_signature (cmethod);
8345 n = fsig->param_count + fsig->hasthis;
8346 if (cfg->llvm_only) {
8347 MonoInst **args;
8349 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8350 for (i = 0; i < n; ++i)
8351 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8352 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8354 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8355 * have to emit a normal return since llvm expects it.
8357 if (cfg->ret)
8358 emit_setret (cfg, ins);
8359 MONO_INST_NEW (cfg, ins, OP_BR);
8360 ins->inst_target_bb = end_bblock;
8361 MONO_ADD_INS (cfg->cbb, ins);
8362 link_bblock (cfg, cfg->cbb, end_bblock);
8363 ip += 5;
8364 break;
/* Backend supports real tail calls: lower to OP_TAILCALL. */
8365 } else if (cfg->backend->have_op_tail_call) {
8366 /* Handle tail calls similarly to calls */
8367 DISABLE_AOT (cfg);
8369 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8370 call->method = cmethod;
8371 call->tail_call = TRUE;
8372 call->signature = mono_method_signature (cmethod);
8373 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8374 call->inst.inst_p0 = cmethod;
8375 for (i = 0; i < n; ++i)
8376 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8378 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8379 call->vret_var = cfg->vret_addr;
8381 mono_arch_emit_call (cfg, call);
8382 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8383 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Fallback: plain OP_JMP; incoming args are marked volatile so they are
 * not optimized away before the jump. */
8384 } else {
8385 for (i = 0; i < num_args; ++i)
8386 /* Prevent arguments from being optimized away */
8387 arg_array [i]->flags |= MONO_INST_VOLATILE;
8389 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8390 ins = (MonoInst*)call;
8391 ins->inst_p0 = cmethod;
8392 MONO_ADD_INS (cfg->cbb, ins);
8395 ip += 5;
8396 start_new_bblock = 1;
8397 break;
/*
 * CEE_CALLI: indirect call through a function pointer on the stack.
 * Special paths: pinvoke pointers from dynamic methods (wrapper icall),
 * gsharedvt signatures (rgctx trampoline), and constant addresses
 * (converted into direct abs calls).
 */
8399 case CEE_CALLI: {
8400 MonoInst *addr;
8401 MonoMethodSignature *fsig;
8403 CHECK_OPSIZE (5);
8404 token = read32 (ip + 1);
8406 ins = NULL;
8408 //GSHAREDVT_FAILURE (*ip);
8409 cmethod = NULL;
8410 CHECK_STACK (1);
8411 --sp;
8412 addr = *sp;
8413 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8414 CHECK_CFG_ERROR;
/* Dynamic method calling through a pinvoke-signature pointer: route the
 * call through the native-calli wrapper icall. */
8416 if (method->dynamic && fsig->pinvoke) {
8417 MonoInst *args [3];
8420 * This is a call through a function pointer using a pinvoke
8421 * signature. Have to create a wrapper and call that instead.
8422 * FIXME: This is very slow, need to create a wrapper at JIT time
8423 * instead based on the signature.
8425 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8426 EMIT_NEW_PCONST (cfg, args [1], fsig);
8427 args [2] = addr;
8428 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8431 n = fsig->param_count + fsig->hasthis;
8433 CHECK_STACK (n);
8435 //g_assert (!virtual_ || fsig->hasthis);
8437 sp -= n;
8439 inline_costs += 10 * num_calls++;
8442 * Making generic calls out of gsharedvt methods.
8443 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8444 * patching gshared method addresses into a gsharedvt method.
8446 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8448 * We pass the address to the gsharedvt trampoline in the rgctx reg
8450 MonoInst *callee = addr;
8452 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8453 /* Not tested */
8454 GSHAREDVT_FAILURE (*ip);
8456 if (cfg->llvm_only)
8457 // FIXME:
8458 GSHAREDVT_FAILURE (*ip);
8460 addr = emit_get_rgctx_sig (cfg, context_used,
8461 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8462 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8463 goto calli_end;
8466 /* Prevent inlining of methods with indirect calls */
8467 INLINE_FAILURE ("indirect call");
/* A constant target (pconst/aotconst/got entry) can be turned into a
 * direct call with the aotconst contents as patch info. */
8469 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8470 MonoJumpInfoType info_type;
8471 gpointer info_data;
8474 * Instead of emitting an indirect call, emit a direct call
8475 * with the contents of the aotconst as the patch info.
8477 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8478 info_type = (MonoJumpInfoType)addr->inst_c1;
8479 info_data = addr->inst_p0;
8480 } else {
8481 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8482 info_data = addr->inst_right->inst_left;
8485 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8486 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8487 NULLIFY_INS (addr);
8488 goto calli_end;
8489 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8490 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8491 NULLIFY_INS (addr);
8492 goto calli_end;
8495 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
/* Common exit: push the (widened) result if the signature returns one. */
8497 calli_end:
8499 /* End of call, INS should contain the result of the call, if any */
8501 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8502 g_assert (ins);
8503 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8506 CHECK_CFG_EXCEPTION;
8508 ip += 5;
8509 ins_flag = 0;
8510 constrained_class = NULL;
8511 break;
/*
 * CEE_CALL / CEE_CALLVIRT prologue: resolve the target method, handle the
 * `constrained.` prefix (including gsharedvt and partially-shared cases),
 * access checks, and signature setup.
 *
 * NOTE(review): this case continues past the end of the visible snippet;
 * the call-emission tail is not shown here.
 */
8513 case CEE_CALL:
8514 case CEE_CALLVIRT: {
8515 MonoInst *addr = NULL;
8516 MonoMethodSignature *fsig = NULL;
8517 int array_rank = 0;
8518 int virtual_ = *ip == CEE_CALLVIRT;
8519 gboolean pass_imt_from_rgctx = FALSE;
8520 MonoInst *imt_arg = NULL;
8521 MonoInst *keep_this_alive = NULL;
8522 gboolean pass_vtable = FALSE;
8523 gboolean pass_mrgctx = FALSE;
8524 MonoInst *vtable_arg = NULL;
8525 gboolean check_this = FALSE;
8526 gboolean supported_tail_call = FALSE;
8527 gboolean tail_call = FALSE;
8528 gboolean need_seq_point = FALSE;
8529 guint32 call_opcode = *ip;
8530 gboolean emit_widen = TRUE;
8531 gboolean push_res = TRUE;
8532 gboolean skip_ret = FALSE;
8533 gboolean delegate_invoke = FALSE;
8534 gboolean direct_icall = FALSE;
8535 gboolean constrained_partial_call = FALSE;
8536 MonoMethod *cil_method;
8538 CHECK_OPSIZE (5);
8539 token = read32 (ip + 1);
8541 ins = NULL;
8543 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8544 CHECK_CFG_ERROR;
8546 cil_method = cmethod;
/* `constrained.` prefix: resolve the constrained target. For gshared
 * type-variable receivers that are not reference types the call becomes a
 * "constrained partial call" handled further below. */
8548 if (constrained_class) {
8549 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8550 if (!mini_is_gsharedvt_klass (constrained_class)) {
8551 g_assert (!cmethod->klass->valuetype);
8552 if (!mini_type_is_reference (&constrained_class->byval_arg))
8553 constrained_partial_call = TRUE;
8557 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8558 if (cfg->verbose_level > 2)
8559 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8560 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8561 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8562 cfg->gshared)) {
8563 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8564 CHECK_CFG_ERROR;
8566 } else {
8567 if (cfg->verbose_level > 2)
8568 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8570 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8572 * This is needed since get_method_constrained can't find
8573 * the method in klass representing a type var.
8574 * The type var is guaranteed to be a reference type in this
8575 * case.
8577 if (!mini_is_gsharedvt_klass (constrained_class))
8578 g_assert (!cmethod->klass->valuetype);
8579 } else {
8580 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8581 CHECK_CFG_ERROR;
/* Visibility/access checks against both the open definition and the
 * inflated method. */
8586 if (!dont_verify && !cfg->skip_visibility) {
8587 MonoMethod *target_method = cil_method;
8588 if (method->is_inflated) {
8589 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8590 CHECK_CFG_ERROR;
8592 if (!mono_method_can_access_method (method_definition, target_method) &&
8593 !mono_method_can_access_method (method, cil_method))
8594 emit_method_access_failure (cfg, method, cil_method);
8597 if (mono_security_core_clr_enabled ())
8598 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8600 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8601 /* MS.NET seems to silently convert this to a callvirt */
8602 virtual_ = 1;
8606 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8607 * converts to a callvirt.
8609 * tests/bug-515884.il is an example of this behavior
8611 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8612 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8613 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8614 virtual_ = 1;
/* Make sure the target class is initialized, then pick the signature:
 * System.Array icalls, direct icalls, pinvoke wrappers and constrained
 * calls each get special handling. */
8617 if (!cmethod->klass->inited)
8618 if (!mono_class_init (cmethod->klass))
8619 TYPE_LOAD_ERROR (cmethod->klass);
8621 fsig = mono_method_signature (cmethod);
8622 if (!fsig)
8623 LOAD_ERROR;
8624 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8625 mini_class_is_system_array (cmethod->klass)) {
8626 array_rank = cmethod->klass->rank;
8627 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8628 direct_icall = TRUE;
8629 } else if (fsig->pinvoke) {
8630 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8631 fsig = mono_method_signature (wrapper);
8632 } else if (constrained_class) {
8633 } else {
8634 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8635 CHECK_CFG_ERROR;
8638 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8639 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8641 /* See code below */
8642 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8643 MonoBasicBlock *tbb;
8645 GET_BBLOCK (cfg, tbb, ip + 5);
8646 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8648 * We want to extend the try block to cover the call, but we can't do it if the
8649 * call is made directly since its followed by an exception check.
8651 direct_icall = FALSE;
8655 mono_save_token_info (cfg, image, token, cil_method);
8657 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8658 need_seq_point = TRUE;
8660 /* Don't support calls made using type arguments for now */
8662 if (cfg->gsharedvt) {
8663 if (mini_is_gsharedvt_signature (fsig))
8664 GSHAREDVT_FAILURE (*ip);
8668 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8669 g_assert_not_reached ();
8671 n = fsig->param_count + fsig->hasthis;
8673 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8674 UNVERIFIED;
8676 if (!cfg->gshared)
8677 g_assert (!mono_method_check_context_used (cmethod));
8679 CHECK_STACK (n);
8681 //g_assert (!virtual_ || fsig->hasthis);
8683 sp -= n;
8688 if (constrained_class) {
8689 if (mini_is_gsharedvt_klass (constrained_class)) {
8690 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8691 /* The 'Own method' case below */
8692 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8693 /* 'The type parameter is instantiated as a reference type' case below. */
8694 } else {
8695 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8696 CHECK_CFG_EXCEPTION;
8697 g_assert (ins);
8698 goto call_end;
/* Partially-shared constrained call on a value-type receiver: decide
 * whether the receiver must be boxed, possibly only at runtime. */
8702 if (constrained_partial_call) {
8703 gboolean need_box = TRUE;
8706 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8707 * called method is not known at compile time either. The called method could end up being
8708 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8709 * to box the receiver.
8710 * A simple solution would be to box always and make a normal virtual call, but that would
8711 * be bad performance wise.
8713 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8715 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
8717 need_box = FALSE;
8720 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8721 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8722 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8723 ins->klass = constrained_class;
8724 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8725 CHECK_CFG_EXCEPTION;
8726 } else if (need_box) {
8727 MonoInst *box_type;
8728 MonoBasicBlock *is_ref_bb, *end_bb;
8729 MonoInst *nonbox_call;
8732 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8733 * if needed.
8734 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8735 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8737 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8739 NEW_BBLOCK (cfg, is_ref_bb);
8740 NEW_BBLOCK (cfg, end_bb);
8742 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8744 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8746 /* Non-ref case */
8747 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8749 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8751 /* Ref case */
8752 MONO_START_BB (cfg, is_ref_bb);
8753 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8754 ins->klass = constrained_class;
8755 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8756 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8758 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8760 MONO_START_BB (cfg, end_bb);
8761 cfg->cbb = end_bb;
8763 nonbox_call->dreg = ins->dreg;
8764 goto call_end;
8765 } else {
8766 g_assert (mono_class_is_interface (cmethod->klass));
8767 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8768 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8769 goto call_end;
8771 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8773 * The type parameter is instantiated as a valuetype,
8774 * but that type doesn't override the method we're
8775 * calling, so we need to box `this'.
8777 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8778 ins->klass = constrained_class;
8779 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8780 CHECK_CFG_EXCEPTION;
8781 } else if (!constrained_class->valuetype) {
8782 int dreg = alloc_ireg_ref (cfg);
8785 * The type parameter is instantiated as a reference
8786 * type. We have a managed pointer on the stack, so
8787 * we need to dereference it here.
8789 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8790 ins->type = STACK_OBJ;
8791 sp [0] = ins;
8792 } else {
8793 if (cmethod->klass->valuetype) {
8794 /* Own method */
8795 } else {
8796 /* Interface method */
8797 int ioffset, slot;
8799 mono_class_setup_vtable (constrained_class);
8800 CHECK_TYPELOAD (constrained_class);
8801 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8802 if (ioffset == -1)
8803 TYPE_LOAD_ERROR (constrained_class);
8804 slot = mono_method_get_vtable_slot (cmethod);
8805 if (slot == -1)
8806 TYPE_LOAD_ERROR (cmethod->klass);
8807 cmethod = constrained_class->vtable [ioffset + slot];
8809 if (cmethod->klass == mono_defaults.enum_class) {
8810 /* Enum implements some interfaces, so treat this as the first case */
8811 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8812 ins->klass = constrained_class;
8813 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8814 CHECK_CFG_EXCEPTION;
8817 virtual_ = 0;
8819 constrained_class = NULL;
8822 if (check_call_signature (cfg, fsig, sp))
8823 UNVERIFIED;
8825 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8826 delegate_invoke = TRUE;
8828 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8829 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8830 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8831 emit_widen = FALSE;
8834 goto call_end;
8838 * If the callee is a shared method, then its static cctor
8839 * might not get called after the call was patched.
8841 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8842 emit_class_init (cfg, cmethod->klass);
8843 CHECK_TYPELOAD (cmethod->klass);
8846 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8848 if (cfg->gshared) {
8849 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8851 context_used = mini_method_check_context_used (cfg, cmethod);
8853 if (context_used && mono_class_is_interface (cmethod->klass)) {
8854 /* Generic method interface
8855 calls are resolved via a
8856 helper function and don't
8857 need an imt. */
8858 if (!cmethod_context || !cmethod_context->method_inst)
8859 pass_imt_from_rgctx = TRUE;
8863 * If a shared method calls another
8864 * shared method then the caller must
8865 * have a generic sharing context
8866 * because the magic trampoline
8867 * requires it. FIXME: We shouldn't
8868 * have to force the vtable/mrgctx
8869 * variable here. Instead there
8870 * should be a flag in the cfg to
8871 * request a generic sharing context.
8873 if (context_used &&
8874 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8875 mono_get_vtable_var (cfg);
8878 if (pass_vtable) {
8879 if (context_used) {
8880 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8881 } else {
8882 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8884 CHECK_TYPELOAD (cmethod->klass);
8885 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8889 if (pass_mrgctx) {
8890 g_assert (!vtable_arg);
8892 if (!cfg->compile_aot) {
8894 * emit_get_rgctx_method () calls mono_class_vtable () so check
8895 * for type load errors before.
8897 mono_class_setup_vtable (cmethod->klass);
8898 CHECK_TYPELOAD (cmethod->klass);
8901 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8903 /* !marshalbyref is needed to properly handle generic methods + remoting */
8904 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8905 MONO_METHOD_IS_FINAL (cmethod)) &&
8906 !mono_class_is_marshalbyref (cmethod->klass)) {
8907 if (virtual_)
8908 check_this = TRUE;
8909 virtual_ = 0;
8913 if (pass_imt_from_rgctx) {
8914 g_assert (!pass_vtable);
8916 imt_arg = emit_get_rgctx_method (cfg, context_used,
8917 cmethod, MONO_RGCTX_INFO_METHOD);
8920 if (check_this)
8921 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8923 /* Calling virtual generic methods */
8924 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8925 !(MONO_METHOD_IS_FINAL (cmethod) &&
8926 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8927 fsig->generic_param_count &&
8928 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8929 !cfg->llvm_only) {
8930 MonoInst *this_temp, *this_arg_temp, *store;
8931 MonoInst *iargs [4];
8933 g_assert (fsig->is_inflated);
8935 /* Prevent inlining of methods that contain indirect calls */
8936 INLINE_FAILURE ("virtual generic call");
8938 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8939 GSHAREDVT_FAILURE (*ip);
8941 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8942 g_assert (!imt_arg);
8943 if (!context_used)
8944 g_assert (cmethod->is_inflated);
8945 imt_arg = emit_get_rgctx_method (cfg, context_used,
8946 cmethod, MONO_RGCTX_INFO_METHOD);
8947 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8948 } else {
8949 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8950 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8951 MONO_ADD_INS (cfg->cbb, store);
8953 /* FIXME: This should be a managed pointer */
8954 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8956 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8957 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8958 cmethod, MONO_RGCTX_INFO_METHOD);
8959 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8960 addr = mono_emit_jit_icall (cfg,
8961 mono_helper_compile_generic_method, iargs);
8963 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8965 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8968 goto call_end;
8972 * Implement a workaround for the inherent races involved in locking:
8973 * Monitor.Enter ()
8974 * try {
8975 * } finally {
8976 * Monitor.Exit ()
8978 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8979 * try block, the Exit () won't be executed, see:
8980 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8981 * To work around this, we extend such try blocks to include the last x bytes
8982 * of the Monitor.Enter () call.
8984 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8985 MonoBasicBlock *tbb;
8987 GET_BBLOCK (cfg, tbb, ip + 5);
8989 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8990 * from Monitor.Enter like ArgumentNullException.
8992 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8993 /* Mark this bblock as needing to be extended */
8994 tbb->extend_try_block = TRUE;
8998 /* Conversion to a JIT intrinsic */
8999 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9000 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9001 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9002 emit_widen = FALSE;
9004 goto call_end;
9006 CHECK_CFG_ERROR;
9008 /* Inlining */
9009 if ((cfg->opt & MONO_OPT_INLINE) &&
9010 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9011 mono_method_check_inlining (cfg, cmethod)) {
9012 int costs;
9013 gboolean always = FALSE;
9015 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9016 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9017 /* Prevent inlining of methods that call wrappers */
9018 INLINE_FAILURE ("wrapper call");
9019 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9020 always = TRUE;
9023 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9024 if (costs) {
9025 cfg->real_offset += 5;
9027 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9028 /* *sp is already set by inline_method */
9029 sp++;
9030 push_res = FALSE;
9033 inline_costs += costs;
9035 goto call_end;
9039 /* Tail recursion elimination */
9040 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9041 gboolean has_vtargs = FALSE;
9042 int i;
9044 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9045 INLINE_FAILURE ("tail call");
9047 /* keep it simple */
9048 for (i = fsig->param_count - 1; i >= 0; i--) {
9049 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9050 has_vtargs = TRUE;
9053 if (!has_vtargs) {
9054 if (need_seq_point) {
9055 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9056 need_seq_point = FALSE;
9058 for (i = 0; i < n; ++i)
9059 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9060 MONO_INST_NEW (cfg, ins, OP_BR);
9061 MONO_ADD_INS (cfg->cbb, ins);
9062 tblock = start_bblock->out_bb [0];
9063 link_bblock (cfg, cfg->cbb, tblock);
9064 ins->inst_target_bb = tblock;
9065 start_new_bblock = 1;
9067 /* skip the CEE_RET, too */
9068 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9069 skip_ret = TRUE;
9070 push_res = FALSE;
9071 goto call_end;
9075 inline_costs += 10 * num_calls++;
9078 * Synchronized wrappers.
9079 * Its hard to determine where to replace a method with its synchronized
9080 * wrapper without causing an infinite recursion. The current solution is
9081 * to add the synchronized wrapper in the trampolines, and to
9082 * change the called method to a dummy wrapper, and resolve that wrapper
9083 * to the real method in mono_jit_compile_method ().
9085 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9086 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9087 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9088 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9092 * Making generic calls out of gsharedvt methods.
9093 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9094 * patching gshared method addresses into a gsharedvt method.
9096 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9097 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9098 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9099 MonoRgctxInfoType info_type;
9101 if (virtual_) {
9102 //if (mono_class_is_interface (cmethod->klass))
9103 //GSHAREDVT_FAILURE (*ip);
9104 // disable for possible remoting calls
9105 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9106 GSHAREDVT_FAILURE (*ip);
9107 if (fsig->generic_param_count) {
9108 /* virtual generic call */
9109 g_assert (!imt_arg);
9110 /* Same as the virtual generic case above */
9111 imt_arg = emit_get_rgctx_method (cfg, context_used,
9112 cmethod, MONO_RGCTX_INFO_METHOD);
9113 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9114 vtable_arg = NULL;
9115 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9116 /* This can happen when we call a fully instantiated iface method */
9117 imt_arg = emit_get_rgctx_method (cfg, context_used,
9118 cmethod, MONO_RGCTX_INFO_METHOD);
9119 vtable_arg = NULL;
9123 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9124 keep_this_alive = sp [0];
9126 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9127 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9128 else
9129 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9130 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9132 if (cfg->llvm_only) {
9133 // FIXME: Avoid initializing vtable_arg
9134 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9135 } else {
9136 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9138 goto call_end;
9141 /* Generic sharing */
9144 * Use this if the callee is gsharedvt sharable too, since
9145 * at runtime we might find an instantiation so the call cannot
9146 * be patched (the 'no_patch' code path in mini-trampolines.c).
9148 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9149 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9150 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9151 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9152 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9153 INLINE_FAILURE ("gshared");
9155 g_assert (cfg->gshared && cmethod);
9156 g_assert (!addr);
9159 * We are compiling a call to a
9160 * generic method from shared code,
9161 * which means that we have to look up
9162 * the method in the rgctx and do an
9163 * indirect call.
9165 if (fsig->hasthis)
9166 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9168 if (cfg->llvm_only) {
9169 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9170 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9171 else
9172 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9173 // FIXME: Avoid initializing imt_arg/vtable_arg
9174 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9175 } else {
9176 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9177 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9179 goto call_end;
9182 /* Direct calls to icalls */
9183 if (direct_icall) {
9184 MonoMethod *wrapper;
9185 int costs;
9187 /* Inline the wrapper */
9188 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9190 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9191 g_assert (costs > 0);
9192 cfg->real_offset += 5;
9194 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9195 /* *sp is already set by inline_method */
9196 sp++;
9197 push_res = FALSE;
9200 inline_costs += costs;
9202 goto call_end;
9205 /* Array methods */
9206 if (array_rank) {
9207 MonoInst *addr;
9209 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9210 MonoInst *val = sp [fsig->param_count];
9212 if (val->type == STACK_OBJ) {
9213 MonoInst *iargs [2];
9215 iargs [0] = sp [0];
9216 iargs [1] = val;
9218 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9221 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9222 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9223 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9224 emit_write_barrier (cfg, addr, val);
9225 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9226 GSHAREDVT_FAILURE (*ip);
9227 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9228 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9230 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9231 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9232 if (!cmethod->klass->element_class->valuetype && !readonly)
9233 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9234 CHECK_TYPELOAD (cmethod->klass);
9236 readonly = FALSE;
9237 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9238 ins = addr;
9239 } else {
9240 g_assert_not_reached ();
9243 emit_widen = FALSE;
9244 goto call_end;
9247 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9248 if (ins)
9249 goto call_end;
9251 /* Tail prefix / tail call optimization */
9253 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9254 /* FIXME: runtime generic context pointer for jumps? */
9255 /* FIXME: handle this for generic sharing eventually */
9256 if ((ins_flag & MONO_INST_TAILCALL) &&
9257 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9258 supported_tail_call = TRUE;
9260 if (supported_tail_call) {
9261 MonoCallInst *call;
9263 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9264 INLINE_FAILURE ("tail call");
9266 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9268 if (cfg->backend->have_op_tail_call) {
9269 /* Handle tail calls similarly to normal calls */
9270 tail_call = TRUE;
9271 } else {
9272 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9274 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9275 call->tail_call = TRUE;
9276 call->method = cmethod;
9277 call->signature = mono_method_signature (cmethod);
9280 * We implement tail calls by storing the actual arguments into the
9281 * argument variables, then emitting a CEE_JMP.
9283 for (i = 0; i < n; ++i) {
9284 /* Prevent argument from being register allocated */
9285 arg_array [i]->flags |= MONO_INST_VOLATILE;
9286 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9288 ins = (MonoInst*)call;
9289 ins->inst_p0 = cmethod;
9290 ins->inst_p1 = arg_array [0];
9291 MONO_ADD_INS (cfg->cbb, ins);
9292 link_bblock (cfg, cfg->cbb, end_bblock);
9293 start_new_bblock = 1;
9295 // FIXME: Eliminate unreachable epilogs
9298 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9299 * only reachable from this call.
9301 GET_BBLOCK (cfg, tblock, ip + 5);
9302 if (tblock == cfg->cbb || tblock->in_count == 0)
9303 skip_ret = TRUE;
9304 push_res = FALSE;
9306 goto call_end;
9311 * Virtual calls in llvm-only mode.
9313 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9314 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9315 goto call_end;
9318 /* Common call */
9319 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9320 INLINE_FAILURE ("call");
9321 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9322 imt_arg, vtable_arg);
9324 if (tail_call && !cfg->llvm_only) {
9325 link_bblock (cfg, cfg->cbb, end_bblock);
9326 start_new_bblock = 1;
9328 // FIXME: Eliminate unreachable epilogs
9331 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9332 * only reachable from this call.
9334 GET_BBLOCK (cfg, tblock, ip + 5);
9335 if (tblock == cfg->cbb || tblock->in_count == 0)
9336 skip_ret = TRUE;
9337 push_res = FALSE;
9340 call_end:
9342 /* End of call, INS should contain the result of the call, if any */
9344 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9345 g_assert (ins);
9346 if (emit_widen)
9347 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9348 else
9349 *sp++ = ins;
9352 if (keep_this_alive) {
9353 MonoInst *dummy_use;
9355 /* See mono_emit_method_call_full () */
9356 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9359 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9361 * Clang can convert these calls to tail calls which screw up the stack
9362 * walk. This happens even when the -fno-optimize-sibling-calls
9363 * option is passed to clang.
9364 * Work around this by emitting a dummy call.
9366 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9369 CHECK_CFG_EXCEPTION;
9371 ip += 5;
9372 if (skip_ret) {
9373 g_assert (*ip == CEE_RET);
9374 ip += 1;
9376 ins_flag = 0;
9377 constrained_class = NULL;
9378 if (need_seq_point)
9379 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9380 break;
9382 case CEE_RET:
9383 if (cfg->method != method) {
9384 /* return from inlined method */
9386 * If in_count == 0, that means the ret is unreachable due to
9387 * being preceeded by a throw. In that case, inline_method () will
9388 * handle setting the return value
9389 * (test case: test_0_inline_throw ()).
9391 if (return_var && cfg->cbb->in_count) {
9392 MonoType *ret_type = mono_method_signature (method)->ret;
9394 MonoInst *store;
9395 CHECK_STACK (1);
9396 --sp;
9398 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9399 UNVERIFIED;
9401 //g_assert (returnvar != -1);
9402 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9403 cfg->ret_var_set = TRUE;
9405 } else {
9406 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9408 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9409 emit_pop_lmf (cfg);
9411 if (cfg->ret) {
9412 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9414 if (seq_points && !sym_seq_points) {
9416 * Place a seq point here too even through the IL stack is not
9417 * empty, so a step over on
9418 * call <FOO>
9419 * ret
9420 * will work correctly.
9422 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9423 MONO_ADD_INS (cfg->cbb, ins);
9426 g_assert (!return_var);
9427 CHECK_STACK (1);
9428 --sp;
9430 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9431 UNVERIFIED;
9433 emit_setret (cfg, *sp);
9436 if (sp != stack_start)
9437 UNVERIFIED;
9438 MONO_INST_NEW (cfg, ins, OP_BR);
9439 ip++;
9440 ins->inst_target_bb = end_bblock;
9441 MONO_ADD_INS (cfg->cbb, ins);
9442 link_bblock (cfg, cfg->cbb, end_bblock);
9443 start_new_bblock = 1;
9444 break;
9445 case CEE_BR_S:
9446 CHECK_OPSIZE (2);
9447 MONO_INST_NEW (cfg, ins, OP_BR);
9448 ip++;
9449 target = ip + 1 + (signed char)(*ip);
9450 ++ip;
9451 GET_BBLOCK (cfg, tblock, target);
9452 link_bblock (cfg, cfg->cbb, tblock);
9453 ins->inst_target_bb = tblock;
9454 if (sp != stack_start) {
9455 handle_stack_args (cfg, stack_start, sp - stack_start);
9456 sp = stack_start;
9457 CHECK_UNVERIFIABLE (cfg);
9459 MONO_ADD_INS (cfg->cbb, ins);
9460 start_new_bblock = 1;
9461 inline_costs += BRANCH_COST;
9462 break;
9463 case CEE_BEQ_S:
9464 case CEE_BGE_S:
9465 case CEE_BGT_S:
9466 case CEE_BLE_S:
9467 case CEE_BLT_S:
9468 case CEE_BNE_UN_S:
9469 case CEE_BGE_UN_S:
9470 case CEE_BGT_UN_S:
9471 case CEE_BLE_UN_S:
9472 case CEE_BLT_UN_S:
9473 CHECK_OPSIZE (2);
9474 CHECK_STACK (2);
9475 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9476 ip++;
9477 target = ip + 1 + *(signed char*)ip;
9478 ip++;
9480 ADD_BINCOND (NULL);
9482 sp = stack_start;
9483 inline_costs += BRANCH_COST;
9484 break;
9485 case CEE_BR:
9486 CHECK_OPSIZE (5);
9487 MONO_INST_NEW (cfg, ins, OP_BR);
9488 ip++;
9490 target = ip + 4 + (gint32)read32(ip);
9491 ip += 4;
9492 GET_BBLOCK (cfg, tblock, target);
9493 link_bblock (cfg, cfg->cbb, tblock);
9494 ins->inst_target_bb = tblock;
9495 if (sp != stack_start) {
9496 handle_stack_args (cfg, stack_start, sp - stack_start);
9497 sp = stack_start;
9498 CHECK_UNVERIFIABLE (cfg);
9501 MONO_ADD_INS (cfg->cbb, ins);
9503 start_new_bblock = 1;
9504 inline_costs += BRANCH_COST;
9505 break;
9506 case CEE_BRFALSE_S:
9507 case CEE_BRTRUE_S:
9508 case CEE_BRFALSE:
9509 case CEE_BRTRUE: {
9510 MonoInst *cmp;
9511 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9512 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9513 guint32 opsize = is_short ? 1 : 4;
9515 CHECK_OPSIZE (opsize);
9516 CHECK_STACK (1);
9517 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9518 UNVERIFIED;
9519 ip ++;
9520 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9521 ip += opsize;
9523 sp--;
9525 GET_BBLOCK (cfg, tblock, target);
9526 link_bblock (cfg, cfg->cbb, tblock);
9527 GET_BBLOCK (cfg, tblock, ip);
9528 link_bblock (cfg, cfg->cbb, tblock);
9530 if (sp != stack_start) {
9531 handle_stack_args (cfg, stack_start, sp - stack_start);
9532 CHECK_UNVERIFIABLE (cfg);
9535 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9536 cmp->sreg1 = sp [0]->dreg;
9537 type_from_op (cfg, cmp, sp [0], NULL);
9538 CHECK_TYPE (cmp);
9540 #if SIZEOF_REGISTER == 4
9541 if (cmp->opcode == OP_LCOMPARE_IMM) {
9542 /* Convert it to OP_LCOMPARE */
9543 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9544 ins->type = STACK_I8;
9545 ins->dreg = alloc_dreg (cfg, STACK_I8);
9546 ins->inst_l = 0;
9547 MONO_ADD_INS (cfg->cbb, ins);
9548 cmp->opcode = OP_LCOMPARE;
9549 cmp->sreg2 = ins->dreg;
9551 #endif
9552 MONO_ADD_INS (cfg->cbb, cmp);
9554 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9555 type_from_op (cfg, ins, sp [0], NULL);
9556 MONO_ADD_INS (cfg->cbb, ins);
9557 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9558 GET_BBLOCK (cfg, tblock, target);
9559 ins->inst_true_bb = tblock;
9560 GET_BBLOCK (cfg, tblock, ip);
9561 ins->inst_false_bb = tblock;
9562 start_new_bblock = 2;
9564 sp = stack_start;
9565 inline_costs += BRANCH_COST;
9566 break;
9568 case CEE_BEQ:
9569 case CEE_BGE:
9570 case CEE_BGT:
9571 case CEE_BLE:
9572 case CEE_BLT:
9573 case CEE_BNE_UN:
9574 case CEE_BGE_UN:
9575 case CEE_BGT_UN:
9576 case CEE_BLE_UN:
9577 case CEE_BLT_UN:
9578 CHECK_OPSIZE (5);
9579 CHECK_STACK (2);
9580 MONO_INST_NEW (cfg, ins, *ip);
9581 ip++;
9582 target = ip + 4 + (gint32)read32(ip);
9583 ip += 4;
9585 ADD_BINCOND (NULL);
9587 sp = stack_start;
9588 inline_costs += BRANCH_COST;
9589 break;
9590 case CEE_SWITCH: {
9591 MonoInst *src1;
9592 MonoBasicBlock **targets;
9593 MonoBasicBlock *default_bblock;
9594 MonoJumpInfoBBTable *table;
9595 int offset_reg = alloc_preg (cfg);
9596 int target_reg = alloc_preg (cfg);
9597 int table_reg = alloc_preg (cfg);
9598 int sum_reg = alloc_preg (cfg);
9599 gboolean use_op_switch;
9601 CHECK_OPSIZE (5);
9602 CHECK_STACK (1);
9603 n = read32 (ip + 1);
9604 --sp;
9605 src1 = sp [0];
9606 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9607 UNVERIFIED;
9609 ip += 5;
9610 CHECK_OPSIZE (n * sizeof (guint32));
9611 target = ip + n * sizeof (guint32);
9613 GET_BBLOCK (cfg, default_bblock, target);
9614 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9616 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9617 for (i = 0; i < n; ++i) {
9618 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9619 targets [i] = tblock;
9620 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9621 ip += 4;
9624 if (sp != stack_start) {
9626 * Link the current bb with the targets as well, so handle_stack_args
9627 * will set their in_stack correctly.
9629 link_bblock (cfg, cfg->cbb, default_bblock);
9630 for (i = 0; i < n; ++i)
9631 link_bblock (cfg, cfg->cbb, targets [i]);
9633 handle_stack_args (cfg, stack_start, sp - stack_start);
9634 sp = stack_start;
9635 CHECK_UNVERIFIABLE (cfg);
9637 /* Undo the links */
9638 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9639 for (i = 0; i < n; ++i)
9640 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9644 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9646 for (i = 0; i < n; ++i)
9647 link_bblock (cfg, cfg->cbb, targets [i]);
9649 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9650 table->table = targets;
9651 table->table_size = n;
9653 use_op_switch = FALSE;
9654 #ifdef TARGET_ARM
9655 /* ARM implements SWITCH statements differently */
9656 /* FIXME: Make it use the generic implementation */
9657 if (!cfg->compile_aot)
9658 use_op_switch = TRUE;
9659 #endif
9661 if (COMPILE_LLVM (cfg))
9662 use_op_switch = TRUE;
9664 cfg->cbb->has_jump_table = 1;
9666 if (use_op_switch) {
9667 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9668 ins->sreg1 = src1->dreg;
9669 ins->inst_p0 = table;
9670 ins->inst_many_bb = targets;
9671 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9672 MONO_ADD_INS (cfg->cbb, ins);
9673 } else {
9674 if (sizeof (gpointer) == 8)
9675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9676 else
9677 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9679 #if SIZEOF_REGISTER == 8
9680 /* The upper word might not be zero, and we add it to a 64 bit address later */
9681 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9682 #endif
9684 if (cfg->compile_aot) {
9685 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9686 } else {
9687 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9688 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9689 ins->inst_p0 = table;
9690 ins->dreg = table_reg;
9691 MONO_ADD_INS (cfg->cbb, ins);
9694 /* FIXME: Use load_memindex */
9695 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9697 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9699 start_new_bblock = 1;
9700 inline_costs += (BRANCH_COST * 2);
9701 break;
9703 case CEE_LDIND_I1:
9704 case CEE_LDIND_U1:
9705 case CEE_LDIND_I2:
9706 case CEE_LDIND_U2:
9707 case CEE_LDIND_I4:
9708 case CEE_LDIND_U4:
9709 case CEE_LDIND_I8:
9710 case CEE_LDIND_I:
9711 case CEE_LDIND_R4:
9712 case CEE_LDIND_R8:
9713 case CEE_LDIND_REF:
9714 CHECK_STACK (1);
9715 --sp;
9717 switch (*ip) {
9718 case CEE_LDIND_R4:
9719 case CEE_LDIND_R8:
9720 dreg = alloc_freg (cfg);
9721 break;
9722 case CEE_LDIND_I8:
9723 dreg = alloc_lreg (cfg);
9724 break;
9725 case CEE_LDIND_REF:
9726 dreg = alloc_ireg_ref (cfg);
9727 break;
9728 default:
9729 dreg = alloc_preg (cfg);
9732 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9733 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9734 if (*ip == CEE_LDIND_R4)
9735 ins->type = cfg->r4_stack_type;
9736 ins->flags |= ins_flag;
9737 MONO_ADD_INS (cfg->cbb, ins);
9738 *sp++ = ins;
9739 if (ins_flag & MONO_INST_VOLATILE) {
9740 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9741 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9743 ins_flag = 0;
9744 ++ip;
9745 break;
9746 case CEE_STIND_REF:
9747 case CEE_STIND_I1:
9748 case CEE_STIND_I2:
9749 case CEE_STIND_I4:
9750 case CEE_STIND_I8:
9751 case CEE_STIND_R4:
9752 case CEE_STIND_R8:
9753 case CEE_STIND_I:
9754 CHECK_STACK (2);
9755 sp -= 2;
9757 if (ins_flag & MONO_INST_VOLATILE) {
9758 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9759 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9762 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9763 ins->flags |= ins_flag;
9764 ins_flag = 0;
9766 MONO_ADD_INS (cfg->cbb, ins);
9768 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9769 emit_write_barrier (cfg, sp [0], sp [1]);
9771 inline_costs += 1;
9772 ++ip;
9773 break;
9775 case CEE_MUL:
9776 CHECK_STACK (2);
9778 MONO_INST_NEW (cfg, ins, (*ip));
9779 sp -= 2;
9780 ins->sreg1 = sp [0]->dreg;
9781 ins->sreg2 = sp [1]->dreg;
9782 type_from_op (cfg, ins, sp [0], sp [1]);
9783 CHECK_TYPE (ins);
9784 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9786 /* Use the immediate opcodes if possible */
9787 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9788 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9789 if (imm_opcode != -1) {
9790 ins->opcode = imm_opcode;
9791 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9792 ins->sreg2 = -1;
9794 NULLIFY_INS (sp [1]);
9798 MONO_ADD_INS ((cfg)->cbb, (ins));
9800 *sp++ = mono_decompose_opcode (cfg, ins);
9801 ip++;
9802 break;
9803 case CEE_ADD:
9804 case CEE_SUB:
9805 case CEE_DIV:
9806 case CEE_DIV_UN:
9807 case CEE_REM:
9808 case CEE_REM_UN:
9809 case CEE_AND:
9810 case CEE_OR:
9811 case CEE_XOR:
9812 case CEE_SHL:
9813 case CEE_SHR:
9814 case CEE_SHR_UN:
9815 CHECK_STACK (2);
9817 MONO_INST_NEW (cfg, ins, (*ip));
9818 sp -= 2;
9819 ins->sreg1 = sp [0]->dreg;
9820 ins->sreg2 = sp [1]->dreg;
9821 type_from_op (cfg, ins, sp [0], sp [1]);
9822 CHECK_TYPE (ins);
9823 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9824 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9826 /* FIXME: Pass opcode to is_inst_imm */
9828 /* Use the immediate opcodes if possible */
9829 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9830 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9831 if (imm_opcode != -1) {
9832 ins->opcode = imm_opcode;
9833 if (sp [1]->opcode == OP_I8CONST) {
9834 #if SIZEOF_REGISTER == 8
9835 ins->inst_imm = sp [1]->inst_l;
9836 #else
9837 ins->inst_ls_word = sp [1]->inst_ls_word;
9838 ins->inst_ms_word = sp [1]->inst_ms_word;
9839 #endif
9841 else
9842 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9843 ins->sreg2 = -1;
9845 /* Might be followed by an instruction added by add_widen_op */
9846 if (sp [1]->next == NULL)
9847 NULLIFY_INS (sp [1]);
9850 MONO_ADD_INS ((cfg)->cbb, (ins));
9852 *sp++ = mono_decompose_opcode (cfg, ins);
9853 ip++;
9854 break;
9855 case CEE_NEG:
9856 case CEE_NOT:
9857 case CEE_CONV_I1:
9858 case CEE_CONV_I2:
9859 case CEE_CONV_I4:
9860 case CEE_CONV_R4:
9861 case CEE_CONV_R8:
9862 case CEE_CONV_U4:
9863 case CEE_CONV_I8:
9864 case CEE_CONV_U8:
9865 case CEE_CONV_OVF_I8:
9866 case CEE_CONV_OVF_U8:
9867 case CEE_CONV_R_UN:
9868 CHECK_STACK (1);
9870 /* Special case this earlier so we have long constants in the IR */
9871 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9872 int data = sp [-1]->inst_c0;
9873 sp [-1]->opcode = OP_I8CONST;
9874 sp [-1]->type = STACK_I8;
9875 #if SIZEOF_REGISTER == 8
9876 if ((*ip) == CEE_CONV_U8)
9877 sp [-1]->inst_c0 = (guint32)data;
9878 else
9879 sp [-1]->inst_c0 = data;
9880 #else
9881 sp [-1]->inst_ls_word = data;
9882 if ((*ip) == CEE_CONV_U8)
9883 sp [-1]->inst_ms_word = 0;
9884 else
9885 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9886 #endif
9887 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9889 else {
9890 ADD_UNOP (*ip);
9892 ip++;
9893 break;
9894 case CEE_CONV_OVF_I4:
9895 case CEE_CONV_OVF_I1:
9896 case CEE_CONV_OVF_I2:
9897 case CEE_CONV_OVF_I:
9898 case CEE_CONV_OVF_U:
9899 CHECK_STACK (1);
9901 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9902 ADD_UNOP (CEE_CONV_OVF_I8);
9903 ADD_UNOP (*ip);
9904 } else {
9905 ADD_UNOP (*ip);
9907 ip++;
9908 break;
9909 case CEE_CONV_OVF_U1:
9910 case CEE_CONV_OVF_U2:
9911 case CEE_CONV_OVF_U4:
9912 CHECK_STACK (1);
9914 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9915 ADD_UNOP (CEE_CONV_OVF_U8);
9916 ADD_UNOP (*ip);
9917 } else {
9918 ADD_UNOP (*ip);
9920 ip++;
9921 break;
9922 case CEE_CONV_OVF_I1_UN:
9923 case CEE_CONV_OVF_I2_UN:
9924 case CEE_CONV_OVF_I4_UN:
9925 case CEE_CONV_OVF_I8_UN:
9926 case CEE_CONV_OVF_U1_UN:
9927 case CEE_CONV_OVF_U2_UN:
9928 case CEE_CONV_OVF_U4_UN:
9929 case CEE_CONV_OVF_U8_UN:
9930 case CEE_CONV_OVF_I_UN:
9931 case CEE_CONV_OVF_U_UN:
9932 case CEE_CONV_U2:
9933 case CEE_CONV_U1:
9934 case CEE_CONV_I:
9935 case CEE_CONV_U:
9936 CHECK_STACK (1);
9937 ADD_UNOP (*ip);
9938 CHECK_CFG_EXCEPTION;
9939 ip++;
9940 break;
9941 case CEE_ADD_OVF:
9942 case CEE_ADD_OVF_UN:
9943 case CEE_MUL_OVF:
9944 case CEE_MUL_OVF_UN:
9945 case CEE_SUB_OVF:
9946 case CEE_SUB_OVF_UN:
9947 CHECK_STACK (2);
9948 ADD_BINOP (*ip);
9949 ip++;
9950 break;
9951 case CEE_CPOBJ:
9952 GSHAREDVT_FAILURE (*ip);
9953 CHECK_OPSIZE (5);
9954 CHECK_STACK (2);
9955 token = read32 (ip + 1);
9956 klass = mini_get_class (method, token, generic_context);
9957 CHECK_TYPELOAD (klass);
9958 sp -= 2;
9959 if (generic_class_is_reference_type (cfg, klass)) {
9960 MonoInst *store, *load;
9961 int dreg = alloc_ireg_ref (cfg);
9963 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9964 load->flags |= ins_flag;
9965 MONO_ADD_INS (cfg->cbb, load);
9967 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9968 store->flags |= ins_flag;
9969 MONO_ADD_INS (cfg->cbb, store);
9971 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9972 emit_write_barrier (cfg, sp [0], sp [1]);
9973 } else {
9974 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9976 ins_flag = 0;
9977 ip += 5;
9978 break;
9979 case CEE_LDOBJ: {
9980 int loc_index = -1;
9981 int stloc_len = 0;
9983 CHECK_OPSIZE (5);
9984 CHECK_STACK (1);
9985 --sp;
9986 token = read32 (ip + 1);
9987 klass = mini_get_class (method, token, generic_context);
9988 CHECK_TYPELOAD (klass);
9990 /* Optimize the common ldobj+stloc combination */
9991 switch (ip [5]) {
9992 case CEE_STLOC_S:
9993 loc_index = ip [6];
9994 stloc_len = 2;
9995 break;
9996 case CEE_STLOC_0:
9997 case CEE_STLOC_1:
9998 case CEE_STLOC_2:
9999 case CEE_STLOC_3:
10000 loc_index = ip [5] - CEE_STLOC_0;
10001 stloc_len = 1;
10002 break;
10003 default:
10004 break;
10007 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10008 CHECK_LOCAL (loc_index);
10010 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10011 ins->dreg = cfg->locals [loc_index]->dreg;
10012 ins->flags |= ins_flag;
10013 ip += 5;
10014 ip += stloc_len;
10015 if (ins_flag & MONO_INST_VOLATILE) {
10016 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10017 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10019 ins_flag = 0;
10020 break;
10023 /* Optimize the ldobj+stobj combination */
10024 /* The reference case ends up being a load+store anyway */
10025 /* Skip this if the operation is volatile. */
10026 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10027 CHECK_STACK (1);
10029 sp --;
10031 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10033 ip += 5 + 5;
10034 ins_flag = 0;
10035 break;
10038 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10039 ins->flags |= ins_flag;
10040 *sp++ = ins;
10042 if (ins_flag & MONO_INST_VOLATILE) {
10043 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10044 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10047 ip += 5;
10048 ins_flag = 0;
10049 inline_costs += 1;
10050 break;
10052 case CEE_LDSTR:
10053 CHECK_STACK_OVF (1);
10054 CHECK_OPSIZE (5);
10055 n = read32 (ip + 1);
10057 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10058 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10059 ins->type = STACK_OBJ;
10060 *sp = ins;
10062 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10063 MonoInst *iargs [1];
10064 char *str = (char *)mono_method_get_wrapper_data (method, n);
10066 if (cfg->compile_aot)
10067 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10068 else
10069 EMIT_NEW_PCONST (cfg, iargs [0], str);
10070 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10071 } else {
10072 if (cfg->opt & MONO_OPT_SHARED) {
10073 MonoInst *iargs [3];
10075 if (cfg->compile_aot) {
10076 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10078 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10079 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10080 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10081 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10082 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10083 CHECK_CFG_ERROR;
10084 } else {
10085 if (cfg->cbb->out_of_line) {
10086 MonoInst *iargs [2];
10088 if (image == mono_defaults.corlib) {
10090 * Avoid relocations in AOT and save some space by using a
10091 * version of helper_ldstr specialized to mscorlib.
10093 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10094 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10095 } else {
10096 /* Avoid creating the string object */
10097 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10098 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10099 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10102 else
10103 if (cfg->compile_aot) {
10104 NEW_LDSTRCONST (cfg, ins, image, n);
10105 *sp = ins;
10106 MONO_ADD_INS (cfg->cbb, ins);
10108 else {
10109 NEW_PCONST (cfg, ins, NULL);
10110 ins->type = STACK_OBJ;
10111 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10112 CHECK_CFG_ERROR;
10114 if (!ins->inst_p0)
10115 OUT_OF_MEMORY_FAILURE;
10117 *sp = ins;
10118 MONO_ADD_INS (cfg->cbb, ins);
10123 sp++;
10124 ip += 5;
10125 break;
10126 case CEE_NEWOBJ: {
10127 MonoInst *iargs [2];
10128 MonoMethodSignature *fsig;
10129 MonoInst this_ins;
10130 MonoInst *alloc;
10131 MonoInst *vtable_arg = NULL;
10133 CHECK_OPSIZE (5);
10134 token = read32 (ip + 1);
10135 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10136 CHECK_CFG_ERROR;
10138 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10139 CHECK_CFG_ERROR;
10141 mono_save_token_info (cfg, image, token, cmethod);
10143 if (!mono_class_init (cmethod->klass))
10144 TYPE_LOAD_ERROR (cmethod->klass);
10146 context_used = mini_method_check_context_used (cfg, cmethod);
10148 if (mono_security_core_clr_enabled ())
10149 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10151 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10152 emit_class_init (cfg, cmethod->klass);
10153 CHECK_TYPELOAD (cmethod->klass);
10157 if (cfg->gsharedvt) {
10158 if (mini_is_gsharedvt_variable_signature (sig))
10159 GSHAREDVT_FAILURE (*ip);
10163 n = fsig->param_count;
10164 CHECK_STACK (n);
10167 * Generate smaller code for the common newobj <exception> instruction in
10168 * argument checking code.
10170 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10171 is_exception_class (cmethod->klass) && n <= 2 &&
10172 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10173 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10174 MonoInst *iargs [3];
10176 sp -= n;
10178 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10179 switch (n) {
10180 case 0:
10181 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10182 break;
10183 case 1:
10184 iargs [1] = sp [0];
10185 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10186 break;
10187 case 2:
10188 iargs [1] = sp [0];
10189 iargs [2] = sp [1];
10190 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10191 break;
10192 default:
10193 g_assert_not_reached ();
10196 ip += 5;
10197 inline_costs += 5;
10198 break;
10201 /* move the args to allow room for 'this' in the first position */
10202 while (n--) {
10203 --sp;
10204 sp [1] = sp [0];
10207 /* check_call_signature () requires sp[0] to be set */
10208 this_ins.type = STACK_OBJ;
10209 sp [0] = &this_ins;
10210 if (check_call_signature (cfg, fsig, sp))
10211 UNVERIFIED;
10213 iargs [0] = NULL;
10215 if (mini_class_is_system_array (cmethod->klass)) {
10216 *sp = emit_get_rgctx_method (cfg, context_used,
10217 cmethod, MONO_RGCTX_INFO_METHOD);
10219 /* Avoid varargs in the common case */
10220 if (fsig->param_count == 1)
10221 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10222 else if (fsig->param_count == 2)
10223 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10224 else if (fsig->param_count == 3)
10225 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10226 else if (fsig->param_count == 4)
10227 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10228 else
10229 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10230 } else if (cmethod->string_ctor) {
10231 g_assert (!context_used);
10232 g_assert (!vtable_arg);
10233 /* we simply pass a null pointer */
10234 EMIT_NEW_PCONST (cfg, *sp, NULL);
10235 /* now call the string ctor */
10236 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10237 } else {
10238 if (cmethod->klass->valuetype) {
10239 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10240 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10241 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10243 alloc = NULL;
10246 * The code generated by mini_emit_virtual_call () expects
10247 * iargs [0] to be a boxed instance, but luckily the vcall
10248 * will be transformed into a normal call there.
10250 } else if (context_used) {
10251 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10252 *sp = alloc;
10253 } else {
10254 MonoVTable *vtable = NULL;
10256 if (!cfg->compile_aot)
10257 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10258 CHECK_TYPELOAD (cmethod->klass);
10261 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10262 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10263 * As a workaround, we call class cctors before allocating objects.
10265 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10266 emit_class_init (cfg, cmethod->klass);
10267 if (cfg->verbose_level > 2)
10268 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10269 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10272 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10273 *sp = alloc;
10275 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10277 if (alloc)
10278 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10280 /* Now call the actual ctor */
10281 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10282 CHECK_CFG_EXCEPTION;
10285 if (alloc == NULL) {
10286 /* Valuetype */
10287 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10288 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10289 *sp++= ins;
10290 } else {
10291 *sp++ = alloc;
10294 ip += 5;
10295 inline_costs += 5;
10296 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10297 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10298 break;
10300 case CEE_CASTCLASS:
10301 case CEE_ISINST: {
10302 CHECK_STACK (1);
10303 --sp;
10304 CHECK_OPSIZE (5);
10305 token = read32 (ip + 1);
10306 klass = mini_get_class (method, token, generic_context);
10307 CHECK_TYPELOAD (klass);
10308 if (sp [0]->type != STACK_OBJ)
10309 UNVERIFIED;
10311 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10312 ins->dreg = alloc_preg (cfg);
10313 ins->sreg1 = (*sp)->dreg;
10314 ins->klass = klass;
10315 ins->type = STACK_OBJ;
10316 MONO_ADD_INS (cfg->cbb, ins);
10318 CHECK_CFG_EXCEPTION;
10319 *sp++ = ins;
10320 ip += 5;
10322 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10323 break;
10325 case CEE_UNBOX_ANY: {
10326 MonoInst *res, *addr;
10328 CHECK_STACK (1);
10329 --sp;
10330 CHECK_OPSIZE (5);
10331 token = read32 (ip + 1);
10332 klass = mini_get_class (method, token, generic_context);
10333 CHECK_TYPELOAD (klass);
10335 mono_save_token_info (cfg, image, token, klass);
10337 context_used = mini_class_check_context_used (cfg, klass);
10339 if (mini_is_gsharedvt_klass (klass)) {
10340 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10341 inline_costs += 2;
10342 } else if (generic_class_is_reference_type (cfg, klass)) {
10343 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10344 EMIT_NEW_PCONST (cfg, res, NULL);
10345 res->type = STACK_OBJ;
10346 } else {
10347 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10348 res->dreg = alloc_preg (cfg);
10349 res->sreg1 = (*sp)->dreg;
10350 res->klass = klass;
10351 res->type = STACK_OBJ;
10352 MONO_ADD_INS (cfg->cbb, res);
10353 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10355 } else if (mono_class_is_nullable (klass)) {
10356 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10357 } else {
10358 addr = handle_unbox (cfg, klass, sp, context_used);
10359 /* LDOBJ */
10360 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10361 res = ins;
10362 inline_costs += 2;
10365 *sp ++ = res;
10366 ip += 5;
10367 break;
10369 case CEE_BOX: {
10370 MonoInst *val;
10371 MonoClass *enum_class;
10372 MonoMethod *has_flag;
10374 CHECK_STACK (1);
10375 --sp;
10376 val = *sp;
10377 CHECK_OPSIZE (5);
10378 token = read32 (ip + 1);
10379 klass = mini_get_class (method, token, generic_context);
10380 CHECK_TYPELOAD (klass);
10382 mono_save_token_info (cfg, image, token, klass);
10384 context_used = mini_class_check_context_used (cfg, klass);
10386 if (generic_class_is_reference_type (cfg, klass)) {
10387 *sp++ = val;
10388 ip += 5;
10389 break;
10392 if (klass == mono_defaults.void_class)
10393 UNVERIFIED;
10394 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10395 UNVERIFIED;
10396 /* frequent check in generic code: box (struct), brtrue */
10399 * Look for:
10401 * <push int/long ptr>
10402 * <push int/long>
10403 * box MyFlags
10404 * constrained. MyFlags
10405 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10407 * If we find this sequence and the operand types on box and constrained
10408 * are equal, we can emit a specialized instruction sequence instead of
10409 * the very slow HasFlag () call.
10411 if ((cfg->opt & MONO_OPT_INTRINS) &&
10412 /* Cheap checks first. */
10413 ip + 5 + 6 + 5 < end &&
10414 ip [5] == CEE_PREFIX1 &&
10415 ip [6] == CEE_CONSTRAINED_ &&
10416 ip [11] == CEE_CALLVIRT &&
10417 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10418 mono_class_is_enum (klass) &&
10419 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10420 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10421 has_flag->klass == mono_defaults.enum_class &&
10422 !strcmp (has_flag->name, "HasFlag") &&
10423 has_flag->signature->hasthis &&
10424 has_flag->signature->param_count == 1) {
10425 CHECK_TYPELOAD (enum_class);
10427 if (enum_class == klass) {
10428 MonoInst *enum_this, *enum_flag;
10430 ip += 5 + 6 + 5;
10431 --sp;
10433 enum_this = sp [0];
10434 enum_flag = sp [1];
10436 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10437 break;
10441 // FIXME: LLVM can't handle the inconsistent bb linking
10442 if (!mono_class_is_nullable (klass) &&
10443 !mini_is_gsharedvt_klass (klass) &&
10444 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10445 (ip [5] == CEE_BRTRUE ||
10446 ip [5] == CEE_BRTRUE_S ||
10447 ip [5] == CEE_BRFALSE ||
10448 ip [5] == CEE_BRFALSE_S)) {
10449 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10450 int dreg;
10451 MonoBasicBlock *true_bb, *false_bb;
10453 ip += 5;
10455 if (cfg->verbose_level > 3) {
10456 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10457 printf ("<box+brtrue opt>\n");
10460 switch (*ip) {
10461 case CEE_BRTRUE_S:
10462 case CEE_BRFALSE_S:
10463 CHECK_OPSIZE (2);
10464 ip++;
10465 target = ip + 1 + (signed char)(*ip);
10466 ip++;
10467 break;
10468 case CEE_BRTRUE:
10469 case CEE_BRFALSE:
10470 CHECK_OPSIZE (5);
10471 ip++;
10472 target = ip + 4 + (gint)(read32 (ip));
10473 ip += 4;
10474 break;
10475 default:
10476 g_assert_not_reached ();
10480 * We need to link both bblocks, since it is needed for handling stack
10481 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10482 * Branching to only one of them would lead to inconsistencies, so
10483 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10485 GET_BBLOCK (cfg, true_bb, target);
10486 GET_BBLOCK (cfg, false_bb, ip);
10488 mono_link_bblock (cfg, cfg->cbb, true_bb);
10489 mono_link_bblock (cfg, cfg->cbb, false_bb);
10491 if (sp != stack_start) {
10492 handle_stack_args (cfg, stack_start, sp - stack_start);
10493 sp = stack_start;
10494 CHECK_UNVERIFIABLE (cfg);
10497 if (COMPILE_LLVM (cfg)) {
10498 dreg = alloc_ireg (cfg);
10499 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10502 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10503 } else {
10504 /* The JIT can't eliminate the iconst+compare */
10505 MONO_INST_NEW (cfg, ins, OP_BR);
10506 ins->inst_target_bb = is_true ? true_bb : false_bb;
10507 MONO_ADD_INS (cfg->cbb, ins);
10510 start_new_bblock = 1;
10511 break;
10514 *sp++ = handle_box (cfg, val, klass, context_used);
10516 CHECK_CFG_EXCEPTION;
10517 ip += 5;
10518 inline_costs += 1;
10519 break;
10521 case CEE_UNBOX: {
10522 CHECK_STACK (1);
10523 --sp;
10524 CHECK_OPSIZE (5);
10525 token = read32 (ip + 1);
10526 klass = mini_get_class (method, token, generic_context);
10527 CHECK_TYPELOAD (klass);
10529 mono_save_token_info (cfg, image, token, klass);
10531 context_used = mini_class_check_context_used (cfg, klass);
10533 if (mono_class_is_nullable (klass)) {
10534 MonoInst *val;
10536 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10537 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10539 *sp++= ins;
10540 } else {
10541 ins = handle_unbox (cfg, klass, sp, context_used);
10542 *sp++ = ins;
10544 ip += 5;
10545 inline_costs += 2;
10546 break;
10548 case CEE_LDFLD:
10549 case CEE_LDFLDA:
10550 case CEE_STFLD:
10551 case CEE_LDSFLD:
10552 case CEE_LDSFLDA:
10553 case CEE_STSFLD: {
10554 MonoClassField *field;
10555 #ifndef DISABLE_REMOTING
10556 int costs;
10557 #endif
10558 guint foffset;
10559 gboolean is_instance;
10560 int op;
10561 gpointer addr = NULL;
10562 gboolean is_special_static;
10563 MonoType *ftype;
10564 MonoInst *store_val = NULL;
10565 MonoInst *thread_ins;
10567 op = *ip;
10568 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10569 if (is_instance) {
10570 if (op == CEE_STFLD) {
10571 CHECK_STACK (2);
10572 sp -= 2;
10573 store_val = sp [1];
10574 } else {
10575 CHECK_STACK (1);
10576 --sp;
10578 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10579 UNVERIFIED;
10580 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10581 UNVERIFIED;
10582 } else {
10583 if (op == CEE_STSFLD) {
10584 CHECK_STACK (1);
10585 sp--;
10586 store_val = sp [0];
10590 CHECK_OPSIZE (5);
10591 token = read32 (ip + 1);
10592 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10593 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10594 klass = field->parent;
10596 else {
10597 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10598 CHECK_CFG_ERROR;
10600 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10601 FIELD_ACCESS_FAILURE (method, field);
10602 mono_class_init (klass);
10604 /* if the class is Critical then transparent code cannot access it's fields */
10605 if (!is_instance && mono_security_core_clr_enabled ())
10606 ensure_method_is_allowed_to_access_field (cfg, method, field);
10608 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10609 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10610 if (mono_security_core_clr_enabled ())
10611 ensure_method_is_allowed_to_access_field (cfg, method, field);
10614 ftype = mono_field_get_type (field);
10617 * LDFLD etc. is usable on static fields as well, so convert those cases to
10618 * the static case.
10620 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10621 switch (op) {
10622 case CEE_LDFLD:
10623 op = CEE_LDSFLD;
10624 break;
10625 case CEE_STFLD:
10626 op = CEE_STSFLD;
10627 break;
10628 case CEE_LDFLDA:
10629 op = CEE_LDSFLDA;
10630 break;
10631 default:
10632 g_assert_not_reached ();
10634 is_instance = FALSE;
10637 context_used = mini_class_check_context_used (cfg, klass);
10639 /* INSTANCE CASE */
10641 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10642 if (op == CEE_STFLD) {
10643 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10644 UNVERIFIED;
10645 #ifndef DISABLE_REMOTING
10646 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10647 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10648 MonoInst *iargs [5];
10650 GSHAREDVT_FAILURE (op);
10652 iargs [0] = sp [0];
10653 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10654 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10655 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10656 field->offset);
10657 iargs [4] = sp [1];
10659 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10660 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10661 iargs, ip, cfg->real_offset, TRUE);
10662 CHECK_CFG_EXCEPTION;
10663 g_assert (costs > 0);
10665 cfg->real_offset += 5;
10667 inline_costs += costs;
10668 } else {
10669 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10671 } else
10672 #endif
10674 MonoInst *store, *wbarrier_ptr_ins = NULL;
10676 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10678 if (ins_flag & MONO_INST_VOLATILE) {
10679 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10680 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10683 if (mini_is_gsharedvt_klass (klass)) {
10684 MonoInst *offset_ins;
10686 context_used = mini_class_check_context_used (cfg, klass);
10688 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10689 /* The value is offset by 1 */
10690 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10691 dreg = alloc_ireg_mp (cfg);
10692 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10693 wbarrier_ptr_ins = ins;
10694 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10695 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10696 } else {
10697 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10699 if (sp [0]->opcode != OP_LDADDR)
10700 store->flags |= MONO_INST_FAULT;
10702 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10703 if (mini_is_gsharedvt_klass (klass)) {
10704 g_assert (wbarrier_ptr_ins);
10705 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10706 } else {
10707 /* insert call to write barrier */
10708 MonoInst *ptr;
10709 int dreg;
10711 dreg = alloc_ireg_mp (cfg);
10712 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10713 emit_write_barrier (cfg, ptr, sp [1]);
10717 store->flags |= ins_flag;
10719 ins_flag = 0;
10720 ip += 5;
10721 break;
10724 #ifndef DISABLE_REMOTING
10725 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10726 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10727 MonoInst *iargs [4];
10729 GSHAREDVT_FAILURE (op);
10731 iargs [0] = sp [0];
10732 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10733 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10734 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10735 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10736 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10737 iargs, ip, cfg->real_offset, TRUE);
10738 CHECK_CFG_EXCEPTION;
10739 g_assert (costs > 0);
10741 cfg->real_offset += 5;
10743 *sp++ = iargs [0];
10745 inline_costs += costs;
10746 } else {
10747 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10748 *sp++ = ins;
10750 } else
10751 #endif
10752 if (is_instance) {
10753 if (sp [0]->type == STACK_VTYPE) {
10754 MonoInst *var;
10756 /* Have to compute the address of the variable */
10758 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10759 if (!var)
10760 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10761 else
10762 g_assert (var->klass == klass);
10764 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10765 sp [0] = ins;
10768 if (op == CEE_LDFLDA) {
10769 if (sp [0]->type == STACK_OBJ) {
10770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10771 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10774 dreg = alloc_ireg_mp (cfg);
10776 if (mini_is_gsharedvt_klass (klass)) {
10777 MonoInst *offset_ins;
10779 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10780 /* The value is offset by 1 */
10781 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10782 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10783 } else {
10784 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10786 ins->klass = mono_class_from_mono_type (field->type);
10787 ins->type = STACK_MP;
10788 *sp++ = ins;
10789 } else {
10790 MonoInst *load;
10792 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10794 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10795 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10796 if (ins) {
10797 *sp++ = ins;
10798 ins_flag = 0;
10799 ip += 5;
10800 break;
10804 if (mini_is_gsharedvt_klass (klass)) {
10805 MonoInst *offset_ins;
10807 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10808 /* The value is offset by 1 */
10809 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10810 dreg = alloc_ireg_mp (cfg);
10811 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10812 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10813 } else {
10814 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10816 load->flags |= ins_flag;
10817 if (sp [0]->opcode != OP_LDADDR)
10818 load->flags |= MONO_INST_FAULT;
10819 *sp++ = load;
10823 if (is_instance) {
10824 ins_flag = 0;
10825 ip += 5;
10826 break;
10829 /* STATIC CASE */
10830 context_used = mini_class_check_context_used (cfg, klass);
10832 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10833 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10834 CHECK_CFG_ERROR;
10837 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10838 * to be called here.
10840 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10841 mono_class_vtable (cfg->domain, klass);
10842 CHECK_TYPELOAD (klass);
10844 mono_domain_lock (cfg->domain);
10845 if (cfg->domain->special_static_fields)
10846 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10847 mono_domain_unlock (cfg->domain);
10849 is_special_static = mono_class_field_is_special_static (field);
10851 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10852 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10853 else
10854 thread_ins = NULL;
10856 /* Generate IR to compute the field address */
10857 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10859 * Fast access to TLS data
10860 * Inline version of get_thread_static_data () in
10861 * threads.c.
10863 guint32 offset;
10864 int idx, static_data_reg, array_reg, dreg;
10866 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10867 GSHAREDVT_FAILURE (op);
10869 static_data_reg = alloc_ireg (cfg);
10870 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10872 if (cfg->compile_aot) {
10873 int offset_reg, offset2_reg, idx_reg;
10875 /* For TLS variables, this will return the TLS offset */
10876 EMIT_NEW_SFLDACONST (cfg, ins, field);
10877 offset_reg = ins->dreg;
10878 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10879 idx_reg = alloc_ireg (cfg);
10880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10881 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10882 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10883 array_reg = alloc_ireg (cfg);
10884 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10885 offset2_reg = alloc_ireg (cfg);
10886 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10887 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10888 dreg = alloc_ireg (cfg);
10889 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10890 } else {
10891 offset = (gsize)addr & 0x7fffffff;
10892 idx = offset & 0x3f;
10894 array_reg = alloc_ireg (cfg);
10895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10896 dreg = alloc_ireg (cfg);
10897 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10899 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10900 (cfg->compile_aot && is_special_static) ||
10901 (context_used && is_special_static)) {
10902 MonoInst *iargs [2];
10904 g_assert (field->parent);
10905 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10906 if (context_used) {
10907 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10908 field, MONO_RGCTX_INFO_CLASS_FIELD);
10909 } else {
10910 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10912 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10913 } else if (context_used) {
10914 MonoInst *static_data;
10917 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10918 method->klass->name_space, method->klass->name, method->name,
10919 depth, field->offset);
10922 if (mono_class_needs_cctor_run (klass, method))
10923 emit_class_init (cfg, klass);
10926 * The pointer we're computing here is
10928 * super_info.static_data + field->offset
10930 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10931 klass, MONO_RGCTX_INFO_STATIC_DATA);
10933 if (mini_is_gsharedvt_klass (klass)) {
10934 MonoInst *offset_ins;
10936 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10937 /* The value is offset by 1 */
10938 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10939 dreg = alloc_ireg_mp (cfg);
10940 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10941 } else if (field->offset == 0) {
10942 ins = static_data;
10943 } else {
10944 int addr_reg = mono_alloc_preg (cfg);
10945 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10947 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10948 MonoInst *iargs [2];
10950 g_assert (field->parent);
10951 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10952 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10953 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10954 } else {
10955 MonoVTable *vtable = NULL;
10957 if (!cfg->compile_aot)
10958 vtable = mono_class_vtable (cfg->domain, klass);
10959 CHECK_TYPELOAD (klass);
10961 if (!addr) {
10962 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10963 if (!(g_slist_find (class_inits, klass))) {
10964 emit_class_init (cfg, klass);
10965 if (cfg->verbose_level > 2)
10966 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10967 class_inits = g_slist_prepend (class_inits, klass);
10969 } else {
10970 if (cfg->run_cctors) {
10971 /* This makes so that inline cannot trigger */
10972 /* .cctors: too many apps depend on them */
10973 /* running with a specific order... */
10974 g_assert (vtable);
10975 if (! vtable->initialized)
10976 INLINE_FAILURE ("class init");
10977 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10978 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10979 goto exception_exit;
10983 if (cfg->compile_aot)
10984 EMIT_NEW_SFLDACONST (cfg, ins, field);
10985 else {
10986 g_assert (vtable);
10987 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10988 g_assert (addr);
10989 EMIT_NEW_PCONST (cfg, ins, addr);
10991 } else {
10992 MonoInst *iargs [1];
10993 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10994 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10998 /* Generate IR to do the actual load/store operation */
11000 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11001 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11002 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11005 if (op == CEE_LDSFLDA) {
11006 ins->klass = mono_class_from_mono_type (ftype);
11007 ins->type = STACK_PTR;
11008 *sp++ = ins;
11009 } else if (op == CEE_STSFLD) {
11010 MonoInst *store;
11012 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11013 store->flags |= ins_flag;
11014 } else {
11015 gboolean is_const = FALSE;
11016 MonoVTable *vtable = NULL;
11017 gpointer addr = NULL;
11019 if (!context_used) {
11020 vtable = mono_class_vtable (cfg->domain, klass);
11021 CHECK_TYPELOAD (klass);
11023 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11024 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11025 int ro_type = ftype->type;
11026 if (!addr)
11027 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11028 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11029 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11032 GSHAREDVT_FAILURE (op);
11034 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11035 is_const = TRUE;
11036 switch (ro_type) {
11037 case MONO_TYPE_BOOLEAN:
11038 case MONO_TYPE_U1:
11039 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11040 sp++;
11041 break;
11042 case MONO_TYPE_I1:
11043 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11044 sp++;
11045 break;
11046 case MONO_TYPE_CHAR:
11047 case MONO_TYPE_U2:
11048 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11049 sp++;
11050 break;
11051 case MONO_TYPE_I2:
11052 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11053 sp++;
11054 break;
11055 break;
11056 case MONO_TYPE_I4:
11057 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11058 sp++;
11059 break;
11060 case MONO_TYPE_U4:
11061 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11062 sp++;
11063 break;
11064 case MONO_TYPE_I:
11065 case MONO_TYPE_U:
11066 case MONO_TYPE_PTR:
11067 case MONO_TYPE_FNPTR:
11068 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11069 type_to_eval_stack_type ((cfg), field->type, *sp);
11070 sp++;
11071 break;
11072 case MONO_TYPE_STRING:
11073 case MONO_TYPE_OBJECT:
11074 case MONO_TYPE_CLASS:
11075 case MONO_TYPE_SZARRAY:
11076 case MONO_TYPE_ARRAY:
11077 if (!mono_gc_is_moving ()) {
11078 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11079 type_to_eval_stack_type ((cfg), field->type, *sp);
11080 sp++;
11081 } else {
11082 is_const = FALSE;
11084 break;
11085 case MONO_TYPE_I8:
11086 case MONO_TYPE_U8:
11087 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11088 sp++;
11089 break;
11090 case MONO_TYPE_R4:
11091 case MONO_TYPE_R8:
11092 case MONO_TYPE_VALUETYPE:
11093 default:
11094 is_const = FALSE;
11095 break;
11099 if (!is_const) {
11100 MonoInst *load;
11102 CHECK_STACK_OVF (1);
11104 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11105 load->flags |= ins_flag;
11106 ins_flag = 0;
11107 *sp++ = load;
11111 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11112 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11113 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11116 ins_flag = 0;
11117 ip += 5;
11118 break;
11120 case CEE_STOBJ:
11121 CHECK_STACK (2);
11122 sp -= 2;
11123 CHECK_OPSIZE (5);
11124 token = read32 (ip + 1);
11125 klass = mini_get_class (method, token, generic_context);
11126 CHECK_TYPELOAD (klass);
11127 if (ins_flag & MONO_INST_VOLATILE) {
11128 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11129 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11131 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11132 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11133 ins->flags |= ins_flag;
11134 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11135 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11136 /* insert call to write barrier */
11137 emit_write_barrier (cfg, sp [0], sp [1]);
11139 ins_flag = 0;
11140 ip += 5;
11141 inline_costs += 1;
11142 break;
11145 * Array opcodes
11147 case CEE_NEWARR: {
11148 MonoInst *len_ins;
11149 const char *data_ptr;
11150 int data_size = 0;
11151 guint32 field_token;
11153 CHECK_STACK (1);
11154 --sp;
11156 CHECK_OPSIZE (5);
11157 token = read32 (ip + 1);
11159 klass = mini_get_class (method, token, generic_context);
11160 CHECK_TYPELOAD (klass);
11162 context_used = mini_class_check_context_used (cfg, klass);
11164 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11165 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11166 ins->sreg1 = sp [0]->dreg;
11167 ins->type = STACK_I4;
11168 ins->dreg = alloc_ireg (cfg);
11169 MONO_ADD_INS (cfg->cbb, ins);
11170 *sp = mono_decompose_opcode (cfg, ins);
11173 if (context_used) {
11174 MonoInst *args [3];
11175 MonoClass *array_class = mono_array_class_get (klass, 1);
11176 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11178 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11180 /* vtable */
11181 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11182 array_class, MONO_RGCTX_INFO_VTABLE);
11183 /* array len */
11184 args [1] = sp [0];
11186 if (managed_alloc)
11187 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11188 else
11189 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11190 } else {
11191 if (cfg->opt & MONO_OPT_SHARED) {
11192 /* Decompose now to avoid problems with references to the domainvar */
11193 MonoInst *iargs [3];
11195 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11196 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11197 iargs [2] = sp [0];
11199 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11200 } else {
11201 /* Decompose later since it is needed by abcrem */
11202 MonoClass *array_type = mono_array_class_get (klass, 1);
11203 mono_class_vtable (cfg->domain, array_type);
11204 CHECK_TYPELOAD (array_type);
11206 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11207 ins->dreg = alloc_ireg_ref (cfg);
11208 ins->sreg1 = sp [0]->dreg;
11209 ins->inst_newa_class = klass;
11210 ins->type = STACK_OBJ;
11211 ins->klass = array_type;
11212 MONO_ADD_INS (cfg->cbb, ins);
11213 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11214 cfg->cbb->has_array_access = TRUE;
11216 /* Needed so mono_emit_load_get_addr () gets called */
11217 mono_get_got_var (cfg);
11221 len_ins = sp [0];
11222 ip += 5;
11223 *sp++ = ins;
11224 inline_costs += 1;
11227 * we inline/optimize the initialization sequence if possible.
11228 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11229 * for small sizes open code the memcpy
11230 * ensure the rva field is big enough
11232 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11233 MonoMethod *memcpy_method = get_memcpy_method ();
11234 MonoInst *iargs [3];
11235 int add_reg = alloc_ireg_mp (cfg);
11237 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11238 if (cfg->compile_aot) {
11239 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11240 } else {
11241 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11243 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11244 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11245 ip += 11;
11248 break;
11250 case CEE_LDLEN:
11251 CHECK_STACK (1);
11252 --sp;
11253 if (sp [0]->type != STACK_OBJ)
11254 UNVERIFIED;
11256 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11257 ins->dreg = alloc_preg (cfg);
11258 ins->sreg1 = sp [0]->dreg;
11259 ins->type = STACK_I4;
11260 /* This flag will be inherited by the decomposition */
11261 ins->flags |= MONO_INST_FAULT;
11262 MONO_ADD_INS (cfg->cbb, ins);
11263 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11264 cfg->cbb->has_array_access = TRUE;
11265 ip ++;
11266 *sp++ = ins;
11267 break;
11268 case CEE_LDELEMA:
11269 CHECK_STACK (2);
11270 sp -= 2;
11271 CHECK_OPSIZE (5);
11272 if (sp [0]->type != STACK_OBJ)
11273 UNVERIFIED;
11275 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11277 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11278 CHECK_TYPELOAD (klass);
11279 /* we need to make sure that this array is exactly the type it needs
11280 * to be for correctness. the wrappers are lax with their usage
11281 * so we need to ignore them here
11283 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11284 MonoClass *array_class = mono_array_class_get (klass, 1);
11285 mini_emit_check_array_type (cfg, sp [0], array_class);
11286 CHECK_TYPELOAD (array_class);
11289 readonly = FALSE;
11290 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11291 *sp++ = ins;
11292 ip += 5;
11293 break;
11294 case CEE_LDELEM:
11295 case CEE_LDELEM_I1:
11296 case CEE_LDELEM_U1:
11297 case CEE_LDELEM_I2:
11298 case CEE_LDELEM_U2:
11299 case CEE_LDELEM_I4:
11300 case CEE_LDELEM_U4:
11301 case CEE_LDELEM_I8:
11302 case CEE_LDELEM_I:
11303 case CEE_LDELEM_R4:
11304 case CEE_LDELEM_R8:
11305 case CEE_LDELEM_REF: {
11306 MonoInst *addr;
11308 CHECK_STACK (2);
11309 sp -= 2;
11311 if (*ip == CEE_LDELEM) {
11312 CHECK_OPSIZE (5);
11313 token = read32 (ip + 1);
11314 klass = mini_get_class (method, token, generic_context);
11315 CHECK_TYPELOAD (klass);
11316 mono_class_init (klass);
11318 else
11319 klass = array_access_to_klass (*ip);
11321 if (sp [0]->type != STACK_OBJ)
11322 UNVERIFIED;
11324 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11326 if (mini_is_gsharedvt_variable_klass (klass)) {
11327 // FIXME-VT: OP_ICONST optimization
11328 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11329 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11330 ins->opcode = OP_LOADV_MEMBASE;
11331 } else if (sp [1]->opcode == OP_ICONST) {
11332 int array_reg = sp [0]->dreg;
11333 int index_reg = sp [1]->dreg;
11334 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11336 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11337 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11339 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11340 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11341 } else {
11342 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11343 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11345 *sp++ = ins;
11346 if (*ip == CEE_LDELEM)
11347 ip += 5;
11348 else
11349 ++ip;
11350 break;
11352 case CEE_STELEM_I:
11353 case CEE_STELEM_I1:
11354 case CEE_STELEM_I2:
11355 case CEE_STELEM_I4:
11356 case CEE_STELEM_I8:
11357 case CEE_STELEM_R4:
11358 case CEE_STELEM_R8:
11359 case CEE_STELEM_REF:
11360 case CEE_STELEM: {
11361 CHECK_STACK (3);
11362 sp -= 3;
11364 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11366 if (*ip == CEE_STELEM) {
11367 CHECK_OPSIZE (5);
11368 token = read32 (ip + 1);
11369 klass = mini_get_class (method, token, generic_context);
11370 CHECK_TYPELOAD (klass);
11371 mono_class_init (klass);
11373 else
11374 klass = array_access_to_klass (*ip);
11376 if (sp [0]->type != STACK_OBJ)
11377 UNVERIFIED;
11379 emit_array_store (cfg, klass, sp, TRUE);
11381 if (*ip == CEE_STELEM)
11382 ip += 5;
11383 else
11384 ++ip;
11385 inline_costs += 1;
11386 break;
11388 case CEE_CKFINITE: {
11389 CHECK_STACK (1);
11390 --sp;
11392 if (cfg->llvm_only) {
11393 MonoInst *iargs [1];
11395 iargs [0] = sp [0];
11396 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11397 } else {
11398 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11399 ins->sreg1 = sp [0]->dreg;
11400 ins->dreg = alloc_freg (cfg);
11401 ins->type = STACK_R8;
11402 MONO_ADD_INS (cfg->cbb, ins);
11404 *sp++ = mono_decompose_opcode (cfg, ins);
11407 ++ip;
11408 break;
11410 case CEE_REFANYVAL: {
11411 MonoInst *src_var, *src;
11413 int klass_reg = alloc_preg (cfg);
11414 int dreg = alloc_preg (cfg);
11416 GSHAREDVT_FAILURE (*ip);
11418 CHECK_STACK (1);
11419 MONO_INST_NEW (cfg, ins, *ip);
11420 --sp;
11421 CHECK_OPSIZE (5);
11422 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11423 CHECK_TYPELOAD (klass);
11425 context_used = mini_class_check_context_used (cfg, klass);
11427 // FIXME:
11428 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11429 if (!src_var)
11430 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11431 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11434 if (context_used) {
11435 MonoInst *klass_ins;
11437 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11438 klass, MONO_RGCTX_INFO_KLASS);
11440 // FIXME:
11441 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11442 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11443 } else {
11444 mini_emit_class_check (cfg, klass_reg, klass);
11446 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11447 ins->type = STACK_MP;
11448 ins->klass = klass;
11449 *sp++ = ins;
11450 ip += 5;
11451 break;
11453 case CEE_MKREFANY: {
11454 MonoInst *loc, *addr;
11456 GSHAREDVT_FAILURE (*ip);
11458 CHECK_STACK (1);
11459 MONO_INST_NEW (cfg, ins, *ip);
11460 --sp;
11461 CHECK_OPSIZE (5);
11462 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11463 CHECK_TYPELOAD (klass);
11465 context_used = mini_class_check_context_used (cfg, klass);
11467 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11468 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11470 if (context_used) {
11471 MonoInst *const_ins;
11472 int type_reg = alloc_preg (cfg);
11474 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11475 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11476 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11477 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11478 } else {
11479 int const_reg = alloc_preg (cfg);
11480 int type_reg = alloc_preg (cfg);
11482 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11483 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11485 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11487 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11489 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11490 ins->type = STACK_VTYPE;
11491 ins->klass = mono_defaults.typed_reference_class;
11492 *sp++ = ins;
11493 ip += 5;
11494 break;
11496 case CEE_LDTOKEN: {
11497 gpointer handle;
11498 MonoClass *handle_class;
11500 CHECK_STACK_OVF (1);
11502 CHECK_OPSIZE (5);
11503 n = read32 (ip + 1);
11505 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11506 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11507 handle = mono_method_get_wrapper_data (method, n);
11508 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11509 if (handle_class == mono_defaults.typehandle_class)
11510 handle = &((MonoClass*)handle)->byval_arg;
11512 else {
11513 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11514 CHECK_CFG_ERROR;
11516 if (!handle)
11517 LOAD_ERROR;
11518 mono_class_init (handle_class);
11519 if (cfg->gshared) {
11520 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11521 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11522 /* This case handles ldtoken
11523 of an open type, like for
11524 typeof(Gen<>). */
11525 context_used = 0;
11526 } else if (handle_class == mono_defaults.typehandle_class) {
11527 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11528 } else if (handle_class == mono_defaults.fieldhandle_class)
11529 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11530 else if (handle_class == mono_defaults.methodhandle_class)
11531 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11532 else
11533 g_assert_not_reached ();
11536 if ((cfg->opt & MONO_OPT_SHARED) &&
11537 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11538 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11539 MonoInst *addr, *vtvar, *iargs [3];
11540 int method_context_used;
11542 method_context_used = mini_method_check_context_used (cfg, method);
11544 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11546 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11547 EMIT_NEW_ICONST (cfg, iargs [1], n);
11548 if (method_context_used) {
11549 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11550 method, MONO_RGCTX_INFO_METHOD);
11551 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11552 } else {
11553 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11554 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11556 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11558 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11560 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11561 } else {
11562 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11563 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11564 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11565 (cmethod->klass == mono_defaults.systemtype_class) &&
11566 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11567 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11569 mono_class_init (tclass);
11570 if (context_used) {
11571 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11572 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11573 } else if (cfg->compile_aot) {
11574 if (method->wrapper_type) {
11575 mono_error_init (&error); //got to do it since there are multiple conditionals below
11576 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11577 /* Special case for static synchronized wrappers */
11578 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11579 } else {
11580 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11581 /* FIXME: n is not a normal token */
11582 DISABLE_AOT (cfg);
11583 EMIT_NEW_PCONST (cfg, ins, NULL);
11585 } else {
11586 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11588 } else {
11589 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11590 CHECK_CFG_ERROR;
11591 EMIT_NEW_PCONST (cfg, ins, rt);
11593 ins->type = STACK_OBJ;
11594 ins->klass = cmethod->klass;
11595 ip += 5;
11596 } else {
11597 MonoInst *addr, *vtvar;
11599 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11601 if (context_used) {
11602 if (handle_class == mono_defaults.typehandle_class) {
11603 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11604 mono_class_from_mono_type ((MonoType *)handle),
11605 MONO_RGCTX_INFO_TYPE);
11606 } else if (handle_class == mono_defaults.methodhandle_class) {
11607 ins = emit_get_rgctx_method (cfg, context_used,
11608 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11609 } else if (handle_class == mono_defaults.fieldhandle_class) {
11610 ins = emit_get_rgctx_field (cfg, context_used,
11611 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11612 } else {
11613 g_assert_not_reached ();
11615 } else if (cfg->compile_aot) {
11616 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11617 } else {
11618 EMIT_NEW_PCONST (cfg, ins, handle);
11620 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11621 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11622 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11626 *sp++ = ins;
11627 ip += 5;
11628 break;
11630 case CEE_THROW:
11631 CHECK_STACK (1);
11632 if (sp [-1]->type != STACK_OBJ)
11633 UNVERIFIED;
11635 MONO_INST_NEW (cfg, ins, OP_THROW);
11636 --sp;
11637 ins->sreg1 = sp [0]->dreg;
11638 ip++;
11639 cfg->cbb->out_of_line = TRUE;
11640 MONO_ADD_INS (cfg->cbb, ins);
11641 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11642 MONO_ADD_INS (cfg->cbb, ins);
11643 sp = stack_start;
11645 link_bblock (cfg, cfg->cbb, end_bblock);
11646 start_new_bblock = 1;
11647 /* This can complicate code generation for llvm since the return value might not be defined */
11648 if (COMPILE_LLVM (cfg))
11649 INLINE_FAILURE ("throw");
11650 break;
11651 case CEE_ENDFINALLY:
11652 if (!ip_in_finally_clause (cfg, ip - header->code))
11653 UNVERIFIED;
11654 /* mono_save_seq_point_info () depends on this */
11655 if (sp != stack_start)
11656 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11657 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11658 MONO_ADD_INS (cfg->cbb, ins);
11659 ip++;
11660 start_new_bblock = 1;
11663 * Control will leave the method so empty the stack, otherwise
11664 * the next basic block will start with a nonempty stack.
11666 while (sp != stack_start) {
11667 sp--;
11669 break;
11670 case CEE_LEAVE:
11671 case CEE_LEAVE_S: {
11672 GList *handlers;
11674 if (*ip == CEE_LEAVE) {
11675 CHECK_OPSIZE (5);
11676 target = ip + 5 + (gint32)read32(ip + 1);
11677 } else {
11678 CHECK_OPSIZE (2);
11679 target = ip + 2 + (signed char)(ip [1]);
11682 /* empty the stack */
11683 while (sp != stack_start) {
11684 sp--;
11688 * If this leave statement is in a catch block, check for a
11689 * pending exception, and rethrow it if necessary.
11690 * We avoid doing this in runtime invoke wrappers, since those are called
11691 * by native code which excepts the wrapper to catch all exceptions.
11693 for (i = 0; i < header->num_clauses; ++i) {
11694 MonoExceptionClause *clause = &header->clauses [i];
11697 * Use <= in the final comparison to handle clauses with multiple
11698 * leave statements, like in bug #78024.
11699 * The ordering of the exception clauses guarantees that we find the
11700 * innermost clause.
11702 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11703 MonoInst *exc_ins;
11704 MonoBasicBlock *dont_throw;
11707 MonoInst *load;
11709 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11712 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11714 NEW_BBLOCK (cfg, dont_throw);
11717 * Currently, we always rethrow the abort exception, despite the
11718 * fact that this is not correct. See thread6.cs for an example.
11719 * But propagating the abort exception is more important than
11720 * getting the sematics right.
11722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11724 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11726 MONO_START_BB (cfg, dont_throw);
11730 #ifdef ENABLE_LLVM
11731 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11732 #endif
11734 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11735 GList *tmp;
11736 MonoExceptionClause *clause;
11738 for (tmp = handlers; tmp; tmp = tmp->next) {
11739 clause = (MonoExceptionClause *)tmp->data;
11740 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11741 g_assert (tblock);
11742 link_bblock (cfg, cfg->cbb, tblock);
11743 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11744 ins->inst_target_bb = tblock;
11745 ins->inst_eh_block = clause;
11746 MONO_ADD_INS (cfg->cbb, ins);
11747 cfg->cbb->has_call_handler = 1;
11748 if (COMPILE_LLVM (cfg)) {
11749 MonoBasicBlock *target_bb;
11752 * Link the finally bblock with the target, since it will
11753 * conceptually branch there.
11755 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11756 GET_BBLOCK (cfg, target_bb, target);
11757 link_bblock (cfg, tblock, target_bb);
11760 g_list_free (handlers);
11763 MONO_INST_NEW (cfg, ins, OP_BR);
11764 MONO_ADD_INS (cfg->cbb, ins);
11765 GET_BBLOCK (cfg, tblock, target);
11766 link_bblock (cfg, cfg->cbb, tblock);
11767 ins->inst_target_bb = tblock;
11769 start_new_bblock = 1;
11771 if (*ip == CEE_LEAVE)
11772 ip += 5;
11773 else
11774 ip += 2;
11776 break;
11780 * Mono specific opcodes
11782 case MONO_CUSTOM_PREFIX: {
11784 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11786 CHECK_OPSIZE (2);
11787 switch (ip [1]) {
11788 case CEE_MONO_ICALL: {
11789 gpointer func;
11790 MonoJitICallInfo *info;
11792 token = read32 (ip + 2);
11793 func = mono_method_get_wrapper_data (method, token);
11794 info = mono_find_jit_icall_by_addr (func);
11795 if (!info)
11796 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11797 g_assert (info);
11799 CHECK_STACK (info->sig->param_count);
11800 sp -= info->sig->param_count;
11802 ins = mono_emit_jit_icall (cfg, info->func, sp);
11803 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11804 *sp++ = ins;
11806 ip += 6;
11807 inline_costs += 10 * num_calls++;
11809 break;
11811 case CEE_MONO_LDPTR_CARD_TABLE:
11812 case CEE_MONO_LDPTR_NURSERY_START:
11813 case CEE_MONO_LDPTR_NURSERY_BITS:
11814 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11815 CHECK_STACK_OVF (1);
11817 switch (ip [1]) {
11818 case CEE_MONO_LDPTR_CARD_TABLE:
11819 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11820 break;
11821 case CEE_MONO_LDPTR_NURSERY_START:
11822 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11823 break;
11824 case CEE_MONO_LDPTR_NURSERY_BITS:
11825 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11826 break;
11827 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11828 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11829 break;
11832 *sp++ = ins;
11833 ip += 2;
11834 inline_costs += 10 * num_calls++;
11835 break;
11837 case CEE_MONO_LDPTR: {
11838 gpointer ptr;
11840 CHECK_STACK_OVF (1);
11841 CHECK_OPSIZE (6);
11842 token = read32 (ip + 2);
11844 ptr = mono_method_get_wrapper_data (method, token);
11845 EMIT_NEW_PCONST (cfg, ins, ptr);
11846 *sp++ = ins;
11847 ip += 6;
11848 inline_costs += 10 * num_calls++;
11849 /* Can't embed random pointers into AOT code */
11850 DISABLE_AOT (cfg);
11851 break;
11853 case CEE_MONO_JIT_ICALL_ADDR: {
11854 MonoJitICallInfo *callinfo;
11855 gpointer ptr;
11857 CHECK_STACK_OVF (1);
11858 CHECK_OPSIZE (6);
11859 token = read32 (ip + 2);
11861 ptr = mono_method_get_wrapper_data (method, token);
11862 callinfo = mono_find_jit_icall_by_addr (ptr);
11863 g_assert (callinfo);
11864 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11865 *sp++ = ins;
11866 ip += 6;
11867 inline_costs += 10 * num_calls++;
11868 break;
11870 case CEE_MONO_ICALL_ADDR: {
11871 MonoMethod *cmethod;
11872 gpointer ptr;
11874 CHECK_STACK_OVF (1);
11875 CHECK_OPSIZE (6);
11876 token = read32 (ip + 2);
11878 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11880 if (cfg->compile_aot) {
11881 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11883 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11884 * before the call, its not needed when using direct pinvoke.
11885 * This is not an optimization, but its used to avoid looking up pinvokes
11886 * on platforms which don't support dlopen ().
11888 EMIT_NEW_PCONST (cfg, ins, NULL);
11889 } else {
11890 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11892 } else {
11893 ptr = mono_lookup_internal_call (cmethod);
11894 g_assert (ptr);
11895 EMIT_NEW_PCONST (cfg, ins, ptr);
11897 *sp++ = ins;
11898 ip += 6;
11899 break;
11901 case CEE_MONO_VTADDR: {
11902 MonoInst *src_var, *src;
11904 CHECK_STACK (1);
11905 --sp;
11907 // FIXME:
11908 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11909 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11910 *sp++ = src;
11911 ip += 2;
11912 break;
11914 case CEE_MONO_NEWOBJ: {
11915 MonoInst *iargs [2];
11917 CHECK_STACK_OVF (1);
11918 CHECK_OPSIZE (6);
11919 token = read32 (ip + 2);
11920 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11921 mono_class_init (klass);
11922 NEW_DOMAINCONST (cfg, iargs [0]);
11923 MONO_ADD_INS (cfg->cbb, iargs [0]);
11924 NEW_CLASSCONST (cfg, iargs [1], klass);
11925 MONO_ADD_INS (cfg->cbb, iargs [1]);
11926 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11927 ip += 6;
11928 inline_costs += 10 * num_calls++;
11929 break;
11931 case CEE_MONO_OBJADDR:
11932 CHECK_STACK (1);
11933 --sp;
11934 MONO_INST_NEW (cfg, ins, OP_MOVE);
11935 ins->dreg = alloc_ireg_mp (cfg);
11936 ins->sreg1 = sp [0]->dreg;
11937 ins->type = STACK_MP;
11938 MONO_ADD_INS (cfg->cbb, ins);
11939 *sp++ = ins;
11940 ip += 2;
11941 break;
11942 case CEE_MONO_LDNATIVEOBJ:
11944 * Similar to LDOBJ, but instead load the unmanaged
11945 * representation of the vtype to the stack.
11947 CHECK_STACK (1);
11948 CHECK_OPSIZE (6);
11949 --sp;
11950 token = read32 (ip + 2);
11951 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11952 g_assert (klass->valuetype);
11953 mono_class_init (klass);
11956 MonoInst *src, *dest, *temp;
11958 src = sp [0];
11959 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11960 temp->backend.is_pinvoke = 1;
11961 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11962 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11964 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11965 dest->type = STACK_VTYPE;
11966 dest->klass = klass;
11968 *sp ++ = dest;
11969 ip += 6;
11971 break;
11972 case CEE_MONO_RETOBJ: {
11974 * Same as RET, but return the native representation of a vtype
11975 * to the caller.
11977 g_assert (cfg->ret);
11978 g_assert (mono_method_signature (method)->pinvoke);
11979 CHECK_STACK (1);
11980 --sp;
11982 CHECK_OPSIZE (6);
11983 token = read32 (ip + 2);
11984 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11986 if (!cfg->vret_addr) {
11987 g_assert (cfg->ret_var_is_local);
11989 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11990 } else {
11991 EMIT_NEW_RETLOADA (cfg, ins);
11993 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11995 if (sp != stack_start)
11996 UNVERIFIED;
11998 MONO_INST_NEW (cfg, ins, OP_BR);
11999 ins->inst_target_bb = end_bblock;
12000 MONO_ADD_INS (cfg->cbb, ins);
12001 link_bblock (cfg, cfg->cbb, end_bblock);
12002 start_new_bblock = 1;
12003 ip += 6;
12004 break;
12006 case CEE_MONO_SAVE_LMF:
12007 case CEE_MONO_RESTORE_LMF:
12008 ip += 2;
12009 break;
12010 case CEE_MONO_CLASSCONST:
12011 CHECK_STACK_OVF (1);
12012 CHECK_OPSIZE (6);
12013 token = read32 (ip + 2);
12014 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12015 *sp++ = ins;
12016 ip += 6;
12017 inline_costs += 10 * num_calls++;
12018 break;
12019 case CEE_MONO_NOT_TAKEN:
12020 cfg->cbb->out_of_line = TRUE;
12021 ip += 2;
12022 break;
12023 case CEE_MONO_TLS: {
12024 MonoTlsKey key;
12026 CHECK_STACK_OVF (1);
12027 CHECK_OPSIZE (6);
12028 key = (MonoTlsKey)read32 (ip + 2);
12029 g_assert (key < TLS_KEY_NUM);
12031 ins = mono_create_tls_get (cfg, key);
12032 g_assert (ins);
12033 ins->type = STACK_PTR;
12034 *sp++ = ins;
12035 ip += 6;
12036 break;
12038 case CEE_MONO_DYN_CALL: {
12039 MonoCallInst *call;
12041 /* It would be easier to call a trampoline, but that would put an
12042 * extra frame on the stack, confusing exception handling. So
12043 * implement it inline using an opcode for now.
12046 if (!cfg->dyn_call_var) {
12047 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12048 /* prevent it from being register allocated */
12049 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12052 /* Has to use a call inst since it local regalloc expects it */
12053 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12054 ins = (MonoInst*)call;
12055 sp -= 2;
12056 ins->sreg1 = sp [0]->dreg;
12057 ins->sreg2 = sp [1]->dreg;
12058 MONO_ADD_INS (cfg->cbb, ins);
12060 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12062 ip += 2;
12063 inline_costs += 10 * num_calls++;
12065 break;
12067 case CEE_MONO_MEMORY_BARRIER: {
12068 CHECK_OPSIZE (6);
12069 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12070 ip += 6;
12071 break;
12073 case CEE_MONO_ATOMIC_STORE_I4: {
12074 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12076 CHECK_OPSIZE (6);
12077 CHECK_STACK (2);
12078 sp -= 2;
12080 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12081 ins->dreg = sp [0]->dreg;
12082 ins->sreg1 = sp [1]->dreg;
12083 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12084 MONO_ADD_INS (cfg->cbb, ins);
12086 ip += 6;
12087 break;
12089 case CEE_MONO_JIT_ATTACH: {
12090 MonoInst *args [16], *domain_ins;
12091 MonoInst *ad_ins, *jit_tls_ins;
12092 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12094 g_assert (!mono_threads_is_coop_enabled ());
12096 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12098 EMIT_NEW_PCONST (cfg, ins, NULL);
12099 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12101 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12102 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12104 if (ad_ins && jit_tls_ins) {
12105 NEW_BBLOCK (cfg, next_bb);
12106 NEW_BBLOCK (cfg, call_bb);
12108 if (cfg->compile_aot) {
12109 /* AOT code is only used in the root domain */
12110 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12111 } else {
12112 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12114 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12115 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12120 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12121 MONO_START_BB (cfg, call_bb);
12124 /* AOT code is only used in the root domain */
12125 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12126 if (cfg->compile_aot) {
12127 MonoInst *addr;
12130 * This is called on unattached threads, so it cannot go through the trampoline
12131 * infrastructure. Use an indirect call through a got slot initialized at load time
12132 * instead.
12134 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12135 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12136 } else {
12137 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12139 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12141 if (next_bb)
12142 MONO_START_BB (cfg, next_bb);
12144 ip += 2;
12145 break;
12147 case CEE_MONO_JIT_DETACH: {
12148 MonoInst *args [16];
12150 /* Restore the original domain */
12151 dreg = alloc_ireg (cfg);
12152 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12153 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12154 ip += 2;
12155 break;
12157 case CEE_MONO_CALLI_EXTRA_ARG: {
12158 MonoInst *addr;
12159 MonoMethodSignature *fsig;
12160 MonoInst *arg;
12163 * This is the same as CEE_CALLI, but passes an additional argument
12164 * to the called method in llvmonly mode.
12165 * This is only used by delegate invoke wrappers to call the
12166 * actual delegate method.
12168 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12170 CHECK_OPSIZE (6);
12171 token = read32 (ip + 2);
12173 ins = NULL;
12175 cmethod = NULL;
12176 CHECK_STACK (1);
12177 --sp;
12178 addr = *sp;
12179 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12180 CHECK_CFG_ERROR;
12182 if (cfg->llvm_only)
12183 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12185 n = fsig->param_count + fsig->hasthis + 1;
12187 CHECK_STACK (n);
12189 sp -= n;
12190 arg = sp [n - 1];
12192 if (cfg->llvm_only) {
12194 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
12195 * cconv. This is set by mono_init_delegate ().
12197 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12198 MonoInst *callee = addr;
12199 MonoInst *call, *localloc_ins;
12200 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12201 int low_bit_reg = alloc_preg (cfg);
12203 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12204 NEW_BBLOCK (cfg, end_bb);
12206 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12207 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12208 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12210 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12211 addr = emit_get_rgctx_sig (cfg, context_used,
12212 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12214 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12216 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12217 ins->dreg = alloc_preg (cfg);
12218 ins->inst_imm = 2 * SIZEOF_VOID_P;
12219 MONO_ADD_INS (cfg->cbb, ins);
12220 localloc_ins = ins;
12221 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12222 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12223 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12225 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12226 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12228 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12229 MONO_START_BB (cfg, is_gsharedvt_bb);
12230 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12231 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12232 ins->dreg = call->dreg;
12234 MONO_START_BB (cfg, end_bb);
12235 } else {
12236 /* Caller uses a normal calling conv */
12238 MonoInst *callee = addr;
12239 MonoInst *call, *localloc_ins;
12240 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12241 int low_bit_reg = alloc_preg (cfg);
12243 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12244 NEW_BBLOCK (cfg, end_bb);
12246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12248 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12250 /* Normal case: callee uses a normal cconv, no conversion is needed */
12251 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12252 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12253 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12254 MONO_START_BB (cfg, is_gsharedvt_bb);
12255 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12256 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12257 MONO_ADD_INS (cfg->cbb, addr);
12259 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12261 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12262 ins->dreg = alloc_preg (cfg);
12263 ins->inst_imm = 2 * SIZEOF_VOID_P;
12264 MONO_ADD_INS (cfg->cbb, ins);
12265 localloc_ins = ins;
12266 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12267 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12268 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12270 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12271 ins->dreg = call->dreg;
12272 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12274 MONO_START_BB (cfg, end_bb);
12276 } else {
12277 /* Same as CEE_CALLI */
12278 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12280 * We pass the address to the gsharedvt trampoline in the rgctx reg
12282 MonoInst *callee = addr;
12284 addr = emit_get_rgctx_sig (cfg, context_used,
12285 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12286 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12287 } else {
12288 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12292 if (!MONO_TYPE_IS_VOID (fsig->ret))
12293 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12295 CHECK_CFG_EXCEPTION;
12297 ip += 6;
12298 ins_flag = 0;
12299 constrained_class = NULL;
12300 break;
12302 case CEE_MONO_LDDOMAIN:
12303 CHECK_STACK_OVF (1);
12304 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12305 ip += 2;
12306 *sp++ = ins;
12307 break;
12308 case CEE_MONO_GET_LAST_ERROR:
12309 CHECK_OPSIZE (2);
12310 CHECK_STACK_OVF (1);
12312 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12313 ins->dreg = alloc_dreg (cfg, STACK_I4);
12314 ins->type = STACK_I4;
12315 MONO_ADD_INS (cfg->cbb, ins);
12317 ip += 2;
12318 *sp++ = ins;
12319 break;
12320 default:
12321 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12322 break;
12324 break;
12327 case CEE_PREFIX1: {
12328 CHECK_OPSIZE (2);
12329 switch (ip [1]) {
12330 case CEE_ARGLIST: {
12331 /* somewhat similar to LDTOKEN */
12332 MonoInst *addr, *vtvar;
12333 CHECK_STACK_OVF (1);
12334 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12336 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12337 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12339 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12340 ins->type = STACK_VTYPE;
12341 ins->klass = mono_defaults.argumenthandle_class;
12342 *sp++ = ins;
12343 ip += 2;
12344 break;
12346 case CEE_CEQ:
12347 case CEE_CGT:
12348 case CEE_CGT_UN:
12349 case CEE_CLT:
12350 case CEE_CLT_UN: {
12351 MonoInst *cmp, *arg1, *arg2;
12353 CHECK_STACK (2);
12354 sp -= 2;
12355 arg1 = sp [0];
12356 arg2 = sp [1];
12359 * The following transforms:
12360 * CEE_CEQ into OP_CEQ
12361 * CEE_CGT into OP_CGT
12362 * CEE_CGT_UN into OP_CGT_UN
12363 * CEE_CLT into OP_CLT
12364 * CEE_CLT_UN into OP_CLT_UN
12366 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12368 MONO_INST_NEW (cfg, ins, cmp->opcode);
12369 cmp->sreg1 = arg1->dreg;
12370 cmp->sreg2 = arg2->dreg;
12371 type_from_op (cfg, cmp, arg1, arg2);
12372 CHECK_TYPE (cmp);
12373 add_widen_op (cfg, cmp, &arg1, &arg2);
12374 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12375 cmp->opcode = OP_LCOMPARE;
12376 else if (arg1->type == STACK_R4)
12377 cmp->opcode = OP_RCOMPARE;
12378 else if (arg1->type == STACK_R8)
12379 cmp->opcode = OP_FCOMPARE;
12380 else
12381 cmp->opcode = OP_ICOMPARE;
12382 MONO_ADD_INS (cfg->cbb, cmp);
12383 ins->type = STACK_I4;
12384 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12385 type_from_op (cfg, ins, arg1, arg2);
12387 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12389 * The backends expect the fceq opcodes to do the
12390 * comparison too.
12392 ins->sreg1 = cmp->sreg1;
12393 ins->sreg2 = cmp->sreg2;
12394 NULLIFY_INS (cmp);
12396 MONO_ADD_INS (cfg->cbb, ins);
12397 *sp++ = ins;
12398 ip += 2;
12399 break;
12401 case CEE_LDFTN: {
12402 MonoInst *argconst;
12403 MonoMethod *cil_method;
12405 CHECK_STACK_OVF (1);
12406 CHECK_OPSIZE (6);
12407 n = read32 (ip + 2);
12408 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12409 CHECK_CFG_ERROR;
12411 mono_class_init (cmethod->klass);
12413 mono_save_token_info (cfg, image, n, cmethod);
12415 context_used = mini_method_check_context_used (cfg, cmethod);
12417 cil_method = cmethod;
12418 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12419 emit_method_access_failure (cfg, method, cil_method);
12421 if (mono_security_core_clr_enabled ())
12422 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12425 * Optimize the common case of ldftn+delegate creation
12427 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12428 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12429 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12430 MonoInst *target_ins, *handle_ins;
12431 MonoMethod *invoke;
12432 int invoke_context_used;
12434 invoke = mono_get_delegate_invoke (ctor_method->klass);
12435 if (!invoke || !mono_method_signature (invoke))
12436 LOAD_ERROR;
12438 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12440 target_ins = sp [-1];
12442 if (mono_security_core_clr_enabled ())
12443 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12445 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12446 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12447 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12448 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12449 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12453 /* FIXME: SGEN support */
12454 if (invoke_context_used == 0 || cfg->llvm_only) {
12455 ip += 6;
12456 if (cfg->verbose_level > 3)
12457 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12458 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12459 sp --;
12460 *sp = handle_ins;
12461 CHECK_CFG_EXCEPTION;
12462 ip += 5;
12463 sp ++;
12464 break;
12466 ip -= 6;
12471 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12472 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12473 *sp++ = ins;
12475 ip += 6;
12476 inline_costs += 10 * num_calls++;
12477 break;
12479 case CEE_LDVIRTFTN: {
12480 MonoInst *args [2];
12482 CHECK_STACK (1);
12483 CHECK_OPSIZE (6);
12484 n = read32 (ip + 2);
12485 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12486 CHECK_CFG_ERROR;
12488 mono_class_init (cmethod->klass);
12490 context_used = mini_method_check_context_used (cfg, cmethod);
12492 if (mono_security_core_clr_enabled ())
12493 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12496 * Optimize the common case of ldvirtftn+delegate creation
12498 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12499 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12500 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12501 MonoInst *target_ins, *handle_ins;
12502 MonoMethod *invoke;
12503 int invoke_context_used;
12504 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12506 invoke = mono_get_delegate_invoke (ctor_method->klass);
12507 if (!invoke || !mono_method_signature (invoke))
12508 LOAD_ERROR;
12510 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12512 target_ins = sp [-1];
12514 if (mono_security_core_clr_enabled ())
12515 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12517 /* FIXME: SGEN support */
12518 if (invoke_context_used == 0 || cfg->llvm_only) {
12519 ip += 6;
12520 if (cfg->verbose_level > 3)
12521 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12522 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12523 sp -= 2;
12524 *sp = handle_ins;
12525 CHECK_CFG_EXCEPTION;
12526 ip += 5;
12527 sp ++;
12528 break;
12530 ip -= 6;
12535 --sp;
12536 args [0] = *sp;
12538 args [1] = emit_get_rgctx_method (cfg, context_used,
12539 cmethod, MONO_RGCTX_INFO_METHOD);
12541 if (context_used)
12542 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12543 else
12544 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12546 ip += 6;
12547 inline_costs += 10 * num_calls++;
12548 break;
12550 case CEE_LDARG:
12551 CHECK_STACK_OVF (1);
12552 CHECK_OPSIZE (4);
12553 n = read16 (ip + 2);
12554 CHECK_ARG (n);
12555 EMIT_NEW_ARGLOAD (cfg, ins, n);
12556 *sp++ = ins;
12557 ip += 4;
12558 break;
12559 case CEE_LDARGA:
12560 CHECK_STACK_OVF (1);
12561 CHECK_OPSIZE (4);
12562 n = read16 (ip + 2);
12563 CHECK_ARG (n);
12564 NEW_ARGLOADA (cfg, ins, n);
12565 MONO_ADD_INS (cfg->cbb, ins);
12566 *sp++ = ins;
12567 ip += 4;
12568 break;
12569 case CEE_STARG:
12570 CHECK_STACK (1);
12571 --sp;
12572 CHECK_OPSIZE (4);
12573 n = read16 (ip + 2);
12574 CHECK_ARG (n);
12575 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12576 UNVERIFIED;
12577 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12578 ip += 4;
12579 break;
12580 case CEE_LDLOC:
12581 CHECK_STACK_OVF (1);
12582 CHECK_OPSIZE (4);
12583 n = read16 (ip + 2);
12584 CHECK_LOCAL (n);
12585 EMIT_NEW_LOCLOAD (cfg, ins, n);
12586 *sp++ = ins;
12587 ip += 4;
12588 break;
12589 case CEE_LDLOCA: {
12590 unsigned char *tmp_ip;
12591 CHECK_STACK_OVF (1);
12592 CHECK_OPSIZE (4);
12593 n = read16 (ip + 2);
12594 CHECK_LOCAL (n);
12596 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12597 ip = tmp_ip;
12598 inline_costs += 1;
12599 break;
12602 EMIT_NEW_LOCLOADA (cfg, ins, n);
12603 *sp++ = ins;
12604 ip += 4;
12605 break;
12607 case CEE_STLOC:
12608 CHECK_STACK (1);
12609 --sp;
12610 CHECK_OPSIZE (4);
12611 n = read16 (ip + 2);
12612 CHECK_LOCAL (n);
12613 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12614 UNVERIFIED;
12615 emit_stloc_ir (cfg, sp, header, n);
12616 ip += 4;
12617 inline_costs += 1;
12618 break;
12619 case CEE_LOCALLOC: {
12620 CHECK_STACK (1);
12621 MonoBasicBlock *non_zero_bb, *end_bb;
12622 int alloc_ptr = alloc_preg (cfg);
12623 --sp;
12624 if (sp != stack_start)
12625 UNVERIFIED;
12626 if (cfg->method != method)
12628 * Inlining this into a loop in a parent could lead to
12629 * stack overflows which is different behavior than the
12630 * non-inlined case, thus disable inlining in this case.
12632 INLINE_FAILURE("localloc");
12634 NEW_BBLOCK (cfg, non_zero_bb);
12635 NEW_BBLOCK (cfg, end_bb);
12637 /* if size != zero */
12638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12641 //size is zero, so result is NULL
12642 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12645 MONO_START_BB (cfg, non_zero_bb);
12646 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12647 ins->dreg = alloc_ptr;
12648 ins->sreg1 = sp [0]->dreg;
12649 ins->type = STACK_PTR;
12650 MONO_ADD_INS (cfg->cbb, ins);
12652 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12653 if (init_locals)
12654 ins->flags |= MONO_INST_INIT;
12656 MONO_START_BB (cfg, end_bb);
12657 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12658 ins->type = STACK_PTR;
12660 *sp++ = ins;
12661 ip += 2;
12662 break;
12664 case CEE_ENDFILTER: {
12665 MonoExceptionClause *clause, *nearest;
12666 int cc;
12668 CHECK_STACK (1);
12669 --sp;
12670 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12671 UNVERIFIED;
12672 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12673 ins->sreg1 = (*sp)->dreg;
12674 MONO_ADD_INS (cfg->cbb, ins);
12675 start_new_bblock = 1;
12676 ip += 2;
12678 nearest = NULL;
12679 for (cc = 0; cc < header->num_clauses; ++cc) {
12680 clause = &header->clauses [cc];
12681 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12682 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12683 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12684 nearest = clause;
12686 g_assert (nearest);
12687 if ((ip - header->code) != nearest->handler_offset)
12688 UNVERIFIED;
12690 break;
12692 case CEE_UNALIGNED_:
12693 ins_flag |= MONO_INST_UNALIGNED;
12694 /* FIXME: record alignment? we can assume 1 for now */
12695 CHECK_OPSIZE (3);
12696 ip += 3;
12697 break;
12698 case CEE_VOLATILE_:
12699 ins_flag |= MONO_INST_VOLATILE;
12700 ip += 2;
12701 break;
12702 case CEE_TAIL_:
12703 ins_flag |= MONO_INST_TAILCALL;
12704 cfg->flags |= MONO_CFG_HAS_TAIL;
12705 /* Can't inline tail calls at this time */
12706 inline_costs += 100000;
12707 ip += 2;
12708 break;
12709 case CEE_INITOBJ:
12710 CHECK_STACK (1);
12711 --sp;
12712 CHECK_OPSIZE (6);
12713 token = read32 (ip + 2);
12714 klass = mini_get_class (method, token, generic_context);
12715 CHECK_TYPELOAD (klass);
12716 if (generic_class_is_reference_type (cfg, klass))
12717 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12718 else
12719 mini_emit_initobj (cfg, *sp, NULL, klass);
12720 ip += 6;
12721 inline_costs += 1;
12722 break;
12723 case CEE_CONSTRAINED_:
12724 CHECK_OPSIZE (6);
12725 token = read32 (ip + 2);
12726 constrained_class = mini_get_class (method, token, generic_context);
12727 CHECK_TYPELOAD (constrained_class);
12728 ip += 6;
12729 break;
12730 case CEE_CPBLK:
12731 case CEE_INITBLK: {
12732 MonoInst *iargs [3];
12733 CHECK_STACK (3);
12734 sp -= 3;
12736 /* Skip optimized paths for volatile operations. */
12737 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12738 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12739 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12740 /* emit_memset only works when val == 0 */
12741 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12742 } else {
12743 MonoInst *call;
12744 iargs [0] = sp [0];
12745 iargs [1] = sp [1];
12746 iargs [2] = sp [2];
12747 if (ip [1] == CEE_CPBLK) {
12749 * FIXME: It's unclear whether we should be emitting both the acquire
12750 * and release barriers for cpblk. It is technically both a load and
12751 * store operation, so it seems like that's the sensible thing to do.
12753 * FIXME: We emit full barriers on both sides of the operation for
12754 * simplicity. We should have a separate atomic memcpy method instead.
12756 MonoMethod *memcpy_method = get_memcpy_method ();
12758 if (ins_flag & MONO_INST_VOLATILE)
12759 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12761 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12762 call->flags |= ins_flag;
12764 if (ins_flag & MONO_INST_VOLATILE)
12765 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12766 } else {
12767 MonoMethod *memset_method = get_memset_method ();
12768 if (ins_flag & MONO_INST_VOLATILE) {
12769 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12770 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12772 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12773 call->flags |= ins_flag;
12776 ip += 2;
12777 ins_flag = 0;
12778 inline_costs += 1;
12779 break;
12781 case CEE_NO_:
12782 CHECK_OPSIZE (3);
12783 if (ip [2] & 0x1)
12784 ins_flag |= MONO_INST_NOTYPECHECK;
12785 if (ip [2] & 0x2)
12786 ins_flag |= MONO_INST_NORANGECHECK;
12787 /* we ignore the no-nullcheck for now since we
12788 * really do it explicitly only when doing callvirt->call
12790 ip += 3;
12791 break;
12792 case CEE_RETHROW: {
12793 MonoInst *load;
12794 int handler_offset = -1;
12796 for (i = 0; i < header->num_clauses; ++i) {
12797 MonoExceptionClause *clause = &header->clauses [i];
12798 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12799 handler_offset = clause->handler_offset;
12800 break;
12804 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12806 if (handler_offset == -1)
12807 UNVERIFIED;
12809 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12810 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12811 ins->sreg1 = load->dreg;
12812 MONO_ADD_INS (cfg->cbb, ins);
12814 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12815 MONO_ADD_INS (cfg->cbb, ins);
12817 sp = stack_start;
12818 link_bblock (cfg, cfg->cbb, end_bblock);
12819 start_new_bblock = 1;
12820 ip += 2;
12821 break;
12823 case CEE_SIZEOF: {
12824 guint32 val;
12825 int ialign;
12827 CHECK_STACK_OVF (1);
12828 CHECK_OPSIZE (6);
12829 token = read32 (ip + 2);
12830 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12831 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12832 CHECK_CFG_ERROR;
12834 val = mono_type_size (type, &ialign);
12835 } else {
12836 MonoClass *klass = mini_get_class (method, token, generic_context);
12837 CHECK_TYPELOAD (klass);
12839 val = mono_type_size (&klass->byval_arg, &ialign);
12841 if (mini_is_gsharedvt_klass (klass))
12842 GSHAREDVT_FAILURE (*ip);
12844 EMIT_NEW_ICONST (cfg, ins, val);
12845 *sp++= ins;
12846 ip += 6;
12847 break;
12849 case CEE_REFANYTYPE: {
12850 MonoInst *src_var, *src;
12852 GSHAREDVT_FAILURE (*ip);
12854 CHECK_STACK (1);
12855 --sp;
12857 // FIXME:
12858 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12859 if (!src_var)
12860 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12861 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12862 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12863 *sp++ = ins;
12864 ip += 2;
12865 break;
12867 case CEE_READONLY_:
12868 readonly = TRUE;
12869 ip += 2;
12870 break;
12872 case CEE_UNUSED56:
12873 case CEE_UNUSED57:
12874 case CEE_UNUSED70:
12875 case CEE_UNUSED:
12876 case CEE_UNUSED99:
12877 UNVERIFIED;
12879 default:
12880 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12881 UNVERIFIED;
12883 break;
12885 case CEE_UNUSED58:
12886 case CEE_UNUSED1:
12887 UNVERIFIED;
12889 default:
12890 g_warning ("opcode 0x%02x not handled", *ip);
12891 UNVERIFIED;
12894 if (start_new_bblock != 1)
12895 UNVERIFIED;
12897 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12898 if (cfg->cbb->next_bb) {
12899 /* This could already be set because of inlining, #693905 */
12900 MonoBasicBlock *bb = cfg->cbb;
12902 while (bb->next_bb)
12903 bb = bb->next_bb;
12904 bb->next_bb = end_bblock;
12905 } else {
12906 cfg->cbb->next_bb = end_bblock;
12909 if (cfg->method == method && cfg->domainvar) {
12910 MonoInst *store;
12911 MonoInst *get_domain;
12913 cfg->cbb = init_localsbb;
12915 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12916 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12917 MONO_ADD_INS (cfg->cbb, store);
12920 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12921 if (cfg->compile_aot)
12922 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12923 mono_get_got_var (cfg);
12924 #endif
12926 if (cfg->method == method && cfg->got_var)
12927 mono_emit_load_got_addr (cfg);
12929 if (init_localsbb) {
12930 cfg->cbb = init_localsbb;
12931 cfg->ip = NULL;
12932 for (i = 0; i < header->num_locals; ++i) {
12933 emit_init_local (cfg, i, header->locals [i], init_locals);
12937 if (cfg->init_ref_vars && cfg->method == method) {
12938 /* Emit initialization for ref vars */
12939 // FIXME: Avoid duplication initialization for IL locals.
12940 for (i = 0; i < cfg->num_varinfo; ++i) {
12941 MonoInst *ins = cfg->varinfo [i];
12943 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12944 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12948 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12949 cfg->cbb = init_localsbb;
12950 emit_push_lmf (cfg);
12953 cfg->cbb = init_localsbb;
12954 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12956 if (seq_points) {
12957 MonoBasicBlock *bb;
12960 * Make seq points at backward branch targets interruptable.
12962 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12963 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12964 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12967 /* Add a sequence point for method entry/exit events */
12968 if (seq_points && cfg->gen_sdb_seq_points) {
12969 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12970 MONO_ADD_INS (init_localsbb, ins);
12971 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12972 MONO_ADD_INS (cfg->bb_exit, ins);
12976 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12977 * the code they refer to was dead (#11880).
12979 if (sym_seq_points) {
12980 for (i = 0; i < header->code_size; ++i) {
12981 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12982 MonoInst *ins;
12984 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12985 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12990 cfg->ip = NULL;
12992 if (cfg->method == method) {
12993 MonoBasicBlock *bb;
12994 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12995 if (bb == cfg->bb_init)
12996 bb->region = -1;
12997 else
12998 bb->region = mono_find_block_region (cfg, bb->real_offset);
12999 if (cfg->spvars)
13000 mono_create_spvar_for_region (cfg, bb->region);
13001 if (cfg->verbose_level > 2)
13002 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13004 } else {
13005 MonoBasicBlock *bb;
13006 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13007 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13008 bb->real_offset = inline_offset;
13012 if (inline_costs < 0) {
13013 char *mname;
13015 /* Method is too large */
13016 mname = mono_method_full_name (method, TRUE);
13017 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13018 g_free (mname);
13021 if ((cfg->verbose_level > 2) && (cfg->method == method))
13022 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13024 goto cleanup;
13026 mono_error_exit:
13027 g_assert (!mono_error_ok (&cfg->error));
13028 goto cleanup;
13030 exception_exit:
13031 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13032 goto cleanup;
13034 unverified:
13035 set_exception_type_from_invalid_il (cfg, method, ip);
13036 goto cleanup;
13038 cleanup:
13039 g_slist_free (class_inits);
13040 mono_basic_block_free (original_bb);
13041 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13042 if (cfg->exception_type)
13043 return -1;
13044 else
13045 return inline_costs;
13048 static int
13049 store_membase_reg_to_store_membase_imm (int opcode)
13051 switch (opcode) {
13052 case OP_STORE_MEMBASE_REG:
13053 return OP_STORE_MEMBASE_IMM;
13054 case OP_STOREI1_MEMBASE_REG:
13055 return OP_STOREI1_MEMBASE_IMM;
13056 case OP_STOREI2_MEMBASE_REG:
13057 return OP_STOREI2_MEMBASE_IMM;
13058 case OP_STOREI4_MEMBASE_REG:
13059 return OP_STOREI4_MEMBASE_IMM;
13060 case OP_STOREI8_MEMBASE_REG:
13061 return OP_STOREI8_MEMBASE_IMM;
13062 default:
13063 g_assert_not_reached ();
13066 return -1;
13070 mono_op_to_op_imm (int opcode)
13072 switch (opcode) {
13073 case OP_IADD:
13074 return OP_IADD_IMM;
13075 case OP_ISUB:
13076 return OP_ISUB_IMM;
13077 case OP_IDIV:
13078 return OP_IDIV_IMM;
13079 case OP_IDIV_UN:
13080 return OP_IDIV_UN_IMM;
13081 case OP_IREM:
13082 return OP_IREM_IMM;
13083 case OP_IREM_UN:
13084 return OP_IREM_UN_IMM;
13085 case OP_IMUL:
13086 return OP_IMUL_IMM;
13087 case OP_IAND:
13088 return OP_IAND_IMM;
13089 case OP_IOR:
13090 return OP_IOR_IMM;
13091 case OP_IXOR:
13092 return OP_IXOR_IMM;
13093 case OP_ISHL:
13094 return OP_ISHL_IMM;
13095 case OP_ISHR:
13096 return OP_ISHR_IMM;
13097 case OP_ISHR_UN:
13098 return OP_ISHR_UN_IMM;
13100 case OP_LADD:
13101 return OP_LADD_IMM;
13102 case OP_LSUB:
13103 return OP_LSUB_IMM;
13104 case OP_LAND:
13105 return OP_LAND_IMM;
13106 case OP_LOR:
13107 return OP_LOR_IMM;
13108 case OP_LXOR:
13109 return OP_LXOR_IMM;
13110 case OP_LSHL:
13111 return OP_LSHL_IMM;
13112 case OP_LSHR:
13113 return OP_LSHR_IMM;
13114 case OP_LSHR_UN:
13115 return OP_LSHR_UN_IMM;
13116 #if SIZEOF_REGISTER == 8
13117 case OP_LREM:
13118 return OP_LREM_IMM;
13119 #endif
13121 case OP_COMPARE:
13122 return OP_COMPARE_IMM;
13123 case OP_ICOMPARE:
13124 return OP_ICOMPARE_IMM;
13125 case OP_LCOMPARE:
13126 return OP_LCOMPARE_IMM;
13128 case OP_STORE_MEMBASE_REG:
13129 return OP_STORE_MEMBASE_IMM;
13130 case OP_STOREI1_MEMBASE_REG:
13131 return OP_STOREI1_MEMBASE_IMM;
13132 case OP_STOREI2_MEMBASE_REG:
13133 return OP_STOREI2_MEMBASE_IMM;
13134 case OP_STOREI4_MEMBASE_REG:
13135 return OP_STOREI4_MEMBASE_IMM;
13137 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13138 case OP_X86_PUSH:
13139 return OP_X86_PUSH_IMM;
13140 case OP_X86_COMPARE_MEMBASE_REG:
13141 return OP_X86_COMPARE_MEMBASE_IMM;
13142 #endif
13143 #if defined(TARGET_AMD64)
13144 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13145 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13146 #endif
13147 case OP_VOIDCALL_REG:
13148 return OP_VOIDCALL;
13149 case OP_CALL_REG:
13150 return OP_CALL;
13151 case OP_LCALL_REG:
13152 return OP_LCALL;
13153 case OP_FCALL_REG:
13154 return OP_FCALL;
13155 case OP_LOCALLOC:
13156 return OP_LOCALLOC_IMM;
13159 return -1;
13162 static int
13163 ldind_to_load_membase (int opcode)
13165 switch (opcode) {
13166 case CEE_LDIND_I1:
13167 return OP_LOADI1_MEMBASE;
13168 case CEE_LDIND_U1:
13169 return OP_LOADU1_MEMBASE;
13170 case CEE_LDIND_I2:
13171 return OP_LOADI2_MEMBASE;
13172 case CEE_LDIND_U2:
13173 return OP_LOADU2_MEMBASE;
13174 case CEE_LDIND_I4:
13175 return OP_LOADI4_MEMBASE;
13176 case CEE_LDIND_U4:
13177 return OP_LOADU4_MEMBASE;
13178 case CEE_LDIND_I:
13179 return OP_LOAD_MEMBASE;
13180 case CEE_LDIND_REF:
13181 return OP_LOAD_MEMBASE;
13182 case CEE_LDIND_I8:
13183 return OP_LOADI8_MEMBASE;
13184 case CEE_LDIND_R4:
13185 return OP_LOADR4_MEMBASE;
13186 case CEE_LDIND_R8:
13187 return OP_LOADR8_MEMBASE;
13188 default:
13189 g_assert_not_reached ();
13192 return -1;
13195 static int
13196 stind_to_store_membase (int opcode)
13198 switch (opcode) {
13199 case CEE_STIND_I1:
13200 return OP_STOREI1_MEMBASE_REG;
13201 case CEE_STIND_I2:
13202 return OP_STOREI2_MEMBASE_REG;
13203 case CEE_STIND_I4:
13204 return OP_STOREI4_MEMBASE_REG;
13205 case CEE_STIND_I:
13206 case CEE_STIND_REF:
13207 return OP_STORE_MEMBASE_REG;
13208 case CEE_STIND_I8:
13209 return OP_STOREI8_MEMBASE_REG;
13210 case CEE_STIND_R4:
13211 return OP_STORER4_MEMBASE_REG;
13212 case CEE_STIND_R8:
13213 return OP_STORER8_MEMBASE_REG;
13214 default:
13215 g_assert_not_reached ();
13218 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the LOAD*_MEM variant which loads from a
 * constant absolute address, or return -1 if the target has no such opcode.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE: return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return a variant of OPCODE which operates directly on the memory
 * destination written by STORE_OPCODE (base + offset) instead of on a
 * register, or -1 when the fold is not possible on the current target.
 * OP_MOVE maps to OP_NOP since the store itself performs the move.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG)
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE: return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG && store_opcode != OP_STOREI8_MEMBASE_REG)
		return -1;

	switch (opcode) {
	/* 32 bit ops */
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	/* 64 bit ops */
	case OP_LADD: return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB: return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND: return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR: return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR: return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM: return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE: return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return a setcc variant of OPCODE which writes its result directly to the
 * memory destination of STORE_OPCODE, or -1 when no fold is possible.
 * Only OP_ICEQ/OP_CNE with a one byte store can be folded on x86/amd64.
 *
 * The original code fell through from OP_ICEQ into OP_CNE; that was benign
 * only because both arms test the same store_opcode condition. Make the
 * breaks explicit so the intent is clear and the trap is removed.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return a variant of OPCODE which folds the load of its first source
 * operand (performed by LOAD_OPCODE) into the instruction itself, or -1
 * when the fold is not possible on the current target.
 */
static inline int
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only pointer sized / 32 bit loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* push needs a full pointer sized operand (OP_LOAD_MEMBASE is 64 bit unless ilp32) */
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		/* under ilp32, OP_LOAD_MEMBASE is a 32 bit load */
		if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
13424 static inline int
13425 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13427 #ifdef TARGET_X86
13428 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13429 return -1;
13431 switch (opcode) {
13432 case OP_COMPARE:
13433 case OP_ICOMPARE:
13434 return OP_X86_COMPARE_REG_MEMBASE;
13435 case OP_IADD:
13436 return OP_X86_ADD_REG_MEMBASE;
13437 case OP_ISUB:
13438 return OP_X86_SUB_REG_MEMBASE;
13439 case OP_IAND:
13440 return OP_X86_AND_REG_MEMBASE;
13441 case OP_IOR:
13442 return OP_X86_OR_REG_MEMBASE;
13443 case OP_IXOR:
13444 return OP_X86_XOR_REG_MEMBASE;
13446 #endif
13448 #ifdef TARGET_AMD64
13449 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13450 switch (opcode) {
13451 case OP_ICOMPARE:
13452 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13453 case OP_IADD:
13454 return OP_X86_ADD_REG_MEMBASE;
13455 case OP_ISUB:
13456 return OP_X86_SUB_REG_MEMBASE;
13457 case OP_IAND:
13458 return OP_X86_AND_REG_MEMBASE;
13459 case OP_IOR:
13460 return OP_X86_OR_REG_MEMBASE;
13461 case OP_IXOR:
13462 return OP_X86_XOR_REG_MEMBASE;
13464 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13465 switch (opcode) {
13466 case OP_COMPARE:
13467 case OP_LCOMPARE:
13468 return OP_AMD64_COMPARE_REG_MEMBASE;
13469 case OP_LADD:
13470 return OP_AMD64_ADD_REG_MEMBASE;
13471 case OP_LSUB:
13472 return OP_AMD64_SUB_REG_MEMBASE;
13473 case OP_LAND:
13474 return OP_AMD64_AND_REG_MEMBASE;
13475 case OP_LOR:
13476 return OP_AMD64_OR_REG_MEMBASE;
13477 case OP_LXOR:
13478 return OP_AMD64_XOR_REG_MEMBASE;
13481 #endif
13483 return -1;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes whose immediate
 * form would be emulated by a runtime helper on this architecture, since
 * folding the immediate would then be pointless.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* long shifts are emulated on 32 bit targets */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	if (opcode == OP_IMUL)
		return -1;
#endif
	return mono_op_to_op_imm (opcode);
}
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them.
 */
void
mono_handle_global_vregs (MonoCompile *cfg)
{
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;
	int i, pos;

	/*
	 * Maps vreg -> (block_num + 1) of the single bblock which uses it, or -1
	 * once it has been seen in more than one bblock; 0 means 'not seen yet'.
	 * NOTE(review): 'sizeof (gint32*)' looks like it should be 'sizeof (gint32)';
	 * pointers are at least as large as gint32 so this only over-allocates.
	 */
	vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);
#endif

	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;

		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);

		cfg->cbb = bb;
		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;
			gint32 prev_bb;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			g_assert (ins->opcode >= MONO_CEE_LAST);

			/* Visit the dreg and up to three sregs of the instruction */
			for (regindex = 0; regindex < 4; regindex ++) {
				int vreg = 0;

				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
						continue;
					vreg = ins->dreg;
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg1;
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg2;
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg3;
				}

#if SIZEOF_REGISTER == 4
				/* In the LLVM case, the long opcodes are not decomposed */
				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
					/*
					 * Since some instructions reference the original long vreg,
					 * and some reference the two component vregs, it is quite hard
					 * to determine when it needs to be global. So be conservative.
					 */
					if (!get_vreg_to_inst (cfg, vreg)) {
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);

						if (cfg->verbose_level > 2)
							printf ("LONG VREG R%d made global.\n", vreg);
					}

					/*
					 * Make the component vregs volatile since the optimizations can
					 * get confused otherwise.
					 */
					get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
					get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
				}
#endif

				g_assert (vreg != -1);

				prev_bb = vreg_to_bb [vreg];
				if (prev_bb == 0) {
					/* 0 is a valid block num */
					vreg_to_bb [vreg] = block_num + 1;
				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					/* Hard registers don't need a backing variable */
					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
						continue;

					if (!get_vreg_to_inst (cfg, vreg)) {
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);

						/* Create a variable whose type matches the regtype of the vreg */
						switch (regtype) {
						case 'i':
							if (vreg_is_ref (cfg, vreg))
								mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
							else
								mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'l':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'f':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'v':
						case 'x':
							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
							break;
						default:
							g_assert_not_reached ();
						}
					}

					/* Flag as having been used in more than one bb */
					vreg_to_bb [vreg] = -1;
				}
			}
		}
	}

	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		switch (var->type) {
		case STACK_I4:
		case STACK_OBJ:
		case STACK_PTR:
		case STACK_MP:
		case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
		case STACK_I8:
#endif
#if !defined(TARGET_X86)
		/* Enabling this screws up the fp stack on x86 */
		case STACK_R8:
#endif
			if (mono_arch_is_soft_float ())
				break;

			/*
			if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
				break;
			*/

			/* Arguments are implicitly global */
			/* Putting R4 vars into registers doesn't work currently */
			/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
				/*
				 * Make that the variable's liveness interval doesn't contain a call, since
				 * that would cause the lvreg to be spilled, making the whole optimization
				 * useless.
				 */
				/* This is too slow for JIT compilation */
#if 0
				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
					MonoInst *ins;
					int def_index, call_index, ins_index;
					gboolean spilled = FALSE;

					def_index = -1;
					call_index = -1;
					ins_index = 0;
					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
						const char *spec = INS_INFO (ins->opcode);

						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
							def_index = ins_index;

						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
							if (call_index > def_index) {
								spilled = TRUE;
								break;
							}
						}

						if (MONO_IS_CALL (ins))
							call_index = ins_index;

						ins_index ++;
					}

					if (spilled)
						break;
				}
#endif

				if (G_UNLIKELY (cfg->verbose_level > 2))
					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
				var->flags |= MONO_INST_IS_DEAD;
				cfg->vreg_to_inst [var->dreg] = NULL;
			}
			break;
		}
	}

	/*
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	 */
	pos = 0;
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			if (pos < i) {
				cfg->varinfo [pos] = cfg->varinfo [i];
				cfg->varinfo [pos]->inst_c0 = pos;
				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
				cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
				if (cfg->varinfo [pos]->type == STACK_I8) {
					/* Modify the two component vars too */
					MonoInst *var1;

					var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
					var1->inst_c0 = pos;
					var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
					var1->inst_c0 = pos;
				}
#endif
			}
			pos ++;
		}
	}
	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
}
13758 * mono_allocate_gsharedvt_vars:
13760 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13761 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13763 void
13764 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
13766 int i;
13768 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13770 for (i = 0; i < cfg->num_varinfo; ++i) {
13771 MonoInst *ins = cfg->varinfo [i];
13772 int idx;
13774 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13775 if (i >= cfg->locals_start) {
13776 /* Local */
13777 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13778 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13779 ins->opcode = OP_GSHAREDVT_LOCAL;
13780 ins->inst_imm = idx;
13781 } else {
13782 /* Arg */
13783 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13784 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13791 * mono_spill_global_vars:
13793 * Generate spill code for variables which are not allocated to registers,
13794 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13795 * code is generated which could be optimized by the local optimization passes.
13797 void
13798 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13800 MonoBasicBlock *bb;
13801 char spec2 [16];
13802 int orig_next_vreg;
13803 guint32 *vreg_to_lvreg;
13804 guint32 *lvregs;
13805 guint32 i, lvregs_len;
13806 gboolean dest_has_lvreg = FALSE;
13807 MonoStackType stacktypes [128];
13808 MonoInst **live_range_start, **live_range_end;
13809 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13811 *need_local_opts = FALSE;
13813 memset (spec2, 0, sizeof (spec2));
13815 /* FIXME: Move this function to mini.c */
13816 stacktypes ['i'] = STACK_PTR;
13817 stacktypes ['l'] = STACK_I8;
13818 stacktypes ['f'] = STACK_R8;
13819 #ifdef MONO_ARCH_SIMD_INTRINSICS
13820 stacktypes ['x'] = STACK_VTYPE;
13821 #endif
13823 #if SIZEOF_REGISTER == 4
13824 /* Create MonoInsts for longs */
13825 for (i = 0; i < cfg->num_varinfo; i++) {
13826 MonoInst *ins = cfg->varinfo [i];
13828 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13829 switch (ins->type) {
13830 case STACK_R8:
13831 case STACK_I8: {
13832 MonoInst *tree;
13834 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13835 break;
13837 g_assert (ins->opcode == OP_REGOFFSET);
13839 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13840 g_assert (tree);
13841 tree->opcode = OP_REGOFFSET;
13842 tree->inst_basereg = ins->inst_basereg;
13843 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13845 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13846 g_assert (tree);
13847 tree->opcode = OP_REGOFFSET;
13848 tree->inst_basereg = ins->inst_basereg;
13849 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13850 break;
13852 default:
13853 break;
13857 #endif
13859 if (cfg->compute_gc_maps) {
13860 /* registers need liveness info even for !non refs */
13861 for (i = 0; i < cfg->num_varinfo; i++) {
13862 MonoInst *ins = cfg->varinfo [i];
13864 if (ins->opcode == OP_REGVAR)
13865 ins->flags |= MONO_INST_GC_TRACK;
13869 /* FIXME: widening and truncation */
13872 * As an optimization, when a variable allocated to the stack is first loaded into
13873 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13874 * the variable again.
13876 orig_next_vreg = cfg->next_vreg;
13877 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13878 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13879 lvregs_len = 0;
13882 * These arrays contain the first and last instructions accessing a given
13883 * variable.
13884 * Since we emit bblocks in the same order we process them here, and we
13885 * don't split live ranges, these will precisely describe the live range of
13886 * the variable, i.e. the instruction range where a valid value can be found
13887 * in the variables location.
13888 * The live range is computed using the liveness info computed by the liveness pass.
13889 * We can't use vmv->range, since that is an abstract live range, and we need
13890 * one which is instruction precise.
13891 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13893 /* FIXME: Only do this if debugging info is requested */
13894 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13895 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13896 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13897 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13899 /* Add spill loads/stores */
13900 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13901 MonoInst *ins;
13903 if (cfg->verbose_level > 2)
13904 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13906 /* Clear vreg_to_lvreg array */
13907 for (i = 0; i < lvregs_len; i++)
13908 vreg_to_lvreg [lvregs [i]] = 0;
13909 lvregs_len = 0;
13911 cfg->cbb = bb;
13912 MONO_BB_FOR_EACH_INS (bb, ins) {
13913 const char *spec = INS_INFO (ins->opcode);
13914 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13915 gboolean store, no_lvreg;
13916 int sregs [MONO_MAX_SRC_REGS];
13918 if (G_UNLIKELY (cfg->verbose_level > 2))
13919 mono_print_ins (ins);
13921 if (ins->opcode == OP_NOP)
13922 continue;
13925 * We handle LDADDR here as well, since it can only be decomposed
13926 * when variable addresses are known.
13928 if (ins->opcode == OP_LDADDR) {
13929 MonoInst *var = (MonoInst *)ins->inst_p0;
13931 if (var->opcode == OP_VTARG_ADDR) {
13932 /* Happens on SPARC/S390 where vtypes are passed by reference */
13933 MonoInst *vtaddr = var->inst_left;
13934 if (vtaddr->opcode == OP_REGVAR) {
13935 ins->opcode = OP_MOVE;
13936 ins->sreg1 = vtaddr->dreg;
13938 else if (var->inst_left->opcode == OP_REGOFFSET) {
13939 ins->opcode = OP_LOAD_MEMBASE;
13940 ins->inst_basereg = vtaddr->inst_basereg;
13941 ins->inst_offset = vtaddr->inst_offset;
13942 } else
13943 NOT_IMPLEMENTED;
13944 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13945 /* gsharedvt arg passed by ref */
13946 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13948 ins->opcode = OP_LOAD_MEMBASE;
13949 ins->inst_basereg = var->inst_basereg;
13950 ins->inst_offset = var->inst_offset;
13951 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13952 MonoInst *load, *load2, *load3;
13953 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13954 int reg1, reg2, reg3;
13955 MonoInst *info_var = cfg->gsharedvt_info_var;
13956 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13959 * gsharedvt local.
13960 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13963 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13965 g_assert (info_var);
13966 g_assert (locals_var);
13968 /* Mark the instruction used to compute the locals var as used */
13969 cfg->gsharedvt_locals_var_ins = NULL;
13971 /* Load the offset */
13972 if (info_var->opcode == OP_REGOFFSET) {
13973 reg1 = alloc_ireg (cfg);
13974 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13975 } else if (info_var->opcode == OP_REGVAR) {
13976 load = NULL;
13977 reg1 = info_var->dreg;
13978 } else {
13979 g_assert_not_reached ();
13981 reg2 = alloc_ireg (cfg);
13982 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13983 /* Load the locals area address */
13984 reg3 = alloc_ireg (cfg);
13985 if (locals_var->opcode == OP_REGOFFSET) {
13986 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13987 } else if (locals_var->opcode == OP_REGVAR) {
13988 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13989 } else {
13990 g_assert_not_reached ();
13992 /* Compute the address */
13993 ins->opcode = OP_PADD;
13994 ins->sreg1 = reg3;
13995 ins->sreg2 = reg2;
13997 mono_bblock_insert_before_ins (bb, ins, load3);
13998 mono_bblock_insert_before_ins (bb, load3, load2);
13999 if (load)
14000 mono_bblock_insert_before_ins (bb, load2, load);
14001 } else {
14002 g_assert (var->opcode == OP_REGOFFSET);
14004 ins->opcode = OP_ADD_IMM;
14005 ins->sreg1 = var->inst_basereg;
14006 ins->inst_imm = var->inst_offset;
14009 *need_local_opts = TRUE;
14010 spec = INS_INFO (ins->opcode);
14013 if (ins->opcode < MONO_CEE_LAST) {
14014 mono_print_ins (ins);
14015 g_assert_not_reached ();
14019 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14020 * src register.
14021 * FIXME:
14023 if (MONO_IS_STORE_MEMBASE (ins)) {
14024 tmp_reg = ins->dreg;
14025 ins->dreg = ins->sreg2;
14026 ins->sreg2 = tmp_reg;
14027 store = TRUE;
14029 spec2 [MONO_INST_DEST] = ' ';
14030 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14031 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14032 spec2 [MONO_INST_SRC3] = ' ';
14033 spec = spec2;
14034 } else if (MONO_IS_STORE_MEMINDEX (ins))
14035 g_assert_not_reached ();
14036 else
14037 store = FALSE;
14038 no_lvreg = FALSE;
14040 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14041 printf ("\t %.3s %d", spec, ins->dreg);
14042 num_sregs = mono_inst_get_src_registers (ins, sregs);
14043 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14044 printf (" %d", sregs [srcindex]);
14045 printf ("\n");
14048 /***************/
14049 /* DREG */
14050 /***************/
14051 regtype = spec [MONO_INST_DEST];
14052 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14053 prev_dreg = -1;
14055 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14056 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14057 MonoInst *store_ins;
14058 int store_opcode;
14059 MonoInst *def_ins = ins;
14060 int dreg = ins->dreg; /* The original vreg */
14062 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14064 if (var->opcode == OP_REGVAR) {
14065 ins->dreg = var->dreg;
14066 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14068 * Instead of emitting a load+store, use a _membase opcode.
14070 g_assert (var->opcode == OP_REGOFFSET);
14071 if (ins->opcode == OP_MOVE) {
14072 NULLIFY_INS (ins);
14073 def_ins = NULL;
14074 } else {
14075 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14076 ins->inst_basereg = var->inst_basereg;
14077 ins->inst_offset = var->inst_offset;
14078 ins->dreg = -1;
14080 spec = INS_INFO (ins->opcode);
14081 } else {
14082 guint32 lvreg;
14084 g_assert (var->opcode == OP_REGOFFSET);
14086 prev_dreg = ins->dreg;
14088 /* Invalidate any previous lvreg for this vreg */
14089 vreg_to_lvreg [ins->dreg] = 0;
14091 lvreg = 0;
14093 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14094 regtype = 'l';
14095 store_opcode = OP_STOREI8_MEMBASE_REG;
14098 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14100 #if SIZEOF_REGISTER != 8
14101 if (regtype == 'l') {
14102 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14103 mono_bblock_insert_after_ins (bb, ins, store_ins);
14104 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14105 mono_bblock_insert_after_ins (bb, ins, store_ins);
14106 def_ins = store_ins;
14108 else
14109 #endif
14111 g_assert (store_opcode != OP_STOREV_MEMBASE);
14113 /* Try to fuse the store into the instruction itself */
14114 /* FIXME: Add more instructions */
14115 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14116 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14117 ins->inst_imm = ins->inst_c0;
14118 ins->inst_destbasereg = var->inst_basereg;
14119 ins->inst_offset = var->inst_offset;
14120 spec = INS_INFO (ins->opcode);
14121 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14122 ins->opcode = store_opcode;
14123 ins->inst_destbasereg = var->inst_basereg;
14124 ins->inst_offset = var->inst_offset;
14126 no_lvreg = TRUE;
14128 tmp_reg = ins->dreg;
14129 ins->dreg = ins->sreg2;
14130 ins->sreg2 = tmp_reg;
14131 store = TRUE;
14133 spec2 [MONO_INST_DEST] = ' ';
14134 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14135 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14136 spec2 [MONO_INST_SRC3] = ' ';
14137 spec = spec2;
14138 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14139 // FIXME: The backends expect the base reg to be in inst_basereg
14140 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14141 ins->dreg = -1;
14142 ins->inst_basereg = var->inst_basereg;
14143 ins->inst_offset = var->inst_offset;
14144 spec = INS_INFO (ins->opcode);
14145 } else {
14146 /* printf ("INS: "); mono_print_ins (ins); */
14147 /* Create a store instruction */
14148 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14150 /* Insert it after the instruction */
14151 mono_bblock_insert_after_ins (bb, ins, store_ins);
14153 def_ins = store_ins;
14156 * We can't assign ins->dreg to var->dreg here, since the
14157 * sregs could use it. So set a flag, and do it after
14158 * the sregs.
14160 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14161 dest_has_lvreg = TRUE;
14166 if (def_ins && !live_range_start [dreg]) {
14167 live_range_start [dreg] = def_ins;
14168 live_range_start_bb [dreg] = bb;
14171 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14172 MonoInst *tmp;
14174 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14175 tmp->inst_c1 = dreg;
14176 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14180 /************/
14181 /* SREGS */
14182 /************/
14183 num_sregs = mono_inst_get_src_registers (ins, sregs);
14184 for (srcindex = 0; srcindex < 3; ++srcindex) {
14185 regtype = spec [MONO_INST_SRC1 + srcindex];
14186 sreg = sregs [srcindex];
14188 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14189 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14190 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14191 MonoInst *use_ins = ins;
14192 MonoInst *load_ins;
14193 guint32 load_opcode;
14195 if (var->opcode == OP_REGVAR) {
14196 sregs [srcindex] = var->dreg;
14197 //mono_inst_set_src_registers (ins, sregs);
14198 live_range_end [sreg] = use_ins;
14199 live_range_end_bb [sreg] = bb;
14201 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14202 MonoInst *tmp;
14204 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14205 /* var->dreg is a hreg */
14206 tmp->inst_c1 = sreg;
14207 mono_bblock_insert_after_ins (bb, ins, tmp);
14210 continue;
14213 g_assert (var->opcode == OP_REGOFFSET);
14215 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14217 g_assert (load_opcode != OP_LOADV_MEMBASE);
14219 if (vreg_to_lvreg [sreg]) {
14220 g_assert (vreg_to_lvreg [sreg] != -1);
14222 /* The variable is already loaded to an lvreg */
14223 if (G_UNLIKELY (cfg->verbose_level > 2))
14224 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14225 sregs [srcindex] = vreg_to_lvreg [sreg];
14226 //mono_inst_set_src_registers (ins, sregs);
14227 continue;
14230 /* Try to fuse the load into the instruction */
14231 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14232 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14233 sregs [0] = var->inst_basereg;
14234 //mono_inst_set_src_registers (ins, sregs);
14235 ins->inst_offset = var->inst_offset;
14236 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14237 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14238 sregs [1] = var->inst_basereg;
14239 //mono_inst_set_src_registers (ins, sregs);
14240 ins->inst_offset = var->inst_offset;
14241 } else {
14242 if (MONO_IS_REAL_MOVE (ins)) {
14243 ins->opcode = OP_NOP;
14244 sreg = ins->dreg;
14245 } else {
14246 //printf ("%d ", srcindex); mono_print_ins (ins);
14248 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14250 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14251 if (var->dreg == prev_dreg) {
14253 * sreg refers to the value loaded by the load
14254 * emitted below, but we need to use ins->dreg
14255 * since it refers to the store emitted earlier.
14257 sreg = ins->dreg;
14259 g_assert (sreg != -1);
14260 vreg_to_lvreg [var->dreg] = sreg;
14261 g_assert (lvregs_len < 1024);
14262 lvregs [lvregs_len ++] = var->dreg;
14266 sregs [srcindex] = sreg;
14267 //mono_inst_set_src_registers (ins, sregs);
14269 #if SIZEOF_REGISTER != 8
14270 if (regtype == 'l') {
14271 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14272 mono_bblock_insert_before_ins (bb, ins, load_ins);
14273 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14274 mono_bblock_insert_before_ins (bb, ins, load_ins);
14275 use_ins = load_ins;
14277 else
14278 #endif
14280 #if SIZEOF_REGISTER == 4
14281 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14282 #endif
14283 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14284 mono_bblock_insert_before_ins (bb, ins, load_ins);
14285 use_ins = load_ins;
14289 if (var->dreg < orig_next_vreg) {
14290 live_range_end [var->dreg] = use_ins;
14291 live_range_end_bb [var->dreg] = bb;
14294 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14295 MonoInst *tmp;
14297 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14298 tmp->inst_c1 = var->dreg;
14299 mono_bblock_insert_after_ins (bb, ins, tmp);
14303 mono_inst_set_src_registers (ins, sregs);
14305 if (dest_has_lvreg) {
14306 g_assert (ins->dreg != -1);
14307 vreg_to_lvreg [prev_dreg] = ins->dreg;
14308 g_assert (lvregs_len < 1024);
14309 lvregs [lvregs_len ++] = prev_dreg;
14310 dest_has_lvreg = FALSE;
14313 if (store) {
14314 tmp_reg = ins->dreg;
14315 ins->dreg = ins->sreg2;
14316 ins->sreg2 = tmp_reg;
14319 if (MONO_IS_CALL (ins)) {
14320 /* Clear vreg_to_lvreg array */
14321 for (i = 0; i < lvregs_len; i++)
14322 vreg_to_lvreg [lvregs [i]] = 0;
14323 lvregs_len = 0;
14324 } else if (ins->opcode == OP_NOP) {
14325 ins->dreg = -1;
14326 MONO_INST_NULLIFY_SREGS (ins);
14329 if (cfg->verbose_level > 2)
14330 mono_print_ins_index (1, ins);
14333 /* Extend the live range based on the liveness info */
14334 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14335 for (i = 0; i < cfg->num_varinfo; i ++) {
14336 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14338 if (vreg_is_volatile (cfg, vi->vreg))
14339 /* The liveness info is incomplete */
14340 continue;
14342 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14343 /* Live from at least the first ins of this bb */
14344 live_range_start [vi->vreg] = bb->code;
14345 live_range_start_bb [vi->vreg] = bb;
14348 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14349 /* Live at least until the last ins of this bb */
14350 live_range_end [vi->vreg] = bb->last_ins;
14351 live_range_end_bb [vi->vreg] = bb;
14358 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14359 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14361 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14362 for (i = 0; i < cfg->num_varinfo; ++i) {
14363 int vreg = MONO_VARINFO (cfg, i)->vreg;
14364 MonoInst *ins;
14366 if (live_range_start [vreg]) {
14367 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14368 ins->inst_c0 = i;
14369 ins->inst_c1 = vreg;
14370 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14372 if (live_range_end [vreg]) {
14373 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14374 ins->inst_c0 = i;
14375 ins->inst_c1 = vreg;
14376 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14377 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14378 else
14379 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14384 if (cfg->gsharedvt_locals_var_ins) {
14385 /* Nullify if unused */
14386 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14387 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14390 g_free (live_range_start);
14391 g_free (live_range_end);
14392 g_free (live_range_start_bb);
14393 g_free (live_range_end_bb);
14398 * FIXME:
14399 * - use 'iadd' instead of 'int_add'
14400 * - handling ovf opcodes: decompose in method_to_ir.
14401 * - unify iregs/fregs
14402 * -> partly done, the missing parts are:
14403 * - a more complete unification would involve unifying the hregs as well, so
14404 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14405 * would no longer map to the machine hregs, so the code generators would need to
14406 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14407 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14408 * fp/non-fp branches speeds it up by about 15%.
14409 * - use sext/zext opcodes instead of shifts
14410 * - add OP_ICALL
14411 * - get rid of TEMPLOADs if possible and use vregs instead
14412 * - clean up usage of OP_P/OP_ opcodes
14413 * - cleanup usage of DUMMY_USE
14414 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14415 * stack
14416 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14417 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14418 * - make sure handle_stack_args () is called before the branch is emitted
14419 * - when the new IR is done, get rid of all unused stuff
14420 * - COMPARE/BEQ as separate instructions or unify them ?
14421 * - keeping them separate allows specialized compare instructions like
14422 * compare_imm, compare_membase
14423 * - most back ends unify fp compare+branch, fp compare+ceq
14424 * - integrate mono_save_args into inline_method
14425  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14426 * - handle long shift opts on 32 bit platforms somehow: they require
14427 * 3 sregs (2 for arg1 and 1 for arg2)
14428 * - make byref a 'normal' type.
14429 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14430 * variable if needed.
14431 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14432 * like inline_method.
14433 * - remove inlining restrictions
14434 * - fix LNEG and enable cfold of INEG
14435 * - generalize x86 optimizations like ldelema as a peephole optimization
14436 * - add store_mem_imm for amd64
14437 * - optimize the loading of the interruption flag in the managed->native wrappers
14438 * - avoid special handling of OP_NOP in passes
14439 * - move code inserting instructions into one function/macro.
14440 * - try a coalescing phase after liveness analysis
14441 * - add float -> vreg conversion + local optimizations on !x86
14442 * - figure out how to handle decomposed branches during optimizations, ie.
14443 * compare+branch, op_jump_table+op_br etc.
14444 * - promote RuntimeXHandles to vregs
14445 * - vtype cleanups:
14446 * - add a NEW_VARLOADA_VREG macro
14447 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14448 * accessing vtype fields.
14449 * - get rid of I8CONST on 64 bit platforms
14450 * - dealing with the increase in code size due to branches created during opcode
14451 * decomposition:
14452 * - use extended basic blocks
14453 * - all parts of the JIT
14454 * - handle_global_vregs () && local regalloc
14455 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14456 * - sources of increase in code size:
14457 * - vtypes
14458 * - long compares
14459 * - isinst and castclass
14460 * - lvregs not allocated to global registers even if used multiple times
14461 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14462 * meaningful.
14463 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14464 * - add all micro optimizations from the old JIT
14465 * - put tree optimizations into the deadce pass
14466 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14467 * specific function.
14468 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14469 * fcompare + branchCC.
14470 * - create a helper function for allocating a stack slot, taking into account
14471 * MONO_CFG_HAS_SPILLUP.
14472 * - merge r68207.
14473 * - merge the ia64 switch changes.
14474 * - optimize mono_regstate2_alloc_int/float.
14475 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14476 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14477 * parts of the tree could be separated by other instructions, killing the tree
14478 * arguments, or stores killing loads etc. Also, should we fold loads into other
14479 * instructions if the result of the load is used multiple times ?
14480 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14481 * - LAST MERGE: 108395.
14482 * - when returning vtypes in registers, generate IR and append it to the end of the
14483 * last bb instead of doing it in the epilog.
14484 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14489 NOTES
14490 -----
14492 - When to decompose opcodes:
14493 - earlier: this makes some optimizations hard to implement, since the low level IR
14494    no longer contains the necessary information. But it is easier to do.
14495 - later: harder to implement, enables more optimizations.
14496 - Branches inside bblocks:
14497 - created when decomposing complex opcodes.
14498 - branches to another bblock: harmless, but not tracked by the branch
14499 optimizations, so need to branch to a label at the start of the bblock.
14500 - branches to inside the same bblock: very problematic, trips up the local
14501    reg allocator. Can be fixed by splitting the current bblock, but that is a
14502 complex operation, since some local vregs can become global vregs etc.
14503 - Local/global vregs:
14504 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14505 local register allocator.
14506 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14507 structure, created by mono_create_var (). Assigned to hregs or the stack by
14508 the global register allocator.
14509 - When to do optimizations like alu->alu_imm:
14510 - earlier -> saves work later on since the IR will be smaller/simpler
14511 - later -> can work on more instructions
14512 - Handling of valuetypes:
14513 - When a vtype is pushed on the stack, a new temporary is created, an
14514 instruction computing its address (LDADDR) is emitted and pushed on
14515 the stack. Need to optimize cases when the vtype is used immediately as in
14516 argument passing, stloc etc.
14517 - Instead of the to_end stuff in the old JIT, simply call the function handling
14518 the values on the stack before emitting the last instruction of the bb.
14521 #else /* !DISABLE_JIT */
14523 MONO_EMPTY_SOURCE_FILE (method_to_ir);
14525 #endif /* !DISABLE_JIT */