Merge pull request #3734 from kumpera/monoclass_reorg
[mono-project.git] / mono / mini / method-to-ir.c
blob28261c5338f544b708c125f36eebe65ff23b8d17
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
14 #include <config.h>
16 #ifndef DISABLE_JIT
18 #include <signal.h>
20 #ifdef HAVE_UNISTD_H
21 #include <unistd.h>
22 #endif
24 #include <math.h>
25 #include <string.h>
26 #include <ctype.h>
28 #ifdef HAVE_SYS_TIME_H
29 #include <sys/time.h>
30 #endif
32 #ifdef HAVE_ALLOCA_H
33 #include <alloca.h>
34 #endif
36 #include <mono/utils/memcheck.h>
37 #include "mini.h"
38 #include <mono/metadata/abi-details.h>
39 #include <mono/metadata/assembly.h>
40 #include <mono/metadata/attrdefs.h>
41 #include <mono/metadata/loader.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/class.h>
44 #include <mono/metadata/object.h>
45 #include <mono/metadata/exception.h>
46 #include <mono/metadata/opcodes.h>
47 #include <mono/metadata/mono-endian.h>
48 #include <mono/metadata/tokentype.h>
49 #include <mono/metadata/tabledefs.h>
50 #include <mono/metadata/marshal.h>
51 #include <mono/metadata/debug-helpers.h>
52 #include <mono/metadata/mono-debug.h>
53 #include <mono/metadata/mono-debug-debugger.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/metadata/debug-mono-symfile.h>
62 #include <mono/utils/mono-compiler.h>
63 #include <mono/utils/mono-memory-model.h>
64 #include <mono/utils/mono-error-internals.h>
65 #include <mono/metadata/mono-basic-block.h>
66 #include <mono/metadata/reflection-internals.h>
67 #include <mono/utils/mono-threads-coop.h>
69 #include "trace.h"
71 #include "ir-emit.h"
73 #include "jit-icalls.h"
74 #include "jit.h"
75 #include "debugger-agent.h"
76 #include "seq-points.h"
77 #include "aot-compiler.h"
78 #include "mini-llvm.h"
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20

/* These have 'cfg' as an implicit argument */

/* Record an inline failure for the current inlinee and abort compilation of it. */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
		inline_failure (cfg, msg); \
		goto exception_exit; \
	} \
} while (0)

/* Bail out if an earlier step already recorded an exception on the cfg. */
#define CHECK_CFG_EXCEPTION do { \
	if (cfg->exception_type != MONO_EXCEPTION_NONE) \
		goto exception_exit; \
} while (0)

/* Record a FieldAccessException on the cfg and abort. */
#define FIELD_ACCESS_FAILURE(method, field) do { \
	field_access_failure ((cfg), (method), (field)); \
	goto exception_exit; \
} while (0)

/* Abort compilation of a generic-shared method for an unsupported opcode. */
#define GENERIC_SHARING_FAILURE(opcode) do { \
	if (cfg->gshared) { \
		gshared_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
} while (0)

/* Same as above, but only for gsharedvt (variable-size generic sharing). */
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
		goto exception_exit; \
	} \
} while (0)

#define OUT_OF_MEMORY_FAILURE do { \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
	mono_error_set_out_of_memory (&cfg->error, ""); \
	goto exception_exit; \
} while (0)

#define DISABLE_AOT(cfg) do { \
	if ((cfg)->verbose_level >= 2) \
		printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
	(cfg)->disable_aot = TRUE; \
} while (0)

#define LOAD_ERROR do { \
	break_on_unverified (); \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
	goto exception_exit; \
} while (0)

#define TYPE_LOAD_ERROR(klass) do { \
	cfg->exception_ptr = klass; \
	LOAD_ERROR; \
} while (0)

#define CHECK_CFG_ERROR do { \
	if (!mono_error_ok (&cfg->error)) { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
		goto mono_error_exit; \
	} \
} while (0)

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
141 static int ldind_to_load_membase (int opcode);
142 static int stind_to_store_membase (int opcode);
144 int mono_op_to_op_imm (int opcode);
145 int mono_op_to_op_imm_noemul (int opcode);
147 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
149 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
150 guchar *ip, guint real_offset, gboolean inline_always);
151 static MonoInst*
152 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
154 /* helper methods signatures */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
159 /* type loading helpers */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
164 * Instruction metadata
166 #ifdef MINI_OP
167 #undef MINI_OP
168 #endif
169 #ifdef MINI_OP3
170 #undef MINI_OP3
171 #endif
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #define NONE ' '
175 #define IREG 'i'
176 #define FREG 'f'
177 #define VREG 'v'
178 #define XREG 'x'
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
180 #define LREG IREG
181 #else
182 #define LREG 'l'
183 #endif
184 /* keep in sync with the enum in mini.h */
185 const char
186 ins_info[] = {
187 #include "mini-ops.h"
189 #undef MINI_OP
190 #undef MINI_OP3
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
201 #undef MINI_OP
202 #undef MINI_OP3
/* Initialize a MonoMethodVar: no register assigned yet, first-use position unset. */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
210 guint32
211 mono_alloc_ireg (MonoCompile *cfg)
213 return alloc_ireg (cfg);
216 guint32
217 mono_alloc_lreg (MonoCompile *cfg)
219 return alloc_lreg (cfg);
222 guint32
223 mono_alloc_freg (MonoCompile *cfg)
225 return alloc_freg (cfg);
228 guint32
229 mono_alloc_preg (MonoCompile *cfg)
231 return alloc_preg (cfg);
234 guint32
235 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
237 return alloc_dreg (cfg, stack_type);
241 * mono_alloc_ireg_ref:
243 * Allocate an IREG, and mark it as holding a GC ref.
245 guint32
246 mono_alloc_ireg_ref (MonoCompile *cfg)
248 return alloc_ireg_ref (cfg);
252 * mono_alloc_ireg_mp:
254 * Allocate an IREG, and mark it as holding a managed pointer.
256 guint32
257 mono_alloc_ireg_mp (MonoCompile *cfg)
259 return alloc_ireg_mp (cfg);
263 * mono_alloc_ireg_copy:
265 * Allocate an IREG with the same GC type as VREG.
267 guint32
268 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
270 if (vreg_is_ref (cfg, vreg))
271 return alloc_ireg_ref (cfg);
272 else if (vreg_is_mp (cfg, vreg))
273 return alloc_ireg_mp (cfg);
274 else
275 return alloc_ireg (cfg);
278 guint
279 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
281 if (type->byref)
282 return OP_MOVE;
284 type = mini_get_underlying_type (type);
285 handle_enum:
286 switch (type->type) {
287 case MONO_TYPE_I1:
288 case MONO_TYPE_U1:
289 return OP_MOVE;
290 case MONO_TYPE_I2:
291 case MONO_TYPE_U2:
292 return OP_MOVE;
293 case MONO_TYPE_I4:
294 case MONO_TYPE_U4:
295 return OP_MOVE;
296 case MONO_TYPE_I:
297 case MONO_TYPE_U:
298 case MONO_TYPE_PTR:
299 case MONO_TYPE_FNPTR:
300 return OP_MOVE;
301 case MONO_TYPE_CLASS:
302 case MONO_TYPE_STRING:
303 case MONO_TYPE_OBJECT:
304 case MONO_TYPE_SZARRAY:
305 case MONO_TYPE_ARRAY:
306 return OP_MOVE;
307 case MONO_TYPE_I8:
308 case MONO_TYPE_U8:
309 #if SIZEOF_REGISTER == 8
310 return OP_MOVE;
311 #else
312 return OP_LMOVE;
313 #endif
314 case MONO_TYPE_R4:
315 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
316 case MONO_TYPE_R8:
317 return OP_FMOVE;
318 case MONO_TYPE_VALUETYPE:
319 if (type->data.klass->enumtype) {
320 type = mono_class_enum_basetype (type->data.klass);
321 goto handle_enum;
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
324 return OP_XMOVE;
325 return OP_VMOVE;
326 case MONO_TYPE_TYPEDBYREF:
327 return OP_VMOVE;
328 case MONO_TYPE_GENERICINST:
329 type = &type->data.generic_class->container_class->byval_arg;
330 goto handle_enum;
331 case MONO_TYPE_VAR:
332 case MONO_TYPE_MVAR:
333 g_assert (cfg->gshared);
334 if (mini_type_var_is_vt (type))
335 return OP_VMOVE;
336 else
337 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
338 default:
339 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
341 return -1;
344 void
345 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 int i;
348 MonoInst *tree;
350 printf ("\n%s %d: [IN: ", msg, bb->block_num);
351 for (i = 0; i < bb->in_count; ++i)
352 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
353 printf (", OUT: ");
354 for (i = 0; i < bb->out_count; ++i)
355 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
356 printf (" ]\n");
357 for (tree = bb->code; tree; tree = tree->next)
358 mono_print_ins_index (-1, tree);
361 void
362 mono_create_helper_signatures (void)
364 helper_sig_domain_get = mono_create_icall_signature ("ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
369 static MONO_NEVER_INLINE void
370 break_on_unverified (void)
372 if (mini_get_debug_options ()->break_on_unverified)
373 G_BREAKPOINT ();
376 static MONO_NEVER_INLINE void
377 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
379 char *method_fname = mono_method_full_name (method, TRUE);
380 char *field_fname = mono_field_full_name (field);
381 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
382 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
383 g_free (method_fname);
384 g_free (field_fname);
387 static MONO_NEVER_INLINE void
388 inline_failure (MonoCompile *cfg, const char *msg)
390 if (cfg->verbose_level >= 2)
391 printf ("inline failed: %s\n", msg);
392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
395 static MONO_NEVER_INLINE void
396 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
398 if (cfg->verbose_level > 2) \
399 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
403 static MONO_NEVER_INLINE void
404 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
407 if (cfg->verbose_level >= 2)
408 printf ("%s\n", cfg->exception_message);
409 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2) \
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	break_on_unverified (); \
	goto unverified; \
} while (0)

/* Look up (or lazily create and register) the bblock starting at IL offset IP. */
#define GET_BBLOCK(cfg,tblock,ip) do { \
	(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
	if (!(tblock)) { \
		if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
		NEW_BBLOCK (cfg, (tblock)); \
		(tblock)->cil_code = (ip); \
		ADD_BBLOCK (cfg, (tblock)); \
	} \
} while (0)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
	MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
	(dest)->dreg = alloc_ireg_mp ((cfg)); \
	(dest)->sreg1 = (sr1); \
	(dest)->sreg2 = (sr2); \
	(dest)->inst_imm = (imm); \
	(dest)->backend.shift_amount = (shift); \
	MONO_ADD_INS ((cfg)->cbb, (dest)); \
} while (0)
#endif
449 /* Emit conversions so both operands of a binary opcode are of the same type */
450 static void
451 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
453 MonoInst *arg1 = *arg1_ref;
454 MonoInst *arg2 = *arg2_ref;
456 if (cfg->r4fp &&
457 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
458 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
459 MonoInst *conv;
461 /* Mixing r4/r8 is allowed by the spec */
462 if (arg1->type == STACK_R4) {
463 int dreg = alloc_freg (cfg);
465 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
466 conv->type = STACK_R8;
467 ins->sreg1 = dreg;
468 *arg1_ref = conv;
470 if (arg2->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
474 conv->type = STACK_R8;
475 ins->sreg2 = dreg;
476 *arg2_ref = conv;
480 #if SIZEOF_REGISTER == 8
481 /* FIXME: Need to add many more cases */
482 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
483 MonoInst *widen;
485 int dr = alloc_preg (cfg);
486 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
487 (ins)->sreg2 = widen->dreg;
489 #endif
/* Pop two stack values, emit the typed binary op, push the result. */
#define ADD_BINOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	sp -= 2; \
	ins->sreg1 = sp [0]->dreg; \
	ins->sreg2 = sp [1]->dreg; \
	type_from_op (cfg, ins, sp [0], sp [1]); \
	CHECK_TYPE (ins); \
	/* Have to insert a widening op */ \
	add_widen_op (cfg, ins, &sp [0], &sp [1]); \
	ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	*sp++ = mono_decompose_opcode ((cfg), (ins)); \
} while (0)

/* Pop one stack value, emit the typed unary op, push the result. */
#define ADD_UNOP(op) do { \
	MONO_INST_NEW (cfg, ins, (op)); \
	sp--; \
	ins->sreg1 = sp [0]->dreg; \
	type_from_op (cfg, ins, sp [0], NULL); \
	CHECK_TYPE (ins); \
	(ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
	MONO_ADD_INS ((cfg)->cbb, (ins)); \
	*sp++ = mono_decompose_opcode (cfg, ins); \
} while (0)

/* Pop two stack values, emit a compare + conditional branch and link both targets. */
#define ADD_BINCOND(next_block) do { \
	MonoInst *cmp; \
	sp -= 2; \
	MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
	cmp->sreg1 = sp [0]->dreg; \
	cmp->sreg2 = sp [1]->dreg; \
	type_from_op (cfg, cmp, sp [0], sp [1]); \
	CHECK_TYPE (cmp); \
	add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
	type_from_op (cfg, ins, sp [0], sp [1]); \
	ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
	GET_BBLOCK (cfg, tblock, target); \
	link_bblock (cfg, cfg->cbb, tblock); \
	ins->inst_true_bb = tblock; \
	if ((next_block)) { \
		link_bblock (cfg, cfg->cbb, (next_block)); \
		ins->inst_false_bb = (next_block); \
		start_new_bblock = 1; \
	} else { \
		GET_BBLOCK (cfg, tblock, ip); \
		link_bblock (cfg, cfg->cbb, tblock); \
		ins->inst_false_bb = tblock; \
		start_new_bblock = 2; \
	} \
	if (sp != stack_start) { \
		handle_stack_args (cfg, stack_start, sp - stack_start); \
		CHECK_UNVERIFIABLE (cfg); \
	} \
	MONO_ADD_INS (cfg->cbb, cmp); \
	MONO_ADD_INS (cfg->cbb, ins); \
} while (0)
549 /* *
550 * link_bblock: Links two basic blocks
552 * links two basic blocks in the control flow graph, the 'from'
553 * argument is the starting block and the 'to' argument is the block
554 * the control flow ends to after 'from'.
556 static void
557 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
559 MonoBasicBlock **newa;
560 int i, found;
562 #if 0
563 if (from->cil_code) {
564 if (to->cil_code)
565 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
566 else
567 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
568 } else {
569 if (to->cil_code)
570 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
571 else
572 printf ("edge from entry to exit\n");
574 #endif
576 found = FALSE;
577 for (i = 0; i < from->out_count; ++i) {
578 if (to == from->out_bb [i]) {
579 found = TRUE;
580 break;
583 if (!found) {
584 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
585 for (i = 0; i < from->out_count; ++i) {
586 newa [i] = from->out_bb [i];
588 newa [i] = to;
589 from->out_count++;
590 from->out_bb = newa;
593 found = FALSE;
594 for (i = 0; i < to->in_count; ++i) {
595 if (from == to->in_bb [i]) {
596 found = TRUE;
597 break;
600 if (!found) {
601 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
602 for (i = 0; i < to->in_count; ++i) {
603 newa [i] = to->in_bb [i];
605 newa [i] = from;
606 to->in_count++;
607 to->in_bb = newa;
611 void
612 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
614 link_bblock (cfg, from, to);
618 * mono_find_block_region:
620 * We mark each basic block with a region ID. We use that to avoid BB
621 * optimizations when blocks are in different regions.
623 * Returns:
624 * A region token that encodes where this region is, and information
625 * about the clause owner for this block.
627 * The region encodes the try/catch/filter clause that owns this block
628 * as well as the type. -1 is a special value that represents a block
629 * that is in none of try/catch/filter.
631 static int
632 mono_find_block_region (MonoCompile *cfg, int offset)
634 MonoMethodHeader *header = cfg->header;
635 MonoExceptionClause *clause;
636 int i;
638 for (i = 0; i < header->num_clauses; ++i) {
639 clause = &header->clauses [i];
640 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
641 (offset < (clause->handler_offset)))
642 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
644 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
645 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
646 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
647 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
648 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
649 else
650 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
653 for (i = 0; i < header->num_clauses; ++i) {
654 clause = &header->clauses [i];
656 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
657 return ((i + 1) << 8) | clause->flags;
660 return -1;
663 static gboolean
664 ip_in_finally_clause (MonoCompile *cfg, int offset)
666 MonoMethodHeader *header = cfg->header;
667 MonoExceptionClause *clause;
668 int i;
670 for (i = 0; i < header->num_clauses; ++i) {
671 clause = &header->clauses [i];
672 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
673 continue;
675 if (MONO_OFFSET_IN_HANDLER (clause, offset))
676 return TRUE;
678 return FALSE;
681 static GList*
682 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
684 MonoMethodHeader *header = cfg->header;
685 MonoExceptionClause *clause;
686 int i;
687 GList *res = NULL;
689 for (i = 0; i < header->num_clauses; ++i) {
690 clause = &header->clauses [i];
691 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
692 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
693 if (clause->flags == type)
694 res = g_list_append (res, clause);
697 return res;
700 static void
701 mono_create_spvar_for_region (MonoCompile *cfg, int region)
703 MonoInst *var;
705 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
706 if (var)
707 return;
709 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
710 /* prevent it from being register allocated */
711 var->flags |= MONO_INST_VOLATILE;
713 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
716 MonoInst *
717 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
719 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
722 static MonoInst*
723 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
725 MonoInst *var;
727 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
728 if (var)
729 return var;
731 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
732 /* prevent it from being register allocated */
733 var->flags |= MONO_INST_VOLATILE;
735 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
737 return var;
741 * Returns the type used in the eval stack when @type is loaded.
742 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
744 void
745 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
747 MonoClass *klass;
749 type = mini_get_underlying_type (type);
750 inst->klass = klass = mono_class_from_mono_type (type);
751 if (type->byref) {
752 inst->type = STACK_MP;
753 return;
756 handle_enum:
757 switch (type->type) {
758 case MONO_TYPE_VOID:
759 inst->type = STACK_INV;
760 return;
761 case MONO_TYPE_I1:
762 case MONO_TYPE_U1:
763 case MONO_TYPE_I2:
764 case MONO_TYPE_U2:
765 case MONO_TYPE_I4:
766 case MONO_TYPE_U4:
767 inst->type = STACK_I4;
768 return;
769 case MONO_TYPE_I:
770 case MONO_TYPE_U:
771 case MONO_TYPE_PTR:
772 case MONO_TYPE_FNPTR:
773 inst->type = STACK_PTR;
774 return;
775 case MONO_TYPE_CLASS:
776 case MONO_TYPE_STRING:
777 case MONO_TYPE_OBJECT:
778 case MONO_TYPE_SZARRAY:
779 case MONO_TYPE_ARRAY:
780 inst->type = STACK_OBJ;
781 return;
782 case MONO_TYPE_I8:
783 case MONO_TYPE_U8:
784 inst->type = STACK_I8;
785 return;
786 case MONO_TYPE_R4:
787 inst->type = cfg->r4_stack_type;
788 break;
789 case MONO_TYPE_R8:
790 inst->type = STACK_R8;
791 return;
792 case MONO_TYPE_VALUETYPE:
793 if (type->data.klass->enumtype) {
794 type = mono_class_enum_basetype (type->data.klass);
795 goto handle_enum;
796 } else {
797 inst->klass = klass;
798 inst->type = STACK_VTYPE;
799 return;
801 case MONO_TYPE_TYPEDBYREF:
802 inst->klass = mono_defaults.typed_reference_class;
803 inst->type = STACK_VTYPE;
804 return;
805 case MONO_TYPE_GENERICINST:
806 type = &type->data.generic_class->container_class->byval_arg;
807 goto handle_enum;
808 case MONO_TYPE_VAR:
809 case MONO_TYPE_MVAR:
810 g_assert (cfg->gshared);
811 if (mini_is_gsharedvt_type (type)) {
812 g_assert (cfg->gsharedvt);
813 inst->type = STACK_VTYPE;
814 } else {
815 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
817 return;
818 default:
819 g_error ("unknown type 0x%02x in eval stack type", type->type);
824 * The following tables are used to quickly validate the IL code in type_from_op ().
826 static const char
827 bin_num_table [STACK_MAX] [STACK_MAX] = {
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
833 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
839 static const char
840 neg_table [] = {
841 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
844 /* reduce the size of this table */
845 static const char
846 bin_int_table [STACK_MAX] [STACK_MAX] = {
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
849 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
850 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
857 static const char
858 bin_comp_table [STACK_MAX] [STACK_MAX] = {
859 /* Inv i L p F & O vt r4 */
860 {0},
861 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
862 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
863 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
864 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
865 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
866 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
867 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
871 /* reduce the size of this table */
872 static const char
873 shift_table [STACK_MAX] [STACK_MAX] = {
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
876 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
877 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
885 * Tables to map from the non-specific opcode to the matching
886 * type-specific opcode.
888 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
889 static const guint16
890 binops_op_map [STACK_MAX] = {
891 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
894 /* handles from CEE_NEG to CEE_CONV_U8 */
895 static const guint16
896 unops_op_map [STACK_MAX] = {
897 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
900 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
901 static const guint16
902 ovfops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
906 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
907 static const guint16
908 ovf2ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
912 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
913 static const guint16
914 ovf3ops_op_map [STACK_MAX] = {
915 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
918 /* handles from CEE_BEQ to CEE_BLT_UN */
919 static const guint16
920 beqops_op_map [STACK_MAX] = {
921 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
924 /* handles from CEE_CEQ to CEE_CLT_UN */
925 static const guint16
926 ceqops_op_map [STACK_MAX] = {
927 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
931 * Sets ins->type (the type on the eval stack) according to the
932 * type of the opcode and the arguments to it.
933 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
935 * FIXME: this function sets ins->type unconditionally in some cases, but
936 * it should set it to invalid for some types (a conv.x on an object)
938 static void
939 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
941 switch (ins->opcode) {
942 /* binops */
943 case CEE_ADD:
944 case CEE_SUB:
945 case CEE_MUL:
946 case CEE_DIV:
947 case CEE_REM:
948 /* FIXME: check unverifiable args for STACK_MP */
949 ins->type = bin_num_table [src1->type] [src2->type];
950 ins->opcode += binops_op_map [ins->type];
951 break;
952 case CEE_DIV_UN:
953 case CEE_REM_UN:
954 case CEE_AND:
955 case CEE_OR:
956 case CEE_XOR:
957 ins->type = bin_int_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
959 break;
960 case CEE_SHL:
961 case CEE_SHR:
962 case CEE_SHR_UN:
963 ins->type = shift_table [src1->type] [src2->type];
964 ins->opcode += binops_op_map [ins->type];
965 break;
966 case OP_COMPARE:
967 case OP_LCOMPARE:
968 case OP_ICOMPARE:
969 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
970 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
971 ins->opcode = OP_LCOMPARE;
972 else if (src1->type == STACK_R4)
973 ins->opcode = OP_RCOMPARE;
974 else if (src1->type == STACK_R8)
975 ins->opcode = OP_FCOMPARE;
976 else
977 ins->opcode = OP_ICOMPARE;
978 break;
979 case OP_ICOMPARE_IMM:
980 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
981 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
982 ins->opcode = OP_LCOMPARE_IMM;
983 break;
984 case CEE_BEQ:
985 case CEE_BGE:
986 case CEE_BGT:
987 case CEE_BLE:
988 case CEE_BLT:
989 case CEE_BNE_UN:
990 case CEE_BGE_UN:
991 case CEE_BGT_UN:
992 case CEE_BLE_UN:
993 case CEE_BLT_UN:
994 ins->opcode += beqops_op_map [src1->type];
995 break;
996 case OP_CEQ:
997 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
998 ins->opcode += ceqops_op_map [src1->type];
999 break;
1000 case OP_CGT:
1001 case OP_CGT_UN:
1002 case OP_CLT:
1003 case OP_CLT_UN:
1004 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1005 ins->opcode += ceqops_op_map [src1->type];
1006 break;
1007 /* unops */
1008 case CEE_NEG:
1009 ins->type = neg_table [src1->type];
1010 ins->opcode += unops_op_map [ins->type];
1011 break;
1012 case CEE_NOT:
1013 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1014 ins->type = src1->type;
1015 else
1016 ins->type = STACK_INV;
1017 ins->opcode += unops_op_map [ins->type];
1018 break;
1019 case CEE_CONV_I1:
1020 case CEE_CONV_I2:
1021 case CEE_CONV_I4:
1022 case CEE_CONV_U4:
1023 ins->type = STACK_I4;
1024 ins->opcode += unops_op_map [src1->type];
1025 break;
1026 case CEE_CONV_R_UN:
1027 ins->type = STACK_R8;
1028 switch (src1->type) {
1029 case STACK_I4:
1030 case STACK_PTR:
1031 ins->opcode = OP_ICONV_TO_R_UN;
1032 break;
1033 case STACK_I8:
1034 ins->opcode = OP_LCONV_TO_R_UN;
1035 break;
1037 break;
1038 case CEE_CONV_OVF_I1:
1039 case CEE_CONV_OVF_U1:
1040 case CEE_CONV_OVF_I2:
1041 case CEE_CONV_OVF_U2:
1042 case CEE_CONV_OVF_I4:
1043 case CEE_CONV_OVF_U4:
1044 ins->type = STACK_I4;
1045 ins->opcode += ovf3ops_op_map [src1->type];
1046 break;
1047 case CEE_CONV_OVF_I_UN:
1048 case CEE_CONV_OVF_U_UN:
1049 ins->type = STACK_PTR;
1050 ins->opcode += ovf2ops_op_map [src1->type];
1051 break;
1052 case CEE_CONV_OVF_I1_UN:
1053 case CEE_CONV_OVF_I2_UN:
1054 case CEE_CONV_OVF_I4_UN:
1055 case CEE_CONV_OVF_U1_UN:
1056 case CEE_CONV_OVF_U2_UN:
1057 case CEE_CONV_OVF_U4_UN:
1058 ins->type = STACK_I4;
1059 ins->opcode += ovf2ops_op_map [src1->type];
1060 break;
1061 case CEE_CONV_U:
1062 ins->type = STACK_PTR;
1063 switch (src1->type) {
1064 case STACK_I4:
1065 ins->opcode = OP_ICONV_TO_U;
1066 break;
1067 case STACK_PTR:
1068 case STACK_MP:
1069 #if SIZEOF_VOID_P == 8
1070 ins->opcode = OP_LCONV_TO_U;
1071 #else
1072 ins->opcode = OP_MOVE;
1073 #endif
1074 break;
1075 case STACK_I8:
1076 ins->opcode = OP_LCONV_TO_U;
1077 break;
1078 case STACK_R8:
1079 ins->opcode = OP_FCONV_TO_U;
1080 break;
1082 break;
1083 case CEE_CONV_I8:
1084 case CEE_CONV_U8:
1085 ins->type = STACK_I8;
1086 ins->opcode += unops_op_map [src1->type];
1087 break;
1088 case CEE_CONV_OVF_I8:
1089 case CEE_CONV_OVF_U8:
1090 ins->type = STACK_I8;
1091 ins->opcode += ovf3ops_op_map [src1->type];
1092 break;
1093 case CEE_CONV_OVF_U8_UN:
1094 case CEE_CONV_OVF_I8_UN:
1095 ins->type = STACK_I8;
1096 ins->opcode += ovf2ops_op_map [src1->type];
1097 break;
1098 case CEE_CONV_R4:
1099 ins->type = cfg->r4_stack_type;
1100 ins->opcode += unops_op_map [src1->type];
1101 break;
1102 case CEE_CONV_R8:
1103 ins->type = STACK_R8;
1104 ins->opcode += unops_op_map [src1->type];
1105 break;
1106 case OP_CKFINITE:
1107 ins->type = STACK_R8;
1108 break;
1109 case CEE_CONV_U2:
1110 case CEE_CONV_U1:
1111 ins->type = STACK_I4;
1112 ins->opcode += ovfops_op_map [src1->type];
1113 break;
1114 case CEE_CONV_I:
1115 case CEE_CONV_OVF_I:
1116 case CEE_CONV_OVF_U:
1117 ins->type = STACK_PTR;
1118 ins->opcode += ovfops_op_map [src1->type];
1119 break;
1120 case CEE_ADD_OVF:
1121 case CEE_ADD_OVF_UN:
1122 case CEE_MUL_OVF:
1123 case CEE_MUL_OVF_UN:
1124 case CEE_SUB_OVF:
1125 case CEE_SUB_OVF_UN:
1126 ins->type = bin_num_table [src1->type] [src2->type];
1127 ins->opcode += ovfops_op_map [src1->type];
1128 if (ins->type == STACK_R8)
1129 ins->type = STACK_INV;
1130 break;
1131 case OP_LOAD_MEMBASE:
1132 ins->type = STACK_PTR;
1133 break;
1134 case OP_LOADI1_MEMBASE:
1135 case OP_LOADU1_MEMBASE:
1136 case OP_LOADI2_MEMBASE:
1137 case OP_LOADU2_MEMBASE:
1138 case OP_LOADI4_MEMBASE:
1139 case OP_LOADU4_MEMBASE:
1140 ins->type = STACK_PTR;
1141 break;
1142 case OP_LOADI8_MEMBASE:
1143 ins->type = STACK_I8;
1144 break;
1145 case OP_LOADR4_MEMBASE:
1146 ins->type = cfg->r4_stack_type;
1147 break;
1148 case OP_LOADR8_MEMBASE:
1149 ins->type = STACK_R8;
1150 break;
1151 default:
1152 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1153 break;
1156 if (ins->type == STACK_MP)
1157 ins->klass = mono_defaults.object_class;
/*
 * NOTE(review): lookup table of evaluation-stack types (STACK_*);
 * presumably indexed by CEE_LDIND_* opcode offsets (i1..ref) — the
 * callers are outside this chunk, verify against them.
 */
1160 static const char
1161 ldind_type [] = {
1162 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/*
 * NOTE(review): everything from here to the matching #endif is dead code,
 * compiled out with "#if 0". Kept verbatim for reference.
 */
1165 #if 0
1167 static const char
1168 param_table [STACK_MAX] [STACK_MAX] = {
1169 {0},
/* Returns 1 if the values in ARGS are compatible with signature SIG, 0 otherwise. */
1172 static int
1173 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1175 int i;
1177 if (sig->hasthis) {
1178 switch (args->type) {
1179 case STACK_I4:
1180 case STACK_I8:
1181 case STACK_R8:
1182 case STACK_VTYPE:
1183 case STACK_INV:
1184 return 0;
1186 args++;
1188 for (i = 0; i < sig->param_count; ++i) {
1189 switch (args [i].type) {
1190 case STACK_INV:
1191 return 0;
1192 case STACK_MP:
/* managed pointers are only compatible with byref parameters */
1193 if (!sig->params [i]->byref)
1194 return 0;
1195 continue;
1196 case STACK_OBJ:
1197 if (sig->params [i]->byref)
1198 return 0;
1199 switch (sig->params [i]->type) {
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
1205 break;
1206 default:
1207 return 0;
1209 continue;
1210 case STACK_R8:
1211 if (sig->params [i]->byref)
1212 return 0;
1213 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1214 return 0;
1215 continue;
1216 case STACK_PTR:
1217 case STACK_I4:
1218 case STACK_I8:
1219 case STACK_VTYPE:
1220 break;
1222 /*if (!param_table [args [i].type] [sig->params [i]->type])
1223 return 0;*/
1225 return 1;
1227 #endif
1230 * When we need a pointer to the current domain many times in a method, we
1231 * call mono_domain_get() once and we store the result in a local variable.
1232 * This function returns the variable that represents the MonoDomain*.
1234 inline static MonoInst *
1235 mono_get_domainvar (MonoCompile *cfg)
1237 if (!cfg->domainvar)
1238 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1239 return cfg->domainvar;
1243 * The got_var contains the address of the Global Offset Table when AOT
1244 * compiling.
1246 MonoInst *
1247 mono_get_got_var (MonoCompile *cfg)
1249 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1250 return NULL;
1251 if (!cfg->got_var) {
1252 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1254 return cfg->got_var;
1257 static MonoInst *
1258 mono_get_vtable_var (MonoCompile *cfg)
1260 g_assert (cfg->gshared);
1262 if (!cfg->rgctx_var) {
1263 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1264 /* force the var to be stack allocated */
1265 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1268 return cfg->rgctx_var;
1271 static MonoType*
1272 type_from_stack_type (MonoInst *ins) {
1273 switch (ins->type) {
1274 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1275 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1276 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1277 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1278 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1279 case STACK_MP:
1280 return &ins->klass->this_arg;
1281 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1282 case STACK_VTYPE: return &ins->klass->byval_arg;
1283 default:
1284 g_error ("stack type %d to monotype not handled\n", ins->type);
1286 return NULL;
1289 static G_GNUC_UNUSED int
1290 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1292 t = mono_type_get_underlying_type (t);
1293 switch (t->type) {
1294 case MONO_TYPE_I1:
1295 case MONO_TYPE_U1:
1296 case MONO_TYPE_I2:
1297 case MONO_TYPE_U2:
1298 case MONO_TYPE_I4:
1299 case MONO_TYPE_U4:
1300 return STACK_I4;
1301 case MONO_TYPE_I:
1302 case MONO_TYPE_U:
1303 case MONO_TYPE_PTR:
1304 case MONO_TYPE_FNPTR:
1305 return STACK_PTR;
1306 case MONO_TYPE_CLASS:
1307 case MONO_TYPE_STRING:
1308 case MONO_TYPE_OBJECT:
1309 case MONO_TYPE_SZARRAY:
1310 case MONO_TYPE_ARRAY:
1311 return STACK_OBJ;
1312 case MONO_TYPE_I8:
1313 case MONO_TYPE_U8:
1314 return STACK_I8;
1315 case MONO_TYPE_R4:
1316 return cfg->r4_stack_type;
1317 case MONO_TYPE_R8:
1318 return STACK_R8;
1319 case MONO_TYPE_VALUETYPE:
1320 case MONO_TYPE_TYPEDBYREF:
1321 return STACK_VTYPE;
1322 case MONO_TYPE_GENERICINST:
1323 if (mono_type_generic_inst_is_valuetype (t))
1324 return STACK_VTYPE;
1325 else
1326 return STACK_OBJ;
1327 break;
1328 default:
1329 g_assert_not_reached ();
1332 return -1;
1335 static MonoClass*
1336 array_access_to_klass (int opcode)
1338 switch (opcode) {
1339 case CEE_LDELEM_U1:
1340 return mono_defaults.byte_class;
1341 case CEE_LDELEM_U2:
1342 return mono_defaults.uint16_class;
1343 case CEE_LDELEM_I:
1344 case CEE_STELEM_I:
1345 return mono_defaults.int_class;
1346 case CEE_LDELEM_I1:
1347 case CEE_STELEM_I1:
1348 return mono_defaults.sbyte_class;
1349 case CEE_LDELEM_I2:
1350 case CEE_STELEM_I2:
1351 return mono_defaults.int16_class;
1352 case CEE_LDELEM_I4:
1353 case CEE_STELEM_I4:
1354 return mono_defaults.int32_class;
1355 case CEE_LDELEM_U4:
1356 return mono_defaults.uint32_class;
1357 case CEE_LDELEM_I8:
1358 case CEE_STELEM_I8:
1359 return mono_defaults.int64_class;
1360 case CEE_LDELEM_R4:
1361 case CEE_STELEM_R4:
1362 return mono_defaults.single_class;
1363 case CEE_LDELEM_R8:
1364 case CEE_STELEM_R8:
1365 return mono_defaults.double_class;
1366 case CEE_LDELEM_REF:
1367 case CEE_STELEM_REF:
1368 return mono_defaults.object_class;
1369 default:
1370 g_assert_not_reached ();
1372 return NULL;
1376 * We try to share variables when possible
1378 static MonoInst *
1379 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1381 MonoInst *res;
1382 int pos, vnum;
1384 /* inlining can result in deeper stacks */
1385 if (slot >= cfg->header->max_stack)
1386 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1388 pos = ins->type - 1 + slot * STACK_MAX;
1390 switch (ins->type) {
1391 case STACK_I4:
1392 case STACK_I8:
1393 case STACK_R8:
1394 case STACK_PTR:
1395 case STACK_MP:
1396 case STACK_OBJ:
1397 if ((vnum = cfg->intvars [pos]))
1398 return cfg->varinfo [vnum];
1399 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1400 cfg->intvars [pos] = res->inst_c0;
1401 break;
1402 default:
1403 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1405 return res;
1408 static void
1409 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1412 * Don't use this if a generic_context is set, since that means AOT can't
1413 * look up the method using just the image+token.
1414 * table == 0 means this is a reference made from a wrapper.
1416 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1417 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1418 jump_info_token->image = image;
1419 jump_info_token->token = token;
1420 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1425 * This function is called to handle items that are left on the evaluation stack
1426 * at basic block boundaries. What happens is that we save the values to local variables
1427 * and we reload them later when first entering the target basic block (with the
1428 * handle_loaded_temps () function).
1429 * A single joint point will use the same variables (stored in the array bb->out_stack or
1430 * bb->in_stack, if the basic block is before or after the joint point).
1432 * This function needs to be called _before_ emitting the last instruction of
1433 * the bb (i.e. before emitting a branch).
1434 * If the stack merge fails at a join point, cfg->unverifiable is set.
1436 static void
1437 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1439 int i, bindex;
1440 MonoBasicBlock *bb = cfg->cbb;
1441 MonoBasicBlock *outb;
1442 MonoInst *inst, **locals;
1443 gboolean found;
1445 if (!count)
1446 return;
1447 if (cfg->verbose_level > 3)
1448 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time this bblock exits with stack items: pick the variables forming its out_stack. */
1449 if (!bb->out_scount) {
1450 bb->out_scount = count;
1451 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an in_stack already assigned to one of the successors. */
1452 found = FALSE;
1453 for (i = 0; i < bb->out_count; ++i) {
1454 outb = bb->out_bb [i];
1455 /* exception handlers are linked, but they should not be considered for stack args */
1456 if (outb->flags & BB_EXCEPTION_HANDLER)
1457 continue;
1458 //printf (" %d", outb->block_num);
1459 if (outb->in_stack) {
1460 found = TRUE;
1461 bb->out_stack = outb->in_stack;
1462 break;
1465 //printf ("\n");
1466 if (!found) {
1467 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1468 for (i = 0; i < count; ++i) {
1470 * try to reuse temps already allocated for this purpouse, if they occupy the same
1471 * stack slot and if they are of the same type.
1472 * This won't cause conflicts since if 'local' is used to
1473 * store one of the values in the in_stack of a bblock, then
1474 * the same variable will be used for the same outgoing stack
1475 * slot as well.
1476 * This doesn't work when inlining methods, since the bblocks
1477 * in the inlined methods do not inherit their in_stack from
1478 * the bblock they are inlined to. See bug #58863 for an
1479 * example.
1481 if (cfg->inlined_method)
1482 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1483 else
1484 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet; a depth mismatch makes the method unverifiable. */
1489 for (i = 0; i < bb->out_count; ++i) {
1490 outb = bb->out_bb [i];
1491 /* exception handlers are linked, but they should not be considered for stack args */
1492 if (outb->flags & BB_EXCEPTION_HANDLER)
1493 continue;
1494 if (outb->in_scount) {
1495 if (outb->in_scount != bb->out_scount) {
1496 cfg->unverifiable = TRUE;
1497 return;
1499 continue; /* check they are the same locals */
1501 outb->in_scount = count;
1502 outb->in_stack = bb->out_stack;
/* Store each stack value to its shared temp and replace sp entries by the temps. */
1505 locals = bb->out_stack;
1506 cfg->cbb = bb;
1507 for (i = 0; i < count; ++i) {
1508 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1509 inst->cil_code = sp [i]->cil_code;
1510 sp [i] = locals [i];
1511 if (cfg->verbose_level > 3)
1512 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1516 * It is possible that the out bblocks already have in_stack assigned, and
1517 * the in_stacks differ. In this case, we will store to all the different
1518 * in_stacks.
1521 found = TRUE;
1522 bindex = 0;
1523 while (found) {
1524 /* Find a bblock which has a different in_stack */
1525 found = FALSE;
1526 while (bindex < bb->out_count) {
1527 outb = bb->out_bb [bindex];
1528 /* exception handlers are linked, but they should not be considered for stack args */
1529 if (outb->flags & BB_EXCEPTION_HANDLER) {
1530 bindex++;
1531 continue;
1533 if (outb->in_stack != locals) {
1534 for (i = 0; i < count; ++i) {
1535 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1536 inst->cil_code = sp [i]->cil_code;
1537 sp [i] = locals [i];
1538 if (cfg->verbose_level > 3)
1539 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1541 locals = outb->in_stack;
1542 found = TRUE;
1543 break;
1545 bindex ++;
1550 static MonoInst*
1551 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1553 MonoInst *ins;
1555 if (cfg->compile_aot) {
1556 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1557 } else {
1558 MonoJumpInfo ji;
1559 gpointer target;
1560 MonoError error;
1562 ji.type = patch_type;
1563 ji.data.target = data;
1564 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1565 mono_error_assert_ok (&error);
1567 EMIT_NEW_PCONST (cfg, ins, target);
1569 return ins;
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR which sets INTF_BIT_REG to a nonzero value iff the interface
 * bitmap located at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 */
1572 static void
1573 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1575 int ibitmap_reg = alloc_preg (cfg);
1576 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer the bit test to the mono_class_interface_match icall. */
1577 MonoInst *args [2];
1578 MonoInst *res, *ins;
1579 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1580 MONO_ADD_INS (cfg->cbb, ins);
1581 args [0] = ins;
1582 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1583 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1584 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1585 #else
1586 int ibitmap_byte_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1590 if (cfg->compile_aot) {
/* AOT: the interface id is only known at runtime, so compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR. */
1591 int iid_reg = alloc_preg (cfg);
1592 int shifted_iid_reg = alloc_preg (cfg);
1593 int ibitmap_byte_address_reg = alloc_preg (cfg);
1594 int masked_iid_reg = alloc_preg (cfg);
1595 int iid_one_bit_reg = alloc_preg (cfg);
1596 int iid_bit_reg = alloc_preg (cfg);
1597 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1600 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1602 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1604 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1605 } else {
/* JIT: interface_id is known now, so byte offset and bit mask are immediates. */
1606 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1609 #endif
1613 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1614 * stored in "klass_reg" implements the interface "klass".
1616 static void
1617 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1619 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1623 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1624 * stored in "vtable_reg" implements the interface "klass".
1626 static void
1627 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1629 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1633 * Emit code which checks whenever the interface id of @klass is smaller than
1634 * than the value given by max_iid_reg.
1636 static void
1637 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1638 MonoBasicBlock *false_target)
1640 if (cfg->compile_aot) {
1641 int iid_reg = alloc_preg (cfg);
1642 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1643 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1645 else
1646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1647 if (false_target)
1648 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1649 else
1650 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1653 /* Same as above, but obtains max_iid from a vtable */
1654 static void
1655 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1656 MonoBasicBlock *false_target)
1658 int max_iid_reg = alloc_preg (cfg);
1660 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1661 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1664 /* Same as above, but obtains max_iid from a klass */
1665 static void
1666 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1667 MonoBasicBlock *false_target)
1669 int max_iid_reg = alloc_preg (cfg);
1671 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1672 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style check of the class in KLASS_REG against KLASS using
 * the supertypes table: branch to TRUE_TARGET on a match, FALSE_TARGET when
 * the inheritance depth is too small. KLASS_INS, when non-NULL, supplies the
 * class to compare against at runtime (gshared); otherwise KLASS is used.
 */
1675 static void
1676 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1678 int idepth_reg = alloc_preg (cfg);
1679 int stypes_reg = alloc_preg (cfg);
1680 int stype = alloc_preg (cfg);
1682 mono_class_setup_supertypes (klass);
/* Only check idepth when it may exceed the statically sized part of the supertable. */
1684 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1685 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Compare supertypes [idepth - 1] against the expected class. */
1689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1691 if (klass_ins) {
1692 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1693 } else if (cfg->compile_aot) {
1694 int const_reg = alloc_preg (cfg);
1695 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1696 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1697 } else {
1698 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1703 static void
1704 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1706 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against the MonoVTable in VTABLE_REG: branch
 * to TRUE_TARGET when the interface bit for KLASS is set (or fall through and
 * throw InvalidCastException when it is not and no target is given);
 * FALSE_TARGET receives the out-of-range interface-id case.
 */
1709 static void
1710 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1712 int intf_reg = alloc_preg (cfg);
1714 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1715 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1717 if (true_target)
1718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1719 else
1720 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
1724 * Variant of the above that takes a register to the class, not the vtable.
 */
1726 static void
1727 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1729 int intf_bit_reg = alloc_preg (cfg);
/* Range-check the interface id, then test the interface bitmap bit. */
1731 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1732 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1734 if (true_target)
1735 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1736 else
1737 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1740 static inline void
1741 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1743 if (klass_inst) {
1744 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1745 } else {
1746 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1752 static inline void
1753 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1755 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1758 static inline void
1759 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1761 if (cfg->compile_aot) {
1762 int const_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1764 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1765 } else {
1766 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1771 static void
1772 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure. Array classes check the rank and then the
 * element (cast) class; all other classes are checked via the supertypes
 * table. Pass OBJ_REG == -1 to skip the SZARRAY bounds (vector) check.
 */
1774 static void
1775 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1777 if (klass->rank) {
1778 int rank_reg = alloc_preg (cfg);
1779 int eclass_reg = alloc_preg (cfg);
1781 g_assert (!klass_inst);
/* The rank must match exactly, then the element class is checked. */
1782 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1783 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1784 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1785 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1786 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1787 if (klass->cast_class == mono_defaults.object_class) {
1788 int parent_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1790 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1791 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1792 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1793 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1794 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1795 } else if (klass->cast_class == mono_defaults.enum_class) {
1796 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1797 } else if (mono_class_is_interface (klass->cast_class)) {
1798 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1799 } else {
1800 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1801 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1804 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1805 /* Check that the object is a vector too */
1806 int bounds_reg = alloc_preg (cfg);
1807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1809 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1811 } else {
/* Non-array: compare supertypes [idepth - 1] with the expected class. */
1812 int idepth_reg = alloc_preg (cfg);
1813 int stypes_reg = alloc_preg (cfg);
1814 int stype = alloc_preg (cfg);
1816 mono_class_setup_supertypes (klass);
1818 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1820 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1821 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1824 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1825 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1829 static void
1830 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1832 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit IR that stores VAL into SIZE bytes at DESTREG+OFFSET. Only VAL == 0
 * is supported (see the assert). Small sizes covered by ALIGN become a single
 * immediate store; otherwise a sequence of register stores of decreasing
 * width is emitted.
 */
1835 static void
1836 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1838 int val_reg;
1840 g_assert (val == 0);
1842 if (align == 0)
1843 align = 4;
/* Fast path: a single aligned store handles the whole region. */
1845 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1846 switch (size) {
1847 case 1:
1848 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1849 return;
1850 case 2:
1851 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1852 return;
1853 case 4:
1854 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1855 return;
1856 #if SIZEOF_REGISTER == 8
1857 case 8:
1858 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1859 return;
1860 #endif
1864 val_reg = alloc_preg (cfg);
1866 if (SIZEOF_REGISTER == 8)
1867 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1868 else
1869 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Under-aligned destination: fall back to byte stores. */
1871 if (align < 4) {
1872 /* This could be optimized further if neccesary */
1873 while (size >= 1) {
1874 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1875 offset += 1;
1876 size -= 1;
1878 return;
/* 64-bit targets that tolerate unaligned access: realign to 8 then use 8-byte stores. */
1881 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1882 if (offset % 8) {
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1884 offset += 4;
1885 size -= 4;
1887 while (size >= 8) {
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1889 offset += 8;
1890 size -= 8;
/* Mop up the remainder with progressively narrower stores. */
1894 while (size >= 4) {
1895 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1896 offset += 4;
1897 size -= 4;
1899 while (size >= 2) {
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1901 offset += 2;
1902 size -= 2;
1904 while (size >= 1) {
1905 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1906 offset += 1;
1907 size -= 1;
/*
 * mini_emit_memcpy:
 *
 *   Emit an inline copy of SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET
 * as a sequence of load/store pairs of decreasing width. ALIGN < 4 forces
 * byte-wise copying; ALIGN == 0 is treated as 4.
 */
1911 void
1912 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1914 int cur_reg;
1916 if (align == 0)
1917 align = 4;
1919 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1920 g_assert (size < 10000);
/* Under-aligned regions are copied one byte at a time. */
1922 if (align < 4) {
1923 /* This could be optimized further if neccesary */
1924 while (size >= 1) {
1925 cur_reg = alloc_preg (cfg);
1926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1928 doffset += 1;
1929 soffset += 1;
1930 size -= 1;
/* 64-bit targets that tolerate unaligned access can use 8-byte chunks. */
1934 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1935 while (size >= 8) {
1936 cur_reg = alloc_preg (cfg);
1937 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1938 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1939 doffset += 8;
1940 soffset += 8;
1941 size -= 8;
/* Mop up the remainder with progressively narrower copies. */
1945 while (size >= 4) {
1946 cur_reg = alloc_preg (cfg);
1947 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1948 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1949 doffset += 4;
1950 soffset += 4;
1951 size -= 4;
1953 while (size >= 2) {
1954 cur_reg = alloc_preg (cfg);
1955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1956 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1957 doffset += 2;
1958 soffset += 2;
1959 size -= 2;
1961 while (size >= 1) {
1962 cur_reg = alloc_preg (cfg);
1963 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1965 doffset += 1;
1966 soffset += 1;
1967 size -= 1;
1971 static void
1972 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1974 MonoInst *ins, *c;
1976 if (cfg->compile_aot) {
1977 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1978 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1979 ins->sreg1 = sreg1;
1980 ins->sreg2 = c->dreg;
1981 MONO_ADD_INS (cfg->cbb, ins);
1982 } else {
1983 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1984 ins->sreg1 = sreg1;
1985 ins->inst_offset = mini_get_tls_offset (tls_key);
1986 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 *
 *   Emit IR to push the current LMF onto the LMF stack.
 */
static void
emit_push_lmf (MonoCompile *cfg)
{
	/*
	 * Emit IR to push the LMF:
	 * lmf_addr = <lmf_addr from tls>
	 * lmf->lmf_addr = lmf_addr
	 * lmf->prev_lmf = *lmf_addr
	 * *lmf_addr = lmf
	 */
	int lmf_reg, prev_lmf_reg;
	MonoInst *ins, *lmf_ins;

	/* Nothing to do unless this method manages the LMF in IR. */
	if (!cfg->lmf_ir)
		return;

	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
		/* Fast path: the LMF itself lives in a TLS slot, no lmf_addr needed. */
		/* Load current lmf */
		lmf_ins = mono_get_lmf_intrinsic (cfg);
		g_assert (lmf_ins);
		MONO_ADD_INS (cfg->cbb, lmf_ins);
		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
		lmf_reg = ins->dreg;
		/* Save previous_lmf */
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
		/* Set new LMF */
		emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
	} else {
		/*
		 * Store lmf_addr in a variable, so it can be allocated to a global register.
		 */
		if (!cfg->lmf_addr_var)
			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);

#ifdef HOST_WIN32
		/* On Windows, derive lmf_addr from the jit_tls structure when the
		 * intrinsic is available, otherwise fall back to an icall. */
		ins = mono_get_jit_tls_intrinsic (cfg);
		if (ins) {
			int jit_tls_dreg = ins->dreg;

			MONO_ADD_INS (cfg->cbb, ins);
			lmf_reg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
		} else {
			lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
		}
#else
		lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
		if (lmf_ins) {
			MONO_ADD_INS (cfg->cbb, lmf_ins);
		} else {
#ifdef TARGET_IOS
			MonoInst *args [16], *jit_tls_ins, *ins;

			/* Inline mono_get_lmf_addr () */
			/* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */

			/* Load mono_jit_tls_id */
			if (cfg->compile_aot)
				EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
			else
				EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
			/* call pthread_getspecific () */
			jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
			/* lmf_addr = &jit_tls->lmf */
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
			lmf_ins = ins;
#else
			lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
#endif
		}
#endif
		/* Route whichever instruction produced lmf_addr into the shared variable. */
		lmf_ins->dreg = cfg->lmf_addr_var->dreg;

		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
		lmf_reg = ins->dreg;

		prev_lmf_reg = alloc_preg (cfg);
		/* Save previous_lmf */
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
		/* Set new lmf */
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
	}
}
/*
 * emit_pop_lmf:
 *
 *   Emit IR to pop the current LMF from the LMF stack.
 */
static void
emit_pop_lmf (MonoCompile *cfg)
{
	int lmf_reg, lmf_addr_reg, prev_lmf_reg;
	MonoInst *ins;

	/* Nothing to do unless this method manages the LMF in IR. */
	if (!cfg->lmf_ir)
		return;

	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
	lmf_reg = ins->dreg;

	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
		/* Fast path: the LMF lives in TLS; restore previous_lmf directly. */
		/* Load previous_lmf */
		prev_lmf_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* Set new LMF */
		emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
	} else {
		/*
		 * Emit IR to pop the LMF:
		 * *(lmf->lmf_addr) = lmf->prev_lmf
		 */
		/* This could be called before emit_push_lmf () */
		if (!cfg->lmf_addr_var)
			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		lmf_addr_reg = cfg->lmf_addr_var->dreg;

		prev_lmf_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
	}
}
2119 static void
2120 emit_instrumentation_call (MonoCompile *cfg, void *func)
2122 MonoInst *iargs [1];
2125 * Avoid instrumenting inlined methods since it can
2126 * distort profiling results.
2128 if (cfg->method != cfg->current_method)
2129 return;
2131 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2132 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2133 mono_emit_jit_icall (cfg, func, iargs);
2137 static int
2138 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2140 handle_enum:
2141 type = mini_get_underlying_type (type);
2142 switch (type->type) {
2143 case MONO_TYPE_VOID:
2144 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2145 case MONO_TYPE_I1:
2146 case MONO_TYPE_U1:
2147 case MONO_TYPE_I2:
2148 case MONO_TYPE_U2:
2149 case MONO_TYPE_I4:
2150 case MONO_TYPE_U4:
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2152 case MONO_TYPE_I:
2153 case MONO_TYPE_U:
2154 case MONO_TYPE_PTR:
2155 case MONO_TYPE_FNPTR:
2156 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2157 case MONO_TYPE_CLASS:
2158 case MONO_TYPE_STRING:
2159 case MONO_TYPE_OBJECT:
2160 case MONO_TYPE_SZARRAY:
2161 case MONO_TYPE_ARRAY:
2162 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2163 case MONO_TYPE_I8:
2164 case MONO_TYPE_U8:
2165 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2166 case MONO_TYPE_R4:
2167 if (cfg->r4fp)
2168 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2169 else
2170 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2171 case MONO_TYPE_R8:
2172 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2173 case MONO_TYPE_VALUETYPE:
2174 if (type->data.klass->enumtype) {
2175 type = mono_class_enum_basetype (type->data.klass);
2176 goto handle_enum;
2177 } else
2178 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2179 case MONO_TYPE_TYPEDBYREF:
2180 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2181 case MONO_TYPE_GENERICINST:
2182 type = &type->data.generic_class->container_class->byval_arg;
2183 goto handle_enum;
2184 case MONO_TYPE_VAR:
2185 case MONO_TYPE_MVAR:
2186 /* gsharedvt */
2187 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2188 default:
2189 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2191 return -1;
/* TRUE if T's element type is a primitive scalar (boolean..u8, or native int/uint).
 * XXX this ignores if t is byref. */
#define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) \
	(((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || \
	 ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U))
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP) {
			/* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
			MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
			MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));

			/* if the target is native int& or same type */
			if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
				return 0;

			/* Both are primitive type byrefs and the source points to a larger type that the destination */
			if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
				mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
				return 0;
			return 1;
		}
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mini_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* Nothing can be stored into a void target. */
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		/*
		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
		 * in native int. (#688008).
		 */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
		/* r4_stack_type depends on whether the config keeps R4 values as R4 (r4fp). */
		if (arg->type != cfg->r4_stack_type)
			return 1;
		return 0;
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* Value types must match exactly (by class). */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			MonoClass *target_class;
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			target_class = mono_class_from_mono_type (target);
			/* The second cases is needed when doing partial sharing */
			if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* Unresolved type parameters only appear under generic sharing. */
		g_assert (cfg->gshared);
		if (mini_type_var_is_vt (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
		}
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
2335 * Prepare arguments for passing to a function call.
2336 * Return a non-zero value if the arguments can't be passed to the given
2337 * signature.
2338 * The type checks are not yet complete and some conversions may need
2339 * casts on 32 or 64 bit architectures.
2341 * FIXME: implement this using target_type_is_incompatible ()
2343 static int
2344 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2346 MonoType *simple_type;
2347 int i;
2349 if (sig->hasthis) {
2350 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2351 return 1;
2352 args++;
2354 for (i = 0; i < sig->param_count; ++i) {
2355 if (sig->params [i]->byref) {
2356 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2357 return 1;
2358 continue;
2360 simple_type = mini_get_underlying_type (sig->params [i]);
2361 handle_enum:
2362 switch (simple_type->type) {
2363 case MONO_TYPE_VOID:
2364 return 1;
2365 continue;
2366 case MONO_TYPE_I1:
2367 case MONO_TYPE_U1:
2368 case MONO_TYPE_I2:
2369 case MONO_TYPE_U2:
2370 case MONO_TYPE_I4:
2371 case MONO_TYPE_U4:
2372 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2373 return 1;
2374 continue;
2375 case MONO_TYPE_I:
2376 case MONO_TYPE_U:
2377 case MONO_TYPE_PTR:
2378 case MONO_TYPE_FNPTR:
2379 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2380 return 1;
2381 continue;
2382 case MONO_TYPE_CLASS:
2383 case MONO_TYPE_STRING:
2384 case MONO_TYPE_OBJECT:
2385 case MONO_TYPE_SZARRAY:
2386 case MONO_TYPE_ARRAY:
2387 if (args [i]->type != STACK_OBJ)
2388 return 1;
2389 continue;
2390 case MONO_TYPE_I8:
2391 case MONO_TYPE_U8:
2392 if (args [i]->type != STACK_I8)
2393 return 1;
2394 continue;
2395 case MONO_TYPE_R4:
2396 if (args [i]->type != cfg->r4_stack_type)
2397 return 1;
2398 continue;
2399 case MONO_TYPE_R8:
2400 if (args [i]->type != STACK_R8)
2401 return 1;
2402 continue;
2403 case MONO_TYPE_VALUETYPE:
2404 if (simple_type->data.klass->enumtype) {
2405 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2406 goto handle_enum;
2408 if (args [i]->type != STACK_VTYPE)
2409 return 1;
2410 continue;
2411 case MONO_TYPE_TYPEDBYREF:
2412 if (args [i]->type != STACK_VTYPE)
2413 return 1;
2414 continue;
2415 case MONO_TYPE_GENERICINST:
2416 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2417 goto handle_enum;
2418 case MONO_TYPE_VAR:
2419 case MONO_TYPE_MVAR:
2420 /* gsharedvt */
2421 if (args [i]->type != STACK_VTYPE)
2422 return 1;
2423 continue;
2424 default:
2425 g_error ("unknown type 0x%02x in check_call_signature",
2426 simple_type->type);
2429 return 0;
2432 static int
2433 callvirt_to_call (int opcode)
2435 switch (opcode) {
2436 case OP_CALL_MEMBASE:
2437 return OP_CALL;
2438 case OP_VOIDCALL_MEMBASE:
2439 return OP_VOIDCALL;
2440 case OP_FCALL_MEMBASE:
2441 return OP_FCALL;
2442 case OP_RCALL_MEMBASE:
2443 return OP_RCALL;
2444 case OP_VCALL_MEMBASE:
2445 return OP_VCALL;
2446 case OP_LCALL_MEMBASE:
2447 return OP_LCALL;
2448 default:
2449 g_assert_not_reached ();
2452 return -1;
2455 static int
2456 callvirt_to_call_reg (int opcode)
2458 switch (opcode) {
2459 case OP_CALL_MEMBASE:
2460 return OP_CALL_REG;
2461 case OP_VOIDCALL_MEMBASE:
2462 return OP_VOIDCALL_REG;
2463 case OP_FCALL_MEMBASE:
2464 return OP_FCALL_REG;
2465 case OP_RCALL_MEMBASE:
2466 return OP_RCALL_REG;
2467 case OP_VCALL_MEMBASE:
2468 return OP_VCALL_REG;
2469 case OP_LCALL_MEMBASE:
2470 return OP_LCALL_REG;
2471 default:
2472 g_assert_not_reached ();
2475 return -1;
2478 /* Either METHOD or IMT_ARG needs to be set */
2479 static void
2480 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2482 int method_reg;
2484 if (COMPILE_LLVM (cfg)) {
2485 if (imt_arg) {
2486 method_reg = alloc_preg (cfg);
2487 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2488 } else {
2489 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2490 method_reg = ins->dreg;
2493 #ifdef ENABLE_LLVM
2494 call->imt_arg_reg = method_reg;
2495 #endif
2496 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2497 return;
2500 if (imt_arg) {
2501 method_reg = alloc_preg (cfg);
2502 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2503 } else {
2504 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2505 method_reg = ins->dreg;
2508 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2511 static MonoJumpInfo *
2512 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2514 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2516 ji->ip.i = ip;
2517 ji->type = type;
2518 ji->data.target = target;
2520 return ji;
2523 static int
2524 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2526 if (cfg->gshared)
2527 return mono_class_check_context_used (klass);
2528 else
2529 return 0;
2532 static int
2533 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2535 if (cfg->gshared)
2536 return mono_method_check_context_used (method);
2537 else
2538 return 0;
/*
 * check_method_sharing:
 *
 *   Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
 */
static void
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
{
	gboolean pass_vtable = FALSE;
	gboolean pass_mrgctx = FALSE;

	/* A vtable is only ever needed for static/valuetype methods of generic classes. */
	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
		(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
		gboolean sharable = FALSE;

		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
			sharable = TRUE;

		/*
		 * Pass vtable iff target method might
		 * be shared, which means that sharing
		 * is enabled for its class and its
		 * context is sharable (and it's not a
		 * generic method).
		 */
		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
			pass_vtable = TRUE;
	}

	/* Generic methods (method_inst != NULL) take an mrgctx instead of a vtable. */
	if (mini_method_get_context (cmethod) &&
		mini_method_get_context (cmethod)->method_inst) {
		g_assert (!pass_vtable);

		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
			pass_mrgctx = TRUE;
		} else {
			/* gsharedvt signatures also need the mrgctx even when not fully sharable. */
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
				pass_mrgctx = TRUE;
		}
	}

	/* Both out parameters are optional. */
	if (out_pass_vtable)
		*out_pass_vtable = pass_vtable;
	if (out_pass_mrgctx)
		*out_pass_mrgctx = pass_mrgctx;
}
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * pick the right opcode (direct/indirect/virtual/tail), set up the return
 * value (including vtype returns via OP_OUTARG_VTRETADDR), and run the
 * arch-specific outgoing-argument lowering. The instruction is NOT added
 * to a basic block here; callers do that.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
{
	MonoType *sig_ret;
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	int i;
#endif

	/* llvm-only mode does not support tail calls. */
	if (cfg->llvm_only)
		tail = FALSE;

	if (tail) {
		/* The leave event must fire before control leaves this frame for good. */
		emit_instrumentation_call (cfg, mono_profiler_method_leave);

		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	} else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));

	call->args = args;
	call->signature = sig;
	call->rgctx_reg = rgctx;
	sig_ret = mini_get_underlying_type (sig->ret);

	type_to_eval_stack_type ((cfg), sig_ret, &call->inst);

	if (tail) {
		if (mini_type_is_vtype (sig_ret)) {
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (mini_type_is_vtype (sig_ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig_ret))
		call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

	call->need_unbox_trampoline = unbox_trampoline;

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Track the largest outgoing-parameter area and note that the method makes calls. */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
2695 static void
2696 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2698 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2699 cfg->uses_rgctx_reg = TRUE;
2700 call->rgctx_reg = TRUE;
2701 #ifdef ENABLE_LLVM
2702 call->rgctx_arg_reg = rgctx_reg;
2703 #endif
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG. Optionally passes
 * an IMT argument and/or an rgctx argument. For pinvoke wrappers with callconv
 * checking enabled, brackets the call with SP capture/compare IR that throws
 * ExecutionEngineException on stack imbalance.
 */
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
	MonoCallInst *call;
	MonoInst *ins;
	int rgctx_reg = -1;
	gboolean check_sp = FALSE;

	/* Only check the SP for the actual pinvoke wrapper, not other native wrappers. */
	if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);

		if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
			check_sp = TRUE;
	}

	/* Copy the rgctx value into its own vreg before argument lowering. */
	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}

	if (check_sp) {
		/* Capture the SP before the call so it can be compared afterwards. */
		if (!cfg->stack_inbalance_var)
			cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);

		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);

	/* The target address goes in sreg1 of the *CALL_REG opcode. */
	call->inst.sreg1 = addr->dreg;

	if (imt_arg)
		emit_imt_argument (cfg, call, NULL, imt_arg);

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	if (check_sp) {
		int sp_reg;

		sp_reg = mono_alloc_preg (cfg);

		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = sp_reg;
		MONO_ADD_INS (cfg->cbb, ins);

		/* Restore the stack so we don't crash when throwing the exception */
		MONO_INST_NEW (cfg, ins, OP_SET_SP);
		ins->sreg1 = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);

		/* Throw if the callee left the SP different from the captured value. */
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
	}

	if (rgctx_arg)
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);

	return (MonoInst*)call;
}
2768 static MonoInst*
2769 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2771 static MonoInst*
2772 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2773 static MonoInst*
2774 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2776 static MonoInst*
2777 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2778 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2780 #ifndef DISABLE_REMOTING
2781 gboolean might_be_remote = FALSE;
2782 #endif
2783 gboolean virtual_ = this_ins != NULL;
2784 gboolean enable_for_aot = TRUE;
2785 int context_used;
2786 MonoCallInst *call;
2787 MonoInst *call_target = NULL;
2788 int rgctx_reg = 0;
2789 gboolean need_unbox_trampoline;
2791 if (!sig)
2792 sig = mono_method_signature (method);
2794 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2795 g_assert_not_reached ();
2797 if (rgctx_arg) {
2798 rgctx_reg = mono_alloc_preg (cfg);
2799 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2802 if (method->string_ctor) {
2803 /* Create the real signature */
2804 /* FIXME: Cache these */
2805 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2806 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2808 sig = ctor_sig;
2811 context_used = mini_method_check_context_used (cfg, method);
2813 #ifndef DISABLE_REMOTING
2814 might_be_remote = this_ins && sig->hasthis &&
2815 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2816 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2818 if (might_be_remote && context_used) {
2819 MonoInst *addr;
2821 g_assert (cfg->gshared);
2823 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2825 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2827 #endif
2829 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2830 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2832 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2834 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2836 #ifndef DISABLE_REMOTING
2837 if (might_be_remote)
2838 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2839 else
2840 #endif
2841 call->method = method;
2842 call->inst.flags |= MONO_INST_HAS_METHOD;
2843 call->inst.inst_left = this_ins;
2844 call->tail_call = tail;
2846 if (virtual_) {
2847 int vtable_reg, slot_reg, this_reg;
2848 int offset;
2850 this_reg = this_ins->dreg;
2852 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2853 MonoInst *dummy_use;
2855 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2857 /* Make a call to delegate->invoke_impl */
2858 call->inst.inst_basereg = this_reg;
2859 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2860 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2862 /* We must emit a dummy use here because the delegate trampoline will
2863 replace the 'this' argument with the delegate target making this activation
2864 no longer a root for the delegate.
2865 This is an issue for delegates that target collectible code such as dynamic
2866 methods of GC'able assemblies.
2868 For a test case look into #667921.
2870 FIXME: a dummy use is not the best way to do it as the local register allocator
2871 will put it on a caller save register and spil it around the call.
2872 Ideally, we would either put it on a callee save register or only do the store part.
2874 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2876 return (MonoInst*)call;
2879 if ((!cfg->compile_aot || enable_for_aot) &&
2880 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2881 (MONO_METHOD_IS_FINAL (method) &&
2882 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2883 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2885 * the method is not virtual, we just need to ensure this is not null
2886 * and then we can call the method directly.
2888 #ifndef DISABLE_REMOTING
2889 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2891 * The check above ensures method is not gshared, this is needed since
2892 * gshared methods can't have wrappers.
2894 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2896 #endif
2898 if (!method->string_ctor)
2899 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2901 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2902 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2904 * the method is virtual, but we can statically dispatch since either
2905 * it's class or the method itself are sealed.
2906 * But first we need to ensure it's not a null reference.
2908 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2910 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2911 } else if (call_target) {
2912 vtable_reg = alloc_preg (cfg);
2913 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2915 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2916 call->inst.sreg1 = call_target->dreg;
2917 call->inst.flags &= !MONO_INST_HAS_METHOD;
2918 } else {
2919 vtable_reg = alloc_preg (cfg);
2920 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2921 if (mono_class_is_interface (method->klass)) {
2922 guint32 imt_slot = mono_method_get_imt_slot (method);
2923 emit_imt_argument (cfg, call, call->method, imt_arg);
2924 slot_reg = vtable_reg;
2925 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2926 } else {
2927 slot_reg = vtable_reg;
2928 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2929 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2930 if (imt_arg) {
2931 g_assert (mono_method_signature (method)->generic_param_count);
2932 emit_imt_argument (cfg, call, call->method, imt_arg);
2936 call->inst.sreg1 = slot_reg;
2937 call->inst.inst_offset = offset;
2938 call->is_virtual = TRUE;
2942 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2944 if (rgctx_arg)
2945 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2947 return (MonoInst*)call;
2950 MonoInst*
2951 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2953 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
2956 MonoInst*
2957 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2958 MonoInst **args)
2960 MonoCallInst *call;
2962 g_assert (sig);
2964 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2965 call->fptr = func;
2967 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2969 return (MonoInst*)call;
2972 MonoInst*
2973 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2975 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2977 g_assert (info);
2979 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2983 * mono_emit_abs_call:
2985 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2987 inline static MonoInst*
2988 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2989 MonoMethodSignature *sig, MonoInst **args)
2991 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2992 MonoInst *ins;
2995 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2996 * handle it.
2998 if (cfg->abs_patches == NULL)
2999 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3000 g_hash_table_insert (cfg->abs_patches, ji, ji);
3001 ins = mono_emit_native_call (cfg, ji, sig, args);
3002 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
3003 return ins;
3006 static MonoMethodSignature*
3007 sig_to_rgctx_sig (MonoMethodSignature *sig)
3009 // FIXME: memory allocation
3010 MonoMethodSignature *res;
3011 int i;
3013 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
3014 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
3015 res->param_count = sig->param_count + 1;
3016 for (i = 0; i < sig->param_count; ++i)
3017 res->params [i] = sig->params [i];
3018 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
3019 return res;
/* Make an indirect call to FSIG passing an additional argument */
static MonoInst*
emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
{
	MonoMethodSignature *csig;
	MonoInst *args_buf [16];
	MonoInst **args;
	int i, pindex, tmp_reg;

	/* Make a call with an rgctx/extra arg */
	/* Need hasthis + param_count + 1 slots; use the stack buffer when it fits. */
	if (fsig->param_count + 2 < 16)
		args = args_buf;
	else
		args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
	/* Rebuild the argument array: this (if any), original params, extra arg. */
	pindex = 0;
	if (fsig->hasthis)
		args [pindex ++] = orig_args [0];
	for (i = 0; i < fsig->param_count; ++i)
		args [pindex ++] = orig_args [fsig->hasthis + i];
	/* Append the extra argument from ARG_REG as the last parameter. */
	tmp_reg = alloc_preg (cfg);
	EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
	/* csig is fsig extended with one trailing native-int parameter. */
	csig = sig_to_rgctx_sig (fsig);

	return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
}
3047 /* Emit an indirect call to the function descriptor ADDR */
3048 static MonoInst*
3049 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
3051 int addr_reg, arg_reg;
3052 MonoInst *call_target;
3054 g_assert (cfg->llvm_only);
3057 * addr points to a <addr, arg> pair, load both of them, and
3058 * make a call to addr, passing arg as an extra arg.
/* First word of the descriptor: the code address to call */
3060 addr_reg = alloc_preg (cfg);
3061 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
/* Second word: the extra argument to pass along */
3062 arg_reg = alloc_preg (cfg);
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
3065 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
3068 static gboolean
3069 direct_icalls_enabled (MonoCompile *cfg)
3071 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3072 #ifdef TARGET_AMD64
3073 if (cfg->compile_llvm && !cfg->llvm_only)
3074 return FALSE;
3075 #endif
3076 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
3077 return FALSE;
3078 return TRUE;
3081 MonoInst*
3082 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
3085 * Call the jit icall without a wrapper if possible.
3086 * The wrapper is needed for the following reasons:
3087 * - to handle exceptions thrown using mono_raise_exceptions () from the
3088 * icall function. The EH code needs the lmf frame pushed by the
3089 * wrapper to be able to unwind back to managed code.
3090 * - to be able to do stack walks for asynchronously suspended
3091 * threads when debugging.
3093 if (info->no_raise && direct_icalls_enabled (cfg)) {
3094 char *name;
3095 int costs;
/* Lazily create the wrapper once; the barrier publishes it to other threads */
3097 if (!info->wrapper_method) {
3098 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3099 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3100 g_free (name);
3101 mono_memory_barrier ();
3105 * Inline the wrapper method, which is basically a call to the C icall, and
3106 * an exception check.
3108 costs = inline_method (cfg, info->wrapper_method, NULL,
3109 args, NULL, il_offset, TRUE);
3110 g_assert (costs > 0);
/* The inlined result is returned in args [0], so the icall must not be void */
3111 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3113 return args [0];
3114 } else {
3115 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG to register size when
 * it is a small integer, since native/LLVM code might leave the upper bits
 * of the return register uninitialized. Returns INS or the widening op.
 */
3119 static MonoInst*
3120 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3122 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3123 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3124 int widen_op = -1;
3127 * Native code might return non register sized integers
3128 * without initializing the upper bits.
/* Pick the sign/zero extension matching the return type's load width */
3130 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3131 case OP_LOADI1_MEMBASE:
3132 widen_op = OP_ICONV_TO_I1;
3133 break;
3134 case OP_LOADU1_MEMBASE:
3135 widen_op = OP_ICONV_TO_U1;
3136 break;
3137 case OP_LOADI2_MEMBASE:
3138 widen_op = OP_ICONV_TO_I2;
3139 break;
3140 case OP_LOADU2_MEMBASE:
3141 widen_op = OP_ICONV_TO_U2;
3142 break;
3143 default:
3144 break;
3147 if (widen_op != -1) {
3148 int dreg = alloc_preg (cfg);
3149 MonoInst *widen;
3151 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Keep the stack type of the original call result */
3152 widen->type = ins->type;
3153 ins = widen;
3158 return ins;
3162 static void
3163 emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
3165 MonoInst *args [16];
3167 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
3168 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
3170 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
3173 static MonoMethod*
3174 get_memcpy_method (void)
3176 static MonoMethod *memcpy_method = NULL;
3177 if (!memcpy_method) {
3178 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3179 if (!memcpy_method)
3180 g_error ("Old corlib found. Install a new one");
3182 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Set one bit in WB_BITMAP per pointer-sized slot of KLASS (starting at
 * byte OFFSET) which holds a reference, recursing into value-type fields
 * that themselves contain references. Static fields are skipped.
 */
3185 static void
3186 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3188 MonoClassField *field;
3189 gpointer iter = NULL;
3191 while ((field = mono_class_get_fields (klass, &iter))) {
3192 int foffset;
3194 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
3195 continue;
/* Valuetype field offsets include the MonoObject header; strip it */
3196 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3197 if (mini_type_is_reference (mono_field_get_type (field))) {
/* References must be pointer-aligned for the bitmap encoding to work */
3198 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3199 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3200 } else {
3201 MonoClass *field_class = mono_class_from_mono_type (field->type);
3202 if (field_class->has_references)
3203 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Picks, in order:
 * a backend card-table opcode, inline card marking, or a call to the GC's
 * write barrier method. No-op when write barriers are disabled.
 */
3208 static void
3209 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3211 int card_table_shift_bits;
3212 gpointer card_table_mask;
3213 guint8 *card_table;
3214 MonoInst *dummy_use;
3215 int nursery_shift_bits;
3216 size_t nursery_size;
3218 if (!cfg->gen_write_barriers)
3219 return;
3221 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3223 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Fast path 1: a dedicated backend opcode (not available under AOT/LLVM) */
3225 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3226 MonoInst *wbarrier;
3228 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3229 wbarrier->sreg1 = ptr->dreg;
3230 wbarrier->sreg2 = value->dreg;
3231 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Fast path 2: mark the card byte inline (offset = ptr >> shift, then mask) */
3232 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3233 int offset_reg = alloc_preg (cfg);
3234 int card_reg;
3235 MonoInst *ins;
3237 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3238 if (card_table_mask)
3239 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3241 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3242 * IMM's larger than 32bits.
3244 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3245 card_reg = ins->dreg;
3247 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3248 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided write barrier method */
3249 } else {
3250 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3251 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive until the barrier has been emitted */
3254 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline value copy of KLASS (SIZE bytes, ALIGN alignment) from
 * iargs [1] to iargs [0] which also emits the needed GC write barriers for
 * reference slots. Returns FALSE if the copy can't be handled here (caller
 * must fall back); TRUE if code was emitted.
 */
3257 static gboolean
3258 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3260 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3261 unsigned need_wb = 0;
3263 if (align == 0)
3264 align = 4;
3266 /*types with references can't have alignment smaller than sizeof(void*) */
3267 if (align < SIZEOF_VOID_P)
3268 return FALSE;
3270 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3271 if (size > 32 * SIZEOF_VOID_P)
3272 return FALSE;
/* One bit per pointer-sized slot: which slots hold references */
3274 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3276 /* We don't unroll more than 5 stores to avoid code bloat. */
3277 if (size > 5 * SIZEOF_VOID_P) {
3278 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3279 size += (SIZEOF_VOID_P - 1);
3280 size &= ~(SIZEOF_VOID_P - 1);
/* Large copy: call the GC helper with the size and the wb bitmap */
3282 EMIT_NEW_ICONST (cfg, iargs [2], size);
3283 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3284 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3285 return TRUE;
/* Small copy: unroll pointer-sized load/store pairs, barrier where needed */
3288 destreg = iargs [0]->dreg;
3289 srcreg = iargs [1]->dreg;
3290 offset = 0;
3292 dest_ptr_reg = alloc_preg (cfg);
3293 tmp_reg = alloc_preg (cfg);
3295 /*tmp = dreg*/
3296 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3298 while (size >= SIZEOF_VOID_P) {
3299 MonoInst *load_inst;
3300 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3301 load_inst->dreg = tmp_reg;
3302 load_inst->inst_basereg = srcreg;
3303 load_inst->inst_offset = offset;
3304 MONO_ADD_INS (cfg->cbb, load_inst);
3306 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Low bit of need_wb tells whether the slot just stored is a reference */
3308 if (need_wb & 0x1)
3309 emit_write_barrier (cfg, iargs [0], load_inst);
3311 offset += SIZEOF_VOID_P;
3312 size -= SIZEOF_VOID_P;
3313 need_wb >>= 1;
3315 /*tmp += sizeof (void*)*/
3316 if (size >= SIZEOF_VOID_P) {
3317 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3318 MONO_ADD_INS (cfg->cbb, iargs [0]);
3322 /* Those cannot be references since size < sizeof (void*) */
3323 while (size >= 4) {
3324 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3325 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3326 offset += 4;
3327 size -= 4;
3330 while (size >= 2) {
3331 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3332 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3333 offset += 2;
3334 size -= 2;
3337 while (size >= 1) {
3338 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3339 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3340 offset += 1;
3341 size -= 1;
3344 return TRUE;
3348 * Emit code to copy a valuetype of type @klass whose address is stored in
3349 * @src->dreg to memory whose address is stored at @dest->dreg.
3351 void
3352 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3354 MonoInst *iargs [4];
3355 int n;
3356 guint32 align = 0;
3357 MonoMethod *memcpy_method;
3358 MonoInst *size_ins = NULL;
3359 MonoInst *memcpy_ins = NULL;
3361 g_assert (klass);
3362 if (cfg->gshared)
3363 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3366 * This check breaks with spilled vars... need to handle it during verification anyway.
3367 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime */
3370 if (mini_is_gsharedvt_klass (klass)) {
3371 g_assert (!native);
3372 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3373 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3376 if (native)
3377 n = mono_class_native_size (klass, &align);
3378 else
3379 n = mono_class_value_size (klass, &align);
3381 /* if native is true there should be no references in the struct */
3382 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3383 /* Avoid barriers when storing to the stack */
3384 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3385 (dest->opcode == OP_LDADDR))) {
3386 int context_used;
3388 iargs [0] = dest;
3389 iargs [1] = src;
3391 context_used = mini_class_check_context_used (cfg, klass);
3393 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3394 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3395 return;
3396 } else if (context_used) {
3397 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3398 } else {
3399 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3400 if (!cfg->compile_aot)
3401 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware copy through the runtime, class passed as third arg */
3404 if (size_ins)
3405 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3406 else
3407 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3408 return;
/* No barriers needed: inline memcpy for small fixed sizes, call otherwise */
3412 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3413 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3414 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3415 } else {
3416 iargs [0] = dest;
3417 iargs [1] = src;
3418 if (size_ins)
3419 iargs [2] = size_ins;
3420 else
3421 EMIT_NEW_ICONST (cfg, iargs [2], n);
3423 memcpy_method = get_memcpy_method ();
3424 if (memcpy_ins)
3425 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3426 else
3427 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3431 static MonoMethod*
3432 get_memset_method (void)
3434 static MonoMethod *memset_method = NULL;
3435 if (!memset_method) {
3436 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3437 if (!memset_method)
3438 g_error ("Old corlib found. Install a new one");
3440 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype KLASS at address DEST->dreg.
 * Uses an indirect bzero call for gsharedvt types, an inline memset for
 * small sizes, and a memset method call otherwise.
 */
3443 void
3444 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3446 MonoInst *iargs [3];
3447 int n;
3448 guint32 align;
3449 MonoMethod *memset_method;
3450 MonoInst *size_ins = NULL;
3451 MonoInst *bzero_ins = NULL;
3452 static MonoMethod *bzero_method;
3454 /* FIXME: Optimize this for the case when dest is an LDADDR */
3455 mono_class_init (klass);
/* gsharedvt: size and bzero routine are only known at runtime */
3456 if (mini_is_gsharedvt_klass (klass)) {
3457 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3458 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3459 if (!bzero_method)
3460 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3461 g_assert (bzero_method);
3462 iargs [0] = dest;
3463 iargs [1] = size_ins;
3464 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3465 return;
3468 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3470 n = mono_class_value_size (klass, &align);
/* Small fixed size: inline the memset */
3472 if (n <= sizeof (gpointer) * 8) {
3473 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3475 else {
3476 memset_method = get_memset_method ();
3477 iargs [0] = dest;
3478 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3479 EMIT_NEW_ICONST (cfg, iargs [2], n);
3480 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3485 * emit_get_rgctx:
3487 * Emit IR to return either the this pointer for instance method,
3488 * or the mrgctx for static methods.
3490 static MonoInst*
3491 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3493 MonoInst *this_ins = NULL;
3495 g_assert (cfg->gshared);
/* Instance method on a reference type: the rgctx is reached through 'this' */
3497 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3498 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3499 !method->klass->valuetype)
3500 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
/* Generic method: use the method rgctx stored in the vtable var */
3502 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3503 MonoInst *mrgctx_loc, *mrgctx_var;
3505 g_assert (!this_ins);
3506 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3508 mrgctx_loc = mono_get_vtable_var (cfg);
3509 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3511 return mrgctx_var;
/* Static method or valuetype: use the vtable var directly */
3512 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3513 MonoInst *vtable_loc, *vtable_var;
3515 g_assert (!this_ins);
3517 vtable_loc = mono_get_vtable_var (cfg);
3518 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* For inflated generic methods the var holds an mrgctx; load its vtable */
3520 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3521 MonoInst *mrgctx_var = vtable_var;
3522 int vtable_reg;
3524 vtable_reg = alloc_preg (cfg);
3525 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3526 vtable_var->type = STACK_PTR;
3529 return vtable_var;
/* Otherwise load the vtable from the 'this' object */
3530 } else {
3531 MonoInst *ins;
3532 int vtable_reg;
3534 vtable_reg = alloc_preg (cfg);
3535 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3536 return ins;
3540 static MonoJumpInfoRgctxEntry *
3541 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3543 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3544 res->method = method;
3545 res->in_mrgctx = in_mrgctx;
3546 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3547 res->data->type = patch_type;
3548 res->data->data.target = patch_data;
3549 res->info_type = info_type;
3551 return res;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline rgctx lookup for ENTRY (llvm-only mode): call the
 * mono_fill_method_rgctx/mono_fill_class_rgctx icall with RGCTX and the
 * slot index. The disabled code below is a former inline fastpath.
 */
3554 static inline MonoInst*
3555 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3557 MonoInst *args [16];
3558 MonoInst *call;
3560 // FIXME: No fastpath since the slot is not a compile time constant
3561 args [0] = rgctx;
3562 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3563 if (entry->in_mrgctx)
3564 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3565 else
3566 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3567 return call;
3568 #if 0
3570 * FIXME: This can be called during decompose, which is a problem since it creates
3571 * new bblocks.
3572 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3574 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3575 gboolean mrgctx;
3576 MonoBasicBlock *is_null_bb, *end_bb;
3577 MonoInst *res, *ins, *call;
3578 MonoInst *args[16];
3580 slot = mini_get_rgctx_entry_slot (entry);
3582 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3583 index = MONO_RGCTX_SLOT_INDEX (slot);
3584 if (mrgctx)
3585 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3586 for (depth = 0; ; ++depth) {
3587 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3589 if (index < size - 1)
3590 break;
3591 index -= size - 1;
3594 NEW_BBLOCK (cfg, end_bb);
3595 NEW_BBLOCK (cfg, is_null_bb);
3597 if (mrgctx) {
3598 rgctx_reg = rgctx->dreg;
3599 } else {
3600 rgctx_reg = alloc_preg (cfg);
3602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3603 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3604 NEW_BBLOCK (cfg, is_null_bb);
3606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3607 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3610 for (i = 0; i < depth; ++i) {
3611 int array_reg = alloc_preg (cfg);
3613 /* load ptr to next array */
3614 if (mrgctx && i == 0)
3615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3616 else
3617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3618 rgctx_reg = array_reg;
3619 /* is the ptr null? */
3620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3621 /* if yes, jump to actual trampoline */
3622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3625 /* fetch slot */
3626 val_reg = alloc_preg (cfg);
3627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3628 /* is the slot null? */
3629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3630 /* if yes, jump to actual trampoline */
3631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3633 /* Fastpath */
3634 res_reg = alloc_preg (cfg);
3635 MONO_INST_NEW (cfg, ins, OP_MOVE);
3636 ins->dreg = res_reg;
3637 ins->sreg1 = val_reg;
3638 MONO_ADD_INS (cfg->cbb, ins);
3639 res = ins;
3640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3642 /* Slowpath */
3643 MONO_START_BB (cfg, is_null_bb);
3644 args [0] = rgctx;
3645 EMIT_NEW_ICONST (cfg, args [1], index);
3646 if (mrgctx)
3647 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3648 else
3649 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3650 MONO_INST_NEW (cfg, ins, OP_MOVE);
3651 ins->dreg = res_reg;
3652 ins->sreg1 = call->dreg;
3653 MONO_ADD_INS (cfg->cbb, ins);
3654 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3656 MONO_START_BB (cfg, end_bb);
3658 return res;
3659 #endif
3663 * emit_rgctx_fetch:
3665 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3666 * given by RGCTX.
3668 static inline MonoInst*
3669 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3671 if (cfg->llvm_only)
3672 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3673 else
3674 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3677 static MonoInst*
3678 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3679 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3681 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3682 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3684 return emit_rgctx_fetch (cfg, rgctx, entry);
3687 static MonoInst*
3688 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3689 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3691 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3692 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3694 return emit_rgctx_fetch (cfg, rgctx, entry);
3697 static MonoInst*
3698 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3699 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3701 MonoJumpInfoGSharedVtCall *call_info;
3702 MonoJumpInfoRgctxEntry *entry;
3703 MonoInst *rgctx;
3705 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3706 call_info->sig = sig;
3707 call_info->method = cmethod;
3709 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3710 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3712 return emit_rgctx_fetch (cfg, rgctx, entry);
3716 * emit_get_rgctx_virt_method:
3718 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3720 static MonoInst*
3721 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3722 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3724 MonoJumpInfoVirtMethod *info;
3725 MonoJumpInfoRgctxEntry *entry;
3726 MonoInst *rgctx;
3728 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3729 info->klass = klass;
3730 info->method = virt_method;
3732 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3733 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3735 return emit_rgctx_fetch (cfg, rgctx, entry);
3738 static MonoInst*
3739 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3740 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3742 MonoJumpInfoRgctxEntry *entry;
3743 MonoInst *rgctx;
3745 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3746 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3748 return emit_rgctx_fetch (cfg, rgctx, entry);
3752 * emit_get_rgctx_method:
3754 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3755 * normal constants, else emit a load from the rgctx.
3757 static MonoInst*
3758 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3759 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3761 if (!context_used) {
3762 MonoInst *ins;
3764 switch (rgctx_type) {
3765 case MONO_RGCTX_INFO_METHOD:
3766 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3767 return ins;
3768 case MONO_RGCTX_INFO_METHOD_RGCTX:
3769 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3770 return ins;
3771 default:
3772 g_assert_not_reached ();
3774 } else {
3775 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3776 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3778 return emit_rgctx_fetch (cfg, rgctx, entry);
3782 static MonoInst*
3783 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3784 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3786 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3787 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3789 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (DATA, RGCTX_TYPE) in the gsharedvt info
 * template table of the current method, reusing an existing matching entry
 * when possible and appending a new one otherwise.
 */
3792 static int
3793 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3795 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3796 MonoRuntimeGenericContextInfoTemplate *template_;
3797 int i, idx;
3799 g_assert (info);
/* LOCAL_OFFSET entries are never deduplicated; each request gets its own slot */
3801 for (i = 0; i < info->num_entries; ++i) {
3802 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3804 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3805 return i;
/* Grow the entry array (doubling, 16 minimum) when it is full */
3808 if (info->num_entries == info->count_entries) {
3809 MonoRuntimeGenericContextInfoTemplate *new_entries;
3810 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3812 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3814 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3815 info->entries = new_entries;
3816 info->count_entries = new_count_entries;
3819 idx = info->num_entries;
3820 template_ = &info->entries [idx];
3821 template_->info_type = rgctx_type;
3822 template_->data = data;
3824 info->num_entries ++;
3826 return idx;
3830 * emit_get_gsharedvt_info:
3832 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3834 static MonoInst*
3835 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3837 MonoInst *ins;
3838 int idx, dreg;
3840 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3841 /* Load info->entries [idx] */
3842 dreg = alloc_preg (cfg);
3843 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3845 return ins;
3848 static MonoInst*
3849 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3851 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3855 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the static constructor/class initialization of KLASS
 * if it has not run yet. Uses a backend opcode when available, otherwise an
 * inline initialized-bit check with a fallback icall.
 */
3857 static void
3858 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3860 MonoInst *vtable_arg;
3861 int context_used;
3863 context_used = mini_class_check_context_used (cfg, klass);
/* Obtain the vtable: from the rgctx for shared code, as a constant otherwise */
3865 if (context_used) {
3866 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3867 klass, MONO_RGCTX_INFO_VTABLE);
3868 } else {
3869 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3871 if (!vtable)
3872 return;
3873 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3876 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3877 MonoInst *ins;
3880 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3881 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3883 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3884 ins->sreg1 = vtable_arg->dreg;
3885 MONO_ADD_INS (cfg->cbb, ins);
3886 } else {
3887 static int byte_offset = -1;
3888 static guint8 bitmask;
3889 int bits_reg, inited_reg;
3890 MonoBasicBlock *inited_bb;
3891 MonoInst *args [16];
/* Locate the 'initialized' bitfield inside MonoVTable once */
3893 if (byte_offset < 0)
3894 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3896 bits_reg = alloc_ireg (cfg);
3897 inited_reg = alloc_ireg (cfg);
3899 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3902 NEW_BBLOCK (cfg, inited_bb);
/* Skip the icall when the initialized bit is already set */
3904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3905 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3907 args [0] = vtable_arg;
3908 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3910 MONO_START_BB (cfg, inited_bb);
3914 static void
3915 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3917 MonoInst *ins;
3919 if (cfg->gen_seq_points && cfg->method == method) {
3920 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3921 if (nonempty_stack)
3922 ins->flags |= MONO_INST_NONEMPTY_STACK;
3923 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   With --debug=casts, record the source class (from OBJ_REG's vtable) and
 * the target KLASS in the JIT TLS data, so a failing cast can produce a
 * detailed error message. When NULL_CHECK is set, skip recording for null
 * objects. No-op unless better_cast_details is enabled.
 */
3928 static void
3929 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3930 if (mini_get_debug_options ()->better_cast_details) {
3931 int vtable_reg = alloc_preg (cfg);
3932 int klass_reg = alloc_preg (cfg);
3933 MonoBasicBlock *is_null_bb = NULL;
3934 MonoInst *tls_get;
3935 int to_klass_reg, context_used;
3937 if (null_check) {
3938 NEW_BBLOCK (cfg, is_null_bb);
3940 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3941 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3944 tls_get = mono_get_jit_tls_intrinsic (cfg);
3945 if (!tls_get) {
3946 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3947 exit (1);
3950 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from <- the object's class, read through its vtable */
3951 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3954 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to <- KLASS, via the rgctx for shared code */
3956 context_used = mini_class_check_context_used (cfg, klass);
3957 if (context_used) {
3958 MonoInst *class_ins;
3960 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3961 to_klass_reg = class_ins->dreg;
3962 } else {
3963 to_klass_reg = alloc_preg (cfg);
3964 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3966 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3968 if (null_check)
3969 MONO_START_BB (cfg, is_null_bb);
3973 static void
3974 reset_cast_details (MonoCompile *cfg)
3976 /* Reset the variables holding the cast details */
3977 if (mini_get_debug_options ()->better_cast_details) {
3978 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3980 MONO_ADD_INS (cfg->cbb, tls_get);
3981 /* It is enough to reset the from field */
3982 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3987 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which compares OBJ's vtable (or class) against ARRAY_CLASS and
 * throws ArrayTypeMismatchException when they differ. Also faults on a null
 * OBJ via the vtable load.
 */
3989 static void
3990 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3992 int vtable_reg = alloc_preg (cfg);
3993 int context_used;
3995 context_used = mini_class_check_context_used (cfg, array_class);
3997 save_cast_details (cfg, array_class, obj->dreg, FALSE);
3999 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Under MONO_OPT_SHARED, compare classes rather than vtables */
4001 if (cfg->opt & MONO_OPT_SHARED) {
4002 int class_reg = alloc_preg (cfg);
4003 MonoInst *ins;
4005 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4006 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
4007 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
/* Shared generic code: fetch the expected vtable from the rgctx */
4008 } else if (context_used) {
4009 MonoInst *vtable_ins;
4011 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
4012 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
4013 } else {
4014 if (cfg->compile_aot) {
4015 int vt_reg;
4016 MonoVTable *vtable;
4018 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4019 return;
4020 vt_reg = alloc_preg (cfg);
4021 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
4022 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
4023 } else {
4024 MonoVTable *vtable;
4025 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4026 return;
/* JIT: the vtable address can be embedded as an immediate */
4027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4031 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4033 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Handles unbox of a Nullable<T> by calling the managed Nullable<T>.Unbox ()
 * method on @val. If context_used is non zero, then shared generic code is
 * generated. Returns the call instruction producing the unboxed value.
 */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared? We might not
		   have to get the address of the method from the
		   RGCTX. */
		addr = emit_get_rgctx_method (cfg, context_used, method,
									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		if (cfg->llvm_only) {
			/* Remember the signature so the AOT compiler can emit a matching calli thunk */
			cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
			return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
		} else {
			rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
		}
	} else {
		gboolean pass_vtable, pass_mrgctx;
		MonoInst *rgctx_arg = NULL;

		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
		g_assert (!pass_mrgctx);

		if (pass_vtable) {
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);

			g_assert (vtable);
			EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
		}

		return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
	}
}
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object in sp [0] to a value type of class @klass:
 * checks that the object is not an array (rank == 0) and that its class'
 * element class matches @klass, then returns the address of the value
 * stored immediately after the MonoObject header.
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* Faulting load doubles as the null check */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		/* Shared code: fetch the expected element class from the RGCTX */
		element_class = emit_get_rgctx_klass (cfg, context_used,
				klass, MONO_RGCTX_INFO_ELEMENT_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	/* The unboxed value starts right after the object header */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox @obj when @klass is a gsharedvt type whose concrete
 * box kind (reference / vtype / Nullable) is only known at run time.
 * Emits a castclass check, then branches on the RGCTX-provided box type
 * and loads the value through an address that is valid in all three cases.
 */
static MonoInst*
handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
{
	MonoInst *addr, *klass_inst, *is_ref, *args[16];
	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
	MonoInst *ins;
	int dreg, addr_reg;

	klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);

	/* obj */
	args [0] = obj;

	/* klass */
	args [1] = klass_inst;

	/* CASTCLASS */
	obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);

	NEW_BBLOCK (cfg, is_ref_bb);
	NEW_BBLOCK (cfg, is_nullable_bb);
	NEW_BBLOCK (cfg, end_bb);
	is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);

	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
	addr_reg = alloc_dreg (cfg, STACK_MP);

	/* Non-ref case */
	/* UNBOX */
	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, addr);

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Ref case */
	MONO_START_BB (cfg, is_ref_bb);

	/* Save the ref to a temporary */
	dreg = alloc_ireg (cfg);
	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
	addr->dreg = addr_reg;
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* Nullable case */
	MONO_START_BB (cfg, is_nullable_bb);

	{
		MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
		MonoInst *unbox_call;
		MonoMethodSignature *unbox_sig;

		/* Hand-built signature: object -> T, since the method cannot be constructed at JIT time */
		unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
		unbox_sig->ret = &klass->byval_arg;
		unbox_sig->param_count = 1;
		unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;

		if (cfg->llvm_only)
			unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
		else
			unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);

		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
		addr->dreg = addr_reg;
	}

	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	/* End */
	MONO_START_BB (cfg, end_bb);

	/* LDOBJ */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);

	return ins;
}
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an object of class @klass. @for_box is passed to
 * the GC managed-allocator selection. Depending on the compilation mode
 * (shared, generic-shared, AOT, plain JIT) this emits a managed allocator
 * call or a call to one of the runtime allocation icalls.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
	MonoInst *iargs [2];
	void *alloc_ftn;

	if (context_used) {
		MonoInst *data;
		MonoRgctxInfoType rgctx_info;
		MonoInst *iargs [2];
		/* gsharedvt classes have a runtime-variable instance size */
		gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);

		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);

		if (cfg->opt & MONO_OPT_SHARED)
			rgctx_info = MONO_RGCTX_INFO_KLASS;
		else
			rgctx_info = MONO_RGCTX_INFO_VTABLE;
		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);

		if (cfg->opt & MONO_OPT_SHARED) {
			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
			iargs [1] = data;
			alloc_ftn = ves_icall_object_new;
		} else {
			iargs [0] = data;
			alloc_ftn = ves_icall_object_new_specific;
		}

		if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
			if (known_instance_size) {
				int size = mono_class_instance_size (klass);
				if (size < sizeof (MonoObject))
					g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));

				EMIT_NEW_ICONST (cfg, iargs [1], size);
			}
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}

		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
	}

	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = ves_icall_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = NULL;
		gboolean pass_lw;

		if (!vtable) {
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
			cfg->exception_ptr = klass;
			return NULL;
		}

		managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);

		if (managed_alloc) {
			int size = mono_class_instance_size (klass);
			if (size < sizeof (MonoObject))
				g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));

			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			EMIT_NEW_ICONST (cfg, iargs [1], size);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		if (pass_lw) {
			/* Pass the instance size in pointer-sized words as the first argument */
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
		}
		else {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
		}
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
/*
 * handle_box:
 *
 *   Emit IR to box @val, a value of class @klass. Handles three cases:
 * Nullable<T> (calls the managed Nullable<T>.Box ()), gsharedvt classes
 * (runtime branch on the box kind), and the plain case (allocate + store).
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
	MonoInst *alloc, *ins;

	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);

		if (context_used) {
			if (cfg->llvm_only && cfg->gsharedvt) {
				MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
														MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
				return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
			} else {
				/* FIXME: What if the class is shared? We might not
				   have to get the method address from the RGCTX. */
				MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
														MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
				MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

				return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
			}
		} else {
			gboolean pass_vtable, pass_mrgctx;
			MonoInst *rgctx_arg = NULL;

			check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
			g_assert (!pass_mrgctx);

			if (pass_vtable) {
				MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);

				g_assert (vtable);
				EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
			}

			return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
		}
	}

	if (mini_is_gsharedvt_klass (klass)) {
		MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
		MonoInst *res, *is_ref, *src_var, *addr;
		int dreg;

		dreg = alloc_ireg (cfg);

		NEW_BBLOCK (cfg, is_ref_bb);
		NEW_BBLOCK (cfg, is_nullable_bb);
		NEW_BBLOCK (cfg, end_bb);
		is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);

		/* Non-ref case */
		alloc = handle_alloc (cfg, klass, TRUE, context_used);
		if (!alloc)
			return NULL;
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
		ins->opcode = OP_STOREV_MEMBASE;

		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
		res->type = STACK_OBJ;
		res->klass = klass;
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Ref case */
		MONO_START_BB (cfg, is_ref_bb);

		/* val is a vtype, so has to load the value manually */
		src_var = get_vreg_to_inst (cfg, val->dreg);
		if (!src_var)
			src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
		EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Nullable case */
		MONO_START_BB (cfg, is_nullable_bb);

		{
			MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
															MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
			MonoInst *box_call;
			MonoMethodSignature *box_sig;

			/*
			 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
			 * construct that method at JIT time, so have to do things by hand.
			 */
			box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
			box_sig->ret = &mono_defaults.object_class->byval_arg;
			box_sig->param_count = 1;
			box_sig->params [0] = &klass->byval_arg;

			if (cfg->llvm_only)
				box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
			else
				box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
			EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
			res->type = STACK_OBJ;
			res->klass = klass;
		}

		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, end_bb);

		return res;
	} else {
		alloc = handle_alloc (cfg, klass, TRUE, context_used);
		if (!alloc)
			return NULL;

		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
		return alloc;
	}
}
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return TRUE if @klass is a generic instance (or, when @context_used is
 * non-zero, a generic type definition) which has at least one variant or
 * covariant type argument that is a reference type. Such classes need the
 * slow-path cast machinery because of array/interface variance.
 */
static gboolean
mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
{
	int i;
	MonoGenericContainer *container;
	MonoGenericInst *ginst;

	if (mono_class_is_ginst (klass)) {
		container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
		ginst = mono_class_get_generic_class (klass)->context.class_inst;
	} else if (mono_class_is_gtd (klass) && context_used) {
		container = mono_class_get_generic_container (klass);
		ginst = container->context.class_inst;
	} else {
		return FALSE;
	}

	for (i = 0; i < container->type_argc; ++i) {
		MonoType *type;
		/* Only variant/covariant parameters are relevant */
		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
			continue;
		type = ginst->type_argv [i];
		if (mini_type_is_reference (type))
			return TRUE;
	}
	return FALSE;
}
/* Lazily-initialized whitelist of corlib type names whose icalls can be called directly */
static GHashTable* direct_icall_type_hash;

/*
 * icall_is_direct_callable:
 *
 *   Return TRUE if the icall @cmethod can be called directly, bypassing the
 * usual wrapper.
 */
static gboolean
icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
{
	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
	if (!direct_icalls_enabled (cfg))
		return FALSE;

	/*
	 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
	 * Whitelist a few icalls for now.
	 */
	if (!direct_icall_type_hash) {
		GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);

		g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
		g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
		g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
		g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
		/* Publish the fully-initialized table before the pointer store */
		mono_memory_barrier ();
		direct_icall_type_hash = h;
	}

	if (cmethod->klass == mono_defaults.math_class)
		return TRUE;
	/* No locking needed */
	if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
		return TRUE;
	return FALSE;
}
4488 static gboolean
4489 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4491 if (cmethod->klass == mono_defaults.systemtype_class) {
4492 if (!strcmp (cmethod->name, "GetType"))
4493 return TRUE;
4495 return FALSE;
4498 #define is_complex_isinst(klass) (mono_class_is_interface (klass) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
4500 static MonoInst*
4501 emit_isinst_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4503 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4504 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4507 static MonoInst*
4508 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4510 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
4511 MonoInst *res;
4513 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4514 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4515 reset_cast_details (cfg);
4517 return res;
4520 static int
4521 get_castclass_cache_idx (MonoCompile *cfg)
4523 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4524 cfg->castclass_cache_index ++;
4525 return (cfg->method_index << 16) | cfg->castclass_cache_index;
4529 static MonoInst*
4530 emit_isinst_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4532 MonoInst *args [3];
4533 int idx;
4535 args [0] = obj; /* obj */
4536 EMIT_NEW_CLASSCONST (cfg, args [1], klass); /* klass */
4538 idx = get_castclass_cache_idx (cfg); /* inline cache*/
4539 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4541 return emit_isinst_with_cache (cfg, klass, args);
4544 static MonoInst*
4545 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4547 MonoInst *args [3];
4548 int idx;
4550 /* obj */
4551 args [0] = obj;
4553 /* klass */
4554 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4556 /* inline cache*/
4557 idx = get_castclass_cache_idx (cfg);
4558 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4560 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4561 return emit_castclass_with_cache (cfg, klass, args);
/*
 * handle_castclass:
 *
 *   Emit IR implementing the castclass opcode: check that @src is an
 * instance of @klass, throwing InvalidCastException otherwise, and return
 * @src unchanged. Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	/* Casting a known null always succeeds */
	if (MONO_INS_IS_PCONST_NULL (src))
		return src;

	if (context_used) {
		MonoInst *args [3];

		if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
			MonoInst *cache_ins;

			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

			/* obj */
			args [0] = src;

			/* klass - it's the second element of the cache entry*/
			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

			/* cache */
			args [2] = cache_ins;

			return emit_castclass_with_cache (cfg, klass, args);
		}

		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
	}

	NEW_BBLOCK (cfg, is_null_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	save_cast_details (cfg, klass, obj_reg, FALSE);

	if (mono_class_is_interface (klass)) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));

		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_SEALED)) {
			/* Sealed non-array class: a single class-pointer comparison suffices */
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				if (!vt) {
					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
					cfg->exception_ptr = klass;
					return NULL;
				}
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * handle_isinst:
 *
 *   Emit IR implementing the isinst opcode: the result register holds @src
 * if it is an instance of @klass, NULL otherwise. Uses a per-shape fast
 * path (interfaces, arrays, Nullable, sealed, general) and a cached managed
 * helper when sharing requires it. Returns NULL and set the cfg exception
 * on error.
 */
static MonoInst*
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoInst *ins;
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_ireg_ref (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [3];

		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
			MonoInst *cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

			args [0] = src; /* obj */

			/* klass - it's the second element of the cache entry*/
			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

			args [2] = cache_ins; /* cache */
			return emit_isinst_with_cache (cfg, klass, args);
		}

		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
	}

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	ins->klass = klass;

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));

	if (mono_class_is_interface (klass)) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
	} else {
		int klass_reg = alloc_preg (cfg);

		if (klass->rank) {
			/* Array case: check the rank, then the element class */
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);

			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
			if (klass->cast_class == mono_defaults.object_class) {
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (mono_class_is_interface (klass->cast_class)) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			} else {
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				}

				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			}
		} else if (mono_class_is_nullable (klass)) {
			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		} else {
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_SEALED)) {
				g_assert (!context_used);
				/* the remoting code is broken, access the class for now */
				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					if (!vt) {
						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
						cfg->exception_ptr = klass;
						return NULL;
					}
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				} else {
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				}
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
			}
		}
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);

	return ins;
}
/*
 * handle_cisinst:
 *
 *   Emit IR for the CEE_MONO_CISINST opcode (remoting-aware isinst probe).
 */
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
#ifndef DISABLE_REMOTING
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
#else
	MonoBasicBlock *true_bb, *false_bb, *end_bb;
#endif
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
#ifndef DISABLE_REMOTING
	int klass_reg = alloc_preg (cfg);
#endif

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);
#ifndef DISABLE_REMOTING
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);
#endif

	/* A null reference is never an instance */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (mono_class_is_interface (klass)) {
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, interface_fail_bb);
#endif

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
#ifndef DISABLE_REMOTING
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		/* Interface check failed: distinguish a plain mismatch from an undecidable proxy */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
#else
		mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
#endif
	} else {
#ifndef DISABLE_REMOTING
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		/* Proxy: test against the remote class' proxy_class instead */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
#else
		g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
#endif
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

#ifndef DISABLE_REMOTING
	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
#endif

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_ccastclass:
 *
 *   Emit IR for a remoting-aware castclass: cast the object in SRC to KLASS,
 * treating transparent proxies specially.  Produces an integer result in DREG:
 * 0 when the object is an instance of KLASS, 1 when it is a proxy whose real
 * type cannot be checked locally; otherwise an InvalidCastException is thrown.
 */
4882 static MonoInst*
4883 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4885 /* This opcode takes as input an object reference and a class, and returns:
4886 0) if the object is an instance of the class,
4887 1) if the object is a proxy whose type cannot be determined
4888 an InvalidCastException exception is thrown otherwise*/
4890 MonoInst *ins;
4891 #ifndef DISABLE_REMOTING
4892 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4893 #else
4894 MonoBasicBlock *ok_result_bb;
4895 #endif
4896 int obj_reg = src->dreg;
4897 int dreg = alloc_ireg (cfg);
4898 int tmp_reg = alloc_preg (cfg);
4900 #ifndef DISABLE_REMOTING
4901 int klass_reg = alloc_preg (cfg);
4902 NEW_BBLOCK (cfg, end_bb);
4903 #endif
4905 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record klass/obj so a failed cast can produce a detailed exception message. */
4910 save_cast_details (cfg, klass, obj_reg, FALSE);
4912 if (mono_class_is_interface (klass)) {
4913 #ifndef DISABLE_REMOTING
4914 NEW_BBLOCK (cfg, interface_fail_bb);
4916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Fast path: check the interface bitmap in the vtable. */
4917 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4918 MONO_START_BB (cfg, interface_fail_bb);
4919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* The interface check failed: only a transparent proxy may still pass
 * (remotely); anything else throws here. */
4921 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4923 tmp_reg = alloc_preg (cfg);
4924 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info))
4925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4926 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: type must be determined remotely -> 1. */
4928 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4929 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4930 #else
4931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4932 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4934 #endif
4935 } else {
4936 #ifndef DISABLE_REMOTING
4937 NEW_BBLOCK (cfg, no_proxy_bb);
4939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4941 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Object is a proxy: check against the remote class' proxy_class instead. */
4943 tmp_reg = alloc_preg (cfg);
4944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4945 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4947 tmp_reg = alloc_preg (cfg);
4948 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4949 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4952 NEW_BBLOCK (cfg, fail_1_bb);
4954 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4956 MONO_START_BB (cfg, fail_1_bb);
/* Proxy whose type cannot be determined locally -> 1 (no exception). */
4958 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4961 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: normal castclass, throws on failure. */
4963 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4964 #else
4965 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4966 #endif
4969 MONO_START_BB (cfg, ok_result_bb);
4971 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4973 #ifndef DISABLE_REMOTING
4974 MONO_START_BB (cfg, end_bb);
4975 #endif
/* FIXME: dreg already holds the result; this OP_ICONST only materializes an
 * instruction to return to the caller. */
4977 /* FIXME: */
4978 MONO_INST_NEW (cfg, ins, OP_ICONST);
4979 ins->dreg = dreg;
4980 ins->type = STACK_I4;
4982 return ins;
/*
 * handle_enum_has_flag:
 *
 *   Emit the intrinsic expansion of Enum.HasFlag (): loads the enum value
 * pointed to by ENUM_THIS, computes (value & flag) == flag and returns the
 * boolean result as an I4.  32- vs 64-bit opcodes are selected from the
 * enum's underlying type; on 32-bit targets the 64-bit ops are decomposed.
 */
4985 static G_GNUC_UNUSED MonoInst*
4986 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4988 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4989 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4990 gboolean is_i4;
4992 switch (enum_type->type) {
4993 case MONO_TYPE_I8:
4994 case MONO_TYPE_U8:
4995 #if SIZEOF_REGISTER == 8
/* Native int/uint are 64 bits only on 64-bit targets. */
4996 case MONO_TYPE_I:
4997 case MONO_TYPE_U:
4998 #endif
4999 is_i4 = FALSE;
5000 break;
5001 default:
5002 is_i4 = TRUE;
5003 break;
5007 MonoInst *load, *and_, *cmp, *ceq;
5008 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5009 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5010 int dest_reg = alloc_ireg (cfg);
/* dest = ((*enum_this & flag) == flag) */
5012 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
5013 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
5014 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
5015 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
5017 ceq->type = STACK_I4;
5019 if (!is_i4) {
/* Split 64-bit ops into 32-bit pairs where the target requires it. */
5020 load = mono_decompose_opcode (cfg, load);
5021 and_ = mono_decompose_opcode (cfg, and_);
5022 cmp = mono_decompose_opcode (cfg, cmp);
5023 ceq = mono_decompose_opcode (cfg, ceq);
5026 return ceq;
/*
 * handle_delegate_ctor:
 *
 *   Emit the inline expansion of a delegate constructor: allocate the
 * delegate object for KLASS, store its target/method fields, and wire up
 * the invoke trampoline (or, for llvm-only, call the init icall).
 * CONTEXT_USED is nonzero under generic sharing; VIRTUAL_ selects the
 * virtual-delegate path.
 */
5031 * Returns NULL and set the cfg exception on error.
5033 static G_GNUC_UNUSED MonoInst*
5034 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5036 MonoInst *ptr;
5037 int dreg;
5038 gpointer trampoline;
5039 MonoInst *obj, *method_ins, *tramp_ins;
5040 MonoDomain *domain;
5041 guint8 **code_slot;
5043 if (virtual_ && !cfg->llvm_only) {
5044 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5045 g_assert (invoke);
/* Bail out (caller falls back) if no virtual invoke impl exists. */
5047 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5048 return NULL;
5051 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5052 if (!obj)
5053 return NULL;
5055 /* Inline the contents of mono_delegate_ctor */
5057 /* Set target field */
5058 /* Optimize away setting of NULL target */
5059 if (!MONO_INS_IS_PCONST_NULL (target)) {
5060 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
5061 if (cfg->gen_write_barriers) {
5062 dreg = alloc_preg (cfg);
5063 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5064 emit_write_barrier (cfg, ptr, target);
5068 /* Set method field */
5069 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5073 * To avoid looking up the compiled code belonging to the target method
5074 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5075 * store it, and we fill it after the method has been compiled.
5077 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5078 MonoInst *code_slot_ins;
5080 if (context_used) {
5081 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
5082 } else {
/* Lazily create the per-domain method -> code-slot hash under the
 * domain lock. */
5083 domain = mono_domain_get ();
5084 mono_domain_lock (domain);
5085 if (!domain_jit_info (domain)->method_code_hash)
5086 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5087 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5088 if (!code_slot) {
5089 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5090 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5092 mono_domain_unlock (domain);
5094 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5096 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: no trampolines; initialize the delegate via an icall. */
5099 if (cfg->llvm_only) {
5100 MonoInst *args [16];
5102 if (virtual_) {
5103 args [0] = obj;
5104 args [1] = target;
5105 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5106 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5107 } else {
5108 args [0] = obj;
5109 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
5112 return obj;
5115 if (cfg->compile_aot) {
5116 MonoDelegateClassMethodPair *del_tramp;
5118 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5119 del_tramp->klass = klass;
5120 del_tramp->method = context_used ? NULL : method;
5121 del_tramp->is_virtual = virtual_;
5122 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5123 } else {
5124 if (virtual_)
5125 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5126 else
5127 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5128 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5131 /* Set invoke_impl field */
5132 if (virtual_) {
5133 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5134 } else {
/* Non-virtual: tramp_ins is a MonoDelegateTrampInfo*, copy its
 * invoke_impl and method_ptr into the delegate. */
5135 dreg = alloc_preg (cfg);
5136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5139 dreg = alloc_preg (cfg);
5140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5141 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5144 dreg = alloc_preg (cfg);
5145 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5146 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5148 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
5150 return obj;
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall for a RANK-dimensional
 * array allocation whose dimension arguments are on the stack in SP.
 * The vararg calling convention forces MONO_CFG_HAS_VARARGS and disables
 * LLVM compilation for this method.
 */
5153 static MonoInst*
5154 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5156 MonoJitICallInfo *info;
5158 /* Need to register the icall so it gets an icall wrapper */
5159 info = mono_get_array_new_va_icall (rank);
5161 cfg->flags |= MONO_CFG_HAS_VARARGS;
5163 /* mono_array_new_va () needs a vararg calling convention */
5164 cfg->exception_message = g_strdup ("array-new");
5165 cfg->disable_llvm = TRUE;
5167 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5168 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5172 * handle_constrained_gsharedvt_call:
5174 * Handle constrained calls where the receiver is a gsharedvt type.
5175 * Return the instruction representing the call. Set the cfg exception on failure.
5177 static MonoInst*
5178 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5179 gboolean *ref_emit_widen)
5181 MonoInst *ins = NULL;
5182 gboolean emit_widen = *ref_emit_widen;
5185 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5186 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5187 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of signatures is supported by the icall path:
 * receiver on object/interface/non-corlib ref class, simple return type,
 * and at most one (simple) argument. */
5189 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5190 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5191 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5192 MonoInst *args [16];
5195 * This case handles calls to
5196 * - object:ToString()/Equals()/GetHashCode(),
5197 * - System.IComparable<T>:CompareTo()
5198 * - System.IEquatable<T>:Equals ()
5199 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args: [0]=this, [1]=method, [2]=constrained class, [3]=deref flag,
 * [4]=packed argument array. */
5202 args [0] = sp [0];
5203 if (mono_method_check_context_used (cmethod))
5204 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5205 else
5206 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5207 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5209 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5210 if (fsig->hasthis && fsig->param_count) {
5211 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5212 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5213 ins->dreg = alloc_preg (cfg);
5214 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5215 MONO_ADD_INS (cfg->cbb, ins);
5216 args [4] = ins;
5218 if (mini_is_gsharedvt_type (fsig->params [0])) {
5219 int addr_reg, deref_arg_reg;
5221 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5222 deref_arg_reg = alloc_preg (cfg);
5223 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5224 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5226 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5227 addr_reg = ins->dreg;
5228 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5229 } else {
5230 EMIT_NEW_ICONST (cfg, args [3], 0);
5231 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5233 } else {
5234 EMIT_NEW_ICONST (cfg, args [3], 0);
5235 EMIT_NEW_ICONST (cfg, args [4], 0);
5237 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
5238 emit_widen = FALSE;
/* The icall returns a boxed result; unbox it to the expected type. */
5240 if (mini_is_gsharedvt_type (fsig->ret)) {
5241 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5242 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5243 MonoInst *add;
5245 /* Unbox */
5246 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5247 MONO_ADD_INS (cfg->cbb, add);
5248 /* Load value */
5249 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5250 MONO_ADD_INS (cfg->cbb, ins);
5251 /* ins represents the call result */
5253 } else {
5254 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5257 *ref_emit_widen = emit_widen;
5259 return ins;
/* Reached via GSHAREDVT_FAILURE, which sets the cfg exception. */
5261 exception_exit:
5262 return NULL;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * method, once per compilation, and keep the variable alive until method
 * exit with a dummy use.
 */
5265 static void
5266 mono_emit_load_got_addr (MonoCompile *cfg)
5268 MonoInst *getaddr, *dummy_use;
5270 if (!cfg->got_var || cfg->got_var_allocated)
5271 return;
5273 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5274 getaddr->cil_code = cfg->header->code;
5275 getaddr->dreg = cfg->got_var->dreg;
5277 /* Add it to the start of the first bblock */
5278 if (cfg->bb_entry->code) {
5279 getaddr->next = cfg->bb_entry->code;
5280 cfg->bb_entry->code = getaddr;
5282 else
5283 MONO_ADD_INS (cfg->bb_entry, getaddr);
5285 cfg->got_var_allocated = TRUE;
5288 * Add a dummy use to keep the got_var alive, since real uses might
5289 * only be generated by the back ends.
5290 * Add it to end_bblock, so the variable's lifetime covers the whole
5291 * method.
5292 * It would be better to make the usage of the got var explicit in all
5293 * cases when the backend needs it (i.e. calls, throw etc.), so this
5294 * wouldn't be needed.
5296 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5297 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size a method may have to be considered for inlining;
 * initialized lazily from the MONO_INLINELIMIT env var (default:
 * INLINE_LENGTH_LIMIT) in mono_method_check_inlining (). */
5300 static int inline_limit;
5301 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects methods that are too large, marked noinline/synchronized,
 * marshal-by-ref, have exception clauses, or whose class needs a cctor
 * that cannot safely be run (or proven already run) at compile time.
 * Returns TRUE when inlining is allowed.
 */
5303 static gboolean
5304 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5306 MonoMethodHeaderSummary header;
5307 MonoVTable *vtable;
5308 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5309 MonoMethodSignature *sig = mono_method_signature (method);
5310 int i;
5311 #endif
5313 if (cfg->disable_inline)
5314 return FALSE;
5315 if (cfg->gshared)
5316 return FALSE;
/* Cap recursive inlining depth. */
5318 if (cfg->inline_depth > 10)
5319 return FALSE;
5321 if (!mono_method_get_header_summary (method, &header))
5322 return FALSE;
5324 /*runtime, icall and pinvoke are checked by summary call*/
5325 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5326 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5327 (mono_class_is_marshalbyref (method->klass)) ||
5328 header.has_clauses)
5329 return FALSE;
5331 /* also consider num_locals? */
5332 /* Do the size check early to avoid creating vtables */
5333 if (!inline_limit_inited) {
/* NOTE(review): g_getenv () is called twice here; newer GLib returns a
 * newly-allocated string on some platforms — consider caching the
 * result in a local. */
5334 if (g_getenv ("MONO_INLINELIMIT"))
5335 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5336 else
5337 inline_limit = INLINE_LENGTH_LIMIT;
5338 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
5340 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5341 return FALSE;
5344 * if we can initialize the class of the method right away, we do,
5345 * otherwise we don't allow inlining if the class needs initialization,
5346 * since it would mean inserting a call to mono_runtime_class_init()
5347 * inside the inlined code
5349 if (!(cfg->opt & MONO_OPT_SHARED)) {
5350 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5351 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5352 vtable = mono_class_vtable (cfg->domain, method->klass);
5353 if (!vtable)
5354 return FALSE;
5355 if (!cfg->compile_aot) {
5356 MonoError error;
5357 if (!mono_runtime_class_init_full (vtable, &error)) {
5358 mono_error_cleanup (&error);
5359 return FALSE;
5362 } else if (mono_class_get_flags (method->klass) & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5363 if (cfg->run_cctors && method->klass->has_cctor) {
5364 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5365 if (!method->klass->runtime_info)
5366 /* No vtable created yet */
5367 return FALSE;
5368 vtable = mono_class_vtable (cfg->domain, method->klass);
5369 if (!vtable)
5370 return FALSE;
5371 /* This makes so that inline cannot trigger */
5372 /* .cctors: too many apps depend on them */
5373 /* running with a specific order... */
5374 if (! vtable->initialized)
5375 return FALSE;
5376 MonoError error;
5377 if (!mono_runtime_class_init_full (vtable, &error)) {
5378 mono_error_cleanup (&error);
5379 return FALSE;
5382 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5383 if (!method->klass->runtime_info)
5384 /* No vtable created yet */
5385 return FALSE;
5386 vtable = mono_class_vtable (cfg->domain, method->klass);
5387 if (!vtable)
5388 return FALSE;
5389 if (!vtable->initialized)
5390 return FALSE;
5392 } else {
5394 * If we're compiling for shared code
5395 * the cctor will need to be run at aot method load time, for example,
5396 * or at the end of the compilation of the inlining method.
5398 if (mono_class_needs_cctor_run (method->klass, NULL) && !((mono_class_get_flags (method->klass) & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5399 return FALSE;
5402 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5403 if (mono_arch_is_soft_float ()) {
5404 /* FIXME: */
5405 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5406 return FALSE;
5407 for (i = 0; i < sig->param_count; ++i)
5408 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5409 return FALSE;
5411 #endif
/* Methods explicitly blacklisted for this compilation. */
5413 if (g_list_find (cfg->dont_inline, method))
5414 return FALSE;
5416 return TRUE;
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return TRUE if a static field access on KLASS from METHOD must be
 * preceded by running KLASS's class constructor (i.e. the cctor is not
 * known to have run already and cannot be skipped).
 */
5419 static gboolean
5420 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5422 if (!cfg->compile_aot) {
5423 g_assert (vtable);
5424 if (vtable->initialized)
5425 return FALSE;
5428 if (mono_class_get_flags (klass) & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
/* beforefieldinit: accessing our own class' fields needs no cctor. */
5429 if (cfg->method == method)
5430 return FALSE;
5433 if (!mono_class_needs_cctor_run (klass, method))
5434 return FALSE;
5436 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5437 /* The initialization is already done before the method is called */
5438 return FALSE;
5440 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX of the one-dimensional array ARR with
 * element class KLASS: &arr->vector [index * element_size].  Emits a bounds
 * check when BCHECK is set.  For gsharedvt variable-size elements the size
 * is loaded from the rgctx at runtime.  Returns the address instruction
 * (type STACK_MP).
 */
5443 static MonoInst*
5444 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5446 MonoInst *ins;
5447 guint32 size;
5448 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5449 int context_used;
5451 if (mini_is_gsharedvt_variable_klass (klass)) {
/* size == -1 marks "element size known only at runtime". */
5452 size = -1;
5453 } else {
5454 mono_class_init (klass);
5455 size = mono_class_array_element_size (klass);
5458 mult_reg = alloc_preg (cfg);
5459 array_reg = arr->dreg;
5460 index_reg = index->dreg;
5462 #if SIZEOF_REGISTER == 8
5463 /* The array reg is 64 bits but the index reg is only 32 */
5464 if (COMPILE_LLVM (cfg)) {
5465 /* Not needed */
5466 index2_reg = index_reg;
5467 } else {
5468 index2_reg = alloc_preg (cfg);
5469 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5471 #else
5472 if (index->type == STACK_I8) {
5473 index2_reg = alloc_preg (cfg);
5474 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5475 } else {
5476 index2_reg = index_reg;
5478 #endif
5480 if (bcheck)
5481 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5483 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the multiply+add into a single LEA for power-of-two
 * element sizes. */
5484 if (size == 1 || size == 2 || size == 4 || size == 8) {
5485 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5487 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5488 ins->klass = mono_class_get_element_class (klass);
5489 ins->type = STACK_MP;
5491 return ins;
5493 #endif
5495 add_reg = alloc_ireg_mp (cfg);
5497 if (size == -1) {
5498 MonoInst *rgctx_ins;
5500 /* gsharedvt */
5501 g_assert (cfg->gshared);
5502 context_used = mini_class_check_context_used (cfg, klass);
5503 g_assert (context_used);
5504 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5505 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5506 } else {
5507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5509 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5510 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5511 ins->klass = mono_class_get_element_class (klass);
5512 ins->type = STACK_MP;
5513 MONO_ADD_INS (cfg->cbb, ins);
5515 return ins;
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [index1, index2] of the two-dimensional
 * array ARR with element class KLASS, including per-dimension lower-bound
 * adjustment and range checks against the MonoArrayBounds records.
 * Returns the address instruction (type STACK_MP).
 */
5518 static MonoInst*
5519 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5521 int bounds_reg = alloc_preg (cfg);
5522 int add_reg = alloc_ireg_mp (cfg);
5523 int mult_reg = alloc_preg (cfg);
5524 int mult2_reg = alloc_preg (cfg);
5525 int low1_reg = alloc_preg (cfg);
5526 int low2_reg = alloc_preg (cfg);
5527 int high1_reg = alloc_preg (cfg);
5528 int high2_reg = alloc_preg (cfg);
5529 int realidx1_reg = alloc_preg (cfg);
5530 int realidx2_reg = alloc_preg (cfg);
5531 int sum_reg = alloc_preg (cfg);
5532 int index1, index2, tmpreg;
5533 MonoInst *ins;
5534 guint32 size;
5536 mono_class_init (klass);
5537 size = mono_class_array_element_size (klass);
5539 index1 = index_ins1->dreg;
5540 index2 = index_ins2->dreg;
5542 #if SIZEOF_REGISTER == 8
5543 /* The array reg is 64 bits but the index reg is only 32 */
5544 if (COMPILE_LLVM (cfg)) {
5545 /* Not needed */
5546 } else {
5547 tmpreg = alloc_preg (cfg);
5548 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5549 index1 = tmpreg;
5550 tmpreg = alloc_preg (cfg);
5551 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5552 index2 = tmpreg;
5554 #else
5555 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5556 tmpreg = -1;
5557 #endif
5559 /* range checking */
5560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5561 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* realidx1 = index1 - lower_bound[0]; throw unless realidx1 < length[0]. */
5563 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5564 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5565 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5566 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5567 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5568 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5569 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Same check for the second dimension (bounds record at offset
 * sizeof (MonoArrayBounds)). */
5571 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5572 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5573 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5575 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5576 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5577 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length[1] + realidx2) * size + vector offset. */
5579 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5580 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5582 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5583 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5585 ins->type = STACK_MP;
5586 ins->klass = klass;
5587 MONO_ADD_INS (cfg->cbb, ins);
5589 return ins;
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element address for an Address () call on a multi-dimensional
 * array accessor CMETHOD.  Rank 1 and (when intrinsics are enabled) rank 2
 * get inline IR; other ranks call the marshal-generated address helper.
 * IS_SET indicates a Set () accessor, whose last argument is the value.
 */
5592 static MonoInst*
5593 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5595 int rank;
5596 MonoInst *addr;
5597 MonoMethod *addr_method;
5598 int element_size;
5599 MonoClass *eclass = cmethod->klass->element_class;
5601 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5603 if (rank == 1)
5604 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5606 /* emit_ldelema_2 depends on OP_LMUL */
5607 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5608 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5611 if (mini_is_gsharedvt_variable_klass (eclass))
5612 element_size = 0;
5613 else
5614 element_size = mono_class_array_element_size (eclass);
5615 addr_method = mono_marshal_get_array_address (rank, element_size);
5616 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
5618 return addr;
/* Default break policy: honor every breakpoint request. */
5621 static MonoBreakPolicy
5622 always_insert_breakpoint (MonoMethod *method)
5624 return MONO_BREAK_POLICY_ALWAYS;
/* Current break policy callback; replaced via mono_set_break_policy (). */
5627 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5630 * mono_set_break_policy:
5631 * policy_callback: the new callback function
5633 * Allow embedders to decide whether to actually obey breakpoint instructions
5634 * (both break IL instructions and Debugger.Break () method calls), for example
5635 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5636 * untrusted or semi-trusted code.
5638 * @policy_callback will be called every time a break point instruction needs to
5639 * be inserted with the method argument being the method that calls Debugger.Break()
5640 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5641 * if it wants the breakpoint to not be effective in the given method.
5642 * #MONO_BREAK_POLICY_ALWAYS is the default.
5644 void
5645 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-break policy. */
5647 if (policy_callback)
5648 break_policy_func = policy_callback;
5649 else
5650 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted.
 * NOTE(review): the name is misspelled ("brekpoint") but is kept as-is
 * since callers elsewhere in the file use it.
 */
5653 static gboolean
5654 should_insert_brekpoint (MonoMethod *method) {
5655 switch (break_policy_func (method)) {
5656 case MONO_BREAK_POLICY_ALWAYS:
5657 return TRUE;
5658 case MONO_BREAK_POLICY_NEVER:
5659 return FALSE;
5660 case MONO_BREAK_POLICY_ON_DBG:
5661 g_warning ("mdb no longer supported");
5662 return FALSE;
5663 default:
5664 g_warning ("Incorrect value returned from break policy callback");
5665 return FALSE;
5669 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls as a direct element
 * load/store.  args: [0]=array, [1]=index, [2]=value (byref).  IS_SET
 * selects store-into-array; otherwise the element is copied out to args[2].
 */
5670 static MonoInst*
5671 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5673 MonoInst *addr, *store, *load;
5674 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5676 /* the bounds check is already done by the callers */
5677 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5678 if (is_set) {
5679 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5680 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Reference stores into the array need a GC write barrier. */
5681 if (mini_type_is_reference (&eklass->byval_arg))
5682 emit_write_barrier (cfg, addr, load);
5683 } else {
5684 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5685 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5687 return store;
/* TRUE if KLASS is a reference type after unsharing/underlying-type
 * resolution (helper used by the array-store paths below). */
5691 static gboolean
5692 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5694 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for stelem: sp[0]=array, sp[1]=index, sp[2]=value.  Reference
 * element stores with SAFETY_CHECKS go through the virtual stelemref
 * helper (which performs the array covariance check); otherwise the store
 * is emitted inline, with a constant-index fast path and write barriers
 * for reference elements.
 */
5697 static MonoInst*
5698 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference with checks: call the stelemref helper so
 * the covariance/type check is performed. */
5700 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5701 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
5702 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5703 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5704 MonoInst *iargs [3];
5706 if (!helper->slot)
5707 mono_class_setup_vtable (obj_array);
5708 g_assert (helper->slot);
5710 if (sp [0]->type != STACK_OBJ)
5711 return NULL;
5712 if (sp [2]->type != STACK_OBJ)
5713 return NULL;
5715 iargs [2] = sp [2];
5716 iargs [1] = sp [1];
5717 iargs [0] = sp [0];
5719 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5720 } else {
5721 MonoInst *ins;
5723 if (mini_is_gsharedvt_variable_klass (klass)) {
5724 MonoInst *addr;
5726 // FIXME-VT: OP_ICONST optimization
5727 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5728 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5729 ins->opcode = OP_STOREV_MEMBASE;
5730 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset at compile time. */
5731 int array_reg = sp [0]->dreg;
5732 int index_reg = sp [1]->dreg;
5733 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5735 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5736 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5738 if (safety_checks)
5739 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5740 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5741 } else {
5742 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5743 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5744 if (generic_class_is_reference_type (cfg, klass))
5745 emit_write_barrier (cfg, addr, sp [2]);
5747 return ins;
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array.UnsafeLoad/UnsafeStore intrinsics: element access
 * without bounds checks.  The element class comes from the value parameter
 * (store) or the return type (load).
 */
5751 static MonoInst*
5752 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5754 MonoClass *eklass;
5756 if (is_set)
5757 eklass = mono_class_from_mono_type (fsig->params [2]);
5758 else
5759 eklass = mono_class_from_mono_type (fsig->ret);
5761 if (is_set) {
/* FALSE: no safety checks (that is the point of the intrinsic). */
5762 return emit_array_store (cfg, eklass, args, FALSE);
5763 } else {
5764 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5765 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5766 return ins;
5770 static gboolean
5771 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5773 uint32_t align;
5774 int param_size, return_size;
5776 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5777 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5779 if (cfg->verbose_level > 3)
5780 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5782 //Don't allow mixing reference types with value types
5783 if (param_klass->valuetype != return_klass->valuetype) {
5784 if (cfg->verbose_level > 3)
5785 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5786 return FALSE;
5789 if (!param_klass->valuetype) {
5790 if (cfg->verbose_level > 3)
5791 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5792 return TRUE;
5795 //That are blitable
5796 if (param_klass->has_references || return_klass->has_references)
5797 return FALSE;
5799 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5800 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5801 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5802 if (cfg->verbose_level > 3)
5803 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5804 return FALSE;
5807 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5808 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5809 if (cfg->verbose_level > 3)
5810 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5811 return FALSE;
5814 param_size = mono_class_value_size (param_klass, &align);
5815 return_size = mono_class_value_size (return_klass, &align);
5817 //We can do it if sizes match
5818 if (param_size == return_size) {
5819 if (cfg->verbose_level > 3)
5820 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5821 return TRUE;
5824 //No simple way to handle struct if sizes don't match
5825 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5826 if (cfg->verbose_level > 3)
5827 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5828 return FALSE;
5832 * Same reg size category.
5833 * A quick note on why we don't require widening here.
5834 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5836 * Since the source value comes from a function argument, the JIT will already have
5837 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5839 if (param_size <= 4 && return_size <= 4) {
5840 if (cfg->verbose_level > 3)
5841 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
5842 return TRUE;
5845 return FALSE;
5848 static MonoInst*
5849 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5851 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5852 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5854 if (mini_is_gsharedvt_variable_type (fsig->ret))
5855 return NULL;
5857 //Valuetypes that are semantically equivalent or numbers than can be widened to
5858 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5859 return args [0];
5861 //Arrays of valuetypes that are semantically equivalent
5862 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5863 return args [0];
5865 return NULL;
5868 static MonoInst*
5869 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5871 #ifdef MONO_ARCH_SIMD_INTRINSICS
5872 MonoInst *ins = NULL;
5874 if (cfg->opt & MONO_OPT_SIMD) {
5875 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5876 if (ins)
5877 return ins;
5879 #endif
5881 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5884 static MonoInst*
5885 emit_memory_barrier (MonoCompile *cfg, int kind)
5887 MonoInst *ins = NULL;
5888 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5889 MONO_ADD_INS (cfg->cbb, ins);
5890 ins->backend.memory_barrier_kind = kind;
5892 return ins;
5895 static MonoInst*
5896 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5898 MonoInst *ins = NULL;
5899 int opcode = 0;
5901 /* The LLVM backend supports these intrinsics */
5902 if (cmethod->klass == mono_defaults.math_class) {
5903 if (strcmp (cmethod->name, "Sin") == 0) {
5904 opcode = OP_SIN;
5905 } else if (strcmp (cmethod->name, "Cos") == 0) {
5906 opcode = OP_COS;
5907 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5908 opcode = OP_SQRT;
5909 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5910 opcode = OP_ABS;
5913 if (opcode && fsig->param_count == 1) {
5914 MONO_INST_NEW (cfg, ins, opcode);
5915 ins->type = STACK_R8;
5916 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5917 ins->sreg1 = args [0]->dreg;
5918 MONO_ADD_INS (cfg->cbb, ins);
5921 opcode = 0;
5922 if (cfg->opt & MONO_OPT_CMOV) {
5923 if (strcmp (cmethod->name, "Min") == 0) {
5924 if (fsig->params [0]->type == MONO_TYPE_I4)
5925 opcode = OP_IMIN;
5926 if (fsig->params [0]->type == MONO_TYPE_U4)
5927 opcode = OP_IMIN_UN;
5928 else if (fsig->params [0]->type == MONO_TYPE_I8)
5929 opcode = OP_LMIN;
5930 else if (fsig->params [0]->type == MONO_TYPE_U8)
5931 opcode = OP_LMIN_UN;
5932 } else if (strcmp (cmethod->name, "Max") == 0) {
5933 if (fsig->params [0]->type == MONO_TYPE_I4)
5934 opcode = OP_IMAX;
5935 if (fsig->params [0]->type == MONO_TYPE_U4)
5936 opcode = OP_IMAX_UN;
5937 else if (fsig->params [0]->type == MONO_TYPE_I8)
5938 opcode = OP_LMAX;
5939 else if (fsig->params [0]->type == MONO_TYPE_U8)
5940 opcode = OP_LMAX_UN;
5944 if (opcode && fsig->param_count == 2) {
5945 MONO_INST_NEW (cfg, ins, opcode);
5946 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5947 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5948 ins->sreg1 = args [0]->dreg;
5949 ins->sreg2 = args [1]->dreg;
5950 MONO_ADD_INS (cfg->cbb, ins);
5954 return ins;
5957 static MonoInst*
5958 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5960 if (cmethod->klass == mono_defaults.array_class) {
5961 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5962 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5963 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5964 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5965 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5966 return emit_array_unsafe_mov (cfg, fsig, args);
5969 return NULL;
5972 static MonoInst*
5973 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5975 MonoInst *ins = NULL;
5977 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5979 if (cmethod->klass == mono_defaults.string_class) {
5980 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5981 int dreg = alloc_ireg (cfg);
5982 int index_reg = alloc_preg (cfg);
5983 int add_reg = alloc_preg (cfg);
5985 #if SIZEOF_REGISTER == 8
5986 if (COMPILE_LLVM (cfg)) {
5987 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5988 } else {
5989 /* The array reg is 64 bits but the index reg is only 32 */
5990 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5992 #else
5993 index_reg = args [1]->dreg;
5994 #endif
5995 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5997 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5998 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5999 add_reg = ins->dreg;
6000 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
6001 add_reg, 0);
6002 #else
6003 int mult_reg = alloc_preg (cfg);
6004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
6005 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
6006 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
6007 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
6008 #endif
6009 type_from_op (cfg, ins, NULL, NULL);
6010 return ins;
6011 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6012 int dreg = alloc_ireg (cfg);
6013 /* Decompose later to allow more optimizations */
6014 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
6015 ins->type = STACK_I4;
6016 ins->flags |= MONO_INST_FAULT;
6017 cfg->cbb->has_array_access = TRUE;
6018 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
6020 return ins;
6021 } else
6022 return NULL;
6023 } else if (cmethod->klass == mono_defaults.object_class) {
6024 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
6025 int dreg = alloc_ireg_ref (cfg);
6026 int vt_reg = alloc_preg (cfg);
6027 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6028 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
6029 type_from_op (cfg, ins, NULL, NULL);
6031 return ins;
6032 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6033 int dreg = alloc_ireg (cfg);
6034 int t1 = alloc_ireg (cfg);
6036 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6037 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6038 ins->type = STACK_I4;
6040 return ins;
6041 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6042 MONO_INST_NEW (cfg, ins, OP_NOP);
6043 MONO_ADD_INS (cfg->cbb, ins);
6044 return ins;
6045 } else
6046 return NULL;
6047 } else if (cmethod->klass == mono_defaults.array_class) {
6048 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6049 return emit_array_generic_access (cfg, fsig, args, FALSE);
6050 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6051 return emit_array_generic_access (cfg, fsig, args, TRUE);
6053 #ifndef MONO_BIG_ARRAYS
6055 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6056 * Array methods.
6058 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6059 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6060 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6061 int dreg = alloc_ireg (cfg);
6062 int bounds_reg = alloc_ireg_mp (cfg);
6063 MonoBasicBlock *end_bb, *szarray_bb;
6064 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6066 NEW_BBLOCK (cfg, end_bb);
6067 NEW_BBLOCK (cfg, szarray_bb);
6069 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6070 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6071 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6072 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6073 /* Non-szarray case */
6074 if (get_length)
6075 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6076 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6077 else
6078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6079 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6081 MONO_START_BB (cfg, szarray_bb);
6082 /* Szarray case */
6083 if (get_length)
6084 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6085 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6086 else
6087 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6088 MONO_START_BB (cfg, end_bb);
6090 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6091 ins->type = STACK_I4;
6093 return ins;
6095 #endif
6097 if (cmethod->name [0] != 'g')
6098 return NULL;
6100 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6101 int dreg = alloc_ireg (cfg);
6102 int vtable_reg = alloc_preg (cfg);
6103 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6104 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6105 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6106 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6107 type_from_op (cfg, ins, NULL, NULL);
6109 return ins;
6110 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6111 int dreg = alloc_ireg (cfg);
6113 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6114 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6115 type_from_op (cfg, ins, NULL, NULL);
6117 return ins;
6118 } else
6119 return NULL;
6120 } else if (cmethod->klass == runtime_helpers_class) {
6121 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6122 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6123 return ins;
6124 } else
6125 return NULL;
6126 } else if (cmethod->klass == mono_defaults.monitor_class) {
6127 gboolean is_enter = FALSE;
6128 gboolean is_v4 = FALSE;
6130 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
6131 is_enter = TRUE;
6132 is_v4 = TRUE;
6134 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
6135 is_enter = TRUE;
6137 if (is_enter) {
6139 * To make async stack traces work, icalls which can block should have a wrapper.
6140 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6142 MonoBasicBlock *end_bb;
6144 NEW_BBLOCK (cfg, end_bb);
6146 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6149 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6150 MONO_START_BB (cfg, end_bb);
6151 return ins;
6153 } else if (cmethod->klass == mono_defaults.thread_class) {
6154 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6155 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6156 MONO_ADD_INS (cfg->cbb, ins);
6157 return ins;
6158 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6159 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6160 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6161 guint32 opcode = 0;
6162 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6164 if (fsig->params [0]->type == MONO_TYPE_I1)
6165 opcode = OP_LOADI1_MEMBASE;
6166 else if (fsig->params [0]->type == MONO_TYPE_U1)
6167 opcode = OP_LOADU1_MEMBASE;
6168 else if (fsig->params [0]->type == MONO_TYPE_I2)
6169 opcode = OP_LOADI2_MEMBASE;
6170 else if (fsig->params [0]->type == MONO_TYPE_U2)
6171 opcode = OP_LOADU2_MEMBASE;
6172 else if (fsig->params [0]->type == MONO_TYPE_I4)
6173 opcode = OP_LOADI4_MEMBASE;
6174 else if (fsig->params [0]->type == MONO_TYPE_U4)
6175 opcode = OP_LOADU4_MEMBASE;
6176 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6177 opcode = OP_LOADI8_MEMBASE;
6178 else if (fsig->params [0]->type == MONO_TYPE_R4)
6179 opcode = OP_LOADR4_MEMBASE;
6180 else if (fsig->params [0]->type == MONO_TYPE_R8)
6181 opcode = OP_LOADR8_MEMBASE;
6182 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6183 opcode = OP_LOAD_MEMBASE;
6185 if (opcode) {
6186 MONO_INST_NEW (cfg, ins, opcode);
6187 ins->inst_basereg = args [0]->dreg;
6188 ins->inst_offset = 0;
6189 MONO_ADD_INS (cfg->cbb, ins);
6191 switch (fsig->params [0]->type) {
6192 case MONO_TYPE_I1:
6193 case MONO_TYPE_U1:
6194 case MONO_TYPE_I2:
6195 case MONO_TYPE_U2:
6196 case MONO_TYPE_I4:
6197 case MONO_TYPE_U4:
6198 ins->dreg = mono_alloc_ireg (cfg);
6199 ins->type = STACK_I4;
6200 break;
6201 case MONO_TYPE_I8:
6202 case MONO_TYPE_U8:
6203 ins->dreg = mono_alloc_lreg (cfg);
6204 ins->type = STACK_I8;
6205 break;
6206 case MONO_TYPE_I:
6207 case MONO_TYPE_U:
6208 ins->dreg = mono_alloc_ireg (cfg);
6209 #if SIZEOF_REGISTER == 8
6210 ins->type = STACK_I8;
6211 #else
6212 ins->type = STACK_I4;
6213 #endif
6214 break;
6215 case MONO_TYPE_R4:
6216 case MONO_TYPE_R8:
6217 ins->dreg = mono_alloc_freg (cfg);
6218 ins->type = STACK_R8;
6219 break;
6220 default:
6221 g_assert (mini_type_is_reference (fsig->params [0]));
6222 ins->dreg = mono_alloc_ireg_ref (cfg);
6223 ins->type = STACK_OBJ;
6224 break;
6227 if (opcode == OP_LOADI8_MEMBASE)
6228 ins = mono_decompose_opcode (cfg, ins);
6230 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6232 return ins;
6234 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6235 guint32 opcode = 0;
6236 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6238 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6239 opcode = OP_STOREI1_MEMBASE_REG;
6240 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6241 opcode = OP_STOREI2_MEMBASE_REG;
6242 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6243 opcode = OP_STOREI4_MEMBASE_REG;
6244 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6245 opcode = OP_STOREI8_MEMBASE_REG;
6246 else if (fsig->params [0]->type == MONO_TYPE_R4)
6247 opcode = OP_STORER4_MEMBASE_REG;
6248 else if (fsig->params [0]->type == MONO_TYPE_R8)
6249 opcode = OP_STORER8_MEMBASE_REG;
6250 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6251 opcode = OP_STORE_MEMBASE_REG;
6253 if (opcode) {
6254 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6256 MONO_INST_NEW (cfg, ins, opcode);
6257 ins->sreg1 = args [1]->dreg;
6258 ins->inst_destbasereg = args [0]->dreg;
6259 ins->inst_offset = 0;
6260 MONO_ADD_INS (cfg->cbb, ins);
6262 if (opcode == OP_STOREI8_MEMBASE_REG)
6263 ins = mono_decompose_opcode (cfg, ins);
6265 return ins;
6268 } else if (cmethod->klass->image == mono_defaults.corlib &&
6269 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6270 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6271 ins = NULL;
6273 #if SIZEOF_REGISTER == 8
6274 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6275 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6276 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6277 ins->dreg = mono_alloc_preg (cfg);
6278 ins->sreg1 = args [0]->dreg;
6279 ins->type = STACK_I8;
6280 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6281 MONO_ADD_INS (cfg->cbb, ins);
6282 } else {
6283 MonoInst *load_ins;
6285 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6287 /* 64 bit reads are already atomic */
6288 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6289 load_ins->dreg = mono_alloc_preg (cfg);
6290 load_ins->inst_basereg = args [0]->dreg;
6291 load_ins->inst_offset = 0;
6292 load_ins->type = STACK_I8;
6293 MONO_ADD_INS (cfg->cbb, load_ins);
6295 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6297 ins = load_ins;
6300 #endif
6302 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6303 MonoInst *ins_iconst;
6304 guint32 opcode = 0;
6306 if (fsig->params [0]->type == MONO_TYPE_I4) {
6307 opcode = OP_ATOMIC_ADD_I4;
6308 cfg->has_atomic_add_i4 = TRUE;
6310 #if SIZEOF_REGISTER == 8
6311 else if (fsig->params [0]->type == MONO_TYPE_I8)
6312 opcode = OP_ATOMIC_ADD_I8;
6313 #endif
6314 if (opcode) {
6315 if (!mono_arch_opcode_supported (opcode))
6316 return NULL;
6317 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6318 ins_iconst->inst_c0 = 1;
6319 ins_iconst->dreg = mono_alloc_ireg (cfg);
6320 MONO_ADD_INS (cfg->cbb, ins_iconst);
6322 MONO_INST_NEW (cfg, ins, opcode);
6323 ins->dreg = mono_alloc_ireg (cfg);
6324 ins->inst_basereg = args [0]->dreg;
6325 ins->inst_offset = 0;
6326 ins->sreg2 = ins_iconst->dreg;
6327 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6328 MONO_ADD_INS (cfg->cbb, ins);
6330 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6331 MonoInst *ins_iconst;
6332 guint32 opcode = 0;
6334 if (fsig->params [0]->type == MONO_TYPE_I4) {
6335 opcode = OP_ATOMIC_ADD_I4;
6336 cfg->has_atomic_add_i4 = TRUE;
6338 #if SIZEOF_REGISTER == 8
6339 else if (fsig->params [0]->type == MONO_TYPE_I8)
6340 opcode = OP_ATOMIC_ADD_I8;
6341 #endif
6342 if (opcode) {
6343 if (!mono_arch_opcode_supported (opcode))
6344 return NULL;
6345 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6346 ins_iconst->inst_c0 = -1;
6347 ins_iconst->dreg = mono_alloc_ireg (cfg);
6348 MONO_ADD_INS (cfg->cbb, ins_iconst);
6350 MONO_INST_NEW (cfg, ins, opcode);
6351 ins->dreg = mono_alloc_ireg (cfg);
6352 ins->inst_basereg = args [0]->dreg;
6353 ins->inst_offset = 0;
6354 ins->sreg2 = ins_iconst->dreg;
6355 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6356 MONO_ADD_INS (cfg->cbb, ins);
6358 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6359 guint32 opcode = 0;
6361 if (fsig->params [0]->type == MONO_TYPE_I4) {
6362 opcode = OP_ATOMIC_ADD_I4;
6363 cfg->has_atomic_add_i4 = TRUE;
6365 #if SIZEOF_REGISTER == 8
6366 else if (fsig->params [0]->type == MONO_TYPE_I8)
6367 opcode = OP_ATOMIC_ADD_I8;
6368 #endif
6369 if (opcode) {
6370 if (!mono_arch_opcode_supported (opcode))
6371 return NULL;
6372 MONO_INST_NEW (cfg, ins, opcode);
6373 ins->dreg = mono_alloc_ireg (cfg);
6374 ins->inst_basereg = args [0]->dreg;
6375 ins->inst_offset = 0;
6376 ins->sreg2 = args [1]->dreg;
6377 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6378 MONO_ADD_INS (cfg->cbb, ins);
6381 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6382 MonoInst *f2i = NULL, *i2f;
6383 guint32 opcode, f2i_opcode, i2f_opcode;
6384 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6385 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6387 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6388 fsig->params [0]->type == MONO_TYPE_R4) {
6389 opcode = OP_ATOMIC_EXCHANGE_I4;
6390 f2i_opcode = OP_MOVE_F_TO_I4;
6391 i2f_opcode = OP_MOVE_I4_TO_F;
6392 cfg->has_atomic_exchange_i4 = TRUE;
6394 #if SIZEOF_REGISTER == 8
6395 else if (is_ref ||
6396 fsig->params [0]->type == MONO_TYPE_I8 ||
6397 fsig->params [0]->type == MONO_TYPE_R8 ||
6398 fsig->params [0]->type == MONO_TYPE_I) {
6399 opcode = OP_ATOMIC_EXCHANGE_I8;
6400 f2i_opcode = OP_MOVE_F_TO_I8;
6401 i2f_opcode = OP_MOVE_I8_TO_F;
6403 #else
6404 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6405 opcode = OP_ATOMIC_EXCHANGE_I4;
6406 cfg->has_atomic_exchange_i4 = TRUE;
6408 #endif
6409 else
6410 return NULL;
6412 if (!mono_arch_opcode_supported (opcode))
6413 return NULL;
6415 if (is_float) {
6416 /* TODO: Decompose these opcodes instead of bailing here. */
6417 if (COMPILE_SOFT_FLOAT (cfg))
6418 return NULL;
6420 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6421 f2i->dreg = mono_alloc_ireg (cfg);
6422 f2i->sreg1 = args [1]->dreg;
6423 if (f2i_opcode == OP_MOVE_F_TO_I4)
6424 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6425 MONO_ADD_INS (cfg->cbb, f2i);
6428 MONO_INST_NEW (cfg, ins, opcode);
6429 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6430 ins->inst_basereg = args [0]->dreg;
6431 ins->inst_offset = 0;
6432 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6433 MONO_ADD_INS (cfg->cbb, ins);
6435 switch (fsig->params [0]->type) {
6436 case MONO_TYPE_I4:
6437 ins->type = STACK_I4;
6438 break;
6439 case MONO_TYPE_I8:
6440 ins->type = STACK_I8;
6441 break;
6442 case MONO_TYPE_I:
6443 #if SIZEOF_REGISTER == 8
6444 ins->type = STACK_I8;
6445 #else
6446 ins->type = STACK_I4;
6447 #endif
6448 break;
6449 case MONO_TYPE_R4:
6450 case MONO_TYPE_R8:
6451 ins->type = STACK_R8;
6452 break;
6453 default:
6454 g_assert (mini_type_is_reference (fsig->params [0]));
6455 ins->type = STACK_OBJ;
6456 break;
6459 if (is_float) {
6460 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6461 i2f->dreg = mono_alloc_freg (cfg);
6462 i2f->sreg1 = ins->dreg;
6463 i2f->type = STACK_R8;
6464 if (i2f_opcode == OP_MOVE_I4_TO_F)
6465 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6466 MONO_ADD_INS (cfg->cbb, i2f);
6468 ins = i2f;
6471 if (cfg->gen_write_barriers && is_ref)
6472 emit_write_barrier (cfg, args [0], args [1]);
6474 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6475 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6476 guint32 opcode, f2i_opcode, i2f_opcode;
6477 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6478 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6480 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6481 fsig->params [1]->type == MONO_TYPE_R4) {
6482 opcode = OP_ATOMIC_CAS_I4;
6483 f2i_opcode = OP_MOVE_F_TO_I4;
6484 i2f_opcode = OP_MOVE_I4_TO_F;
6485 cfg->has_atomic_cas_i4 = TRUE;
6487 #if SIZEOF_REGISTER == 8
6488 else if (is_ref ||
6489 fsig->params [1]->type == MONO_TYPE_I8 ||
6490 fsig->params [1]->type == MONO_TYPE_R8 ||
6491 fsig->params [1]->type == MONO_TYPE_I) {
6492 opcode = OP_ATOMIC_CAS_I8;
6493 f2i_opcode = OP_MOVE_F_TO_I8;
6494 i2f_opcode = OP_MOVE_I8_TO_F;
6496 #else
6497 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6498 opcode = OP_ATOMIC_CAS_I4;
6499 cfg->has_atomic_cas_i4 = TRUE;
6501 #endif
6502 else
6503 return NULL;
6505 if (!mono_arch_opcode_supported (opcode))
6506 return NULL;
6508 if (is_float) {
6509 /* TODO: Decompose these opcodes instead of bailing here. */
6510 if (COMPILE_SOFT_FLOAT (cfg))
6511 return NULL;
6513 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6514 f2i_new->dreg = mono_alloc_ireg (cfg);
6515 f2i_new->sreg1 = args [1]->dreg;
6516 if (f2i_opcode == OP_MOVE_F_TO_I4)
6517 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6518 MONO_ADD_INS (cfg->cbb, f2i_new);
6520 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6521 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6522 f2i_cmp->sreg1 = args [2]->dreg;
6523 if (f2i_opcode == OP_MOVE_F_TO_I4)
6524 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6525 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6528 MONO_INST_NEW (cfg, ins, opcode);
6529 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6530 ins->sreg1 = args [0]->dreg;
6531 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6532 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6533 MONO_ADD_INS (cfg->cbb, ins);
6535 switch (fsig->params [1]->type) {
6536 case MONO_TYPE_I4:
6537 ins->type = STACK_I4;
6538 break;
6539 case MONO_TYPE_I8:
6540 ins->type = STACK_I8;
6541 break;
6542 case MONO_TYPE_I:
6543 #if SIZEOF_REGISTER == 8
6544 ins->type = STACK_I8;
6545 #else
6546 ins->type = STACK_I4;
6547 #endif
6548 break;
6549 case MONO_TYPE_R4:
6550 ins->type = cfg->r4_stack_type;
6551 break;
6552 case MONO_TYPE_R8:
6553 ins->type = STACK_R8;
6554 break;
6555 default:
6556 g_assert (mini_type_is_reference (fsig->params [1]));
6557 ins->type = STACK_OBJ;
6558 break;
6561 if (is_float) {
6562 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6563 i2f->dreg = mono_alloc_freg (cfg);
6564 i2f->sreg1 = ins->dreg;
6565 i2f->type = STACK_R8;
6566 if (i2f_opcode == OP_MOVE_I4_TO_F)
6567 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6568 MONO_ADD_INS (cfg->cbb, i2f);
6570 ins = i2f;
6573 if (cfg->gen_write_barriers && is_ref)
6574 emit_write_barrier (cfg, args [0], args [1]);
6576 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6577 fsig->params [1]->type == MONO_TYPE_I4) {
6578 MonoInst *cmp, *ceq;
6580 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6581 return NULL;
6583 /* int32 r = CAS (location, value, comparand); */
6584 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6585 ins->dreg = alloc_ireg (cfg);
6586 ins->sreg1 = args [0]->dreg;
6587 ins->sreg2 = args [1]->dreg;
6588 ins->sreg3 = args [2]->dreg;
6589 ins->type = STACK_I4;
6590 MONO_ADD_INS (cfg->cbb, ins);
6592 /* bool result = r == comparand; */
6593 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6594 cmp->sreg1 = ins->dreg;
6595 cmp->sreg2 = args [2]->dreg;
6596 cmp->type = STACK_I4;
6597 MONO_ADD_INS (cfg->cbb, cmp);
6599 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6600 ceq->dreg = alloc_ireg (cfg);
6601 ceq->type = STACK_I4;
6602 MONO_ADD_INS (cfg->cbb, ceq);
6604 /* *success = result; */
6605 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6607 cfg->has_atomic_cas_i4 = TRUE;
6609 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6610 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6612 if (ins)
6613 return ins;
6614 } else if (cmethod->klass->image == mono_defaults.corlib &&
6615 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6616 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6617 ins = NULL;
6619 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6620 guint32 opcode = 0;
6621 MonoType *t = fsig->params [0];
6622 gboolean is_ref;
6623 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6625 g_assert (t->byref);
6626 /* t is a byref type, so the reference check is more complicated */
6627 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6628 if (t->type == MONO_TYPE_I1)
6629 opcode = OP_ATOMIC_LOAD_I1;
6630 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6631 opcode = OP_ATOMIC_LOAD_U1;
6632 else if (t->type == MONO_TYPE_I2)
6633 opcode = OP_ATOMIC_LOAD_I2;
6634 else if (t->type == MONO_TYPE_U2)
6635 opcode = OP_ATOMIC_LOAD_U2;
6636 else if (t->type == MONO_TYPE_I4)
6637 opcode = OP_ATOMIC_LOAD_I4;
6638 else if (t->type == MONO_TYPE_U4)
6639 opcode = OP_ATOMIC_LOAD_U4;
6640 else if (t->type == MONO_TYPE_R4)
6641 opcode = OP_ATOMIC_LOAD_R4;
6642 else if (t->type == MONO_TYPE_R8)
6643 opcode = OP_ATOMIC_LOAD_R8;
6644 #if SIZEOF_REGISTER == 8
6645 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6646 opcode = OP_ATOMIC_LOAD_I8;
6647 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6648 opcode = OP_ATOMIC_LOAD_U8;
6649 #else
6650 else if (t->type == MONO_TYPE_I)
6651 opcode = OP_ATOMIC_LOAD_I4;
6652 else if (is_ref || t->type == MONO_TYPE_U)
6653 opcode = OP_ATOMIC_LOAD_U4;
6654 #endif
6656 if (opcode) {
6657 if (!mono_arch_opcode_supported (opcode))
6658 return NULL;
6660 MONO_INST_NEW (cfg, ins, opcode);
6661 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6662 ins->sreg1 = args [0]->dreg;
6663 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6664 MONO_ADD_INS (cfg->cbb, ins);
6666 switch (t->type) {
6667 case MONO_TYPE_BOOLEAN:
6668 case MONO_TYPE_I1:
6669 case MONO_TYPE_U1:
6670 case MONO_TYPE_I2:
6671 case MONO_TYPE_U2:
6672 case MONO_TYPE_I4:
6673 case MONO_TYPE_U4:
6674 ins->type = STACK_I4;
6675 break;
6676 case MONO_TYPE_I8:
6677 case MONO_TYPE_U8:
6678 ins->type = STACK_I8;
6679 break;
6680 case MONO_TYPE_I:
6681 case MONO_TYPE_U:
6682 #if SIZEOF_REGISTER == 8
6683 ins->type = STACK_I8;
6684 #else
6685 ins->type = STACK_I4;
6686 #endif
6687 break;
6688 case MONO_TYPE_R4:
6689 ins->type = cfg->r4_stack_type;
6690 break;
6691 case MONO_TYPE_R8:
6692 ins->type = STACK_R8;
6693 break;
6694 default:
6695 g_assert (is_ref);
6696 ins->type = STACK_OBJ;
6697 break;
6702 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6703 guint32 opcode = 0;
6704 MonoType *t = fsig->params [0];
6705 gboolean is_ref;
6707 g_assert (t->byref);
6708 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6709 if (t->type == MONO_TYPE_I1)
6710 opcode = OP_ATOMIC_STORE_I1;
6711 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6712 opcode = OP_ATOMIC_STORE_U1;
6713 else if (t->type == MONO_TYPE_I2)
6714 opcode = OP_ATOMIC_STORE_I2;
6715 else if (t->type == MONO_TYPE_U2)
6716 opcode = OP_ATOMIC_STORE_U2;
6717 else if (t->type == MONO_TYPE_I4)
6718 opcode = OP_ATOMIC_STORE_I4;
6719 else if (t->type == MONO_TYPE_U4)
6720 opcode = OP_ATOMIC_STORE_U4;
6721 else if (t->type == MONO_TYPE_R4)
6722 opcode = OP_ATOMIC_STORE_R4;
6723 else if (t->type == MONO_TYPE_R8)
6724 opcode = OP_ATOMIC_STORE_R8;
6725 #if SIZEOF_REGISTER == 8
6726 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6727 opcode = OP_ATOMIC_STORE_I8;
6728 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6729 opcode = OP_ATOMIC_STORE_U8;
6730 #else
6731 else if (t->type == MONO_TYPE_I)
6732 opcode = OP_ATOMIC_STORE_I4;
6733 else if (is_ref || t->type == MONO_TYPE_U)
6734 opcode = OP_ATOMIC_STORE_U4;
6735 #endif
6737 if (opcode) {
6738 if (!mono_arch_opcode_supported (opcode))
6739 return NULL;
6741 MONO_INST_NEW (cfg, ins, opcode);
6742 ins->dreg = args [0]->dreg;
6743 ins->sreg1 = args [1]->dreg;
6744 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6745 MONO_ADD_INS (cfg->cbb, ins);
6747 if (cfg->gen_write_barriers && is_ref)
6748 emit_write_barrier (cfg, args [0], args [1]);
6752 if (ins)
6753 return ins;
6754 } else if (cmethod->klass->image == mono_defaults.corlib &&
6755 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6756 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6757 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6758 if (should_insert_brekpoint (cfg->method)) {
6759 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6760 } else {
6761 MONO_INST_NEW (cfg, ins, OP_NOP);
6762 MONO_ADD_INS (cfg->cbb, ins);
6764 return ins;
6766 } else if (cmethod->klass->image == mono_defaults.corlib &&
6767 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6768 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6769 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6770 #ifdef TARGET_WIN32
6771 EMIT_NEW_ICONST (cfg, ins, 1);
6772 #else
6773 EMIT_NEW_ICONST (cfg, ins, 0);
6774 #endif
6776 } else if (cmethod->klass->image == mono_defaults.corlib &&
6777 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6778 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6779 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6780 /* No stack walks are currently available, so implement this as an intrinsic */
6781 MonoInst *assembly_ins;
6783 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6784 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6785 return ins;
6787 } else if (cmethod->klass->image == mono_defaults.corlib &&
6788 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6789 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6790 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6791 /* No stack walks are currently available, so implement this as an intrinsic */
6792 MonoInst *method_ins;
6793 MonoMethod *declaring = cfg->method;
6795 /* This returns the declaring generic method */
6796 if (declaring->is_inflated)
6797 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6798 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6799 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6800 cfg->no_inline = TRUE;
6801 if (cfg->method != cfg->current_method)
6802 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6803 return ins;
6805 } else if (cmethod->klass == mono_defaults.math_class) {
6807 * There is general branchless code for Min/Max, but it does not work for
6808 * all inputs:
6809 * http://everything2.com/?node_id=1051618
6811 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6812 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6813 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6814 !strcmp (cmethod->klass->name, "Selector")) ||
6815 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6816 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6817 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6818 !strcmp (cmethod->klass->name, "Selector"))
6820 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6821 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6822 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6823 cfg->compile_aot) {
6824 MonoInst *pi;
6825 MonoJumpInfoToken *ji;
6826 char *s;
6828 if (args [0]->opcode == OP_GOT_ENTRY) {
6829 pi = (MonoInst *)args [0]->inst_p1;
6830 g_assert (pi->opcode == OP_PATCH_INFO);
6831 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6832 ji = (MonoJumpInfoToken *)pi->inst_p0;
6833 } else {
6834 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6835 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6838 NULLIFY_INS (args [0]);
6840 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6841 return_val_if_nok (&cfg->error, NULL);
6843 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6844 ins->dreg = mono_alloc_ireg (cfg);
6845 // FIXME: Leaks
6846 ins->inst_p0 = s;
6847 MONO_ADD_INS (cfg->cbb, ins);
6848 return ins;
6852 #ifdef MONO_ARCH_SIMD_INTRINSICS
6853 if (cfg->opt & MONO_OPT_SIMD) {
6854 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6855 if (ins)
6856 return ins;
6858 #endif
6860 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6861 if (ins)
6862 return ins;
6864 if (COMPILE_LLVM (cfg)) {
6865 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6866 if (ins)
6867 return ins;
6870 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Redirect certain well-known method calls to specialized implementations.
 * Currently only handles String.InternalAllocateStr, which is routed to the
 * GC's managed string allocator when profiling of allocations is off and the
 * code is not compiled with MONO_OPT_SHARED.
 * Returns the call instruction on redirection, or NULL to emit a normal call.
 * This entry point could be used later for arbitrary method redirection.
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
{
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
			MonoMethod *managed_alloc = NULL;

			g_assert (vtable); /* Should not fail since it is System.String */
#ifndef MONO_CROSS_COMPILE
			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
#endif
			/* No managed allocator available for the current GC: fall back to a normal call */
			if (!managed_alloc)
				return NULL;
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
		}
	}
	return NULL;
}
/*
 * mono_save_args:
 *
 *   Create a local variable for every argument of the method being inlined and
 * emit stores of the caller's stack values SP into them, setting up cfg->args
 * for the inlined body. For instance methods, the type of the 'this' argument
 * is taken from the stack value itself.
 */
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
	}
}
/*
 * Debugging aids for the inliner: when enabled, inlining can be restricted to
 * methods whose names match the MONO_INLINE_CALLED_METHOD_NAME_LIMIT /
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variables (see the
 * check_inline_*_method_name_limit () functions below).
 */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging helper: returns TRUE if CALLED_METHOD may be inlined according
 * to the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable, i.e. if
 * the variable is unset/empty or the method's full name starts with its value.
 * The environment variable is read once and cached for the process lifetime.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static const char *limit = NULL;
	char *full_name;
	int prefix_cmp;

	/* Lazily cache the configured prefix; "" means no restriction */
	if (limit == NULL) {
		const char *configured = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = configured != NULL ? configured : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (called_method, TRUE);
	prefix_cmp = strncmp (full_name, limit, strlen (limit));
	g_free (full_name);

	//return (prefix_cmp <= 0);
	return (prefix_cmp == 0);
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging helper: returns TRUE if inlining may happen inside
 * CALLER_METHOD according to the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable, i.e. if the variable is unset/empty or the caller's
 * full name starts with its value. The variable is read once and cached.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static const char *limit = NULL;
	char *full_name;
	int prefix_cmp;

	/* Lazily cache the configured prefix; "" means no restriction */
	if (limit == NULL) {
		const char *configured = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = configured != NULL ? configured : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	prefix_cmp = strncmp (full_name, limit, strlen (limit));
	g_free (full_name);

	//return (prefix_cmp <= 0);
	return (prefix_cmp == 0);
}
#endif
/*
 * emit_init_rvar:
 *
 *   Emit an instruction initializing the vreg DREG to the zero value
 * appropriate for RTYPE: NULL for byrefs and reference types, integer/fp
 * zero constants for primitive types, and VZERO for value types.
 */
static void
emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
	/* The fp constants are referenced by address, so they need static storage */
	static double r8_0 = 0.0;
	static float r4_0 = 0.0;
	MonoInst *ins;
	int t;

	rtype = mini_get_underlying_type (rtype);
	t = rtype->type;

	if (rtype->byref) {
		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		/* R4 stays in single precision only when the backend supports r4fp */
		MONO_INST_NEW (cfg, ins, OP_R4CONST);
		ins->type = STACK_R4;
		ins->inst_p0 = (void*)&r4_0;
		ins->dreg = dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_INST_NEW (cfg, ins, OP_R8CONST);
		ins->type = STACK_R8;
		ins->inst_p0 = (void*)&r8_0;
		ins->dreg = dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
		/* Generic type parameters known to be value types also get VZERO */
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	} else {
		/* Everything else is a reference type */
		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
	}
}
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* instructions instead of real
 * initializations. These keep the IR valid (every vreg has a def) without
 * generating actual code; types without a dummy opcode fall back to a real
 * initialization via emit_init_rvar ().
 */
static void
emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
{
	int t;

	rtype = mini_get_underlying_type (rtype);
	t = rtype->type;

	if (rtype->byref) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	} else {
		/* No dummy opcode for this case, emit a real init */
		emit_init_rvar (cfg, dreg, rtype);
	}
}
/*
 * emit_init_local:
 *
 *   Initialize the IL local variable LOCAL of type TYPE.
 * If INIT is FALSE, emit dummy initialization statements to keep the IR valid.
 */
static void
emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
{
	MonoInst *var = cfg->locals [local];
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/* Soft-float locals live in integer vregs: init a fresh vreg and store it */
		MonoInst *store;
		int reg = alloc_dreg (cfg, (MonoStackType)var->type);
		emit_init_rvar (cfg, reg, type);
		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
	} else {
		if (init)
			emit_init_rvar (cfg, var->dreg, type);
		else
			emit_dummy_init_rvar (cfg, var->dreg, type);
	}
}
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current method at IP, pushing its return value
 * (if any) onto SP. The caller's IR state (locals, args, bblock maps, etc.)
 * is saved, the callee is compiled via mono_method_to_ir () into fresh start
 * and end bblocks, then the caller's state is restored. If the callee's cost
 * is acceptable (or INLINE_ALWAYS is set), its bblocks are linked/merged into
 * the current IR; otherwise the newly created bblocks are discarded.
 *
 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, gboolean inline_always)
{
	MonoError error;
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	const unsigned char *prev_ip;
	unsigned char *prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (!fsig)
		fsig = mono_method_signature (cmethod);

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		cfg->stat_inlineable_methods++;
		cmethod->inline_info = 1;
	}

	/* allocate local variables */
	cheader = mono_method_get_header_checked (cmethod, &error);
	if (!cheader) {
		if (inline_always) {
			/* This inline was mandatory, so surface the error on the cfg */
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
			mono_error_move (&cfg->error, &error);
		} else {
			mono_error_cleanup (&error);
		}
		return 0;
	}

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);
		return 0;
	}

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	prev_locals = cfg->locals;
	cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Save the caller's IR state before recursing into the callee */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_ip = cfg->ip;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;
	prev_disable_inline = cfg->disable_inline;

	if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
		virtual_ = TRUE;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);

	ret_var_set = cfg->ret_var_set;

	/* Restore the caller's IR state */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->ip = prev_ip;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->disable_inline = prev_disable_inline;
	cfg->inline_depth --;

	if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		cfg->stat_inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];

			if (prev->next_bb == ebblock) {
				mono_merge_basic_blocks (cfg, prev, ebblock);
				cfg->cbb = prev;
				if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
					mono_merge_basic_blocks (cfg, prev_cbb, prev);
					cfg->cbb = prev_cbb;
				}
			} else {
				/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
				cfg->cbb = ebblock;
			}
		} else {
			/*
			 * It's possible that the rvar is set in some prev bblock, but not in others.
			 * (#1835).
			 */
			if (rvar) {
				MonoBasicBlock *bb;

				for (i = 0; i < ebblock->in_count; ++i) {
					bb = ebblock->in_bb [i];

					if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
						cfg->cbb = bb;

						emit_init_rvar (cfg, rvar->dreg, fsig->ret);
					}
				}
			}

			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set)
				emit_init_rvar (cfg, rvar->dreg, fsig->ret);

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
		cfg->exception_type = MONO_EXCEPTION_NONE;

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}
7288 * Some of these comments may well be out-of-date.
7289 * Design decisions: we do a single pass over the IL code (and we do bblock
7290 * splitting/merging in the few cases when it's required: a back jump to an IL
7291 * address that was not already seen as bblock starting point).
7292 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7293 * Complex operations are decomposed in simpler ones right away. We need to let the
7294 * arch-specific code peek and poke inside this process somehow (except when the
7295 * optimizations can take advantage of the full semantic info of coarse opcodes).
7296 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7297 * MonoInst->opcode initially is the IL opcode or some simplification of that
7298 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7299 * opcode with value bigger than OP_LAST.
7300 * At this point the IR can be handed over to an interpreter, a dumb code generator
7301 * or to the optimizing code generator that will translate it to SSA form.
7303 * Profiling directed optimizations.
7304 * We may compile by default with few or no optimizations and instrument the code
7305 * or the user may indicate what methods to optimize the most either in a config file
7306 * or through repeated runs where the compiler applies offline the optimizations to
7307 * each method and then decides if it was worth it.
/*
 * Verification helpers used by the main IL-to-IR loop. Each macro validates a
 * condition and, on failure, jumps to the UNVERIFIED / TYPE_LOAD_ERROR
 * handling paths (defined near the main loop).
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
7322 static gboolean
7323 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7325 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7327 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create a basic block (via GET_BBLOCK)
 * at every branch target and at every instruction following a branch. Blocks
 * containing a 'throw' are marked out_of_line. Returns 0 on success; on an
 * invalid opcode returns 1 and stores the failing ip in *POS.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance ip past the operand, creating bblocks at branch targets */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1-byte signed branch displacement */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 4-byte signed branch displacement */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* Switch targets are relative to the instruction following the jump table */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
exception_exit:
	*pos = ip;
	return 1;
}
/*
 * mini_get_method_allow_open:
 *
 *   Resolve the method referenced by TOKEN in the context of M. For wrapper
 * methods, the token indexes the wrapper's data; otherwise it is looked up in
 * the method's image. Open constructed methods are allowed (contrast with
 * mini_get_method ()). Sets ERROR on failure.
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
{
	MonoMethod *method;

	mono_error_init (error);

	if (m->wrapper_type != MONO_WRAPPER_NONE) {
		method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
		if (context) {
			method = mono_class_inflate_generic_method_checked (method, context, error);
		}
	} else {
		method = mono_get_method_checked (m->klass->image, token, klass, context, error);
	}

	return method;
}
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but rejects methods on open
 * constructed types unless the compile is gshared. Errors are recorded in
 * cfg->error when CFG is non-NULL; otherwise they are swallowed (FIXME).
 * Returns NULL on failure.
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	MonoError error;
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);

	if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
		mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
		method = NULL;
	}

	if (!method && !cfg)
		mono_error_cleanup (&error); /* FIXME don't swallow the error */

	return method;
}
/*
 * mini_get_class:
 *
 *   Resolve the class referenced by TOKEN in the context of METHOD, inflating
 * with CONTEXT if given. For wrapper methods, the token indexes the wrapper's
 * data. The class is initialized before returning. Returns NULL on failure
 * (errors are currently swallowed — FIXME).
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
{
	MonoError error;
	MonoClass *klass;

	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
		if (context) {
			klass = mono_class_inflate_generic_class_checked (klass, context, &error);
			mono_error_cleanup (&error); /* FIXME don't swallow the error */
		}
	} else {
		klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
		mono_error_cleanup (&error); /* FIXME don't swallow the error */
	}
	if (klass)
		mono_class_init (klass);
	return klass;
}
/*
 * mini_get_signature:
 *
 *   Resolve the standalone signature referenced by TOKEN in the context of
 * METHOD, inflating with CONTEXT if given. For wrapper methods, the token
 * indexes the wrapper's data. Sets ERROR and returns NULL on parse failure.
 */
static inline MonoMethodSignature*
mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
{
	MonoMethodSignature *fsig;

	mono_error_init (error);
	if (method->wrapper_type != MONO_WRAPPER_NONE) {
		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
	} else {
		fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
		return_val_if_nok (error, NULL);
	}
	if (context) {
		/* NOTE(review): inflation failure is reported via ERROR; callers are expected to check */
		fsig = mono_inflate_generic_signature(fsig, context, error);
	}
	return fsig;
}
/*
 * throw_exception:
 *
 *   Return the cached SecurityManager.ThrowException method, looking it up on
 * first use.
 * NOTE(review): the lazy init of the static is not synchronized; presumably
 * callers run under the JIT's locking or a benign race is acceptable — confirm.
 */
static MonoMethod*
throw_exception (void)
{
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
	}
	g_assert (method);
	return method;
}
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX), i.e. code which
 * throws the (pre-created) exception object EX at runtime.
 */
static void
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
{
	MonoMethod *thrower = throw_exception ();
	MonoInst *args [1];

	EMIT_NEW_PCONST (cfg, args [0], ex);
	mono_emit_method_call (cfg, thrower, args, NULL);
}
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map a wrapper back to the method it wraps, since custom attributes are
 * only available on the original method. Non-wrappers are returned as-is;
 * native-to-managed wrappers return NULL (no original to check).
 */
static MonoMethod*
get_original_method (MonoMethod *method)
{
	if (method->wrapper_type == MONO_WRAPPER_NONE)
		return method;

	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
		return NULL;

	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
}
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER is not allowed to access FIELD, emit
 * code throwing the corresponding security exception at runtime.
 */
static void
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
{
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
	if (ex)
		emit_throw_exception (cfg, ex);
}
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER is not allowed to call CALLEE, emit
 * code throwing the corresponding security exception at runtime.
 */
static void
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
	if (ex)
		emit_throw_exception (cfg, ex);
}
/*
 * initialize_array_data:
 *
 *   Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * On match, *OUT_SIZE and *OUT_FIELD_TOKEN are filled in; for AOT compilation
 * the RVA is returned (as a fake pointer) instead of the mapped data.
 * Returns NULL if the pattern does not match or the type is unsupported.
 * NOTE(review): reads up to ip [10] without an explicit end-of-method bounds
 * check — presumably the caller guarantees enough remaining IL; confirm.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		MonoError error;
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
		int dummy_align;

		if (!field) {
			mono_error_cleanup (&error); /* FIXME don't swallow the error */
			return NULL;
		}

		*out_field_token = field_token;

		/* Verify the call target really is RuntimeHelpers.InitializeArray */
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The field's data must be big enough to fill the whole array */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!image_is_dynamic (method->klass->image)) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return (const char *)GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
7642 static void
7643 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7645 MonoError error;
7646 char *method_fname = mono_method_full_name (method, TRUE);
7647 char *method_code;
7648 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
7650 if (!header) {
7651 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
7652 mono_error_cleanup (&error);
7653 } else if (header->code_size == 0)
7654 method_code = g_strdup ("method body is empty.");
7655 else
7656 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7657 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7658 g_free (method_fname);
7659 g_free (method_code);
7660 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7663 static void
7664 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7666 MonoInst *ins;
7667 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7668 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7669 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7670 /* Optimize reg-reg moves away */
7672 * Can't optimize other opcodes, since sp[0] might point to
7673 * the last ins of a decomposed opcode.
7675 sp [0]->dreg = (cfg)->locals [n]->dreg;
7676 } else {
7677 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;
	MonoType *type;

	/* SIZE selects between the short (ldloca.s: 1-byte operand) and the
	 * fat (ldloca: 2-byte operand) encodings; IP is left pointing at the
	 * instruction following the ldloca. */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	/* Recognize the "ldloca <n>; initobj <type>" pattern and replace it with a
	 * direct initialization of the local, consuming both instructions.
	 * The next instruction must lie in the same basic block for this to be safe. */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass); /* jumps to exception_exit on type-load failure */
		type = mini_get_underlying_type (&klass->byval_arg);
		emit_init_local (cfg, local, type, TRUE);
		/* Returns the IP past the consumed initobj; NULL means "not optimized". */
		return ip + 6;
	}
 exception_exit:
	return NULL;
}
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call to CMETHOD for llvm-only mode. Dispatches
 * between four strategies: plain virtual call, simple interface call,
 * generic-virtual/variant-interface call, and a gsharedvt fallback.
 * Returns the MonoInst representing the call.
 */
static MonoInst*
emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
{
	MonoInst *icall_args [16];
	MonoInst *call_target, *ins, *vtable_ins;
	int arg_reg, this_reg, vtable_reg;
	gboolean is_iface = mono_class_is_interface (cmethod->klass);
	gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
	gboolean variant_iface = FALSE;
	guint32 slot;
	int offset;

	/*
	 * In llvm-only mode, vtables contain function descriptors instead of
	 * method addresses/trampolines.
	 */
	MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);

	/* Interface methods are addressed through an IMT slot, others by vtable index */
	if (is_iface)
		slot = mono_method_get_imt_slot (cmethod);
	else
		slot = mono_method_get_vtable_index (cmethod);

	this_reg = sp [0]->dreg;

	if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
		variant_iface = TRUE;

	if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
		/*
		 * The simplest case, a normal virtual call.
		 */
		int slot_reg = alloc_preg (cfg);
		int addr_reg = alloc_preg (cfg);
		int arg_reg = alloc_preg (cfg);
		MonoBasicBlock *non_null_bb;

		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);

		/* Load the vtable slot, which contains a function descriptor. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);

		NEW_BBLOCK (cfg, non_null_bb);

		/* Slots are lazily initialized; a NULL slot takes the icall slow path below */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
		cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);

		/* Slow path */
		// FIXME: Make the wrapper use the preserveall cconv
		// FIXME: Use one icall per slot for small slot numbers ?
		icall_args [0] = vtable_ins;
		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
		/* Make the icall return the vtable slot value to save some code space */
		ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
		ins->dreg = slot_reg;
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);

		/* Fastpath */
		MONO_START_BB (cfg, non_null_bb);
		/* Load the address + arg from the vtable slot */
		EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);

		return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
	}

	if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
		/*
		 * A simple interface call
		 *
		 * We make a call through an imt slot to obtain the function descriptor we need to call.
		 * The imt slot contains a function descriptor for a runtime function + arg.
		 */
		int slot_reg = alloc_preg (cfg);
		int addr_reg = alloc_preg (cfg);
		int arg_reg = alloc_preg (cfg);
		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;

		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		/* IMT slots live at negative offsets before the vtable proper */
		offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;

		/*
		 * The slot is already initialized when the vtable is created so there is no need
		 * to check it here.
		 */

		/* Load the imt slot, which contains a function descriptor. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);

		/* Load the address + arg of the imt thunk from the imt slot */
		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
		/*
		 * IMT thunks in llvm-only mode are C functions which take an info argument
		 * plus the imt method and return the ftndesc to call.
		 */
		icall_args [0] = thunk_arg_ins;
		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD);
		ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);

		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
	}

	if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
		/*
		 * This is similar to the interface case, the vtable slot points to an imt thunk which is
		 * dynamically extended as more instantiations are discovered.
		 * This handles generic virtual methods both on classes and interfaces.
		 */
		int slot_reg = alloc_preg (cfg);
		int addr_reg = alloc_preg (cfg);
		int arg_reg = alloc_preg (cfg);
		int ftndesc_reg = alloc_preg (cfg);
		MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
		MonoBasicBlock *slowpath_bb, *end_bb;

		NEW_BBLOCK (cfg, slowpath_bb);
		NEW_BBLOCK (cfg, end_bb);

		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		if (is_iface)
			offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
		else
			offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);

		/* Load the slot, which contains a function descriptor. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);

		/* These slots are not initialized, so fall back to the slow path until they are initialized */
		/* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);

		/* Fastpath */
		/* Same as with iface calls */
		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
		EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
		icall_args [0] = thunk_arg_ins;
		icall_args [1] = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD);
		ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
		ftndesc_ins->dreg = ftndesc_reg;
		/*
		 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
		 * they don't know about yet. Fall back to the slowpath in that case.
		 */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);

		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Slowpath */
		MONO_START_BB (cfg, slowpath_bb);
		icall_args [0] = vtable_ins;
		EMIT_NEW_ICONST (cfg, icall_args [1], slot);
		icall_args [2] = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD);
		if (is_iface)
			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
		else
			ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
		ftndesc_ins->dreg = ftndesc_reg;
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Common case */
		MONO_START_BB (cfg, end_bb);
		return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
	}

	/*
	 * Non-optimized cases
	 */
	icall_args [0] = sp [0];
	EMIT_NEW_ICONST (cfg, icall_args [1], slot);

	icall_args [2] = emit_get_rgctx_method (cfg, context_used,
											cmethod, MONO_RGCTX_INFO_METHOD);

	/* Out-parameter: the resolver icall fills in the extra call argument */
	arg_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
	EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);

	g_assert (is_gsharedvt);
	if (is_iface)
		call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
	else
		call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);

	/*
	 * Pass the extra argument even if the callee doesn't receive it, most
	 * calling conventions allow this.
	 */
	return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
7914 static gboolean
7915 is_exception_class (MonoClass *klass)
7917 while (klass) {
7918 if (klass == mono_defaults.exception_class)
7919 return TRUE;
7920 klass = klass->parent;
7922 return FALSE;
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoError error;
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	/* Fast path: the answer is cached per-assembly */
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	klass = mono_class_try_get_debuggable_attribute_class ();

	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		/* Publish the value before the inited flag so readers never see a stale value */
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
	mono_error_cleanup (&error); /* FIXME don't swallow the error */
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			p = (const char*)attr->data;
			/* Custom attribute blobs start with the 0x0001 prolog (ECMA-335 II.23.3) */
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature (attr->ctor);
			/* Only the DebuggableAttribute(bool, bool) ctor is handled here */
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments: skip the first, read the second (IsJITOptimizerDisabled) */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
7988 static gboolean
7989 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7991 gboolean supported_tail_call;
7992 int i;
7994 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7996 for (i = 0; i < fsig->param_count; ++i) {
7997 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7998 /* These can point to the current method's stack */
7999 supported_tail_call = FALSE;
8001 if (fsig->hasthis && cmethod->klass->valuetype)
8002 /* this might point to the current method's stack */
8003 supported_tail_call = FALSE;
8004 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
8005 supported_tail_call = FALSE;
8006 if (cfg->method->save_lmf)
8007 supported_tail_call = FALSE;
8008 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
8009 supported_tail_call = FALSE;
8010 if (call_opcode != CEE_CALL)
8011 supported_tail_call = FALSE;
8013 /* Debugging support */
8014 #if 0
8015 if (supported_tail_call) {
8016 if (!mono_debug_count ())
8017 supported_tail_call = FALSE;
8019 #endif
8021 return supported_tail_call;
/*
 * handle_ctor_call:
 *
 *   Handle calls made to ctors from NEWOBJ opcodes.
 *
 * SP holds the ctor arguments (SP [0] is the new object), IP the current IL
 * position; INLINE_COSTS is updated when the ctor body gets inlined.
 * Type-load failures jump to exception_exit via CHECK_TYPELOAD.
 */
static void
handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
				  MonoInst **sp, guint8 *ip, int *inline_costs)
{
	MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;

	/* Shared generic valuetype ctors need an extra vtable/mrgctx argument */
	if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
			mono_method_is_generic_sharable (cmethod, TRUE)) {
		if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
			/* Ensure the vtable exists; the ctor receives an MRGCTX */
			mono_class_vtable (cfg->domain, cmethod->klass);
			CHECK_TYPELOAD (cmethod->klass);

			vtable_arg = emit_get_rgctx_method (cfg, context_used,
												cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
		} else {
			if (context_used) {
				/* Look the vtable up through the rgctx at runtime */
				vtable_arg = emit_get_rgctx_klass (cfg, context_used,
												   cmethod->klass, MONO_RGCTX_INFO_VTABLE);
			} else {
				MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);

				CHECK_TYPELOAD (cmethod->klass);
				EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
			}
		}
	}

	/* Avoid virtual calls to ctors if possible */
	if (mono_class_is_marshalbyref (cmethod->klass))
		callvirt_this_arg = sp [0];

	if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
		/* The ctor was replaced by an intrinsic IR sequence */
		g_assert (MONO_TYPE_IS_VOID (fsig->ret));
		CHECK_CFG_EXCEPTION;
	} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
			   mono_method_check_inlining (cfg, cmethod) &&
			   !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
		int costs;

		if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
			cfg->real_offset += 5;

			*inline_costs += costs - 5;
		} else {
			INLINE_FAILURE ("inline failure");
			// FIXME-VT: Clean this up
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
				GSHAREDVT_FAILURE(*ip);
			mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
		}
	} else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
		MonoInst *addr;

		/* Call through a gsharedvt out trampoline obtained from the rgctx */
		addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);

		if (cfg->llvm_only) {
			// FIXME: Avoid initializing vtable_arg
			emit_llvmonly_calli (cfg, fsig, sp, addr);
		} else {
			mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
		}
	} else if (context_used &&
			   ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
				 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
		MonoInst *cmethod_addr;

		/* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */

		if (cfg->llvm_only) {
			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
			emit_llvmonly_calli (cfg, fsig, sp, addr);
		} else {
			cmethod_addr = emit_get_rgctx_method (cfg, context_used,
												  cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

			mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
		}
	} else {
		/* Plain direct call to the ctor */
		INLINE_FAILURE ("ctor call");
		ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
										  callvirt_this_arg, NULL, vtable_arg);
	}
 exception_exit:
	return;
}
/*
 * emit_setret:
 *
 *   Emit IR placing VAL into the return location of the current method.
 * Valuetype returns (stind == CEE_STOBJ) are stored through the hidden
 * return-address argument when one exists; scalar returns go through the
 * architecture backend, with a soft-float conversion for R4 when needed.
 */
static void
emit_setret (MonoCompile *cfg, MonoInst *val)
{
	MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
	MonoInst *ins;

	if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
		MonoInst *ret_addr;

		if (!cfg->vret_addr) {
			/* No hidden vret argument: store into the return variable directly */
			EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
		} else {
			/* Store the valuetype through the caller-provided return address */
			EMIT_NEW_RETLOADA (cfg, ret_addr);

			EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
			ins->klass = mono_class_from_mono_type (ret_type);
		}
	} else {
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
		if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
			MonoInst *iargs [1];
			MonoInst *conv;

			/* Soft-float: convert the R4 value via an icall before setting the return reg */
			iargs [0] = val;
			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
			mono_arch_emit_setret (cfg, cfg->method, conv);
		} else {
			mono_arch_emit_setret (cfg, cfg->method, val);
		}
#else
		mono_arch_emit_setret (cfg, cfg->method, val);
#endif
	}
}
8152 * mono_method_to_ir:
8154 * Translate the .net IL into linear IR.
8156 * @start_bblock: if not NULL, the starting basic block, used during inlining.
8157 * @end_bblock: if not NULL, the ending basic block, used during inlining.
8158 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
8159 * @inline_args: if not NULL, contains the arguments to the inline call
8160 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
8161 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
8163 * This method is used to turn ECMA IL into Mono's internal Linear IR
8164 * representation. It is used both for entire methods, as well as
8165 * inlining existing methods. In the former case, the @start_bblock,
8166 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
8167 * inline_offset is set to zero.
8169 * Returns: the inline cost, or -1 if there was an error processing this method.
8172 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8173 MonoInst *return_var, MonoInst **inline_args,
8174 guint inline_offset, gboolean is_virtual_call)
8176 MonoError error;
8177 MonoInst *ins, **sp, **stack_start;
8178 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8179 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8180 MonoMethod *cmethod, *method_definition;
8181 MonoInst **arg_array;
8182 MonoMethodHeader *header;
8183 MonoImage *image;
8184 guint32 token, ins_flag;
8185 MonoClass *klass;
8186 MonoClass *constrained_class = NULL;
8187 unsigned char *ip, *end, *target, *err_pos;
8188 MonoMethodSignature *sig;
8189 MonoGenericContext *generic_context = NULL;
8190 MonoGenericContainer *generic_container = NULL;
8191 MonoType **param_types;
8192 int i, n, start_new_bblock, dreg;
8193 int num_calls = 0, inline_costs = 0;
8194 int breakpoint_id = 0;
8195 guint num_args;
8196 GSList *class_inits = NULL;
8197 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8198 int context_used;
8199 gboolean init_locals, seq_points, skip_dead_blocks;
8200 gboolean sym_seq_points = FALSE;
8201 MonoDebugMethodInfo *minfo;
8202 MonoBitSet *seq_point_locs = NULL;
8203 MonoBitSet *seq_point_set_locs = NULL;
8205 cfg->disable_inline = is_jit_optimizer_disabled (method);
8207 /* serialization and xdomain stuff may need access to private fields and methods */
8208 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8209 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8210 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8211 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8212 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8213 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8215 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8216 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8217 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8218 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8219 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8221 image = method->klass->image;
8222 header = mono_method_get_header_checked (method, &cfg->error);
8223 if (!header) {
8224 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8225 goto exception_exit;
8226 } else {
8227 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
8230 generic_container = mono_method_get_generic_container (method);
8231 sig = mono_method_signature (method);
8232 num_args = sig->hasthis + sig->param_count;
8233 ip = (unsigned char*)header->code;
8234 cfg->cil_start = ip;
8235 end = ip + header->code_size;
8236 cfg->stat_cil_code_size += header->code_size;
8238 seq_points = cfg->gen_seq_points && cfg->method == method;
8240 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8241 /* We could hit a seq point before attaching to the JIT (#8338) */
8242 seq_points = FALSE;
8245 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8246 minfo = mono_debug_lookup_method (method);
8247 if (minfo) {
8248 MonoSymSeqPoint *sps;
8249 int i, n_il_offsets;
8251 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8252 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8253 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8254 sym_seq_points = TRUE;
8255 for (i = 0; i < n_il_offsets; ++i) {
8256 if (sps [i].il_offset < header->code_size)
8257 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8259 g_free (sps);
8260 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8261 /* Methods without line number info like auto-generated property accessors */
8262 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8263 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8264 sym_seq_points = TRUE;
8269 * Methods without init_locals set could cause asserts in various passes
8270 * (#497220). To work around this, we emit dummy initialization opcodes
8271 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8272 * on some platforms.
8274 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8275 init_locals = header->init_locals;
8276 else
8277 init_locals = TRUE;
8279 method_definition = method;
8280 while (method_definition->is_inflated) {
8281 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8282 method_definition = imethod->declaring;
8285 /* SkipVerification is not allowed if core-clr is enabled */
8286 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8287 dont_verify = TRUE;
8288 dont_verify_stloc = TRUE;
8291 if (sig->is_inflated)
8292 generic_context = mono_method_get_context (method);
8293 else if (generic_container)
8294 generic_context = &generic_container->context;
8295 cfg->generic_context = generic_context;
8297 if (!cfg->gshared)
8298 g_assert (!sig->has_type_parameters);
8300 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8301 g_assert (method->is_inflated);
8302 g_assert (mono_method_get_context (method)->method_inst);
8304 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8305 g_assert (sig->generic_param_count);
8307 if (cfg->method == method) {
8308 cfg->real_offset = 0;
8309 } else {
8310 cfg->real_offset = inline_offset;
8313 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8314 cfg->cil_offset_to_bb_len = header->code_size;
8316 cfg->current_method = method;
8318 if (cfg->verbose_level > 2)
8319 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8321 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8322 if (sig->hasthis)
8323 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8324 for (n = 0; n < sig->param_count; ++n)
8325 param_types [n + sig->hasthis] = sig->params [n];
8326 cfg->arg_types = param_types;
8328 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8329 if (cfg->method == method) {
8331 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8332 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8334 /* ENTRY BLOCK */
8335 NEW_BBLOCK (cfg, start_bblock);
8336 cfg->bb_entry = start_bblock;
8337 start_bblock->cil_code = NULL;
8338 start_bblock->cil_length = 0;
8340 /* EXIT BLOCK */
8341 NEW_BBLOCK (cfg, end_bblock);
8342 cfg->bb_exit = end_bblock;
8343 end_bblock->cil_code = NULL;
8344 end_bblock->cil_length = 0;
8345 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8346 g_assert (cfg->num_bblocks == 2);
8348 arg_array = cfg->args;
8350 if (header->num_clauses) {
8351 cfg->spvars = g_hash_table_new (NULL, NULL);
8352 cfg->exvars = g_hash_table_new (NULL, NULL);
8354 /* handle exception clauses */
8355 for (i = 0; i < header->num_clauses; ++i) {
8356 MonoBasicBlock *try_bb;
8357 MonoExceptionClause *clause = &header->clauses [i];
8358 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8360 try_bb->real_offset = clause->try_offset;
8361 try_bb->try_start = TRUE;
8362 try_bb->region = ((i + 1) << 8) | clause->flags;
8363 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8364 tblock->real_offset = clause->handler_offset;
8365 tblock->flags |= BB_EXCEPTION_HANDLER;
8368 * Linking the try block with the EH block hinders inlining as we won't be able to
8369 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8371 if (COMPILE_LLVM (cfg))
8372 link_bblock (cfg, try_bb, tblock);
8374 if (*(ip + clause->handler_offset) == CEE_POP)
8375 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8377 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8378 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8379 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8380 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8381 MONO_ADD_INS (tblock, ins);
8383 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8384 /* finally clauses already have a seq point */
8385 /* seq points for filter clauses are emitted below */
8386 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8387 MONO_ADD_INS (tblock, ins);
8390 /* todo: is a fault block unsafe to optimize? */
8391 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8392 tblock->flags |= BB_EXCEPTION_UNSAFE;
8395 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8396 while (p < end) {
8397 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8399 /* catch and filter blocks get the exception object on the stack */
8400 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8401 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8403 /* mostly like handle_stack_args (), but just sets the input args */
8404 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8405 tblock->in_scount = 1;
8406 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8407 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8409 cfg->cbb = tblock;
8411 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8412 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8413 if (!cfg->compile_llvm) {
8414 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8415 ins->dreg = tblock->in_stack [0]->dreg;
8416 MONO_ADD_INS (tblock, ins);
8418 #else
8419 MonoInst *dummy_use;
8422 * Add a dummy use for the exvar so its liveness info will be
8423 * correct.
8425 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8426 #endif
8428 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8429 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8430 MONO_ADD_INS (tblock, ins);
8433 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8434 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8435 tblock->flags |= BB_EXCEPTION_HANDLER;
8436 tblock->real_offset = clause->data.filter_offset;
8437 tblock->in_scount = 1;
8438 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8439 /* The filter block shares the exvar with the handler block */
8440 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8441 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8442 MONO_ADD_INS (tblock, ins);
8446 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8447 clause->data.catch_class &&
8448 cfg->gshared &&
8449 mono_class_check_context_used (clause->data.catch_class)) {
8451 * In shared generic code with catch
8452 * clauses containing type variables
8453 * the exception handling code has to
8454 * be able to get to the rgctx.
8455 * Therefore we have to make sure that
8456 * the vtable/mrgctx argument (for
8457 * static or generic methods) or the
8458 * "this" argument (for non-static
8459 * methods) are live.
8461 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8462 mini_method_get_context (method)->method_inst ||
8463 method->klass->valuetype) {
8464 mono_get_vtable_var (cfg);
8465 } else {
8466 MonoInst *dummy_use;
8468 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8472 } else {
8473 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8474 cfg->cbb = start_bblock;
8475 cfg->args = arg_array;
8476 mono_save_args (cfg, sig, inline_args);
8479 /* FIRST CODE BLOCK */
8480 NEW_BBLOCK (cfg, tblock);
8481 tblock->cil_code = ip;
8482 cfg->cbb = tblock;
8483 cfg->ip = ip;
8485 ADD_BBLOCK (cfg, tblock);
8487 if (cfg->method == method) {
8488 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8489 if (breakpoint_id) {
8490 MONO_INST_NEW (cfg, ins, OP_BREAK);
8491 MONO_ADD_INS (cfg->cbb, ins);
8495 /* we use a separate basic block for the initialization code */
8496 NEW_BBLOCK (cfg, init_localsbb);
8497 if (cfg->method == method)
8498 cfg->bb_init = init_localsbb;
8499 init_localsbb->real_offset = cfg->real_offset;
8500 start_bblock->next_bb = init_localsbb;
8501 init_localsbb->next_bb = cfg->cbb;
8502 link_bblock (cfg, start_bblock, init_localsbb);
8503 link_bblock (cfg, init_localsbb, cfg->cbb);
8505 cfg->cbb = init_localsbb;
8507 if (cfg->gsharedvt && cfg->method == method) {
8508 MonoGSharedVtMethodInfo *info;
8509 MonoInst *var, *locals_var;
8510 int dreg;
8512 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8513 info->method = cfg->method;
8514 info->count_entries = 16;
8515 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8516 cfg->gsharedvt_info = info;
8518 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8519 /* prevent it from being register allocated */
8520 //var->flags |= MONO_INST_VOLATILE;
8521 cfg->gsharedvt_info_var = var;
8523 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8524 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8526 /* Allocate locals */
8527 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8528 /* prevent it from being register allocated */
8529 //locals_var->flags |= MONO_INST_VOLATILE;
8530 cfg->gsharedvt_locals_var = locals_var;
8532 dreg = alloc_ireg (cfg);
8533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8535 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8536 ins->dreg = locals_var->dreg;
8537 ins->sreg1 = dreg;
8538 MONO_ADD_INS (cfg->cbb, ins);
8539 cfg->gsharedvt_locals_var_ins = ins;
8541 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8543 if (init_locals)
8544 ins->flags |= MONO_INST_INIT;
8548 if (mono_security_core_clr_enabled ()) {
8549 /* check if this is native code, e.g. an icall or a p/invoke */
8550 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8551 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8552 if (wrapped) {
8553 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8554 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8556 /* if this ia a native call then it can only be JITted from platform code */
8557 if ((icall || pinvk) && method->klass && method->klass->image) {
8558 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8559 MonoException *ex = icall ? mono_get_exception_security () :
8560 mono_get_exception_method_access ();
8561 emit_throw_exception (cfg, ex);
8568 CHECK_CFG_EXCEPTION;
8570 if (header->code_size == 0)
8571 UNVERIFIED;
8573 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8574 ip = err_pos;
8575 UNVERIFIED;
8578 if (cfg->method == method)
8579 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8581 for (n = 0; n < header->num_locals; ++n) {
8582 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8583 UNVERIFIED;
8585 class_inits = NULL;
8587 /* We force the vtable variable here for all shared methods
8588 for the possibility that they might show up in a stack
8589 trace where their exact instantiation is needed. */
8590 if (cfg->gshared && method == cfg->method) {
8591 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8592 mini_method_get_context (method)->method_inst ||
8593 method->klass->valuetype) {
8594 mono_get_vtable_var (cfg);
8595 } else {
8596 /* FIXME: Is there a better way to do this?
8597 We need the variable live for the duration
8598 of the whole method. */
8599 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8603 /* add a check for this != NULL to inlined methods */
8604 if (is_virtual_call) {
8605 MonoInst *arg_ins;
8607 NEW_ARGLOAD (cfg, arg_ins, 0);
8608 MONO_ADD_INS (cfg->cbb, arg_ins);
8609 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8612 skip_dead_blocks = !dont_verify;
8613 if (skip_dead_blocks) {
8614 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
8615 CHECK_CFG_ERROR;
8616 g_assert (bb);
8619 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8620 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8622 ins_flag = 0;
8623 start_new_bblock = 0;
8624 while (ip < end) {
8625 if (cfg->method == method)
8626 cfg->real_offset = ip - header->code;
8627 else
8628 cfg->real_offset = inline_offset;
8629 cfg->ip = ip;
8631 context_used = 0;
8633 if (start_new_bblock) {
8634 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8635 if (start_new_bblock == 2) {
8636 g_assert (ip == tblock->cil_code);
8637 } else {
8638 GET_BBLOCK (cfg, tblock, ip);
8640 cfg->cbb->next_bb = tblock;
8641 cfg->cbb = tblock;
8642 start_new_bblock = 0;
8643 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8644 if (cfg->verbose_level > 3)
8645 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8646 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8647 *sp++ = ins;
8649 if (class_inits)
8650 g_slist_free (class_inits);
8651 class_inits = NULL;
8652 } else {
8653 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8654 link_bblock (cfg, cfg->cbb, tblock);
8655 if (sp != stack_start) {
8656 handle_stack_args (cfg, stack_start, sp - stack_start);
8657 sp = stack_start;
8658 CHECK_UNVERIFIABLE (cfg);
8660 cfg->cbb->next_bb = tblock;
8661 cfg->cbb = tblock;
8662 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8663 if (cfg->verbose_level > 3)
8664 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8665 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8666 *sp++ = ins;
8668 g_slist_free (class_inits);
8669 class_inits = NULL;
8673 if (skip_dead_blocks) {
8674 int ip_offset = ip - header->code;
8676 if (ip_offset == bb->end)
8677 bb = bb->next;
8679 if (bb->dead) {
8680 int op_size = mono_opcode_size (ip, end);
8681 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8683 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8685 if (ip_offset + op_size == bb->end) {
8686 MONO_INST_NEW (cfg, ins, OP_NOP);
8687 MONO_ADD_INS (cfg->cbb, ins);
8688 start_new_bblock = 1;
8691 ip += op_size;
8692 continue;
8696 * Sequence points are points where the debugger can place a breakpoint.
8697 * Currently, we generate these automatically at points where the IL
8698 * stack is empty.
8700 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8702 * Make methods interruptable at the beginning, and at the targets of
8703 * backward branches.
8704 * Also, do this at the start of every bblock in methods with clauses too,
8705 * to be able to handle instructions with inprecise control flow like
8706 * throw/endfinally.
8707 * Backward branches are handled at the end of method-to-ir ().
8709 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8710 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8712 /* Avoid sequence points on empty IL like .volatile */
8713 // FIXME: Enable this
8714 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8715 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8716 if ((sp != stack_start) && !sym_seq_point)
8717 ins->flags |= MONO_INST_NONEMPTY_STACK;
8718 MONO_ADD_INS (cfg->cbb, ins);
8720 if (sym_seq_points)
8721 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8724 cfg->cbb->real_offset = cfg->real_offset;
8726 if ((cfg->method == method) && cfg->coverage_info) {
8727 guint32 cil_offset = ip - header->code;
8728 cfg->coverage_info->data [cil_offset].cil_code = ip;
8730 /* TODO: Use an increment here */
8731 #if defined(TARGET_X86)
8732 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8733 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8734 ins->inst_imm = 1;
8735 MONO_ADD_INS (cfg->cbb, ins);
8736 #else
8737 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8738 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8739 #endif
8742 if (cfg->verbose_level > 3)
8743 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8745 switch (*ip) {
8746 case CEE_NOP:
8747 if (seq_points && !sym_seq_points && sp != stack_start) {
8749 * The C# compiler uses these nops to notify the JIT that it should
8750 * insert seq points.
8752 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8753 MONO_ADD_INS (cfg->cbb, ins);
8755 if (cfg->keep_cil_nops)
8756 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8757 else
8758 MONO_INST_NEW (cfg, ins, OP_NOP);
8759 ip++;
8760 MONO_ADD_INS (cfg->cbb, ins);
8761 break;
8762 case CEE_BREAK:
8763 if (should_insert_brekpoint (cfg->method)) {
8764 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8765 } else {
8766 MONO_INST_NEW (cfg, ins, OP_NOP);
8768 ip++;
8769 MONO_ADD_INS (cfg->cbb, ins);
8770 break;
8771 case CEE_LDARG_0:
8772 case CEE_LDARG_1:
8773 case CEE_LDARG_2:
8774 case CEE_LDARG_3:
8775 CHECK_STACK_OVF (1);
8776 n = (*ip)-CEE_LDARG_0;
8777 CHECK_ARG (n);
8778 EMIT_NEW_ARGLOAD (cfg, ins, n);
8779 ip++;
8780 *sp++ = ins;
8781 break;
8782 case CEE_LDLOC_0:
8783 case CEE_LDLOC_1:
8784 case CEE_LDLOC_2:
8785 case CEE_LDLOC_3:
8786 CHECK_STACK_OVF (1);
8787 n = (*ip)-CEE_LDLOC_0;
8788 CHECK_LOCAL (n);
8789 EMIT_NEW_LOCLOAD (cfg, ins, n);
8790 ip++;
8791 *sp++ = ins;
8792 break;
8793 case CEE_STLOC_0:
8794 case CEE_STLOC_1:
8795 case CEE_STLOC_2:
8796 case CEE_STLOC_3: {
8797 CHECK_STACK (1);
8798 n = (*ip)-CEE_STLOC_0;
8799 CHECK_LOCAL (n);
8800 --sp;
8801 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8802 UNVERIFIED;
8803 emit_stloc_ir (cfg, sp, header, n);
8804 ++ip;
8805 inline_costs += 1;
8806 break;
8808 case CEE_LDARG_S:
8809 CHECK_OPSIZE (2);
8810 CHECK_STACK_OVF (1);
8811 n = ip [1];
8812 CHECK_ARG (n);
8813 EMIT_NEW_ARGLOAD (cfg, ins, n);
8814 *sp++ = ins;
8815 ip += 2;
8816 break;
8817 case CEE_LDARGA_S:
8818 CHECK_OPSIZE (2);
8819 CHECK_STACK_OVF (1);
8820 n = ip [1];
8821 CHECK_ARG (n);
8822 NEW_ARGLOADA (cfg, ins, n);
8823 MONO_ADD_INS (cfg->cbb, ins);
8824 *sp++ = ins;
8825 ip += 2;
8826 break;
8827 case CEE_STARG_S:
8828 CHECK_OPSIZE (2);
8829 CHECK_STACK (1);
8830 --sp;
8831 n = ip [1];
8832 CHECK_ARG (n);
8833 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8834 UNVERIFIED;
8835 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8836 ip += 2;
8837 break;
8838 case CEE_LDLOC_S:
8839 CHECK_OPSIZE (2);
8840 CHECK_STACK_OVF (1);
8841 n = ip [1];
8842 CHECK_LOCAL (n);
8843 EMIT_NEW_LOCLOAD (cfg, ins, n);
8844 *sp++ = ins;
8845 ip += 2;
8846 break;
8847 case CEE_LDLOCA_S: {
8848 unsigned char *tmp_ip;
8849 CHECK_OPSIZE (2);
8850 CHECK_STACK_OVF (1);
8851 CHECK_LOCAL (ip [1]);
8853 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8854 ip = tmp_ip;
8855 inline_costs += 1;
8856 break;
8859 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8860 *sp++ = ins;
8861 ip += 2;
8862 break;
8864 case CEE_STLOC_S:
8865 CHECK_OPSIZE (2);
8866 CHECK_STACK (1);
8867 --sp;
8868 CHECK_LOCAL (ip [1]);
8869 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8870 UNVERIFIED;
8871 emit_stloc_ir (cfg, sp, header, ip [1]);
8872 ip += 2;
8873 inline_costs += 1;
8874 break;
8875 case CEE_LDNULL:
8876 CHECK_STACK_OVF (1);
8877 EMIT_NEW_PCONST (cfg, ins, NULL);
8878 ins->type = STACK_OBJ;
8879 ++ip;
8880 *sp++ = ins;
8881 break;
8882 case CEE_LDC_I4_M1:
8883 CHECK_STACK_OVF (1);
8884 EMIT_NEW_ICONST (cfg, ins, -1);
8885 ++ip;
8886 *sp++ = ins;
8887 break;
8888 case CEE_LDC_I4_0:
8889 case CEE_LDC_I4_1:
8890 case CEE_LDC_I4_2:
8891 case CEE_LDC_I4_3:
8892 case CEE_LDC_I4_4:
8893 case CEE_LDC_I4_5:
8894 case CEE_LDC_I4_6:
8895 case CEE_LDC_I4_7:
8896 case CEE_LDC_I4_8:
8897 CHECK_STACK_OVF (1);
8898 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8899 ++ip;
8900 *sp++ = ins;
8901 break;
8902 case CEE_LDC_I4_S:
8903 CHECK_OPSIZE (2);
8904 CHECK_STACK_OVF (1);
8905 ++ip;
8906 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8907 ++ip;
8908 *sp++ = ins;
8909 break;
8910 case CEE_LDC_I4:
8911 CHECK_OPSIZE (5);
8912 CHECK_STACK_OVF (1);
8913 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8914 ip += 5;
8915 *sp++ = ins;
8916 break;
8917 case CEE_LDC_I8:
8918 CHECK_OPSIZE (9);
8919 CHECK_STACK_OVF (1);
8920 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8921 ins->type = STACK_I8;
8922 ins->dreg = alloc_dreg (cfg, STACK_I8);
8923 ++ip;
8924 ins->inst_l = (gint64)read64 (ip);
8925 MONO_ADD_INS (cfg->cbb, ins);
8926 ip += 8;
8927 *sp++ = ins;
8928 break;
8929 case CEE_LDC_R4: {
8930 float *f;
8931 gboolean use_aotconst = FALSE;
8933 #ifdef TARGET_POWERPC
8934 /* FIXME: Clean this up */
8935 if (cfg->compile_aot)
8936 use_aotconst = TRUE;
8937 #endif
8939 /* FIXME: we should really allocate this only late in the compilation process */
8940 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8941 CHECK_OPSIZE (5);
8942 CHECK_STACK_OVF (1);
8944 if (use_aotconst) {
8945 MonoInst *cons;
8946 int dreg;
8948 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8950 dreg = alloc_freg (cfg);
8951 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8952 ins->type = cfg->r4_stack_type;
8953 } else {
8954 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8955 ins->type = cfg->r4_stack_type;
8956 ins->dreg = alloc_dreg (cfg, STACK_R8);
8957 ins->inst_p0 = f;
8958 MONO_ADD_INS (cfg->cbb, ins);
8960 ++ip;
8961 readr4 (ip, f);
8962 ip += 4;
8963 *sp++ = ins;
8964 break;
8966 case CEE_LDC_R8: {
8967 double *d;
8968 gboolean use_aotconst = FALSE;
8970 #ifdef TARGET_POWERPC
8971 /* FIXME: Clean this up */
8972 if (cfg->compile_aot)
8973 use_aotconst = TRUE;
8974 #endif
8976 /* FIXME: we should really allocate this only late in the compilation process */
8977 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8978 CHECK_OPSIZE (9);
8979 CHECK_STACK_OVF (1);
8981 if (use_aotconst) {
8982 MonoInst *cons;
8983 int dreg;
8985 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8987 dreg = alloc_freg (cfg);
8988 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8989 ins->type = STACK_R8;
8990 } else {
8991 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8992 ins->type = STACK_R8;
8993 ins->dreg = alloc_dreg (cfg, STACK_R8);
8994 ins->inst_p0 = d;
8995 MONO_ADD_INS (cfg->cbb, ins);
8997 ++ip;
8998 readr8 (ip, d);
8999 ip += 8;
9000 *sp++ = ins;
9001 break;
9003 case CEE_DUP: {
9004 MonoInst *temp, *store;
9005 CHECK_STACK (1);
9006 CHECK_STACK_OVF (1);
9007 sp--;
9008 ins = *sp;
9010 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
9011 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
9013 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
9014 *sp++ = ins;
9016 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
9017 *sp++ = ins;
9019 ++ip;
9020 inline_costs += 2;
9021 break;
9023 case CEE_POP:
9024 CHECK_STACK (1);
9025 ip++;
9026 --sp;
9028 #ifdef TARGET_X86
9029 if (sp [0]->type == STACK_R8)
9030 /* we need to pop the value from the x86 FP stack */
9031 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
9032 #endif
9033 break;
9034 case CEE_JMP: {
9035 MonoCallInst *call;
9036 MonoMethodSignature *fsig;
9037 int i, n;
9039 INLINE_FAILURE ("jmp");
9040 GSHAREDVT_FAILURE (*ip);
9042 CHECK_OPSIZE (5);
9043 if (stack_start != sp)
9044 UNVERIFIED;
9045 token = read32 (ip + 1);
9046 /* FIXME: check the signature matches */
9047 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9048 CHECK_CFG_ERROR;
9050 if (cfg->gshared && mono_method_check_context_used (cmethod))
9051 GENERIC_SHARING_FAILURE (CEE_JMP);
9053 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9055 fsig = mono_method_signature (cmethod);
9056 n = fsig->param_count + fsig->hasthis;
9057 if (cfg->llvm_only) {
9058 MonoInst **args;
9060 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9061 for (i = 0; i < n; ++i)
9062 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9063 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9065 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9066 * have to emit a normal return since llvm expects it.
9068 if (cfg->ret)
9069 emit_setret (cfg, ins);
9070 MONO_INST_NEW (cfg, ins, OP_BR);
9071 ins->inst_target_bb = end_bblock;
9072 MONO_ADD_INS (cfg->cbb, ins);
9073 link_bblock (cfg, cfg->cbb, end_bblock);
9074 ip += 5;
9075 break;
9076 } else if (cfg->backend->have_op_tail_call) {
9077 /* Handle tail calls similarly to calls */
9078 DISABLE_AOT (cfg);
9080 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9081 call->method = cmethod;
9082 call->tail_call = TRUE;
9083 call->signature = mono_method_signature (cmethod);
9084 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9085 call->inst.inst_p0 = cmethod;
9086 for (i = 0; i < n; ++i)
9087 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9089 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
9090 call->vret_var = cfg->vret_addr;
9092 mono_arch_emit_call (cfg, call);
9093 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9094 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9095 } else {
9096 for (i = 0; i < num_args; ++i)
9097 /* Prevent arguments from being optimized away */
9098 arg_array [i]->flags |= MONO_INST_VOLATILE;
9100 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9101 ins = (MonoInst*)call;
9102 ins->inst_p0 = cmethod;
9103 MONO_ADD_INS (cfg->cbb, ins);
9106 ip += 5;
9107 start_new_bblock = 1;
9108 break;
9110 case CEE_CALLI: {
9111 MonoInst *addr;
9112 MonoMethodSignature *fsig;
9114 CHECK_OPSIZE (5);
9115 token = read32 (ip + 1);
9117 ins = NULL;
9119 //GSHAREDVT_FAILURE (*ip);
9120 cmethod = NULL;
9121 CHECK_STACK (1);
9122 --sp;
9123 addr = *sp;
9124 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
9125 CHECK_CFG_ERROR;
9127 if (method->dynamic && fsig->pinvoke) {
9128 MonoInst *args [3];
9131 * This is a call through a function pointer using a pinvoke
9132 * signature. Have to create a wrapper and call that instead.
9133 * FIXME: This is very slow, need to create a wrapper at JIT time
9134 * instead based on the signature.
9136 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9137 EMIT_NEW_PCONST (cfg, args [1], fsig);
9138 args [2] = addr;
9139 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9142 n = fsig->param_count + fsig->hasthis;
9144 CHECK_STACK (n);
9146 //g_assert (!virtual_ || fsig->hasthis);
9148 sp -= n;
9150 inline_costs += 10 * num_calls++;
9153 * Making generic calls out of gsharedvt methods.
9154 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9155 * patching gshared method addresses into a gsharedvt method.
9157 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9159 * We pass the address to the gsharedvt trampoline in the rgctx reg
9161 MonoInst *callee = addr;
9163 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9164 /* Not tested */
9165 GSHAREDVT_FAILURE (*ip);
9167 if (cfg->llvm_only)
9168 // FIXME:
9169 GSHAREDVT_FAILURE (*ip);
9171 addr = emit_get_rgctx_sig (cfg, context_used,
9172 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9173 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9174 goto calli_end;
9177 /* Prevent inlining of methods with indirect calls */
9178 INLINE_FAILURE ("indirect call");
9180 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9181 MonoJumpInfoType info_type;
9182 gpointer info_data;
9185 * Instead of emitting an indirect call, emit a direct call
9186 * with the contents of the aotconst as the patch info.
9188 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9189 info_type = (MonoJumpInfoType)addr->inst_c1;
9190 info_data = addr->inst_p0;
9191 } else {
9192 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9193 info_data = addr->inst_right->inst_left;
9196 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
9197 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
9198 NULLIFY_INS (addr);
9199 goto calli_end;
9200 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9201 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9202 NULLIFY_INS (addr);
9203 goto calli_end;
9206 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9208 calli_end:
9210 /* End of call, INS should contain the result of the call, if any */
9212 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9213 g_assert (ins);
9214 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9217 CHECK_CFG_EXCEPTION;
9219 ip += 5;
9220 ins_flag = 0;
9221 constrained_class = NULL;
9222 break;
9224 case CEE_CALL:
9225 case CEE_CALLVIRT: {
9226 MonoInst *addr = NULL;
9227 MonoMethodSignature *fsig = NULL;
9228 int array_rank = 0;
9229 int virtual_ = *ip == CEE_CALLVIRT;
9230 gboolean pass_imt_from_rgctx = FALSE;
9231 MonoInst *imt_arg = NULL;
9232 MonoInst *keep_this_alive = NULL;
9233 gboolean pass_vtable = FALSE;
9234 gboolean pass_mrgctx = FALSE;
9235 MonoInst *vtable_arg = NULL;
9236 gboolean check_this = FALSE;
9237 gboolean supported_tail_call = FALSE;
9238 gboolean tail_call = FALSE;
9239 gboolean need_seq_point = FALSE;
9240 guint32 call_opcode = *ip;
9241 gboolean emit_widen = TRUE;
9242 gboolean push_res = TRUE;
9243 gboolean skip_ret = FALSE;
9244 gboolean delegate_invoke = FALSE;
9245 gboolean direct_icall = FALSE;
9246 gboolean constrained_partial_call = FALSE;
9247 MonoMethod *cil_method;
9249 CHECK_OPSIZE (5);
9250 token = read32 (ip + 1);
9252 ins = NULL;
9254 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9255 CHECK_CFG_ERROR;
9257 cil_method = cmethod;
9259 if (constrained_class) {
9260 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9261 if (!mini_is_gsharedvt_klass (constrained_class)) {
9262 g_assert (!cmethod->klass->valuetype);
9263 if (!mini_type_is_reference (&constrained_class->byval_arg))
9264 constrained_partial_call = TRUE;
9268 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9269 if (cfg->verbose_level > 2)
9270 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9271 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9272 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9273 cfg->gshared)) {
9274 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9275 CHECK_CFG_ERROR;
9277 } else {
9278 if (cfg->verbose_level > 2)
9279 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9281 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9283 * This is needed since get_method_constrained can't find
9284 * the method in klass representing a type var.
9285 * The type var is guaranteed to be a reference type in this
9286 * case.
9288 if (!mini_is_gsharedvt_klass (constrained_class))
9289 g_assert (!cmethod->klass->valuetype);
9290 } else {
9291 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9292 CHECK_CFG_ERROR;
9297 if (!dont_verify && !cfg->skip_visibility) {
9298 MonoMethod *target_method = cil_method;
9299 if (method->is_inflated) {
9300 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9301 CHECK_CFG_ERROR;
9303 if (!mono_method_can_access_method (method_definition, target_method) &&
9304 !mono_method_can_access_method (method, cil_method))
9305 emit_method_access_failure (cfg, method, cil_method);
9308 if (mono_security_core_clr_enabled ())
9309 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9311 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9312 /* MS.NET seems to silently convert this to a callvirt */
9313 virtual_ = 1;
9317 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9318 * converts to a callvirt.
9320 * tests/bug-515884.il is an example of this behavior
9322 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9323 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9324 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9325 virtual_ = 1;
9328 if (!cmethod->klass->inited)
9329 if (!mono_class_init (cmethod->klass))
9330 TYPE_LOAD_ERROR (cmethod->klass);
9332 fsig = mono_method_signature (cmethod);
9333 if (!fsig)
9334 LOAD_ERROR;
9335 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9336 mini_class_is_system_array (cmethod->klass)) {
9337 array_rank = cmethod->klass->rank;
9338 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9339 direct_icall = TRUE;
9340 } else if (fsig->pinvoke) {
9341 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9342 fsig = mono_method_signature (wrapper);
9343 } else if (constrained_class) {
9344 } else {
9345 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9346 CHECK_CFG_ERROR;
9349 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
9350 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9352 /* See code below */
9353 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9354 MonoBasicBlock *tbb;
9356 GET_BBLOCK (cfg, tbb, ip + 5);
9357 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9359 * We want to extend the try block to cover the call, but we can't do it if the
9360 * call is made directly since its followed by an exception check.
9362 direct_icall = FALSE;
9366 mono_save_token_info (cfg, image, token, cil_method);
9368 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9369 need_seq_point = TRUE;
9371 /* Don't support calls made using type arguments for now */
9373 if (cfg->gsharedvt) {
9374 if (mini_is_gsharedvt_signature (fsig))
9375 GSHAREDVT_FAILURE (*ip);
9379 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9380 g_assert_not_reached ();
9382 n = fsig->param_count + fsig->hasthis;
9384 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
9385 UNVERIFIED;
9387 if (!cfg->gshared)
9388 g_assert (!mono_method_check_context_used (cmethod));
9390 CHECK_STACK (n);
9392 //g_assert (!virtual_ || fsig->hasthis);
9394 sp -= n;
9397 * We have the `constrained.' prefix opcode.
9399 if (constrained_class) {
9400 if (mini_is_gsharedvt_klass (constrained_class)) {
9401 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9402 /* The 'Own method' case below */
9403 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
9404 /* 'The type parameter is instantiated as a reference type' case below. */
9405 } else {
9406 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9407 CHECK_CFG_EXCEPTION;
9408 g_assert (ins);
9409 goto call_end;
9413 if (constrained_partial_call) {
9414 gboolean need_box = TRUE;
9417 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9418 * called method is not known at compile time either. The called method could end up being
9419 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9420 * to box the receiver.
9421 * A simple solution would be to box always and make a normal virtual call, but that would
9422 * be bad performance wise.
9424 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
9426 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
9428 need_box = FALSE;
9431 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9432 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9433 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9434 ins->klass = constrained_class;
9435 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9436 CHECK_CFG_EXCEPTION;
9437 } else if (need_box) {
9438 MonoInst *box_type;
9439 MonoBasicBlock *is_ref_bb, *end_bb;
9440 MonoInst *nonbox_call;
9443 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
9444 * if needed.
9445 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9446 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9448 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9450 NEW_BBLOCK (cfg, is_ref_bb);
9451 NEW_BBLOCK (cfg, end_bb);
9453 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9457 /* Non-ref case */
9458 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9460 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9462 /* Ref case */
9463 MONO_START_BB (cfg, is_ref_bb);
9464 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9465 ins->klass = constrained_class;
9466 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9467 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9469 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9471 MONO_START_BB (cfg, end_bb);
9472 cfg->cbb = end_bb;
9474 nonbox_call->dreg = ins->dreg;
9475 goto call_end;
9476 } else {
9477 g_assert (mono_class_is_interface (cmethod->klass));
9478 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9479 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9480 goto call_end;
9482 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9484 * The type parameter is instantiated as a valuetype,
9485 * but that type doesn't override the method we're
9486 * calling, so we need to box `this'.
9488 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9489 ins->klass = constrained_class;
9490 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9491 CHECK_CFG_EXCEPTION;
9492 } else if (!constrained_class->valuetype) {
9493 int dreg = alloc_ireg_ref (cfg);
9496 * The type parameter is instantiated as a reference
9497 * type. We have a managed pointer on the stack, so
9498 * we need to dereference it here.
9500 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9501 ins->type = STACK_OBJ;
9502 sp [0] = ins;
9503 } else {
9504 if (cmethod->klass->valuetype) {
9505 /* Own method */
9506 } else {
9507 /* Interface method */
9508 int ioffset, slot;
9510 mono_class_setup_vtable (constrained_class);
9511 CHECK_TYPELOAD (constrained_class);
9512 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9513 if (ioffset == -1)
9514 TYPE_LOAD_ERROR (constrained_class);
9515 slot = mono_method_get_vtable_slot (cmethod);
9516 if (slot == -1)
9517 TYPE_LOAD_ERROR (cmethod->klass);
9518 cmethod = constrained_class->vtable [ioffset + slot];
9520 if (cmethod->klass == mono_defaults.enum_class) {
9521 /* Enum implements some interfaces, so treat this as the first case */
9522 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9523 ins->klass = constrained_class;
9524 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9525 CHECK_CFG_EXCEPTION;
9528 virtual_ = 0;
9530 constrained_class = NULL;
9533 if (check_call_signature (cfg, fsig, sp))
9534 UNVERIFIED;
9536 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9537 delegate_invoke = TRUE;
9539 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9540 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9541 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9542 emit_widen = FALSE;
9545 goto call_end;
9549 * If the callee is a shared method, then its static cctor
9550 * might not get called after the call was patched.
9552 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9553 emit_class_init (cfg, cmethod->klass);
9554 CHECK_TYPELOAD (cmethod->klass);
9557 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9559 if (cfg->gshared) {
9560 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9562 context_used = mini_method_check_context_used (cfg, cmethod);
9564 if (context_used && mono_class_is_interface (cmethod->klass)) {
9565 /* Generic method interface
9566 calls are resolved via a
9567 helper function and don't
9568 need an imt. */
9569 if (!cmethod_context || !cmethod_context->method_inst)
9570 pass_imt_from_rgctx = TRUE;
9574 * If a shared method calls another
9575 * shared method then the caller must
9576 * have a generic sharing context
9577 * because the magic trampoline
9578 * requires it. FIXME: We shouldn't
9579 * have to force the vtable/mrgctx
9580 * variable here. Instead there
9581 * should be a flag in the cfg to
9582 * request a generic sharing context.
9584 if (context_used &&
9585 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9586 mono_get_vtable_var (cfg);
9589 if (pass_vtable) {
9590 if (context_used) {
9591 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9592 } else {
9593 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9595 CHECK_TYPELOAD (cmethod->klass);
9596 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9600 if (pass_mrgctx) {
9601 g_assert (!vtable_arg);
9603 if (!cfg->compile_aot) {
9605 * emit_get_rgctx_method () calls mono_class_vtable () so check
9606 * for type load errors before.
9608 mono_class_setup_vtable (cmethod->klass);
9609 CHECK_TYPELOAD (cmethod->klass);
9612 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9614 /* !marshalbyref is needed to properly handle generic methods + remoting */
9615 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9616 MONO_METHOD_IS_FINAL (cmethod)) &&
9617 !mono_class_is_marshalbyref (cmethod->klass)) {
9618 if (virtual_)
9619 check_this = TRUE;
9620 virtual_ = 0;
9624 if (pass_imt_from_rgctx) {
9625 g_assert (!pass_vtable);
9627 imt_arg = emit_get_rgctx_method (cfg, context_used,
9628 cmethod, MONO_RGCTX_INFO_METHOD);
9631 if (check_this)
9632 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9634 /* Calling virtual generic methods */
9635 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9636 !(MONO_METHOD_IS_FINAL (cmethod) &&
9637 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9638 fsig->generic_param_count &&
9639 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9640 !cfg->llvm_only) {
9641 MonoInst *this_temp, *this_arg_temp, *store;
9642 MonoInst *iargs [4];
9644 g_assert (fsig->is_inflated);
9646 /* Prevent inlining of methods that contain indirect calls */
9647 INLINE_FAILURE ("virtual generic call");
9649 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9650 GSHAREDVT_FAILURE (*ip);
9652 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9653 g_assert (!imt_arg);
9654 if (!context_used)
9655 g_assert (cmethod->is_inflated);
9656 imt_arg = emit_get_rgctx_method (cfg, context_used,
9657 cmethod, MONO_RGCTX_INFO_METHOD);
9658 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9659 } else {
9660 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9661 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9662 MONO_ADD_INS (cfg->cbb, store);
9664 /* FIXME: This should be a managed pointer */
9665 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9667 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9668 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9669 cmethod, MONO_RGCTX_INFO_METHOD);
9670 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9671 addr = mono_emit_jit_icall (cfg,
9672 mono_helper_compile_generic_method, iargs);
9674 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9676 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9679 goto call_end;
9683 * Implement a workaround for the inherent races involved in locking:
9684 * Monitor.Enter ()
9685 * try {
9686 * } finally {
9687 * Monitor.Exit ()
9689 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9690 * try block, the Exit () won't be executed, see:
9691 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9692 * To work around this, we extend such try blocks to include the last x bytes
9693 * of the Monitor.Enter () call.
9695 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9696 MonoBasicBlock *tbb;
9698 GET_BBLOCK (cfg, tbb, ip + 5);
9700 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9701 * from Monitor.Enter like ArgumentNullException.
9703 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9704 /* Mark this bblock as needing to be extended */
9705 tbb->extend_try_block = TRUE;
9709 /* Conversion to a JIT intrinsic */
9710 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9711 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9712 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9713 emit_widen = FALSE;
9715 goto call_end;
9717 CHECK_CFG_ERROR;
9719 /* Inlining */
9720 if ((cfg->opt & MONO_OPT_INLINE) &&
9721 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9722 mono_method_check_inlining (cfg, cmethod)) {
9723 int costs;
9724 gboolean always = FALSE;
9726 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9727 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9728 /* Prevent inlining of methods that call wrappers */
9729 INLINE_FAILURE ("wrapper call");
9730 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9731 always = TRUE;
9734 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9735 if (costs) {
9736 cfg->real_offset += 5;
9738 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9739 /* *sp is already set by inline_method */
9740 sp++;
9741 push_res = FALSE;
9744 inline_costs += costs;
9746 goto call_end;
9750 /* Tail recursion elimination */
9751 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9752 gboolean has_vtargs = FALSE;
9753 int i;
9755 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9756 INLINE_FAILURE ("tail call");
9758 /* keep it simple */
9759 for (i = fsig->param_count - 1; i >= 0; i--) {
9760 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9761 has_vtargs = TRUE;
9764 if (!has_vtargs) {
9765 if (need_seq_point) {
9766 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9767 need_seq_point = FALSE;
9769 for (i = 0; i < n; ++i)
9770 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9771 MONO_INST_NEW (cfg, ins, OP_BR);
9772 MONO_ADD_INS (cfg->cbb, ins);
9773 tblock = start_bblock->out_bb [0];
9774 link_bblock (cfg, cfg->cbb, tblock);
9775 ins->inst_target_bb = tblock;
9776 start_new_bblock = 1;
9778 /* skip the CEE_RET, too */
9779 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9780 skip_ret = TRUE;
9781 push_res = FALSE;
9782 goto call_end;
9786 inline_costs += 10 * num_calls++;
9789 * Synchronized wrappers.
9790 * Its hard to determine where to replace a method with its synchronized
9791 * wrapper without causing an infinite recursion. The current solution is
9792 * to add the synchronized wrapper in the trampolines, and to
9793 * change the called method to a dummy wrapper, and resolve that wrapper
9794 * to the real method in mono_jit_compile_method ().
9796 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9797 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9798 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9799 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9803 * Making generic calls out of gsharedvt methods.
9804 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9805 * patching gshared method addresses into a gsharedvt method.
9807 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9808 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9809 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9810 MonoRgctxInfoType info_type;
9812 if (virtual_) {
9813 //if (mono_class_is_interface (cmethod->klass))
9814 //GSHAREDVT_FAILURE (*ip);
9815 // disable for possible remoting calls
9816 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9817 GSHAREDVT_FAILURE (*ip);
9818 if (fsig->generic_param_count) {
9819 /* virtual generic call */
9820 g_assert (!imt_arg);
9821 /* Same as the virtual generic case above */
9822 imt_arg = emit_get_rgctx_method (cfg, context_used,
9823 cmethod, MONO_RGCTX_INFO_METHOD);
9824 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9825 vtable_arg = NULL;
9826 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9827 /* This can happen when we call a fully instantiated iface method */
9828 imt_arg = emit_get_rgctx_method (cfg, context_used,
9829 cmethod, MONO_RGCTX_INFO_METHOD);
9830 vtable_arg = NULL;
9834 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9835 keep_this_alive = sp [0];
9837 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9838 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9839 else
9840 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9841 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9843 if (cfg->llvm_only) {
9844 // FIXME: Avoid initializing vtable_arg
9845 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9846 } else {
9847 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9849 goto call_end;
9852 /* Generic sharing */
9855 * Use this if the callee is gsharedvt sharable too, since
9856 * at runtime we might find an instantiation so the call cannot
9857 * be patched (the 'no_patch' code path in mini-trampolines.c).
9859 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9860 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9861 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9862 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9863 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9864 INLINE_FAILURE ("gshared");
9866 g_assert (cfg->gshared && cmethod);
9867 g_assert (!addr);
9870 * We are compiling a call to a
9871 * generic method from shared code,
9872 * which means that we have to look up
9873 * the method in the rgctx and do an
9874 * indirect call.
9876 if (fsig->hasthis)
9877 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9879 if (cfg->llvm_only) {
9880 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9881 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9882 else
9883 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9884 // FIXME: Avoid initializing imt_arg/vtable_arg
9885 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9886 } else {
9887 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9888 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9890 goto call_end;
9893 /* Direct calls to icalls */
9894 if (direct_icall) {
9895 MonoMethod *wrapper;
9896 int costs;
9898 /* Inline the wrapper */
9899 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9901 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9902 g_assert (costs > 0);
9903 cfg->real_offset += 5;
9905 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9906 /* *sp is already set by inline_method */
9907 sp++;
9908 push_res = FALSE;
9911 inline_costs += costs;
9913 goto call_end;
9916 /* Array methods */
9917 if (array_rank) {
9918 MonoInst *addr;
9920 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9921 MonoInst *val = sp [fsig->param_count];
9923 if (val->type == STACK_OBJ) {
9924 MonoInst *iargs [2];
9926 iargs [0] = sp [0];
9927 iargs [1] = val;
9929 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9932 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9933 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9934 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9935 emit_write_barrier (cfg, addr, val);
9936 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9937 GSHAREDVT_FAILURE (*ip);
9938 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9939 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9941 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9942 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9943 if (!cmethod->klass->element_class->valuetype && !readonly)
9944 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9945 CHECK_TYPELOAD (cmethod->klass);
9947 readonly = FALSE;
9948 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9949 ins = addr;
9950 } else {
9951 g_assert_not_reached ();
9954 emit_widen = FALSE;
9955 goto call_end;
9958 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9959 if (ins)
9960 goto call_end;
9962 /* Tail prefix / tail call optimization */
9964 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9965 /* FIXME: runtime generic context pointer for jumps? */
9966 /* FIXME: handle this for generic sharing eventually */
9967 if ((ins_flag & MONO_INST_TAILCALL) &&
9968 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9969 supported_tail_call = TRUE;
9971 if (supported_tail_call) {
9972 MonoCallInst *call;
9974 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9975 INLINE_FAILURE ("tail call");
9977 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9979 if (cfg->backend->have_op_tail_call) {
9980 /* Handle tail calls similarly to normal calls */
9981 tail_call = TRUE;
9982 } else {
9983 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9985 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9986 call->tail_call = TRUE;
9987 call->method = cmethod;
9988 call->signature = mono_method_signature (cmethod);
9991 * We implement tail calls by storing the actual arguments into the
9992 * argument variables, then emitting a CEE_JMP.
9994 for (i = 0; i < n; ++i) {
9995 /* Prevent argument from being register allocated */
9996 arg_array [i]->flags |= MONO_INST_VOLATILE;
9997 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9999 ins = (MonoInst*)call;
10000 ins->inst_p0 = cmethod;
10001 ins->inst_p1 = arg_array [0];
10002 MONO_ADD_INS (cfg->cbb, ins);
10003 link_bblock (cfg, cfg->cbb, end_bblock);
10004 start_new_bblock = 1;
10006 // FIXME: Eliminate unreachable epilogs
10009 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
10010 * only reachable from this call.
10012 GET_BBLOCK (cfg, tblock, ip + 5);
10013 if (tblock == cfg->cbb || tblock->in_count == 0)
10014 skip_ret = TRUE;
10015 push_res = FALSE;
10017 goto call_end;
10022 * Virtual calls in llvm-only mode.
10024 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
10025 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
10026 goto call_end;
10029 /* Common call */
10030 INLINE_FAILURE ("call");
10031 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
10032 imt_arg, vtable_arg);
10034 if (tail_call && !cfg->llvm_only) {
10035 link_bblock (cfg, cfg->cbb, end_bblock);
10036 start_new_bblock = 1;
10038 // FIXME: Eliminate unreachable epilogs
10041 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
10042 * only reachable from this call.
10044 GET_BBLOCK (cfg, tblock, ip + 5);
10045 if (tblock == cfg->cbb || tblock->in_count == 0)
10046 skip_ret = TRUE;
10047 push_res = FALSE;
10050 call_end:
10052 /* End of call, INS should contain the result of the call, if any */
10054 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10055 g_assert (ins);
10056 if (emit_widen)
10057 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10058 else
10059 *sp++ = ins;
10062 if (keep_this_alive) {
10063 MonoInst *dummy_use;
10065 /* See mono_emit_method_call_full () */
10066 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10069 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
10071 * Clang can convert these calls to tail calls which screw up the stack
10072 * walk. This happens even when the -fno-optimize-sibling-calls
10073 * option is passed to clang.
10074 * Work around this by emitting a dummy call.
10076 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
10079 CHECK_CFG_EXCEPTION;
10081 ip += 5;
10082 if (skip_ret) {
10083 g_assert (*ip == CEE_RET);
10084 ip += 1;
10086 ins_flag = 0;
10087 constrained_class = NULL;
10088 if (need_seq_point)
10089 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10090 break;
10092 case CEE_RET:
10093 if (cfg->method != method) {
10094 /* return from inlined method */
10096 * If in_count == 0, that means the ret is unreachable due to
10097 * being preceeded by a throw. In that case, inline_method () will
10098 * handle setting the return value
10099 * (test case: test_0_inline_throw ()).
10101 if (return_var && cfg->cbb->in_count) {
10102 MonoType *ret_type = mono_method_signature (method)->ret;
10104 MonoInst *store;
10105 CHECK_STACK (1);
10106 --sp;
10108 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10109 UNVERIFIED;
10111 //g_assert (returnvar != -1);
10112 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10113 cfg->ret_var_set = TRUE;
10115 } else {
10116 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10118 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10119 emit_pop_lmf (cfg);
10121 if (cfg->ret) {
10122 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10124 if (seq_points && !sym_seq_points) {
10126 * Place a seq point here too even through the IL stack is not
10127 * empty, so a step over on
10128 * call <FOO>
10129 * ret
10130 * will work correctly.
10132 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10133 MONO_ADD_INS (cfg->cbb, ins);
10136 g_assert (!return_var);
10137 CHECK_STACK (1);
10138 --sp;
10140 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10141 UNVERIFIED;
10143 emit_setret (cfg, *sp);
10146 if (sp != stack_start)
10147 UNVERIFIED;
10148 MONO_INST_NEW (cfg, ins, OP_BR);
10149 ip++;
10150 ins->inst_target_bb = end_bblock;
10151 MONO_ADD_INS (cfg->cbb, ins);
10152 link_bblock (cfg, cfg->cbb, end_bblock);
10153 start_new_bblock = 1;
10154 break;
10155 case CEE_BR_S:
10156 CHECK_OPSIZE (2);
10157 MONO_INST_NEW (cfg, ins, OP_BR);
10158 ip++;
10159 target = ip + 1 + (signed char)(*ip);
10160 ++ip;
10161 GET_BBLOCK (cfg, tblock, target);
10162 link_bblock (cfg, cfg->cbb, tblock);
10163 ins->inst_target_bb = tblock;
10164 if (sp != stack_start) {
10165 handle_stack_args (cfg, stack_start, sp - stack_start);
10166 sp = stack_start;
10167 CHECK_UNVERIFIABLE (cfg);
10169 MONO_ADD_INS (cfg->cbb, ins);
10170 start_new_bblock = 1;
10171 inline_costs += BRANCH_COST;
10172 break;
10173 case CEE_BEQ_S:
10174 case CEE_BGE_S:
10175 case CEE_BGT_S:
10176 case CEE_BLE_S:
10177 case CEE_BLT_S:
10178 case CEE_BNE_UN_S:
10179 case CEE_BGE_UN_S:
10180 case CEE_BGT_UN_S:
10181 case CEE_BLE_UN_S:
10182 case CEE_BLT_UN_S:
10183 CHECK_OPSIZE (2);
10184 CHECK_STACK (2);
10185 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10186 ip++;
10187 target = ip + 1 + *(signed char*)ip;
10188 ip++;
10190 ADD_BINCOND (NULL);
10192 sp = stack_start;
10193 inline_costs += BRANCH_COST;
10194 break;
10195 case CEE_BR:
10196 CHECK_OPSIZE (5);
10197 MONO_INST_NEW (cfg, ins, OP_BR);
10198 ip++;
10200 target = ip + 4 + (gint32)read32(ip);
10201 ip += 4;
10202 GET_BBLOCK (cfg, tblock, target);
10203 link_bblock (cfg, cfg->cbb, tblock);
10204 ins->inst_target_bb = tblock;
10205 if (sp != stack_start) {
10206 handle_stack_args (cfg, stack_start, sp - stack_start);
10207 sp = stack_start;
10208 CHECK_UNVERIFIABLE (cfg);
10211 MONO_ADD_INS (cfg->cbb, ins);
10213 start_new_bblock = 1;
10214 inline_costs += BRANCH_COST;
10215 break;
10216 case CEE_BRFALSE_S:
10217 case CEE_BRTRUE_S:
10218 case CEE_BRFALSE:
10219 case CEE_BRTRUE: {
10220 MonoInst *cmp;
10221 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10222 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10223 guint32 opsize = is_short ? 1 : 4;
10225 CHECK_OPSIZE (opsize);
10226 CHECK_STACK (1);
10227 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10228 UNVERIFIED;
10229 ip ++;
10230 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10231 ip += opsize;
10233 sp--;
10235 GET_BBLOCK (cfg, tblock, target);
10236 link_bblock (cfg, cfg->cbb, tblock);
10237 GET_BBLOCK (cfg, tblock, ip);
10238 link_bblock (cfg, cfg->cbb, tblock);
10240 if (sp != stack_start) {
10241 handle_stack_args (cfg, stack_start, sp - stack_start);
10242 CHECK_UNVERIFIABLE (cfg);
10245 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10246 cmp->sreg1 = sp [0]->dreg;
10247 type_from_op (cfg, cmp, sp [0], NULL);
10248 CHECK_TYPE (cmp);
10250 #if SIZEOF_REGISTER == 4
10251 if (cmp->opcode == OP_LCOMPARE_IMM) {
10252 /* Convert it to OP_LCOMPARE */
10253 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10254 ins->type = STACK_I8;
10255 ins->dreg = alloc_dreg (cfg, STACK_I8);
10256 ins->inst_l = 0;
10257 MONO_ADD_INS (cfg->cbb, ins);
10258 cmp->opcode = OP_LCOMPARE;
10259 cmp->sreg2 = ins->dreg;
10261 #endif
10262 MONO_ADD_INS (cfg->cbb, cmp);
10264 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10265 type_from_op (cfg, ins, sp [0], NULL);
10266 MONO_ADD_INS (cfg->cbb, ins);
10267 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10268 GET_BBLOCK (cfg, tblock, target);
10269 ins->inst_true_bb = tblock;
10270 GET_BBLOCK (cfg, tblock, ip);
10271 ins->inst_false_bb = tblock;
10272 start_new_bblock = 2;
10274 sp = stack_start;
10275 inline_costs += BRANCH_COST;
10276 break;
10278 case CEE_BEQ:
10279 case CEE_BGE:
10280 case CEE_BGT:
10281 case CEE_BLE:
10282 case CEE_BLT:
10283 case CEE_BNE_UN:
10284 case CEE_BGE_UN:
10285 case CEE_BGT_UN:
10286 case CEE_BLE_UN:
10287 case CEE_BLT_UN:
10288 CHECK_OPSIZE (5);
10289 CHECK_STACK (2);
10290 MONO_INST_NEW (cfg, ins, *ip);
10291 ip++;
10292 target = ip + 4 + (gint32)read32(ip);
10293 ip += 4;
10295 ADD_BINCOND (NULL);
10297 sp = stack_start;
10298 inline_costs += BRANCH_COST;
10299 break;
10300 case CEE_SWITCH: {
10301 MonoInst *src1;
10302 MonoBasicBlock **targets;
10303 MonoBasicBlock *default_bblock;
10304 MonoJumpInfoBBTable *table;
10305 int offset_reg = alloc_preg (cfg);
10306 int target_reg = alloc_preg (cfg);
10307 int table_reg = alloc_preg (cfg);
10308 int sum_reg = alloc_preg (cfg);
10309 gboolean use_op_switch;
10311 CHECK_OPSIZE (5);
10312 CHECK_STACK (1);
10313 n = read32 (ip + 1);
10314 --sp;
10315 src1 = sp [0];
10316 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10317 UNVERIFIED;
10319 ip += 5;
10320 CHECK_OPSIZE (n * sizeof (guint32));
10321 target = ip + n * sizeof (guint32);
10323 GET_BBLOCK (cfg, default_bblock, target);
10324 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10326 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10327 for (i = 0; i < n; ++i) {
10328 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10329 targets [i] = tblock;
10330 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10331 ip += 4;
10334 if (sp != stack_start) {
10336 * Link the current bb with the targets as well, so handle_stack_args
10337 * will set their in_stack correctly.
10339 link_bblock (cfg, cfg->cbb, default_bblock);
10340 for (i = 0; i < n; ++i)
10341 link_bblock (cfg, cfg->cbb, targets [i]);
10343 handle_stack_args (cfg, stack_start, sp - stack_start);
10344 sp = stack_start;
10345 CHECK_UNVERIFIABLE (cfg);
10347 /* Undo the links */
10348 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10349 for (i = 0; i < n; ++i)
10350 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10353 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10356 for (i = 0; i < n; ++i)
10357 link_bblock (cfg, cfg->cbb, targets [i]);
10359 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10360 table->table = targets;
10361 table->table_size = n;
10363 use_op_switch = FALSE;
10364 #ifdef TARGET_ARM
10365 /* ARM implements SWITCH statements differently */
10366 /* FIXME: Make it use the generic implementation */
10367 if (!cfg->compile_aot)
10368 use_op_switch = TRUE;
10369 #endif
10371 if (COMPILE_LLVM (cfg))
10372 use_op_switch = TRUE;
10374 cfg->cbb->has_jump_table = 1;
10376 if (use_op_switch) {
10377 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10378 ins->sreg1 = src1->dreg;
10379 ins->inst_p0 = table;
10380 ins->inst_many_bb = targets;
10381 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10382 MONO_ADD_INS (cfg->cbb, ins);
10383 } else {
10384 if (sizeof (gpointer) == 8)
10385 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10386 else
10387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10389 #if SIZEOF_REGISTER == 8
10390 /* The upper word might not be zero, and we add it to a 64 bit address later */
10391 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10392 #endif
10394 if (cfg->compile_aot) {
10395 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10396 } else {
10397 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10398 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10399 ins->inst_p0 = table;
10400 ins->dreg = table_reg;
10401 MONO_ADD_INS (cfg->cbb, ins);
10404 /* FIXME: Use load_memindex */
10405 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10407 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10409 start_new_bblock = 1;
10410 inline_costs += (BRANCH_COST * 2);
10411 break;
10413 case CEE_LDIND_I1:
10414 case CEE_LDIND_U1:
10415 case CEE_LDIND_I2:
10416 case CEE_LDIND_U2:
10417 case CEE_LDIND_I4:
10418 case CEE_LDIND_U4:
10419 case CEE_LDIND_I8:
10420 case CEE_LDIND_I:
10421 case CEE_LDIND_R4:
10422 case CEE_LDIND_R8:
10423 case CEE_LDIND_REF:
10424 CHECK_STACK (1);
10425 --sp;
10427 switch (*ip) {
10428 case CEE_LDIND_R4:
10429 case CEE_LDIND_R8:
10430 dreg = alloc_freg (cfg);
10431 break;
10432 case CEE_LDIND_I8:
10433 dreg = alloc_lreg (cfg);
10434 break;
10435 case CEE_LDIND_REF:
10436 dreg = alloc_ireg_ref (cfg);
10437 break;
10438 default:
10439 dreg = alloc_preg (cfg);
10442 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10443 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10444 if (*ip == CEE_LDIND_R4)
10445 ins->type = cfg->r4_stack_type;
10446 ins->flags |= ins_flag;
10447 MONO_ADD_INS (cfg->cbb, ins);
10448 *sp++ = ins;
10449 if (ins_flag & MONO_INST_VOLATILE) {
10450 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10451 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10453 ins_flag = 0;
10454 ++ip;
10455 break;
10456 case CEE_STIND_REF:
10457 case CEE_STIND_I1:
10458 case CEE_STIND_I2:
10459 case CEE_STIND_I4:
10460 case CEE_STIND_I8:
10461 case CEE_STIND_R4:
10462 case CEE_STIND_R8:
10463 case CEE_STIND_I:
10464 CHECK_STACK (2);
10465 sp -= 2;
10467 if (ins_flag & MONO_INST_VOLATILE) {
10468 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10469 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10472 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10473 ins->flags |= ins_flag;
10474 ins_flag = 0;
10476 MONO_ADD_INS (cfg->cbb, ins);
10478 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
10479 emit_write_barrier (cfg, sp [0], sp [1]);
10481 inline_costs += 1;
10482 ++ip;
10483 break;
10485 case CEE_MUL:
10486 CHECK_STACK (2);
10488 MONO_INST_NEW (cfg, ins, (*ip));
10489 sp -= 2;
10490 ins->sreg1 = sp [0]->dreg;
10491 ins->sreg2 = sp [1]->dreg;
10492 type_from_op (cfg, ins, sp [0], sp [1]);
10493 CHECK_TYPE (ins);
10494 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10496 /* Use the immediate opcodes if possible */
10497 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10498 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10499 if (imm_opcode != -1) {
10500 ins->opcode = imm_opcode;
10501 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10502 ins->sreg2 = -1;
10504 NULLIFY_INS (sp [1]);
10508 MONO_ADD_INS ((cfg)->cbb, (ins));
10510 *sp++ = mono_decompose_opcode (cfg, ins);
10511 ip++;
10512 break;
10513 case CEE_ADD:
10514 case CEE_SUB:
10515 case CEE_DIV:
10516 case CEE_DIV_UN:
10517 case CEE_REM:
10518 case CEE_REM_UN:
10519 case CEE_AND:
10520 case CEE_OR:
10521 case CEE_XOR:
10522 case CEE_SHL:
10523 case CEE_SHR:
10524 case CEE_SHR_UN:
10525 CHECK_STACK (2);
10527 MONO_INST_NEW (cfg, ins, (*ip));
10528 sp -= 2;
10529 ins->sreg1 = sp [0]->dreg;
10530 ins->sreg2 = sp [1]->dreg;
10531 type_from_op (cfg, ins, sp [0], sp [1]);
10532 CHECK_TYPE (ins);
10533 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10534 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10536 /* FIXME: Pass opcode to is_inst_imm */
10538 /* Use the immediate opcodes if possible */
10539 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10540 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10541 if (imm_opcode != -1) {
10542 ins->opcode = imm_opcode;
10543 if (sp [1]->opcode == OP_I8CONST) {
10544 #if SIZEOF_REGISTER == 8
10545 ins->inst_imm = sp [1]->inst_l;
10546 #else
10547 ins->inst_ls_word = sp [1]->inst_ls_word;
10548 ins->inst_ms_word = sp [1]->inst_ms_word;
10549 #endif
10551 else
10552 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10553 ins->sreg2 = -1;
10555 /* Might be followed by an instruction added by add_widen_op */
10556 if (sp [1]->next == NULL)
10557 NULLIFY_INS (sp [1]);
10560 MONO_ADD_INS ((cfg)->cbb, (ins));
10562 *sp++ = mono_decompose_opcode (cfg, ins);
10563 ip++;
10564 break;
10565 case CEE_NEG:
10566 case CEE_NOT:
10567 case CEE_CONV_I1:
10568 case CEE_CONV_I2:
10569 case CEE_CONV_I4:
10570 case CEE_CONV_R4:
10571 case CEE_CONV_R8:
10572 case CEE_CONV_U4:
10573 case CEE_CONV_I8:
10574 case CEE_CONV_U8:
10575 case CEE_CONV_OVF_I8:
10576 case CEE_CONV_OVF_U8:
10577 case CEE_CONV_R_UN:
10578 CHECK_STACK (1);
10580 /* Special case this earlier so we have long constants in the IR */
10581 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10582 int data = sp [-1]->inst_c0;
10583 sp [-1]->opcode = OP_I8CONST;
10584 sp [-1]->type = STACK_I8;
10585 #if SIZEOF_REGISTER == 8
10586 if ((*ip) == CEE_CONV_U8)
10587 sp [-1]->inst_c0 = (guint32)data;
10588 else
10589 sp [-1]->inst_c0 = data;
10590 #else
10591 sp [-1]->inst_ls_word = data;
10592 if ((*ip) == CEE_CONV_U8)
10593 sp [-1]->inst_ms_word = 0;
10594 else
10595 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10596 #endif
10597 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10599 else {
10600 ADD_UNOP (*ip);
10602 ip++;
10603 break;
10604 case CEE_CONV_OVF_I4:
10605 case CEE_CONV_OVF_I1:
10606 case CEE_CONV_OVF_I2:
10607 case CEE_CONV_OVF_I:
10608 case CEE_CONV_OVF_U:
10609 CHECK_STACK (1);
10611 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10612 ADD_UNOP (CEE_CONV_OVF_I8);
10613 ADD_UNOP (*ip);
10614 } else {
10615 ADD_UNOP (*ip);
10617 ip++;
10618 break;
10619 case CEE_CONV_OVF_U1:
10620 case CEE_CONV_OVF_U2:
10621 case CEE_CONV_OVF_U4:
10622 CHECK_STACK (1);
10624 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10625 ADD_UNOP (CEE_CONV_OVF_U8);
10626 ADD_UNOP (*ip);
10627 } else {
10628 ADD_UNOP (*ip);
10630 ip++;
10631 break;
10632 case CEE_CONV_OVF_I1_UN:
10633 case CEE_CONV_OVF_I2_UN:
10634 case CEE_CONV_OVF_I4_UN:
10635 case CEE_CONV_OVF_I8_UN:
10636 case CEE_CONV_OVF_U1_UN:
10637 case CEE_CONV_OVF_U2_UN:
10638 case CEE_CONV_OVF_U4_UN:
10639 case CEE_CONV_OVF_U8_UN:
10640 case CEE_CONV_OVF_I_UN:
10641 case CEE_CONV_OVF_U_UN:
10642 case CEE_CONV_U2:
10643 case CEE_CONV_U1:
10644 case CEE_CONV_I:
10645 case CEE_CONV_U:
10646 CHECK_STACK (1);
10647 ADD_UNOP (*ip);
10648 CHECK_CFG_EXCEPTION;
10649 ip++;
10650 break;
10651 case CEE_ADD_OVF:
10652 case CEE_ADD_OVF_UN:
10653 case CEE_MUL_OVF:
10654 case CEE_MUL_OVF_UN:
10655 case CEE_SUB_OVF:
10656 case CEE_SUB_OVF_UN:
10657 CHECK_STACK (2);
10658 ADD_BINOP (*ip);
10659 ip++;
10660 break;
10661 case CEE_CPOBJ:
10662 GSHAREDVT_FAILURE (*ip);
10663 CHECK_OPSIZE (5);
10664 CHECK_STACK (2);
10665 token = read32 (ip + 1);
10666 klass = mini_get_class (method, token, generic_context);
10667 CHECK_TYPELOAD (klass);
10668 sp -= 2;
10669 if (generic_class_is_reference_type (cfg, klass)) {
10670 MonoInst *store, *load;
10671 int dreg = alloc_ireg_ref (cfg);
10673 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10674 load->flags |= ins_flag;
10675 MONO_ADD_INS (cfg->cbb, load);
10677 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10678 store->flags |= ins_flag;
10679 MONO_ADD_INS (cfg->cbb, store);
10681 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10682 emit_write_barrier (cfg, sp [0], sp [1]);
10683 } else {
10684 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10686 ins_flag = 0;
10687 ip += 5;
10688 break;
10689 case CEE_LDOBJ: {
10690 int loc_index = -1;
10691 int stloc_len = 0;
10693 CHECK_OPSIZE (5);
10694 CHECK_STACK (1);
10695 --sp;
10696 token = read32 (ip + 1);
10697 klass = mini_get_class (method, token, generic_context);
10698 CHECK_TYPELOAD (klass);
10700 /* Optimize the common ldobj+stloc combination */
10701 switch (ip [5]) {
10702 case CEE_STLOC_S:
10703 loc_index = ip [6];
10704 stloc_len = 2;
10705 break;
10706 case CEE_STLOC_0:
10707 case CEE_STLOC_1:
10708 case CEE_STLOC_2:
10709 case CEE_STLOC_3:
10710 loc_index = ip [5] - CEE_STLOC_0;
10711 stloc_len = 1;
10712 break;
10713 default:
10714 break;
10717 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10718 CHECK_LOCAL (loc_index);
10720 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10721 ins->dreg = cfg->locals [loc_index]->dreg;
10722 ins->flags |= ins_flag;
10723 ip += 5;
10724 ip += stloc_len;
10725 if (ins_flag & MONO_INST_VOLATILE) {
10726 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10727 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10729 ins_flag = 0;
10730 break;
10733 /* Optimize the ldobj+stobj combination */
10734 /* The reference case ends up being a load+store anyway */
10735 /* Skip this if the operation is volatile. */
10736 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10737 CHECK_STACK (1);
10739 sp --;
10741 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10743 ip += 5 + 5;
10744 ins_flag = 0;
10745 break;
10748 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10749 ins->flags |= ins_flag;
10750 *sp++ = ins;
10752 if (ins_flag & MONO_INST_VOLATILE) {
10753 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10754 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10757 ip += 5;
10758 ins_flag = 0;
10759 inline_costs += 1;
10760 break;
10762 case CEE_LDSTR:
10763 CHECK_STACK_OVF (1);
10764 CHECK_OPSIZE (5);
10765 n = read32 (ip + 1);
10767 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10768 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10769 ins->type = STACK_OBJ;
10770 *sp = ins;
10772 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10773 MonoInst *iargs [1];
10774 char *str = (char *)mono_method_get_wrapper_data (method, n);
10776 if (cfg->compile_aot)
10777 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10778 else
10779 EMIT_NEW_PCONST (cfg, iargs [0], str);
10780 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10781 } else {
10782 if (cfg->opt & MONO_OPT_SHARED) {
10783 MonoInst *iargs [3];
10785 if (cfg->compile_aot) {
10786 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10788 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10789 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10790 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10791 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10792 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10793 CHECK_CFG_ERROR;
10794 } else {
10795 if (cfg->cbb->out_of_line) {
10796 MonoInst *iargs [2];
10798 if (image == mono_defaults.corlib) {
10800 * Avoid relocations in AOT and save some space by using a
10801 * version of helper_ldstr specialized to mscorlib.
10803 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10804 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10805 } else {
10806 /* Avoid creating the string object */
10807 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10808 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10809 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10812 else
10813 if (cfg->compile_aot) {
10814 NEW_LDSTRCONST (cfg, ins, image, n);
10815 *sp = ins;
10816 MONO_ADD_INS (cfg->cbb, ins);
10818 else {
10819 NEW_PCONST (cfg, ins, NULL);
10820 ins->type = STACK_OBJ;
10821 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10822 CHECK_CFG_ERROR;
10824 if (!ins->inst_p0)
10825 OUT_OF_MEMORY_FAILURE;
10827 *sp = ins;
10828 MONO_ADD_INS (cfg->cbb, ins);
10833 sp++;
10834 ip += 5;
10835 break;
10836 case CEE_NEWOBJ: {
10837 MonoInst *iargs [2];
10838 MonoMethodSignature *fsig;
10839 MonoInst this_ins;
10840 MonoInst *alloc;
10841 MonoInst *vtable_arg = NULL;
10843 CHECK_OPSIZE (5);
10844 token = read32 (ip + 1);
10845 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10846 CHECK_CFG_ERROR;
10848 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10849 CHECK_CFG_ERROR;
10851 mono_save_token_info (cfg, image, token, cmethod);
10853 if (!mono_class_init (cmethod->klass))
10854 TYPE_LOAD_ERROR (cmethod->klass);
10856 context_used = mini_method_check_context_used (cfg, cmethod);
10858 if (mono_security_core_clr_enabled ())
10859 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10861 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10862 emit_class_init (cfg, cmethod->klass);
10863 CHECK_TYPELOAD (cmethod->klass);
10867 if (cfg->gsharedvt) {
10868 if (mini_is_gsharedvt_variable_signature (sig))
10869 GSHAREDVT_FAILURE (*ip);
10873 n = fsig->param_count;
10874 CHECK_STACK (n);
10877 * Generate smaller code for the common newobj <exception> instruction in
10878 * argument checking code.
10880 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10881 is_exception_class (cmethod->klass) && n <= 2 &&
10882 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10883 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10884 MonoInst *iargs [3];
10886 sp -= n;
10888 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10889 switch (n) {
10890 case 0:
10891 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10892 break;
10893 case 1:
10894 iargs [1] = sp [0];
10895 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10896 break;
10897 case 2:
10898 iargs [1] = sp [0];
10899 iargs [2] = sp [1];
10900 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10901 break;
10902 default:
10903 g_assert_not_reached ();
10906 ip += 5;
10907 inline_costs += 5;
10908 break;
10911 /* move the args to allow room for 'this' in the first position */
10912 while (n--) {
10913 --sp;
10914 sp [1] = sp [0];
10917 /* check_call_signature () requires sp[0] to be set */
10918 this_ins.type = STACK_OBJ;
10919 sp [0] = &this_ins;
10920 if (check_call_signature (cfg, fsig, sp))
10921 UNVERIFIED;
10923 iargs [0] = NULL;
10925 if (mini_class_is_system_array (cmethod->klass)) {
10926 *sp = emit_get_rgctx_method (cfg, context_used,
10927 cmethod, MONO_RGCTX_INFO_METHOD);
10929 /* Avoid varargs in the common case */
10930 if (fsig->param_count == 1)
10931 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10932 else if (fsig->param_count == 2)
10933 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10934 else if (fsig->param_count == 3)
10935 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10936 else if (fsig->param_count == 4)
10937 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10938 else
10939 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10940 } else if (cmethod->string_ctor) {
10941 g_assert (!context_used);
10942 g_assert (!vtable_arg);
10943 /* we simply pass a null pointer */
10944 EMIT_NEW_PCONST (cfg, *sp, NULL);
10945 /* now call the string ctor */
10946 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10947 } else {
10948 if (cmethod->klass->valuetype) {
10949 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10950 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10951 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10953 alloc = NULL;
10956 * The code generated by mini_emit_virtual_call () expects
10957 * iargs [0] to be a boxed instance, but luckily the vcall
10958 * will be transformed into a normal call there.
10960 } else if (context_used) {
10961 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10962 *sp = alloc;
10963 } else {
10964 MonoVTable *vtable = NULL;
10966 if (!cfg->compile_aot)
10967 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10968 CHECK_TYPELOAD (cmethod->klass);
10971 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10972 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10973 * As a workaround, we call class cctors before allocating objects.
10975 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10976 emit_class_init (cfg, cmethod->klass);
10977 if (cfg->verbose_level > 2)
10978 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10979 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10982 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10983 *sp = alloc;
10985 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10987 if (alloc)
10988 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10990 /* Now call the actual ctor */
10991 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10992 CHECK_CFG_EXCEPTION;
10995 if (alloc == NULL) {
10996 /* Valuetype */
10997 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10998 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10999 *sp++= ins;
11000 } else {
11001 *sp++ = alloc;
11004 ip += 5;
11005 inline_costs += 5;
11006 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
11007 emit_seq_point (cfg, method, ip, FALSE, TRUE);
11008 break;
11010 case CEE_CASTCLASS:
11011 case CEE_ISINST: {
11012 CHECK_STACK (1);
11013 --sp;
11014 CHECK_OPSIZE (5);
11015 token = read32 (ip + 1);
11016 klass = mini_get_class (method, token, generic_context);
11017 CHECK_TYPELOAD (klass);
11018 if (sp [0]->type != STACK_OBJ)
11019 UNVERIFIED;
11021 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
11022 ins->dreg = alloc_preg (cfg);
11023 ins->sreg1 = (*sp)->dreg;
11024 ins->klass = klass;
11025 ins->type = STACK_OBJ;
11026 MONO_ADD_INS (cfg->cbb, ins);
11028 CHECK_CFG_EXCEPTION;
11029 *sp++ = ins;
11030 ip += 5;
11032 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
11033 break;
11035 case CEE_UNBOX_ANY: {
11036 MonoInst *res, *addr;
11038 CHECK_STACK (1);
11039 --sp;
11040 CHECK_OPSIZE (5);
11041 token = read32 (ip + 1);
11042 klass = mini_get_class (method, token, generic_context);
11043 CHECK_TYPELOAD (klass);
11045 mono_save_token_info (cfg, image, token, klass);
11047 context_used = mini_class_check_context_used (cfg, klass);
11049 if (mini_is_gsharedvt_klass (klass)) {
11050 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11051 inline_costs += 2;
11052 } else if (generic_class_is_reference_type (cfg, klass)) {
11053 if (MONO_INS_IS_PCONST_NULL (*sp)) {
11054 EMIT_NEW_PCONST (cfg, res, NULL);
11055 res->type = STACK_OBJ;
11056 } else {
11057 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
11058 res->dreg = alloc_preg (cfg);
11059 res->sreg1 = (*sp)->dreg;
11060 res->klass = klass;
11061 res->type = STACK_OBJ;
11062 MONO_ADD_INS (cfg->cbb, res);
11063 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
11065 } else if (mono_class_is_nullable (klass)) {
11066 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11067 } else {
11068 addr = handle_unbox (cfg, klass, sp, context_used);
11069 /* LDOBJ */
11070 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11071 res = ins;
11072 inline_costs += 2;
11075 *sp ++ = res;
11076 ip += 5;
11077 break;
11079 case CEE_BOX: {
11080 MonoInst *val;
11081 MonoClass *enum_class;
11082 MonoMethod *has_flag;
11084 CHECK_STACK (1);
11085 --sp;
11086 val = *sp;
11087 CHECK_OPSIZE (5);
11088 token = read32 (ip + 1);
11089 klass = mini_get_class (method, token, generic_context);
11090 CHECK_TYPELOAD (klass);
11092 mono_save_token_info (cfg, image, token, klass);
11094 context_used = mini_class_check_context_used (cfg, klass);
11096 if (generic_class_is_reference_type (cfg, klass)) {
11097 *sp++ = val;
11098 ip += 5;
11099 break;
11102 if (klass == mono_defaults.void_class)
11103 UNVERIFIED;
11104 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11105 UNVERIFIED;
11106 /* frequent check in generic code: box (struct), brtrue */
11109 * Look for:
11111 * <push int/long ptr>
11112 * <push int/long>
11113 * box MyFlags
11114 * constrained. MyFlags
11115 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11117 * If we find this sequence and the operand types on box and constrained
11118 * are equal, we can emit a specialized instruction sequence instead of
11119 * the very slow HasFlag () call.
11121 if ((cfg->opt & MONO_OPT_INTRINS) &&
11122 /* Cheap checks first. */
11123 ip + 5 + 6 + 5 < end &&
11124 ip [5] == CEE_PREFIX1 &&
11125 ip [6] == CEE_CONSTRAINED_ &&
11126 ip [11] == CEE_CALLVIRT &&
11127 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11128 mono_class_is_enum (klass) &&
11129 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11130 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11131 has_flag->klass == mono_defaults.enum_class &&
11132 !strcmp (has_flag->name, "HasFlag") &&
11133 has_flag->signature->hasthis &&
11134 has_flag->signature->param_count == 1) {
11135 CHECK_TYPELOAD (enum_class);
11137 if (enum_class == klass) {
11138 MonoInst *enum_this, *enum_flag;
11140 ip += 5 + 6 + 5;
11141 --sp;
11143 enum_this = sp [0];
11144 enum_flag = sp [1];
11146 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11147 break;
11151 // FIXME: LLVM can't handle the inconsistent bb linking
11152 if (!mono_class_is_nullable (klass) &&
11153 !mini_is_gsharedvt_klass (klass) &&
11154 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11155 (ip [5] == CEE_BRTRUE ||
11156 ip [5] == CEE_BRTRUE_S ||
11157 ip [5] == CEE_BRFALSE ||
11158 ip [5] == CEE_BRFALSE_S)) {
11159 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11160 int dreg;
11161 MonoBasicBlock *true_bb, *false_bb;
11163 ip += 5;
11165 if (cfg->verbose_level > 3) {
11166 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11167 printf ("<box+brtrue opt>\n");
11170 switch (*ip) {
11171 case CEE_BRTRUE_S:
11172 case CEE_BRFALSE_S:
11173 CHECK_OPSIZE (2);
11174 ip++;
11175 target = ip + 1 + (signed char)(*ip);
11176 ip++;
11177 break;
11178 case CEE_BRTRUE:
11179 case CEE_BRFALSE:
11180 CHECK_OPSIZE (5);
11181 ip++;
11182 target = ip + 4 + (gint)(read32 (ip));
11183 ip += 4;
11184 break;
11185 default:
11186 g_assert_not_reached ();
11190 * We need to link both bblocks, since it is needed for handling stack
11191 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11192 * Branching to only one of them would lead to inconsistencies, so
11193 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11195 GET_BBLOCK (cfg, true_bb, target);
11196 GET_BBLOCK (cfg, false_bb, ip);
11198 mono_link_bblock (cfg, cfg->cbb, true_bb);
11199 mono_link_bblock (cfg, cfg->cbb, false_bb);
11201 if (sp != stack_start) {
11202 handle_stack_args (cfg, stack_start, sp - stack_start);
11203 sp = stack_start;
11204 CHECK_UNVERIFIABLE (cfg);
11207 if (COMPILE_LLVM (cfg)) {
11208 dreg = alloc_ireg (cfg);
11209 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11210 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11212 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11213 } else {
11214 /* The JIT can't eliminate the iconst+compare */
11215 MONO_INST_NEW (cfg, ins, OP_BR);
11216 ins->inst_target_bb = is_true ? true_bb : false_bb;
11217 MONO_ADD_INS (cfg->cbb, ins);
11220 start_new_bblock = 1;
11221 break;
11224 *sp++ = handle_box (cfg, val, klass, context_used);
11226 CHECK_CFG_EXCEPTION;
11227 ip += 5;
11228 inline_costs += 1;
11229 break;
11231 case CEE_UNBOX: {
11232 CHECK_STACK (1);
11233 --sp;
11234 CHECK_OPSIZE (5);
11235 token = read32 (ip + 1);
11236 klass = mini_get_class (method, token, generic_context);
11237 CHECK_TYPELOAD (klass);
11239 mono_save_token_info (cfg, image, token, klass);
11241 context_used = mini_class_check_context_used (cfg, klass);
11243 if (mono_class_is_nullable (klass)) {
11244 MonoInst *val;
11246 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11247 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11249 *sp++= ins;
11250 } else {
11251 ins = handle_unbox (cfg, klass, sp, context_used);
11252 *sp++ = ins;
11254 ip += 5;
11255 inline_costs += 2;
11256 break;
11258 case CEE_LDFLD:
11259 case CEE_LDFLDA:
11260 case CEE_STFLD:
11261 case CEE_LDSFLD:
11262 case CEE_LDSFLDA:
11263 case CEE_STSFLD: {
11264 MonoClassField *field;
11265 #ifndef DISABLE_REMOTING
11266 int costs;
11267 #endif
11268 guint foffset;
11269 gboolean is_instance;
11270 int op;
11271 gpointer addr = NULL;
11272 gboolean is_special_static;
11273 MonoType *ftype;
11274 MonoInst *store_val = NULL;
11275 MonoInst *thread_ins;
11277 op = *ip;
11278 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11279 if (is_instance) {
11280 if (op == CEE_STFLD) {
11281 CHECK_STACK (2);
11282 sp -= 2;
11283 store_val = sp [1];
11284 } else {
11285 CHECK_STACK (1);
11286 --sp;
11288 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11289 UNVERIFIED;
11290 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11291 UNVERIFIED;
11292 } else {
11293 if (op == CEE_STSFLD) {
11294 CHECK_STACK (1);
11295 sp--;
11296 store_val = sp [0];
11300 CHECK_OPSIZE (5);
11301 token = read32 (ip + 1);
11302 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11303 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11304 klass = field->parent;
11306 else {
11307 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11308 CHECK_CFG_ERROR;
11310 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11311 FIELD_ACCESS_FAILURE (method, field);
11312 mono_class_init (klass);
11314 /* if the class is Critical then transparent code cannot access it's fields */
11315 if (!is_instance && mono_security_core_clr_enabled ())
11316 ensure_method_is_allowed_to_access_field (cfg, method, field);
11318 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
11319 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11320 if (mono_security_core_clr_enabled ())
11321 ensure_method_is_allowed_to_access_field (cfg, method, field);
11324 ftype = mono_field_get_type (field);
11327 * LDFLD etc. is usable on static fields as well, so convert those cases to
11328 * the static case.
11330 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11331 switch (op) {
11332 case CEE_LDFLD:
11333 op = CEE_LDSFLD;
11334 break;
11335 case CEE_STFLD:
11336 op = CEE_STSFLD;
11337 break;
11338 case CEE_LDFLDA:
11339 op = CEE_LDSFLDA;
11340 break;
11341 default:
11342 g_assert_not_reached ();
11344 is_instance = FALSE;
11347 context_used = mini_class_check_context_used (cfg, klass);
11349 /* INSTANCE CASE */
11351 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11352 if (op == CEE_STFLD) {
11353 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11354 UNVERIFIED;
11355 #ifndef DISABLE_REMOTING
11356 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11357 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11358 MonoInst *iargs [5];
11360 GSHAREDVT_FAILURE (op);
11362 iargs [0] = sp [0];
11363 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11364 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11365 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11366 field->offset);
11367 iargs [4] = sp [1];
11369 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11370 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11371 iargs, ip, cfg->real_offset, TRUE);
11372 CHECK_CFG_EXCEPTION;
11373 g_assert (costs > 0);
11375 cfg->real_offset += 5;
11377 inline_costs += costs;
11378 } else {
11379 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11381 } else
11382 #endif
11384 MonoInst *store, *wbarrier_ptr_ins = NULL;
11386 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11388 if (ins_flag & MONO_INST_VOLATILE) {
11389 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11390 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11393 if (mini_is_gsharedvt_klass (klass)) {
11394 MonoInst *offset_ins;
11396 context_used = mini_class_check_context_used (cfg, klass);
11398 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11399 /* The value is offset by 1 */
11400 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11401 dreg = alloc_ireg_mp (cfg);
11402 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11403 wbarrier_ptr_ins = ins;
11404 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11405 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11406 } else {
11407 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11409 if (sp [0]->opcode != OP_LDADDR)
11410 store->flags |= MONO_INST_FAULT;
11412 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11413 if (mini_is_gsharedvt_klass (klass)) {
11414 g_assert (wbarrier_ptr_ins);
11415 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
11416 } else {
11417 /* insert call to write barrier */
11418 MonoInst *ptr;
11419 int dreg;
11421 dreg = alloc_ireg_mp (cfg);
11422 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11423 emit_write_barrier (cfg, ptr, sp [1]);
11427 store->flags |= ins_flag;
11429 ins_flag = 0;
11430 ip += 5;
11431 break;
11434 #ifndef DISABLE_REMOTING
11435 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11436 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11437 MonoInst *iargs [4];
11439 GSHAREDVT_FAILURE (op);
11441 iargs [0] = sp [0];
11442 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11443 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11444 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11445 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11446 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11447 iargs, ip, cfg->real_offset, TRUE);
11448 CHECK_CFG_EXCEPTION;
11449 g_assert (costs > 0);
11451 cfg->real_offset += 5;
11453 *sp++ = iargs [0];
11455 inline_costs += costs;
11456 } else {
11457 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11458 *sp++ = ins;
11460 } else
11461 #endif
11462 if (is_instance) {
11463 if (sp [0]->type == STACK_VTYPE) {
11464 MonoInst *var;
11466 /* Have to compute the address of the variable */
11468 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11469 if (!var)
11470 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11471 else
11472 g_assert (var->klass == klass);
11474 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11475 sp [0] = ins;
11478 if (op == CEE_LDFLDA) {
11479 if (sp [0]->type == STACK_OBJ) {
11480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11481 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11484 dreg = alloc_ireg_mp (cfg);
11486 if (mini_is_gsharedvt_klass (klass)) {
11487 MonoInst *offset_ins;
11489 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11490 /* The value is offset by 1 */
11491 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11492 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11493 } else {
11494 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11496 ins->klass = mono_class_from_mono_type (field->type);
11497 ins->type = STACK_MP;
11498 *sp++ = ins;
11499 } else {
11500 MonoInst *load;
11502 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11504 if (mini_is_gsharedvt_klass (klass)) {
11505 MonoInst *offset_ins;
11507 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11508 /* The value is offset by 1 */
11509 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11510 dreg = alloc_ireg_mp (cfg);
11511 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11512 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11513 } else {
11514 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11516 load->flags |= ins_flag;
11517 if (sp [0]->opcode != OP_LDADDR)
11518 load->flags |= MONO_INST_FAULT;
11519 *sp++ = load;
11523 if (is_instance) {
11524 ins_flag = 0;
11525 ip += 5;
11526 break;
11529 /* STATIC CASE */
11530 context_used = mini_class_check_context_used (cfg, klass);
11532 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
11533 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
11534 CHECK_CFG_ERROR;
11537 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11538 * to be called here.
11540 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11541 mono_class_vtable (cfg->domain, klass);
11542 CHECK_TYPELOAD (klass);
11544 mono_domain_lock (cfg->domain);
11545 if (cfg->domain->special_static_fields)
11546 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11547 mono_domain_unlock (cfg->domain);
11549 is_special_static = mono_class_field_is_special_static (field);
11551 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11552 thread_ins = mono_get_thread_intrinsic (cfg);
11553 else
11554 thread_ins = NULL;
11556 /* Generate IR to compute the field address */
11557 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11559 * Fast access to TLS data
11560 * Inline version of get_thread_static_data () in
11561 * threads.c.
11563 guint32 offset;
11564 int idx, static_data_reg, array_reg, dreg;
11566 GSHAREDVT_FAILURE (op);
11568 MONO_ADD_INS (cfg->cbb, thread_ins);
11569 static_data_reg = alloc_ireg (cfg);
11570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11572 if (cfg->compile_aot) {
11573 int offset_reg, offset2_reg, idx_reg;
11575 /* For TLS variables, this will return the TLS offset */
11576 EMIT_NEW_SFLDACONST (cfg, ins, field);
11577 offset_reg = ins->dreg;
11578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11579 idx_reg = alloc_ireg (cfg);
11580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11582 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11583 array_reg = alloc_ireg (cfg);
11584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11585 offset2_reg = alloc_ireg (cfg);
11586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11588 dreg = alloc_ireg (cfg);
11589 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11590 } else {
11591 offset = (gsize)addr & 0x7fffffff;
11592 idx = offset & 0x3f;
11594 array_reg = alloc_ireg (cfg);
11595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11596 dreg = alloc_ireg (cfg);
11597 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11599 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11600 (cfg->compile_aot && is_special_static) ||
11601 (context_used && is_special_static)) {
11602 MonoInst *iargs [2];
11604 g_assert (field->parent);
11605 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11606 if (context_used) {
11607 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11608 field, MONO_RGCTX_INFO_CLASS_FIELD);
11609 } else {
11610 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11612 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11613 } else if (context_used) {
11614 MonoInst *static_data;
11617 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11618 method->klass->name_space, method->klass->name, method->name,
11619 depth, field->offset);
11622 if (mono_class_needs_cctor_run (klass, method))
11623 emit_class_init (cfg, klass);
11626 * The pointer we're computing here is
11628 * super_info.static_data + field->offset
11630 static_data = emit_get_rgctx_klass (cfg, context_used,
11631 klass, MONO_RGCTX_INFO_STATIC_DATA);
11633 if (mini_is_gsharedvt_klass (klass)) {
11634 MonoInst *offset_ins;
11636 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11637 /* The value is offset by 1 */
11638 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11639 dreg = alloc_ireg_mp (cfg);
11640 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11641 } else if (field->offset == 0) {
11642 ins = static_data;
11643 } else {
11644 int addr_reg = mono_alloc_preg (cfg);
11645 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11647 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11648 MonoInst *iargs [2];
11650 g_assert (field->parent);
11651 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11652 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11653 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11654 } else {
11655 MonoVTable *vtable = NULL;
11657 if (!cfg->compile_aot)
11658 vtable = mono_class_vtable (cfg->domain, klass);
11659 CHECK_TYPELOAD (klass);
11661 if (!addr) {
11662 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11663 if (!(g_slist_find (class_inits, klass))) {
11664 emit_class_init (cfg, klass);
11665 if (cfg->verbose_level > 2)
11666 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11667 class_inits = g_slist_prepend (class_inits, klass);
11669 } else {
11670 if (cfg->run_cctors) {
11671 /* This makes so that inline cannot trigger */
11672 /* .cctors: too many apps depend on them */
11673 /* running with a specific order... */
11674 g_assert (vtable);
11675 if (! vtable->initialized)
11676 INLINE_FAILURE ("class init");
11677 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11678 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11679 goto exception_exit;
11683 if (cfg->compile_aot)
11684 EMIT_NEW_SFLDACONST (cfg, ins, field);
11685 else {
11686 g_assert (vtable);
11687 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11688 g_assert (addr);
11689 EMIT_NEW_PCONST (cfg, ins, addr);
11691 } else {
11692 MonoInst *iargs [1];
11693 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11694 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11698 /* Generate IR to do the actual load/store operation */
11700 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11701 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11702 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11705 if (op == CEE_LDSFLDA) {
11706 ins->klass = mono_class_from_mono_type (ftype);
11707 ins->type = STACK_PTR;
11708 *sp++ = ins;
11709 } else if (op == CEE_STSFLD) {
11710 MonoInst *store;
11712 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11713 store->flags |= ins_flag;
11714 } else {
11715 gboolean is_const = FALSE;
11716 MonoVTable *vtable = NULL;
11717 gpointer addr = NULL;
11719 if (!context_used) {
11720 vtable = mono_class_vtable (cfg->domain, klass);
11721 CHECK_TYPELOAD (klass);
11723 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11724 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11725 int ro_type = ftype->type;
11726 if (!addr)
11727 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11728 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11729 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11732 GSHAREDVT_FAILURE (op);
11734 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11735 is_const = TRUE;
11736 switch (ro_type) {
11737 case MONO_TYPE_BOOLEAN:
11738 case MONO_TYPE_U1:
11739 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11740 sp++;
11741 break;
11742 case MONO_TYPE_I1:
11743 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11744 sp++;
11745 break;
11746 case MONO_TYPE_CHAR:
11747 case MONO_TYPE_U2:
11748 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11749 sp++;
11750 break;
11751 case MONO_TYPE_I2:
11752 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11753 sp++;
11754 break;
11755 break;
11756 case MONO_TYPE_I4:
11757 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11758 sp++;
11759 break;
11760 case MONO_TYPE_U4:
11761 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11762 sp++;
11763 break;
11764 case MONO_TYPE_I:
11765 case MONO_TYPE_U:
11766 case MONO_TYPE_PTR:
11767 case MONO_TYPE_FNPTR:
11768 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11769 type_to_eval_stack_type ((cfg), field->type, *sp);
11770 sp++;
11771 break;
11772 case MONO_TYPE_STRING:
11773 case MONO_TYPE_OBJECT:
11774 case MONO_TYPE_CLASS:
11775 case MONO_TYPE_SZARRAY:
11776 case MONO_TYPE_ARRAY:
11777 if (!mono_gc_is_moving ()) {
11778 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11779 type_to_eval_stack_type ((cfg), field->type, *sp);
11780 sp++;
11781 } else {
11782 is_const = FALSE;
11784 break;
11785 case MONO_TYPE_I8:
11786 case MONO_TYPE_U8:
11787 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11788 sp++;
11789 break;
11790 case MONO_TYPE_R4:
11791 case MONO_TYPE_R8:
11792 case MONO_TYPE_VALUETYPE:
11793 default:
11794 is_const = FALSE;
11795 break;
11799 if (!is_const) {
11800 MonoInst *load;
11802 CHECK_STACK_OVF (1);
11804 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11805 load->flags |= ins_flag;
11806 ins_flag = 0;
11807 *sp++ = load;
11811 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11812 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11813 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11816 ins_flag = 0;
11817 ip += 5;
11818 break;
11820 case CEE_STOBJ:
11821 CHECK_STACK (2);
11822 sp -= 2;
11823 CHECK_OPSIZE (5);
11824 token = read32 (ip + 1);
11825 klass = mini_get_class (method, token, generic_context);
11826 CHECK_TYPELOAD (klass);
11827 if (ins_flag & MONO_INST_VOLATILE) {
11828 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11829 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11831 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11832 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11833 ins->flags |= ins_flag;
11834 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11835 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11836 /* insert call to write barrier */
11837 emit_write_barrier (cfg, sp [0], sp [1]);
11839 ins_flag = 0;
11840 ip += 5;
11841 inline_costs += 1;
11842 break;
11845 * Array opcodes
11847 case CEE_NEWARR: {
11848 MonoInst *len_ins;
11849 const char *data_ptr;
11850 int data_size = 0;
11851 guint32 field_token;
11853 CHECK_STACK (1);
11854 --sp;
11856 CHECK_OPSIZE (5);
11857 token = read32 (ip + 1);
11859 klass = mini_get_class (method, token, generic_context);
11860 CHECK_TYPELOAD (klass);
11862 context_used = mini_class_check_context_used (cfg, klass);
11864 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11865 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11866 ins->sreg1 = sp [0]->dreg;
11867 ins->type = STACK_I4;
11868 ins->dreg = alloc_ireg (cfg);
11869 MONO_ADD_INS (cfg->cbb, ins);
11870 *sp = mono_decompose_opcode (cfg, ins);
11873 if (context_used) {
11874 MonoInst *args [3];
11875 MonoClass *array_class = mono_array_class_get (klass, 1);
11876 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11878 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11880 /* vtable */
11881 args [0] = emit_get_rgctx_klass (cfg, context_used,
11882 array_class, MONO_RGCTX_INFO_VTABLE);
11883 /* array len */
11884 args [1] = sp [0];
11886 if (managed_alloc)
11887 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11888 else
11889 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11890 } else {
11891 if (cfg->opt & MONO_OPT_SHARED) {
11892 /* Decompose now to avoid problems with references to the domainvar */
11893 MonoInst *iargs [3];
11895 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11896 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11897 iargs [2] = sp [0];
11899 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11900 } else {
11901 /* Decompose later since it is needed by abcrem */
11902 MonoClass *array_type = mono_array_class_get (klass, 1);
11903 mono_class_vtable (cfg->domain, array_type);
11904 CHECK_TYPELOAD (array_type);
11906 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11907 ins->dreg = alloc_ireg_ref (cfg);
11908 ins->sreg1 = sp [0]->dreg;
11909 ins->inst_newa_class = klass;
11910 ins->type = STACK_OBJ;
11911 ins->klass = array_type;
11912 MONO_ADD_INS (cfg->cbb, ins);
11913 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11914 cfg->cbb->has_array_access = TRUE;
11916 /* Needed so mono_emit_load_get_addr () gets called */
11917 mono_get_got_var (cfg);
11921 len_ins = sp [0];
11922 ip += 5;
11923 *sp++ = ins;
11924 inline_costs += 1;
11927 * we inline/optimize the initialization sequence if possible.
11928 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11929 * for small sizes open code the memcpy
11930 * ensure the rva field is big enough
11932 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11933 MonoMethod *memcpy_method = get_memcpy_method ();
11934 MonoInst *iargs [3];
11935 int add_reg = alloc_ireg_mp (cfg);
11937 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11938 if (cfg->compile_aot) {
11939 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11940 } else {
11941 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11943 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11944 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11945 ip += 11;
11948 break;
11950 case CEE_LDLEN:
11951 CHECK_STACK (1);
11952 --sp;
11953 if (sp [0]->type != STACK_OBJ)
11954 UNVERIFIED;
11956 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11957 ins->dreg = alloc_preg (cfg);
11958 ins->sreg1 = sp [0]->dreg;
11959 ins->type = STACK_I4;
11960 /* This flag will be inherited by the decomposition */
11961 ins->flags |= MONO_INST_FAULT;
11962 MONO_ADD_INS (cfg->cbb, ins);
11963 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11964 cfg->cbb->has_array_access = TRUE;
11965 ip ++;
11966 *sp++ = ins;
11967 break;
11968 case CEE_LDELEMA:
11969 CHECK_STACK (2);
11970 sp -= 2;
11971 CHECK_OPSIZE (5);
11972 if (sp [0]->type != STACK_OBJ)
11973 UNVERIFIED;
11975 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11977 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11978 CHECK_TYPELOAD (klass);
11979 /* we need to make sure that this array is exactly the type it needs
11980 * to be for correctness. the wrappers are lax with their usage
11981 * so we need to ignore them here
11983 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11984 MonoClass *array_class = mono_array_class_get (klass, 1);
11985 mini_emit_check_array_type (cfg, sp [0], array_class);
11986 CHECK_TYPELOAD (array_class);
11989 readonly = FALSE;
11990 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11991 *sp++ = ins;
11992 ip += 5;
11993 break;
11994 case CEE_LDELEM:
11995 case CEE_LDELEM_I1:
11996 case CEE_LDELEM_U1:
11997 case CEE_LDELEM_I2:
11998 case CEE_LDELEM_U2:
11999 case CEE_LDELEM_I4:
12000 case CEE_LDELEM_U4:
12001 case CEE_LDELEM_I8:
12002 case CEE_LDELEM_I:
12003 case CEE_LDELEM_R4:
12004 case CEE_LDELEM_R8:
12005 case CEE_LDELEM_REF: {
12006 MonoInst *addr;
12008 CHECK_STACK (2);
12009 sp -= 2;
12011 if (*ip == CEE_LDELEM) {
12012 CHECK_OPSIZE (5);
12013 token = read32 (ip + 1);
12014 klass = mini_get_class (method, token, generic_context);
12015 CHECK_TYPELOAD (klass);
12016 mono_class_init (klass);
12018 else
12019 klass = array_access_to_klass (*ip);
12021 if (sp [0]->type != STACK_OBJ)
12022 UNVERIFIED;
12024 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12026 if (mini_is_gsharedvt_variable_klass (klass)) {
12027 // FIXME-VT: OP_ICONST optimization
12028 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12029 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12030 ins->opcode = OP_LOADV_MEMBASE;
12031 } else if (sp [1]->opcode == OP_ICONST) {
12032 int array_reg = sp [0]->dreg;
12033 int index_reg = sp [1]->dreg;
12034 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
12036 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
12037 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
12039 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
12040 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12041 } else {
12042 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12043 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12045 *sp++ = ins;
12046 if (*ip == CEE_LDELEM)
12047 ip += 5;
12048 else
12049 ++ip;
12050 break;
12052 case CEE_STELEM_I:
12053 case CEE_STELEM_I1:
12054 case CEE_STELEM_I2:
12055 case CEE_STELEM_I4:
12056 case CEE_STELEM_I8:
12057 case CEE_STELEM_R4:
12058 case CEE_STELEM_R8:
12059 case CEE_STELEM_REF:
12060 case CEE_STELEM: {
12061 CHECK_STACK (3);
12062 sp -= 3;
12064 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12066 if (*ip == CEE_STELEM) {
12067 CHECK_OPSIZE (5);
12068 token = read32 (ip + 1);
12069 klass = mini_get_class (method, token, generic_context);
12070 CHECK_TYPELOAD (klass);
12071 mono_class_init (klass);
12073 else
12074 klass = array_access_to_klass (*ip);
12076 if (sp [0]->type != STACK_OBJ)
12077 UNVERIFIED;
12079 emit_array_store (cfg, klass, sp, TRUE);
12081 if (*ip == CEE_STELEM)
12082 ip += 5;
12083 else
12084 ++ip;
12085 inline_costs += 1;
12086 break;
12088 case CEE_CKFINITE: {
12089 CHECK_STACK (1);
12090 --sp;
12092 if (cfg->llvm_only) {
12093 MonoInst *iargs [1];
12095 iargs [0] = sp [0];
12096 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12097 } else {
12098 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12099 ins->sreg1 = sp [0]->dreg;
12100 ins->dreg = alloc_freg (cfg);
12101 ins->type = STACK_R8;
12102 MONO_ADD_INS (cfg->cbb, ins);
12104 *sp++ = mono_decompose_opcode (cfg, ins);
12107 ++ip;
12108 break;
12110 case CEE_REFANYVAL: {
12111 MonoInst *src_var, *src;
12113 int klass_reg = alloc_preg (cfg);
12114 int dreg = alloc_preg (cfg);
12116 GSHAREDVT_FAILURE (*ip);
12118 CHECK_STACK (1);
12119 MONO_INST_NEW (cfg, ins, *ip);
12120 --sp;
12121 CHECK_OPSIZE (5);
12122 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12123 CHECK_TYPELOAD (klass);
12125 context_used = mini_class_check_context_used (cfg, klass);
12127 // FIXME:
12128 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12129 if (!src_var)
12130 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12131 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12134 if (context_used) {
12135 MonoInst *klass_ins;
12137 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12138 klass, MONO_RGCTX_INFO_KLASS);
12140 // FIXME:
12141 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12142 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12143 } else {
12144 mini_emit_class_check (cfg, klass_reg, klass);
12146 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12147 ins->type = STACK_MP;
12148 ins->klass = klass;
12149 *sp++ = ins;
12150 ip += 5;
12151 break;
12153 case CEE_MKREFANY: {
12154 MonoInst *loc, *addr;
12156 GSHAREDVT_FAILURE (*ip);
12158 CHECK_STACK (1);
12159 MONO_INST_NEW (cfg, ins, *ip);
12160 --sp;
12161 CHECK_OPSIZE (5);
12162 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12163 CHECK_TYPELOAD (klass);
12165 context_used = mini_class_check_context_used (cfg, klass);
12167 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12168 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12170 if (context_used) {
12171 MonoInst *const_ins;
12172 int type_reg = alloc_preg (cfg);
12174 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12175 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12176 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12177 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12178 } else {
12179 int const_reg = alloc_preg (cfg);
12180 int type_reg = alloc_preg (cfg);
12182 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12183 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12185 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12187 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12189 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12190 ins->type = STACK_VTYPE;
12191 ins->klass = mono_defaults.typed_reference_class;
12192 *sp++ = ins;
12193 ip += 5;
12194 break;
12196 case CEE_LDTOKEN: {
12197 gpointer handle;
12198 MonoClass *handle_class;
12200 CHECK_STACK_OVF (1);
12202 CHECK_OPSIZE (5);
12203 n = read32 (ip + 1);
12205 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12206 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12207 handle = mono_method_get_wrapper_data (method, n);
12208 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12209 if (handle_class == mono_defaults.typehandle_class)
12210 handle = &((MonoClass*)handle)->byval_arg;
12212 else {
12213 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12214 CHECK_CFG_ERROR;
12216 if (!handle)
12217 LOAD_ERROR;
12218 mono_class_init (handle_class);
12219 if (cfg->gshared) {
12220 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12221 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12222 /* This case handles ldtoken
12223 of an open type, like for
12224 typeof(Gen<>). */
12225 context_used = 0;
12226 } else if (handle_class == mono_defaults.typehandle_class) {
12227 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12228 } else if (handle_class == mono_defaults.fieldhandle_class)
12229 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12230 else if (handle_class == mono_defaults.methodhandle_class)
12231 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12232 else
12233 g_assert_not_reached ();
12236 if ((cfg->opt & MONO_OPT_SHARED) &&
12237 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12238 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12239 MonoInst *addr, *vtvar, *iargs [3];
12240 int method_context_used;
12242 method_context_used = mini_method_check_context_used (cfg, method);
12244 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12246 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12247 EMIT_NEW_ICONST (cfg, iargs [1], n);
12248 if (method_context_used) {
12249 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12250 method, MONO_RGCTX_INFO_METHOD);
12251 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12252 } else {
12253 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12254 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12256 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12258 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12260 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12261 } else {
12262 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12263 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12264 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12265 (cmethod->klass == mono_defaults.systemtype_class) &&
12266 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12267 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12269 mono_class_init (tclass);
12270 if (context_used) {
12271 ins = emit_get_rgctx_klass (cfg, context_used,
12272 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12273 } else if (cfg->compile_aot) {
12274 if (method->wrapper_type) {
12275 mono_error_init (&error); //got to do it since there are multiple conditionals below
12276 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12277 /* Special case for static synchronized wrappers */
12278 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12279 } else {
12280 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12281 /* FIXME: n is not a normal token */
12282 DISABLE_AOT (cfg);
12283 EMIT_NEW_PCONST (cfg, ins, NULL);
12285 } else {
12286 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12288 } else {
12289 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12290 CHECK_CFG_ERROR;
12291 EMIT_NEW_PCONST (cfg, ins, rt);
12293 ins->type = STACK_OBJ;
12294 ins->klass = cmethod->klass;
12295 ip += 5;
12296 } else {
12297 MonoInst *addr, *vtvar;
12299 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12301 if (context_used) {
12302 if (handle_class == mono_defaults.typehandle_class) {
12303 ins = emit_get_rgctx_klass (cfg, context_used,
12304 mono_class_from_mono_type ((MonoType *)handle),
12305 MONO_RGCTX_INFO_TYPE);
12306 } else if (handle_class == mono_defaults.methodhandle_class) {
12307 ins = emit_get_rgctx_method (cfg, context_used,
12308 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12309 } else if (handle_class == mono_defaults.fieldhandle_class) {
12310 ins = emit_get_rgctx_field (cfg, context_used,
12311 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12312 } else {
12313 g_assert_not_reached ();
12315 } else if (cfg->compile_aot) {
12316 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12317 } else {
12318 EMIT_NEW_PCONST (cfg, ins, handle);
12320 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12321 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12322 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12326 *sp++ = ins;
12327 ip += 5;
12328 break;
12330 case CEE_THROW:
12331 CHECK_STACK (1);
12332 if (sp [-1]->type != STACK_OBJ)
12333 UNVERIFIED;
12335 MONO_INST_NEW (cfg, ins, OP_THROW);
12336 --sp;
12337 ins->sreg1 = sp [0]->dreg;
12338 ip++;
12339 cfg->cbb->out_of_line = TRUE;
12340 MONO_ADD_INS (cfg->cbb, ins);
12341 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12342 MONO_ADD_INS (cfg->cbb, ins);
12343 sp = stack_start;
12345 link_bblock (cfg, cfg->cbb, end_bblock);
12346 start_new_bblock = 1;
12347 /* This can complicate code generation for llvm since the return value might not be defined */
12348 if (COMPILE_LLVM (cfg))
12349 INLINE_FAILURE ("throw");
12350 break;
12351 case CEE_ENDFINALLY:
12352 if (!ip_in_finally_clause (cfg, ip - header->code))
12353 UNVERIFIED;
12354 /* mono_save_seq_point_info () depends on this */
12355 if (sp != stack_start)
12356 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12357 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12358 MONO_ADD_INS (cfg->cbb, ins);
12359 ip++;
12360 start_new_bblock = 1;
12363 * Control will leave the method so empty the stack, otherwise
12364 * the next basic block will start with a nonempty stack.
12366 while (sp != stack_start) {
12367 sp--;
12369 break;
12370 case CEE_LEAVE:
12371 case CEE_LEAVE_S: {
12372 GList *handlers;
12374 if (*ip == CEE_LEAVE) {
12375 CHECK_OPSIZE (5);
12376 target = ip + 5 + (gint32)read32(ip + 1);
12377 } else {
12378 CHECK_OPSIZE (2);
12379 target = ip + 2 + (signed char)(ip [1]);
12382 /* empty the stack */
12383 while (sp != stack_start) {
12384 sp--;
12388 * If this leave statement is in a catch block, check for a
12389 * pending exception, and rethrow it if necessary.
12390 * We avoid doing this in runtime invoke wrappers, since those are called
12391 * by native code which excepts the wrapper to catch all exceptions.
12393 for (i = 0; i < header->num_clauses; ++i) {
12394 MonoExceptionClause *clause = &header->clauses [i];
12397 * Use <= in the final comparison to handle clauses with multiple
12398 * leave statements, like in bug #78024.
12399 * The ordering of the exception clauses guarantees that we find the
12400 * innermost clause.
12402 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12403 MonoInst *exc_ins;
12404 MonoBasicBlock *dont_throw;
12407 MonoInst *load;
12409 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12412 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12414 NEW_BBLOCK (cfg, dont_throw);
12417 * Currently, we always rethrow the abort exception, despite the
12418 * fact that this is not correct. See thread6.cs for an example.
12419 * But propagating the abort exception is more important than
12420 * getting the sematics right.
12422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12423 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12424 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12426 MONO_START_BB (cfg, dont_throw);
12430 #ifdef ENABLE_LLVM
12431 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12432 #endif
12434 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12435 GList *tmp;
12436 MonoExceptionClause *clause;
12438 for (tmp = handlers; tmp; tmp = tmp->next) {
12439 clause = (MonoExceptionClause *)tmp->data;
12440 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12441 g_assert (tblock);
12442 link_bblock (cfg, cfg->cbb, tblock);
12443 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12444 ins->inst_target_bb = tblock;
12445 ins->inst_eh_block = clause;
12446 MONO_ADD_INS (cfg->cbb, ins);
12447 cfg->cbb->has_call_handler = 1;
12448 if (COMPILE_LLVM (cfg)) {
12449 MonoBasicBlock *target_bb;
12452 * Link the finally bblock with the target, since it will
12453 * conceptually branch there.
12455 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12456 GET_BBLOCK (cfg, target_bb, target);
12457 link_bblock (cfg, tblock, target_bb);
12460 g_list_free (handlers);
12463 MONO_INST_NEW (cfg, ins, OP_BR);
12464 MONO_ADD_INS (cfg->cbb, ins);
12465 GET_BBLOCK (cfg, tblock, target);
12466 link_bblock (cfg, cfg->cbb, tblock);
12467 ins->inst_target_bb = tblock;
12469 start_new_bblock = 1;
12471 if (*ip == CEE_LEAVE)
12472 ip += 5;
12473 else
12474 ip += 2;
12476 break;
12480 * Mono specific opcodes
12482 case MONO_CUSTOM_PREFIX: {
12484 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12486 CHECK_OPSIZE (2);
12487 switch (ip [1]) {
12488 case CEE_MONO_ICALL: {
12489 gpointer func;
12490 MonoJitICallInfo *info;
12492 token = read32 (ip + 2);
12493 func = mono_method_get_wrapper_data (method, token);
12494 info = mono_find_jit_icall_by_addr (func);
12495 if (!info)
12496 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12497 g_assert (info);
12499 CHECK_STACK (info->sig->param_count);
12500 sp -= info->sig->param_count;
12502 ins = mono_emit_jit_icall (cfg, info->func, sp);
12503 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12504 *sp++ = ins;
12506 ip += 6;
12507 inline_costs += 10 * num_calls++;
12509 break;
12511 case CEE_MONO_LDPTR_CARD_TABLE:
12512 case CEE_MONO_LDPTR_NURSERY_START:
12513 case CEE_MONO_LDPTR_NURSERY_BITS:
12514 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12515 CHECK_STACK_OVF (1);
12517 switch (ip [1]) {
12518 case CEE_MONO_LDPTR_CARD_TABLE:
12519 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12520 break;
12521 case CEE_MONO_LDPTR_NURSERY_START:
12522 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12523 break;
12524 case CEE_MONO_LDPTR_NURSERY_BITS:
12525 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12526 break;
12527 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12528 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12529 break;
12532 *sp++ = ins;
12533 ip += 2;
12534 inline_costs += 10 * num_calls++;
12535 break;
12537 case CEE_MONO_LDPTR: {
12538 gpointer ptr;
12540 CHECK_STACK_OVF (1);
12541 CHECK_OPSIZE (6);
12542 token = read32 (ip + 2);
12544 ptr = mono_method_get_wrapper_data (method, token);
12545 EMIT_NEW_PCONST (cfg, ins, ptr);
12546 *sp++ = ins;
12547 ip += 6;
12548 inline_costs += 10 * num_calls++;
12549 /* Can't embed random pointers into AOT code */
12550 DISABLE_AOT (cfg);
12551 break;
12553 case CEE_MONO_JIT_ICALL_ADDR: {
12554 MonoJitICallInfo *callinfo;
12555 gpointer ptr;
12557 CHECK_STACK_OVF (1);
12558 CHECK_OPSIZE (6);
12559 token = read32 (ip + 2);
12561 ptr = mono_method_get_wrapper_data (method, token);
12562 callinfo = mono_find_jit_icall_by_addr (ptr);
12563 g_assert (callinfo);
12564 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12565 *sp++ = ins;
12566 ip += 6;
12567 inline_costs += 10 * num_calls++;
12568 break;
12570 case CEE_MONO_ICALL_ADDR: {
12571 MonoMethod *cmethod;
12572 gpointer ptr;
12574 CHECK_STACK_OVF (1);
12575 CHECK_OPSIZE (6);
12576 token = read32 (ip + 2);
12578 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12580 if (cfg->compile_aot) {
12581 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12582 } else {
12583 ptr = mono_lookup_internal_call (cmethod);
12584 g_assert (ptr);
12585 EMIT_NEW_PCONST (cfg, ins, ptr);
12587 *sp++ = ins;
12588 ip += 6;
12589 break;
12591 case CEE_MONO_VTADDR: {
12592 MonoInst *src_var, *src;
12594 CHECK_STACK (1);
12595 --sp;
12597 // FIXME:
12598 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12599 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12600 *sp++ = src;
12601 ip += 2;
12602 break;
12604 case CEE_MONO_NEWOBJ: {
12605 MonoInst *iargs [2];
12607 CHECK_STACK_OVF (1);
12608 CHECK_OPSIZE (6);
12609 token = read32 (ip + 2);
12610 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12611 mono_class_init (klass);
12612 NEW_DOMAINCONST (cfg, iargs [0]);
12613 MONO_ADD_INS (cfg->cbb, iargs [0]);
12614 NEW_CLASSCONST (cfg, iargs [1], klass);
12615 MONO_ADD_INS (cfg->cbb, iargs [1]);
12616 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12617 ip += 6;
12618 inline_costs += 10 * num_calls++;
12619 break;
12621 case CEE_MONO_OBJADDR:
12622 CHECK_STACK (1);
12623 --sp;
12624 MONO_INST_NEW (cfg, ins, OP_MOVE);
12625 ins->dreg = alloc_ireg_mp (cfg);
12626 ins->sreg1 = sp [0]->dreg;
12627 ins->type = STACK_MP;
12628 MONO_ADD_INS (cfg->cbb, ins);
12629 *sp++ = ins;
12630 ip += 2;
12631 break;
12632 case CEE_MONO_LDNATIVEOBJ:
12634 * Similar to LDOBJ, but instead load the unmanaged
12635 * representation of the vtype to the stack.
12637 CHECK_STACK (1);
12638 CHECK_OPSIZE (6);
12639 --sp;
12640 token = read32 (ip + 2);
12641 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12642 g_assert (klass->valuetype);
12643 mono_class_init (klass);
12646 MonoInst *src, *dest, *temp;
12648 src = sp [0];
12649 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12650 temp->backend.is_pinvoke = 1;
12651 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12652 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12654 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12655 dest->type = STACK_VTYPE;
12656 dest->klass = klass;
12658 *sp ++ = dest;
12659 ip += 6;
12661 break;
12662 case CEE_MONO_RETOBJ: {
12664 * Same as RET, but return the native representation of a vtype
12665 * to the caller.
12667 g_assert (cfg->ret);
12668 g_assert (mono_method_signature (method)->pinvoke);
12669 CHECK_STACK (1);
12670 --sp;
12672 CHECK_OPSIZE (6);
12673 token = read32 (ip + 2);
12674 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12676 if (!cfg->vret_addr) {
12677 g_assert (cfg->ret_var_is_local);
12679 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12680 } else {
12681 EMIT_NEW_RETLOADA (cfg, ins);
12683 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12685 if (sp != stack_start)
12686 UNVERIFIED;
12688 MONO_INST_NEW (cfg, ins, OP_BR);
12689 ins->inst_target_bb = end_bblock;
12690 MONO_ADD_INS (cfg->cbb, ins);
12691 link_bblock (cfg, cfg->cbb, end_bblock);
12692 start_new_bblock = 1;
12693 ip += 6;
12694 break;
12696 case CEE_MONO_CISINST:
12697 case CEE_MONO_CCASTCLASS: {
12698 int token;
12699 CHECK_STACK (1);
12700 --sp;
12701 CHECK_OPSIZE (6);
12702 token = read32 (ip + 2);
12703 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12704 if (ip [1] == CEE_MONO_CISINST)
12705 ins = handle_cisinst (cfg, klass, sp [0]);
12706 else
12707 ins = handle_ccastclass (cfg, klass, sp [0]);
12708 *sp++ = ins;
12709 ip += 6;
12710 break;
12712 case CEE_MONO_SAVE_LMF:
12713 case CEE_MONO_RESTORE_LMF:
12714 ip += 2;
12715 break;
12716 case CEE_MONO_CLASSCONST:
12717 CHECK_STACK_OVF (1);
12718 CHECK_OPSIZE (6);
12719 token = read32 (ip + 2);
12720 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12721 *sp++ = ins;
12722 ip += 6;
12723 inline_costs += 10 * num_calls++;
12724 break;
12725 case CEE_MONO_NOT_TAKEN:
12726 cfg->cbb->out_of_line = TRUE;
12727 ip += 2;
12728 break;
12729 case CEE_MONO_TLS: {
12730 MonoTlsKey key;
12732 CHECK_STACK_OVF (1);
12733 CHECK_OPSIZE (6);
12734 key = (MonoTlsKey)read32 (ip + 2);
12735 g_assert (key < TLS_KEY_NUM);
12737 ins = mono_create_tls_get (cfg, key);
12738 if (!ins) {
12739 if (cfg->compile_aot) {
12740 DISABLE_AOT (cfg);
12741 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12742 ins->dreg = alloc_preg (cfg);
12743 ins->type = STACK_PTR;
12744 } else {
12745 g_assert_not_reached ();
12748 ins->type = STACK_PTR;
12749 MONO_ADD_INS (cfg->cbb, ins);
12750 *sp++ = ins;
12751 ip += 6;
12752 break;
12754 case CEE_MONO_DYN_CALL: {
12755 MonoCallInst *call;
12757 /* It would be easier to call a trampoline, but that would put an
12758 * extra frame on the stack, confusing exception handling. So
12759 * implement it inline using an opcode for now.
12762 if (!cfg->dyn_call_var) {
12763 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12764 /* prevent it from being register allocated */
12765 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12768 /* Has to use a call inst since it local regalloc expects it */
12769 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12770 ins = (MonoInst*)call;
12771 sp -= 2;
12772 ins->sreg1 = sp [0]->dreg;
12773 ins->sreg2 = sp [1]->dreg;
12774 MONO_ADD_INS (cfg->cbb, ins);
12776 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12778 ip += 2;
12779 inline_costs += 10 * num_calls++;
12781 break;
12783 case CEE_MONO_MEMORY_BARRIER: {
12784 CHECK_OPSIZE (6);
12785 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12786 ip += 6;
12787 break;
12789 case CEE_MONO_ATOMIC_STORE_I4: {
12790 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12792 CHECK_OPSIZE (6);
12793 CHECK_STACK (2);
12794 sp -= 2;
12796 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12797 ins->dreg = sp [0]->dreg;
12798 ins->sreg1 = sp [1]->dreg;
12799 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12800 MONO_ADD_INS (cfg->cbb, ins);
12802 ip += 6;
12803 break;
12805 case CEE_MONO_JIT_ATTACH: {
12806 MonoInst *args [16], *domain_ins;
12807 MonoInst *ad_ins, *jit_tls_ins;
12808 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12810 g_assert (!mono_threads_is_coop_enabled ());
12812 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12814 EMIT_NEW_PCONST (cfg, ins, NULL);
12815 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12817 ad_ins = mono_get_domain_intrinsic (cfg);
12818 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12820 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12821 NEW_BBLOCK (cfg, next_bb);
12822 NEW_BBLOCK (cfg, call_bb);
12824 if (cfg->compile_aot) {
12825 /* AOT code is only used in the root domain */
12826 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12827 } else {
12828 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12830 MONO_ADD_INS (cfg->cbb, ad_ins);
12831 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12832 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12834 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12835 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12836 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12838 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12839 MONO_START_BB (cfg, call_bb);
12842 /* AOT code is only used in the root domain */
12843 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12844 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12845 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12847 if (next_bb)
12848 MONO_START_BB (cfg, next_bb);
12851 ip += 2;
12852 break;
12854 case CEE_MONO_JIT_DETACH: {
12855 MonoInst *args [16];
12857 /* Restore the original domain */
12858 dreg = alloc_ireg (cfg);
12859 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12860 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12861 ip += 2;
12862 break;
12864 case CEE_MONO_CALLI_EXTRA_ARG: {
12865 MonoInst *addr;
12866 MonoMethodSignature *fsig;
12867 MonoInst *arg;
12870 * This is the same as CEE_CALLI, but passes an additional argument
12871 * to the called method in llvmonly mode.
12872 * This is only used by delegate invoke wrappers to call the
12873 * actual delegate method.
12875 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12877 CHECK_OPSIZE (6);
12878 token = read32 (ip + 2);
12880 ins = NULL;
12882 cmethod = NULL;
12883 CHECK_STACK (1);
12884 --sp;
12885 addr = *sp;
12886 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12887 CHECK_CFG_ERROR;
12889 if (cfg->llvm_only)
12890 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12892 n = fsig->param_count + fsig->hasthis + 1;
12894 CHECK_STACK (n);
12896 sp -= n;
12897 arg = sp [n - 1];
12899 if (cfg->llvm_only) {
12901 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
12902 * cconv. This is set by mono_init_delegate ().
12904 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12905 MonoInst *callee = addr;
12906 MonoInst *call, *localloc_ins;
12907 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12908 int low_bit_reg = alloc_preg (cfg);
12910 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12911 NEW_BBLOCK (cfg, end_bb);
12913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12915 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12917 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12918 addr = emit_get_rgctx_sig (cfg, context_used,
12919 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12921 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12923 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12924 ins->dreg = alloc_preg (cfg);
12925 ins->inst_imm = 2 * SIZEOF_VOID_P;
12926 MONO_ADD_INS (cfg->cbb, ins);
12927 localloc_ins = ins;
12928 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12930 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12932 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12935 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12936 MONO_START_BB (cfg, is_gsharedvt_bb);
12937 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12938 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12939 ins->dreg = call->dreg;
12941 MONO_START_BB (cfg, end_bb);
12942 } else {
12943 /* Caller uses a normal calling conv */
12945 MonoInst *callee = addr;
12946 MonoInst *call, *localloc_ins;
12947 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12948 int low_bit_reg = alloc_preg (cfg);
12950 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12951 NEW_BBLOCK (cfg, end_bb);
12953 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12954 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12955 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12957 /* Normal case: callee uses a normal cconv, no conversion is needed */
12958 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12960 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12961 MONO_START_BB (cfg, is_gsharedvt_bb);
12962 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12963 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12964 MONO_ADD_INS (cfg->cbb, addr);
12966 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12968 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12969 ins->dreg = alloc_preg (cfg);
12970 ins->inst_imm = 2 * SIZEOF_VOID_P;
12971 MONO_ADD_INS (cfg->cbb, ins);
12972 localloc_ins = ins;
12973 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12974 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12977 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12978 ins->dreg = call->dreg;
12979 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12981 MONO_START_BB (cfg, end_bb);
12983 } else {
12984 /* Same as CEE_CALLI */
12985 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12987 * We pass the address to the gsharedvt trampoline in the rgctx reg
12989 MonoInst *callee = addr;
12991 addr = emit_get_rgctx_sig (cfg, context_used,
12992 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12993 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12994 } else {
12995 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12999 if (!MONO_TYPE_IS_VOID (fsig->ret))
13000 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
13002 CHECK_CFG_EXCEPTION;
13004 ip += 6;
13005 ins_flag = 0;
13006 constrained_class = NULL;
13007 break;
13009 case CEE_MONO_LDDOMAIN:
13010 CHECK_STACK_OVF (1);
13011 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
13012 ip += 2;
13013 *sp++ = ins;
13014 break;
13015 case CEE_MONO_GET_LAST_ERROR:
13016 CHECK_OPSIZE (2);
13017 CHECK_STACK_OVF (1);
13019 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
13020 ins->dreg = alloc_dreg (cfg, STACK_I4);
13021 ins->type = STACK_I4;
13022 MONO_ADD_INS (cfg->cbb, ins);
13024 ip += 2;
13025 *sp++ = ins;
13026 break;
13027 default:
13028 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
13029 break;
13031 break;
13034 case CEE_PREFIX1: {
13035 CHECK_OPSIZE (2);
13036 switch (ip [1]) {
13037 case CEE_ARGLIST: {
13038 /* somewhat similar to LDTOKEN */
13039 MonoInst *addr, *vtvar;
13040 CHECK_STACK_OVF (1);
13041 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
13043 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
13044 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
13046 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
13047 ins->type = STACK_VTYPE;
13048 ins->klass = mono_defaults.argumenthandle_class;
13049 *sp++ = ins;
13050 ip += 2;
13051 break;
13053 case CEE_CEQ:
13054 case CEE_CGT:
13055 case CEE_CGT_UN:
13056 case CEE_CLT:
13057 case CEE_CLT_UN: {
13058 MonoInst *cmp, *arg1, *arg2;
13060 CHECK_STACK (2);
13061 sp -= 2;
13062 arg1 = sp [0];
13063 arg2 = sp [1];
13066 * The following transforms:
13067 * CEE_CEQ into OP_CEQ
13068 * CEE_CGT into OP_CGT
13069 * CEE_CGT_UN into OP_CGT_UN
13070 * CEE_CLT into OP_CLT
13071 * CEE_CLT_UN into OP_CLT_UN
13073 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13075 MONO_INST_NEW (cfg, ins, cmp->opcode);
13076 cmp->sreg1 = arg1->dreg;
13077 cmp->sreg2 = arg2->dreg;
13078 type_from_op (cfg, cmp, arg1, arg2);
13079 CHECK_TYPE (cmp);
13080 add_widen_op (cfg, cmp, &arg1, &arg2);
13081 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13082 cmp->opcode = OP_LCOMPARE;
13083 else if (arg1->type == STACK_R4)
13084 cmp->opcode = OP_RCOMPARE;
13085 else if (arg1->type == STACK_R8)
13086 cmp->opcode = OP_FCOMPARE;
13087 else
13088 cmp->opcode = OP_ICOMPARE;
13089 MONO_ADD_INS (cfg->cbb, cmp);
13090 ins->type = STACK_I4;
13091 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13092 type_from_op (cfg, ins, arg1, arg2);
13094 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13096 * The backends expect the fceq opcodes to do the
13097 * comparison too.
13099 ins->sreg1 = cmp->sreg1;
13100 ins->sreg2 = cmp->sreg2;
13101 NULLIFY_INS (cmp);
13103 MONO_ADD_INS (cfg->cbb, ins);
13104 *sp++ = ins;
13105 ip += 2;
13106 break;
13108 case CEE_LDFTN: {
13109 MonoInst *argconst;
13110 MonoMethod *cil_method;
13112 CHECK_STACK_OVF (1);
13113 CHECK_OPSIZE (6);
13114 n = read32 (ip + 2);
13115 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13116 CHECK_CFG_ERROR;
13118 mono_class_init (cmethod->klass);
13120 mono_save_token_info (cfg, image, n, cmethod);
13122 context_used = mini_method_check_context_used (cfg, cmethod);
13124 cil_method = cmethod;
13125 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13126 emit_method_access_failure (cfg, method, cil_method);
13128 if (mono_security_core_clr_enabled ())
13129 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13132 * Optimize the common case of ldftn+delegate creation
13134 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13135 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13136 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13137 MonoInst *target_ins, *handle_ins;
13138 MonoMethod *invoke;
13139 int invoke_context_used;
13141 invoke = mono_get_delegate_invoke (ctor_method->klass);
13142 if (!invoke || !mono_method_signature (invoke))
13143 LOAD_ERROR;
13145 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13147 target_ins = sp [-1];
13149 if (mono_security_core_clr_enabled ())
13150 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13152 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13153 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13154 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13156 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13160 /* FIXME: SGEN support */
13161 if (invoke_context_used == 0 || cfg->llvm_only) {
13162 ip += 6;
13163 if (cfg->verbose_level > 3)
13164 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13165 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13166 sp --;
13167 *sp = handle_ins;
13168 CHECK_CFG_EXCEPTION;
13169 ip += 5;
13170 sp ++;
13171 break;
13173 ip -= 6;
13178 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13179 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13180 *sp++ = ins;
13182 ip += 6;
13183 inline_costs += 10 * num_calls++;
13184 break;
13186 case CEE_LDVIRTFTN: {
13187 MonoInst *args [2];
13189 CHECK_STACK (1);
13190 CHECK_OPSIZE (6);
13191 n = read32 (ip + 2);
13192 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13193 CHECK_CFG_ERROR;
13195 mono_class_init (cmethod->klass);
13197 context_used = mini_method_check_context_used (cfg, cmethod);
13199 if (mono_security_core_clr_enabled ())
13200 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13203 * Optimize the common case of ldvirtftn+delegate creation
13205 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13206 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13207 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13208 MonoInst *target_ins, *handle_ins;
13209 MonoMethod *invoke;
13210 int invoke_context_used;
13211 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13213 invoke = mono_get_delegate_invoke (ctor_method->klass);
13214 if (!invoke || !mono_method_signature (invoke))
13215 LOAD_ERROR;
13217 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13219 target_ins = sp [-1];
13221 if (mono_security_core_clr_enabled ())
13222 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13224 /* FIXME: SGEN support */
13225 if (invoke_context_used == 0 || cfg->llvm_only) {
13226 ip += 6;
13227 if (cfg->verbose_level > 3)
13228 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13229 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13230 sp -= 2;
13231 *sp = handle_ins;
13232 CHECK_CFG_EXCEPTION;
13233 ip += 5;
13234 sp ++;
13235 break;
13237 ip -= 6;
13242 --sp;
13243 args [0] = *sp;
13245 args [1] = emit_get_rgctx_method (cfg, context_used,
13246 cmethod, MONO_RGCTX_INFO_METHOD);
13248 if (context_used)
13249 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13250 else
13251 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13253 ip += 6;
13254 inline_costs += 10 * num_calls++;
13255 break;
13257 case CEE_LDARG:
13258 CHECK_STACK_OVF (1);
13259 CHECK_OPSIZE (4);
13260 n = read16 (ip + 2);
13261 CHECK_ARG (n);
13262 EMIT_NEW_ARGLOAD (cfg, ins, n);
13263 *sp++ = ins;
13264 ip += 4;
13265 break;
13266 case CEE_LDARGA:
13267 CHECK_STACK_OVF (1);
13268 CHECK_OPSIZE (4);
13269 n = read16 (ip + 2);
13270 CHECK_ARG (n);
13271 NEW_ARGLOADA (cfg, ins, n);
13272 MONO_ADD_INS (cfg->cbb, ins);
13273 *sp++ = ins;
13274 ip += 4;
13275 break;
13276 case CEE_STARG:
13277 CHECK_STACK (1);
13278 --sp;
13279 CHECK_OPSIZE (4);
13280 n = read16 (ip + 2);
13281 CHECK_ARG (n);
13282 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13283 UNVERIFIED;
13284 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13285 ip += 4;
13286 break;
13287 case CEE_LDLOC:
13288 CHECK_STACK_OVF (1);
13289 CHECK_OPSIZE (4);
13290 n = read16 (ip + 2);
13291 CHECK_LOCAL (n);
13292 EMIT_NEW_LOCLOAD (cfg, ins, n);
13293 *sp++ = ins;
13294 ip += 4;
13295 break;
13296 case CEE_LDLOCA: {
13297 unsigned char *tmp_ip;
13298 CHECK_STACK_OVF (1);
13299 CHECK_OPSIZE (4);
13300 n = read16 (ip + 2);
13301 CHECK_LOCAL (n);
13303 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13304 ip = tmp_ip;
13305 inline_costs += 1;
13306 break;
13309 EMIT_NEW_LOCLOADA (cfg, ins, n);
13310 *sp++ = ins;
13311 ip += 4;
13312 break;
13314 case CEE_STLOC:
13315 CHECK_STACK (1);
13316 --sp;
13317 CHECK_OPSIZE (4);
13318 n = read16 (ip + 2);
13319 CHECK_LOCAL (n);
13320 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13321 UNVERIFIED;
13322 emit_stloc_ir (cfg, sp, header, n);
13323 ip += 4;
13324 inline_costs += 1;
13325 break;
13326 case CEE_LOCALLOC: {
13327 CHECK_STACK (1);
13328 MonoBasicBlock *non_zero_bb, *end_bb;
13329 int alloc_ptr = alloc_preg (cfg);
13330 --sp;
13331 if (sp != stack_start)
13332 UNVERIFIED;
13333 if (cfg->method != method)
13335 * Inlining this into a loop in a parent could lead to
13336 * stack overflows which is different behavior than the
13337 * non-inlined case, thus disable inlining in this case.
13339 INLINE_FAILURE("localloc");
13341 NEW_BBLOCK (cfg, non_zero_bb);
13342 NEW_BBLOCK (cfg, end_bb);
13344 /* if size != zero */
13345 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
13346 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
13348 //size is zero, so result is NULL
13349 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
13350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
13352 MONO_START_BB (cfg, non_zero_bb);
13353 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13354 ins->dreg = alloc_ptr;
13355 ins->sreg1 = sp [0]->dreg;
13356 ins->type = STACK_PTR;
13357 MONO_ADD_INS (cfg->cbb, ins);
13359 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13360 if (init_locals)
13361 ins->flags |= MONO_INST_INIT;
13363 MONO_START_BB (cfg, end_bb);
13364 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
13365 ins->type = STACK_PTR;
13367 *sp++ = ins;
13368 ip += 2;
13369 break;
13371 case CEE_ENDFILTER: {
13372 MonoExceptionClause *clause, *nearest;
13373 int cc;
13375 CHECK_STACK (1);
13376 --sp;
13377 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13378 UNVERIFIED;
13379 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13380 ins->sreg1 = (*sp)->dreg;
13381 MONO_ADD_INS (cfg->cbb, ins);
13382 start_new_bblock = 1;
13383 ip += 2;
13385 nearest = NULL;
13386 for (cc = 0; cc < header->num_clauses; ++cc) {
13387 clause = &header->clauses [cc];
13388 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13389 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13390 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13391 nearest = clause;
13393 g_assert (nearest);
13394 if ((ip - header->code) != nearest->handler_offset)
13395 UNVERIFIED;
13397 break;
13399 case CEE_UNALIGNED_:
13400 ins_flag |= MONO_INST_UNALIGNED;
13401 /* FIXME: record alignment? we can assume 1 for now */
13402 CHECK_OPSIZE (3);
13403 ip += 3;
13404 break;
13405 case CEE_VOLATILE_:
13406 ins_flag |= MONO_INST_VOLATILE;
13407 ip += 2;
13408 break;
13409 case CEE_TAIL_:
13410 ins_flag |= MONO_INST_TAILCALL;
13411 cfg->flags |= MONO_CFG_HAS_TAIL;
13412 /* Can't inline tail calls at this time */
13413 inline_costs += 100000;
13414 ip += 2;
13415 break;
13416 case CEE_INITOBJ:
13417 CHECK_STACK (1);
13418 --sp;
13419 CHECK_OPSIZE (6);
13420 token = read32 (ip + 2);
13421 klass = mini_get_class (method, token, generic_context);
13422 CHECK_TYPELOAD (klass);
13423 if (generic_class_is_reference_type (cfg, klass))
13424 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13425 else
13426 mini_emit_initobj (cfg, *sp, NULL, klass);
13427 ip += 6;
13428 inline_costs += 1;
13429 break;
13430 case CEE_CONSTRAINED_:
13431 CHECK_OPSIZE (6);
13432 token = read32 (ip + 2);
13433 constrained_class = mini_get_class (method, token, generic_context);
13434 CHECK_TYPELOAD (constrained_class);
13435 ip += 6;
13436 break;
13437 case CEE_CPBLK:
13438 case CEE_INITBLK: {
13439 MonoInst *iargs [3];
13440 CHECK_STACK (3);
13441 sp -= 3;
13443 /* Skip optimized paths for volatile operations. */
13444 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13445 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13446 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13447 /* emit_memset only works when val == 0 */
13448 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13449 } else {
13450 MonoInst *call;
13451 iargs [0] = sp [0];
13452 iargs [1] = sp [1];
13453 iargs [2] = sp [2];
13454 if (ip [1] == CEE_CPBLK) {
13456 * FIXME: It's unclear whether we should be emitting both the acquire
13457 * and release barriers for cpblk. It is technically both a load and
13458 * store operation, so it seems like that's the sensible thing to do.
13460 * FIXME: We emit full barriers on both sides of the operation for
13461 * simplicity. We should have a separate atomic memcpy method instead.
13463 MonoMethod *memcpy_method = get_memcpy_method ();
13465 if (ins_flag & MONO_INST_VOLATILE)
13466 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13468 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13469 call->flags |= ins_flag;
13471 if (ins_flag & MONO_INST_VOLATILE)
13472 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13473 } else {
13474 MonoMethod *memset_method = get_memset_method ();
13475 if (ins_flag & MONO_INST_VOLATILE) {
13476 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13477 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13479 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13480 call->flags |= ins_flag;
13483 ip += 2;
13484 ins_flag = 0;
13485 inline_costs += 1;
13486 break;
13488 case CEE_NO_:
13489 CHECK_OPSIZE (3);
13490 if (ip [2] & 0x1)
13491 ins_flag |= MONO_INST_NOTYPECHECK;
13492 if (ip [2] & 0x2)
13493 ins_flag |= MONO_INST_NORANGECHECK;
13494 /* we ignore the no-nullcheck for now since we
13495 * really do it explicitly only when doing callvirt->call
13497 ip += 3;
13498 break;
13499 case CEE_RETHROW: {
13500 MonoInst *load;
13501 int handler_offset = -1;
13503 for (i = 0; i < header->num_clauses; ++i) {
13504 MonoExceptionClause *clause = &header->clauses [i];
13505 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13506 handler_offset = clause->handler_offset;
13507 break;
13511 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13513 if (handler_offset == -1)
13514 UNVERIFIED;
13516 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13517 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13518 ins->sreg1 = load->dreg;
13519 MONO_ADD_INS (cfg->cbb, ins);
13521 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13522 MONO_ADD_INS (cfg->cbb, ins);
13524 sp = stack_start;
13525 link_bblock (cfg, cfg->cbb, end_bblock);
13526 start_new_bblock = 1;
13527 ip += 2;
13528 break;
13530 case CEE_SIZEOF: {
13531 guint32 val;
13532 int ialign;
13534 CHECK_STACK_OVF (1);
13535 CHECK_OPSIZE (6);
13536 token = read32 (ip + 2);
13537 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13538 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13539 CHECK_CFG_ERROR;
13541 val = mono_type_size (type, &ialign);
13542 } else {
13543 MonoClass *klass = mini_get_class (method, token, generic_context);
13544 CHECK_TYPELOAD (klass);
13546 val = mono_type_size (&klass->byval_arg, &ialign);
13548 if (mini_is_gsharedvt_klass (klass))
13549 GSHAREDVT_FAILURE (*ip);
13551 EMIT_NEW_ICONST (cfg, ins, val);
13552 *sp++= ins;
13553 ip += 6;
13554 break;
13556 case CEE_REFANYTYPE: {
13557 MonoInst *src_var, *src;
13559 GSHAREDVT_FAILURE (*ip);
13561 CHECK_STACK (1);
13562 --sp;
13564 // FIXME:
13565 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13566 if (!src_var)
13567 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13568 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13569 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13570 *sp++ = ins;
13571 ip += 2;
13572 break;
13574 case CEE_READONLY_:
13575 readonly = TRUE;
13576 ip += 2;
13577 break;
13579 case CEE_UNUSED56:
13580 case CEE_UNUSED57:
13581 case CEE_UNUSED70:
13582 case CEE_UNUSED:
13583 case CEE_UNUSED99:
13584 UNVERIFIED;
13586 default:
13587 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13588 UNVERIFIED;
13590 break;
13592 case CEE_UNUSED58:
13593 case CEE_UNUSED1:
13594 UNVERIFIED;
13596 default:
13597 g_warning ("opcode 0x%02x not handled", *ip);
13598 UNVERIFIED;
13601 if (start_new_bblock != 1)
13602 UNVERIFIED;
13604 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13605 if (cfg->cbb->next_bb) {
13606 /* This could already be set because of inlining, #693905 */
13607 MonoBasicBlock *bb = cfg->cbb;
13609 while (bb->next_bb)
13610 bb = bb->next_bb;
13611 bb->next_bb = end_bblock;
13612 } else {
13613 cfg->cbb->next_bb = end_bblock;
13616 if (cfg->method == method && cfg->domainvar) {
13617 MonoInst *store;
13618 MonoInst *get_domain;
13620 cfg->cbb = init_localsbb;
13622 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13623 MONO_ADD_INS (cfg->cbb, get_domain);
13624 } else {
13625 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13627 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13628 MONO_ADD_INS (cfg->cbb, store);
13631 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13632 if (cfg->compile_aot)
13633 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13634 mono_get_got_var (cfg);
13635 #endif
13637 if (cfg->method == method && cfg->got_var)
13638 mono_emit_load_got_addr (cfg);
13640 if (init_localsbb) {
13641 cfg->cbb = init_localsbb;
13642 cfg->ip = NULL;
13643 for (i = 0; i < header->num_locals; ++i) {
13644 emit_init_local (cfg, i, header->locals [i], init_locals);
13648 if (cfg->init_ref_vars && cfg->method == method) {
13649 /* Emit initialization for ref vars */
13650 // FIXME: Avoid duplication initialization for IL locals.
13651 for (i = 0; i < cfg->num_varinfo; ++i) {
13652 MonoInst *ins = cfg->varinfo [i];
13654 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13655 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13659 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13660 cfg->cbb = init_localsbb;
13661 emit_push_lmf (cfg);
13664 cfg->cbb = init_localsbb;
13665 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13667 if (seq_points) {
13668 MonoBasicBlock *bb;
13671 * Make seq points at backward branch targets interruptable.
13673 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13674 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13675 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13678 /* Add a sequence point for method entry/exit events */
13679 if (seq_points && cfg->gen_sdb_seq_points) {
13680 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13681 MONO_ADD_INS (init_localsbb, ins);
13682 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13683 MONO_ADD_INS (cfg->bb_exit, ins);
13687 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13688 * the code they refer to was dead (#11880).
13690 if (sym_seq_points) {
13691 for (i = 0; i < header->code_size; ++i) {
13692 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13693 MonoInst *ins;
13695 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13696 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13701 cfg->ip = NULL;
13703 if (cfg->method == method) {
13704 MonoBasicBlock *bb;
13705 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13706 if (bb == cfg->bb_init)
13707 bb->region = -1;
13708 else
13709 bb->region = mono_find_block_region (cfg, bb->real_offset);
13710 if (cfg->spvars)
13711 mono_create_spvar_for_region (cfg, bb->region);
13712 if (cfg->verbose_level > 2)
13713 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13715 } else {
13716 MonoBasicBlock *bb;
13717 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13718 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13719 bb->real_offset = inline_offset;
13723 if (inline_costs < 0) {
13724 char *mname;
13726 /* Method is too large */
13727 mname = mono_method_full_name (method, TRUE);
13728 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13729 g_free (mname);
13732 if ((cfg->verbose_level > 2) && (cfg->method == method))
13733 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13735 goto cleanup;
13737 mono_error_exit:
13738 g_assert (!mono_error_ok (&cfg->error));
13739 goto cleanup;
13741 exception_exit:
13742 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13743 goto cleanup;
13745 unverified:
13746 set_exception_type_from_invalid_il (cfg, method, ip);
13747 goto cleanup;
13749 cleanup:
13750 g_slist_free (class_inits);
13751 mono_basic_block_free (original_bb);
13752 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13753 if (cfg->exception_type)
13754 return -1;
13755 else
13756 return inline_costs;
13759 static int
13760 store_membase_reg_to_store_membase_imm (int opcode)
13762 switch (opcode) {
13763 case OP_STORE_MEMBASE_REG:
13764 return OP_STORE_MEMBASE_IMM;
13765 case OP_STOREI1_MEMBASE_REG:
13766 return OP_STOREI1_MEMBASE_IMM;
13767 case OP_STOREI2_MEMBASE_REG:
13768 return OP_STOREI2_MEMBASE_IMM;
13769 case OP_STOREI4_MEMBASE_REG:
13770 return OP_STOREI4_MEMBASE_IMM;
13771 case OP_STOREI8_MEMBASE_REG:
13772 return OP_STOREI8_MEMBASE_IMM;
13773 default:
13774 g_assert_not_reached ();
13777 return -1;
/*
 * mono_op_to_op_imm:
 *
 *   Return the variant of OPCODE which takes an immediate as its second
 * operand, or -1 if no such variant exists.
 */
int
mono_op_to_op_imm (int opcode)
{
	switch (opcode) {
	/* 32 bit integer ALU ops */
	case OP_IADD:
		return OP_IADD_IMM;
	case OP_ISUB:
		return OP_ISUB_IMM;
	case OP_IDIV:
		return OP_IDIV_IMM;
	case OP_IDIV_UN:
		return OP_IDIV_UN_IMM;
	case OP_IREM:
		return OP_IREM_IMM;
	case OP_IREM_UN:
		return OP_IREM_UN_IMM;
	case OP_IMUL:
		return OP_IMUL_IMM;
	case OP_IAND:
		return OP_IAND_IMM;
	case OP_IOR:
		return OP_IOR_IMM;
	case OP_IXOR:
		return OP_IXOR_IMM;
	case OP_ISHL:
		return OP_ISHL_IMM;
	case OP_ISHR:
		return OP_ISHR_IMM;
	case OP_ISHR_UN:
		return OP_ISHR_UN_IMM;

	/* 64 bit integer ALU ops */
	case OP_LADD:
		return OP_LADD_IMM;
	case OP_LSUB:
		return OP_LSUB_IMM;
	case OP_LAND:
		return OP_LAND_IMM;
	case OP_LOR:
		return OP_LOR_IMM;
	case OP_LXOR:
		return OP_LXOR_IMM;
	case OP_LSHL:
		return OP_LSHL_IMM;
	case OP_LSHR:
		return OP_LSHR_IMM;
	case OP_LSHR_UN:
		return OP_LSHR_UN_IMM;
#if SIZEOF_REGISTER == 8
	case OP_LREM:
		return OP_LREM_IMM;
#endif

	case OP_COMPARE:
		return OP_COMPARE_IMM;
	case OP_ICOMPARE:
		return OP_ICOMPARE_IMM;
	case OP_LCOMPARE:
		return OP_LCOMPARE_IMM;

	/* NB: no OP_STOREI8_MEMBASE_REG entry here; see store_membase_reg_to_store_membase_imm () */
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;

#if defined(TARGET_X86) || defined (TARGET_AMD64)
	case OP_X86_PUSH:
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#endif
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
	/* An indirect call whose target becomes a constant turns into a direct call */
	case OP_VOIDCALL_REG:
		return OP_VOIDCALL;
	case OP_CALL_REG:
		return OP_CALL;
	case OP_LCALL_REG:
		return OP_LCALL;
	case OP_FCALL_REG:
		return OP_FCALL;
	case OP_LOCALLOC:
		return OP_LOCALLOC_IMM;
	}

	return -1;
}
13873 static int
13874 ldind_to_load_membase (int opcode)
13876 switch (opcode) {
13877 case CEE_LDIND_I1:
13878 return OP_LOADI1_MEMBASE;
13879 case CEE_LDIND_U1:
13880 return OP_LOADU1_MEMBASE;
13881 case CEE_LDIND_I2:
13882 return OP_LOADI2_MEMBASE;
13883 case CEE_LDIND_U2:
13884 return OP_LOADU2_MEMBASE;
13885 case CEE_LDIND_I4:
13886 return OP_LOADI4_MEMBASE;
13887 case CEE_LDIND_U4:
13888 return OP_LOADU4_MEMBASE;
13889 case CEE_LDIND_I:
13890 return OP_LOAD_MEMBASE;
13891 case CEE_LDIND_REF:
13892 return OP_LOAD_MEMBASE;
13893 case CEE_LDIND_I8:
13894 return OP_LOADI8_MEMBASE;
13895 case CEE_LDIND_R4:
13896 return OP_LOADR4_MEMBASE;
13897 case CEE_LDIND_R8:
13898 return OP_LOADR8_MEMBASE;
13899 default:
13900 g_assert_not_reached ();
13903 return -1;
13906 static int
13907 stind_to_store_membase (int opcode)
13909 switch (opcode) {
13910 case CEE_STIND_I1:
13911 return OP_STOREI1_MEMBASE_REG;
13912 case CEE_STIND_I2:
13913 return OP_STOREI2_MEMBASE_REG;
13914 case CEE_STIND_I4:
13915 return OP_STOREI4_MEMBASE_REG;
13916 case CEE_STIND_I:
13917 case CEE_STIND_REF:
13918 return OP_STORE_MEMBASE_REG;
13919 case CEE_STIND_I8:
13920 return OP_STOREI8_MEMBASE_REG;
13921 case CEE_STIND_R4:
13922 return OP_STORER4_MEMBASE_REG;
13923 case CEE_STIND_R8:
13924 return OP_STORER8_MEMBASE_REG;
13925 default:
13926 g_assert_not_reached ();
13929 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to its OP_LOAD*_MEM counterpart, or
 * return -1 when that form is not available on the current target.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode OPCODE whose result is written back by STORE_OPCODE,
 * return an arch-specific opcode which performs the operation directly on the
 * memory destination, or -1 when no such fused form exists. Only implemented
 * for x86/amd64.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only native-width/int32 stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* Storing a plain move's source is handled by the store itself */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* Storing a plain move's source is handled by the store itself */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Map a compare-result opcode to an arch-specific SET*_MEMBASE opcode which
 * writes the flag byte straight to memory, provided the result would be stored
 * with a one byte store (STORE_OPCODE == OP_STOREI1_MEMBASE_REG). Return -1
 * when no such fusion is possible.
 *
 * The previous version relied on implicit fallthrough from the OP_ICEQ case
 * into the OP_CNE case when the store-size check failed; the guard is now
 * hoisted so each case is self-contained (behavior unchanged).
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (store_opcode == OP_STOREI1_MEMBASE_REG) {
		switch (opcode) {
		case OP_ICEQ:
			return OP_X86_SETEQ_MEMBASE;
		case OP_CNE:
			return OP_X86_SETNE_MEMBASE;
		default:
			break;
		}
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return a variant of OPCODE which reads its first source operand directly
 * from memory, folding away the LOAD_OPCODE instruction which produced it, or
 * -1 when no such form exists on this target. Only implemented for x86/amd64.
 */
static inline int
op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only native-width/int32 loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* Under ILP32, OP_LOAD_MEMBASE is a 32 bit load, so it can't feed a 64 bit push */
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return a variant of OPCODE which reads its second source operand directly
 * from memory, folding away the LOAD_OPCODE instruction which produced it, or
 * -1 when no such form exists on this target. Only implemented for x86/amd64.
 */
static inline int
op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* Only native-width/int32 loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* Under ILP32, OP_LOAD_MEMBASE is a 32 bit load and pairs with the 32 bit ops */
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes which, judging by
 * the MONO_ARCH_EMULATE_* macros, are emulated on this architecture and so
 * have no usable immediate form.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
14224 * mono_handle_global_vregs:
14226 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
14227 * for them.
14229 void
14230 mono_handle_global_vregs (MonoCompile *cfg)
14232 gint32 *vreg_to_bb;
14233 MonoBasicBlock *bb;
14234 int i, pos;
14236 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14238 #ifdef MONO_ARCH_SIMD_INTRINSICS
14239 if (cfg->uses_simd_intrinsics)
14240 mono_simd_simplify_indirection (cfg);
14241 #endif
14243 /* Find local vregs used in more than one bb */
14244 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14245 MonoInst *ins = bb->code;
14246 int block_num = bb->block_num;
14248 if (cfg->verbose_level > 2)
14249 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14251 cfg->cbb = bb;
14252 for (; ins; ins = ins->next) {
14253 const char *spec = INS_INFO (ins->opcode);
14254 int regtype = 0, regindex;
14255 gint32 prev_bb;
14257 if (G_UNLIKELY (cfg->verbose_level > 2))
14258 mono_print_ins (ins);
14260 g_assert (ins->opcode >= MONO_CEE_LAST);
14262 for (regindex = 0; regindex < 4; regindex ++) {
14263 int vreg = 0;
14265 if (regindex == 0) {
14266 regtype = spec [MONO_INST_DEST];
14267 if (regtype == ' ')
14268 continue;
14269 vreg = ins->dreg;
14270 } else if (regindex == 1) {
14271 regtype = spec [MONO_INST_SRC1];
14272 if (regtype == ' ')
14273 continue;
14274 vreg = ins->sreg1;
14275 } else if (regindex == 2) {
14276 regtype = spec [MONO_INST_SRC2];
14277 if (regtype == ' ')
14278 continue;
14279 vreg = ins->sreg2;
14280 } else if (regindex == 3) {
14281 regtype = spec [MONO_INST_SRC3];
14282 if (regtype == ' ')
14283 continue;
14284 vreg = ins->sreg3;
14287 #if SIZEOF_REGISTER == 4
14288 /* In the LLVM case, the long opcodes are not decomposed */
14289 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14291 * Since some instructions reference the original long vreg,
14292 * and some reference the two component vregs, it is quite hard
14293 * to determine when it needs to be global. So be conservative.
14295 if (!get_vreg_to_inst (cfg, vreg)) {
14296 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14298 if (cfg->verbose_level > 2)
14299 printf ("LONG VREG R%d made global.\n", vreg);
14303 * Make the component vregs volatile since the optimizations can
14304 * get confused otherwise.
14306 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14307 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14309 #endif
14311 g_assert (vreg != -1);
14313 prev_bb = vreg_to_bb [vreg];
14314 if (prev_bb == 0) {
14315 /* 0 is a valid block num */
14316 vreg_to_bb [vreg] = block_num + 1;
14317 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
14318 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14319 continue;
14321 if (!get_vreg_to_inst (cfg, vreg)) {
14322 if (G_UNLIKELY (cfg->verbose_level > 2))
14323 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
14325 switch (regtype) {
14326 case 'i':
14327 if (vreg_is_ref (cfg, vreg))
14328 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14329 else
14330 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14331 break;
14332 case 'l':
14333 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14334 break;
14335 case 'f':
14336 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14337 break;
14338 case 'v':
14339 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14340 break;
14341 default:
14342 g_assert_not_reached ();
14346 /* Flag as having been used in more than one bb */
14347 vreg_to_bb [vreg] = -1;
14353 /* If a variable is used in only one bblock, convert it into a local vreg */
14354 for (i = 0; i < cfg->num_varinfo; i++) {
14355 MonoInst *var = cfg->varinfo [i];
14356 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14358 switch (var->type) {
14359 case STACK_I4:
14360 case STACK_OBJ:
14361 case STACK_PTR:
14362 case STACK_MP:
14363 case STACK_VTYPE:
14364 #if SIZEOF_REGISTER == 8
14365 case STACK_I8:
14366 #endif
14367 #if !defined(TARGET_X86)
14368 /* Enabling this screws up the fp stack on x86 */
14369 case STACK_R8:
14370 #endif
14371 if (mono_arch_is_soft_float ())
14372 break;
14375 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14376 break;
14379 /* Arguments are implicitly global */
14380 /* Putting R4 vars into registers doesn't work currently */
14381 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14382 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14384 * Make that the variable's liveness interval doesn't contain a call, since
14385 * that would cause the lvreg to be spilled, making the whole optimization
14386 * useless.
14388 /* This is too slow for JIT compilation */
14389 #if 0
14390 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14391 MonoInst *ins;
14392 int def_index, call_index, ins_index;
14393 gboolean spilled = FALSE;
14395 def_index = -1;
14396 call_index = -1;
14397 ins_index = 0;
14398 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14399 const char *spec = INS_INFO (ins->opcode);
14401 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14402 def_index = ins_index;
14404 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14405 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14406 if (call_index > def_index) {
14407 spilled = TRUE;
14408 break;
14412 if (MONO_IS_CALL (ins))
14413 call_index = ins_index;
14415 ins_index ++;
14418 if (spilled)
14419 break;
14421 #endif
14423 if (G_UNLIKELY (cfg->verbose_level > 2))
14424 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
14425 var->flags |= MONO_INST_IS_DEAD;
14426 cfg->vreg_to_inst [var->dreg] = NULL;
14428 break;
14433 * Compress the varinfo and vars tables so the liveness computation is faster and
14434 * takes up less space.
14436 pos = 0;
14437 for (i = 0; i < cfg->num_varinfo; ++i) {
14438 MonoInst *var = cfg->varinfo [i];
14439 if (pos < i && cfg->locals_start == i)
14440 cfg->locals_start = pos;
14441 if (!(var->flags & MONO_INST_IS_DEAD)) {
14442 if (pos < i) {
14443 cfg->varinfo [pos] = cfg->varinfo [i];
14444 cfg->varinfo [pos]->inst_c0 = pos;
14445 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14446 cfg->vars [pos].idx = pos;
14447 #if SIZEOF_REGISTER == 4
14448 if (cfg->varinfo [pos]->type == STACK_I8) {
14449 /* Modify the two component vars too */
14450 MonoInst *var1;
14452 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14453 var1->inst_c0 = pos;
14454 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14455 var1->inst_c0 = pos;
14457 #endif
14459 pos ++;
14462 cfg->num_varinfo = pos;
14463 if (cfg->locals_start > cfg->num_varinfo)
14464 cfg->locals_start = cfg->num_varinfo;
/*
 * mono_allocate_gsharedvt_vars:
 *
 *   Allocate variables with gsharedvt types to entries in the
 * MonoGSharedVtMethodRuntimeInfo.entries array.
 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and
 * indexes. The table (zeroed by mono_mempool_alloc0) encodes:
 *   0   -> vreg is not a gsharedvt variable
 *   -1  -> gsharedvt argument (passed by ref)
 *   n>0 -> gsharedvt local, entries index n - 1
 */
void
mono_allocate_gsharedvt_vars (MonoCompile *cfg)
{
	int i;

	cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);

	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *ins = cfg->varinfo [i];
		int idx;

		if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
			if (i >= cfg->locals_start) {
				/* Local */
				idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
				/* Store idx + 1 so 0 can mean 'no entry' */
				cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
				ins->opcode = OP_GSHAREDVT_LOCAL;
				ins->inst_imm = idx;
			} else {
				/* Arg */
				cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
				ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
			}
		}
	}
}
14501 * mono_spill_global_vars:
14503 * Generate spill code for variables which are not allocated to registers,
14504 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14505 * code is generated which could be optimized by the local optimization passes.
14507 void
14508 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14510 MonoBasicBlock *bb;
14511 char spec2 [16];
14512 int orig_next_vreg;
14513 guint32 *vreg_to_lvreg;
14514 guint32 *lvregs;
14515 guint32 i, lvregs_len;
14516 gboolean dest_has_lvreg = FALSE;
14517 MonoStackType stacktypes [128];
14518 MonoInst **live_range_start, **live_range_end;
14519 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14521 *need_local_opts = FALSE;
14523 memset (spec2, 0, sizeof (spec2));
14525 /* FIXME: Move this function to mini.c */
14526 stacktypes ['i'] = STACK_PTR;
14527 stacktypes ['l'] = STACK_I8;
14528 stacktypes ['f'] = STACK_R8;
14529 #ifdef MONO_ARCH_SIMD_INTRINSICS
14530 stacktypes ['x'] = STACK_VTYPE;
14531 #endif
14533 #if SIZEOF_REGISTER == 4
14534 /* Create MonoInsts for longs */
14535 for (i = 0; i < cfg->num_varinfo; i++) {
14536 MonoInst *ins = cfg->varinfo [i];
14538 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14539 switch (ins->type) {
14540 case STACK_R8:
14541 case STACK_I8: {
14542 MonoInst *tree;
14544 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14545 break;
14547 g_assert (ins->opcode == OP_REGOFFSET);
14549 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14550 g_assert (tree);
14551 tree->opcode = OP_REGOFFSET;
14552 tree->inst_basereg = ins->inst_basereg;
14553 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14555 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14556 g_assert (tree);
14557 tree->opcode = OP_REGOFFSET;
14558 tree->inst_basereg = ins->inst_basereg;
14559 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14560 break;
14562 default:
14563 break;
14567 #endif
14569 if (cfg->compute_gc_maps) {
14570 /* registers need liveness info even for !non refs */
14571 for (i = 0; i < cfg->num_varinfo; i++) {
14572 MonoInst *ins = cfg->varinfo [i];
14574 if (ins->opcode == OP_REGVAR)
14575 ins->flags |= MONO_INST_GC_TRACK;
14579 /* FIXME: widening and truncation */
14582 * As an optimization, when a variable allocated to the stack is first loaded into
14583 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14584 * the variable again.
14586 orig_next_vreg = cfg->next_vreg;
14587 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14588 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14589 lvregs_len = 0;
14592 * These arrays contain the first and last instructions accessing a given
14593 * variable.
14594 * Since we emit bblocks in the same order we process them here, and we
14595 * don't split live ranges, these will precisely describe the live range of
14596 * the variable, i.e. the instruction range where a valid value can be found
14597 * in the variables location.
14598 * The live range is computed using the liveness info computed by the liveness pass.
14599 * We can't use vmv->range, since that is an abstract live range, and we need
14600 * one which is instruction precise.
14601 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14603 /* FIXME: Only do this if debugging info is requested */
14604 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14605 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14606 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14607 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14609 /* Add spill loads/stores */
14610 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14611 MonoInst *ins;
14613 if (cfg->verbose_level > 2)
14614 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14616 /* Clear vreg_to_lvreg array */
14617 for (i = 0; i < lvregs_len; i++)
14618 vreg_to_lvreg [lvregs [i]] = 0;
14619 lvregs_len = 0;
14621 cfg->cbb = bb;
14622 MONO_BB_FOR_EACH_INS (bb, ins) {
14623 const char *spec = INS_INFO (ins->opcode);
14624 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14625 gboolean store, no_lvreg;
14626 int sregs [MONO_MAX_SRC_REGS];
14628 if (G_UNLIKELY (cfg->verbose_level > 2))
14629 mono_print_ins (ins);
14631 if (ins->opcode == OP_NOP)
14632 continue;
14635 * We handle LDADDR here as well, since it can only be decomposed
14636 * when variable addresses are known.
14638 if (ins->opcode == OP_LDADDR) {
14639 MonoInst *var = (MonoInst *)ins->inst_p0;
14641 if (var->opcode == OP_VTARG_ADDR) {
14642 /* Happens on SPARC/S390 where vtypes are passed by reference */
14643 MonoInst *vtaddr = var->inst_left;
14644 if (vtaddr->opcode == OP_REGVAR) {
14645 ins->opcode = OP_MOVE;
14646 ins->sreg1 = vtaddr->dreg;
14648 else if (var->inst_left->opcode == OP_REGOFFSET) {
14649 ins->opcode = OP_LOAD_MEMBASE;
14650 ins->inst_basereg = vtaddr->inst_basereg;
14651 ins->inst_offset = vtaddr->inst_offset;
14652 } else
14653 NOT_IMPLEMENTED;
14654 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14655 /* gsharedvt arg passed by ref */
14656 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14658 ins->opcode = OP_LOAD_MEMBASE;
14659 ins->inst_basereg = var->inst_basereg;
14660 ins->inst_offset = var->inst_offset;
14661 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14662 MonoInst *load, *load2, *load3;
14663 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14664 int reg1, reg2, reg3;
14665 MonoInst *info_var = cfg->gsharedvt_info_var;
14666 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14669 * gsharedvt local.
14670 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14673 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14675 g_assert (info_var);
14676 g_assert (locals_var);
14678 /* Mark the instruction used to compute the locals var as used */
14679 cfg->gsharedvt_locals_var_ins = NULL;
14681 /* Load the offset */
14682 if (info_var->opcode == OP_REGOFFSET) {
14683 reg1 = alloc_ireg (cfg);
14684 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14685 } else if (info_var->opcode == OP_REGVAR) {
14686 load = NULL;
14687 reg1 = info_var->dreg;
14688 } else {
14689 g_assert_not_reached ();
14691 reg2 = alloc_ireg (cfg);
14692 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14693 /* Load the locals area address */
14694 reg3 = alloc_ireg (cfg);
14695 if (locals_var->opcode == OP_REGOFFSET) {
14696 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14697 } else if (locals_var->opcode == OP_REGVAR) {
14698 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14699 } else {
14700 g_assert_not_reached ();
14702 /* Compute the address */
14703 ins->opcode = OP_PADD;
14704 ins->sreg1 = reg3;
14705 ins->sreg2 = reg2;
14707 mono_bblock_insert_before_ins (bb, ins, load3);
14708 mono_bblock_insert_before_ins (bb, load3, load2);
14709 if (load)
14710 mono_bblock_insert_before_ins (bb, load2, load);
14711 } else {
14712 g_assert (var->opcode == OP_REGOFFSET);
14714 ins->opcode = OP_ADD_IMM;
14715 ins->sreg1 = var->inst_basereg;
14716 ins->inst_imm = var->inst_offset;
14719 *need_local_opts = TRUE;
14720 spec = INS_INFO (ins->opcode);
14723 if (ins->opcode < MONO_CEE_LAST) {
14724 mono_print_ins (ins);
14725 g_assert_not_reached ();
14729 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14730 * src register.
14731 * FIXME:
14733 if (MONO_IS_STORE_MEMBASE (ins)) {
14734 tmp_reg = ins->dreg;
14735 ins->dreg = ins->sreg2;
14736 ins->sreg2 = tmp_reg;
14737 store = TRUE;
14739 spec2 [MONO_INST_DEST] = ' ';
14740 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14741 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14742 spec2 [MONO_INST_SRC3] = ' ';
14743 spec = spec2;
14744 } else if (MONO_IS_STORE_MEMINDEX (ins))
14745 g_assert_not_reached ();
14746 else
14747 store = FALSE;
14748 no_lvreg = FALSE;
14750 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14751 printf ("\t %.3s %d", spec, ins->dreg);
14752 num_sregs = mono_inst_get_src_registers (ins, sregs);
14753 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14754 printf (" %d", sregs [srcindex]);
14755 printf ("\n");
14758 /***************/
14759 /* DREG */
14760 /***************/
14761 regtype = spec [MONO_INST_DEST];
14762 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14763 prev_dreg = -1;
14765 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14766 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14767 MonoInst *store_ins;
14768 int store_opcode;
14769 MonoInst *def_ins = ins;
14770 int dreg = ins->dreg; /* The original vreg */
14772 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14774 if (var->opcode == OP_REGVAR) {
14775 ins->dreg = var->dreg;
14776 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14778 * Instead of emitting a load+store, use a _membase opcode.
14780 g_assert (var->opcode == OP_REGOFFSET);
14781 if (ins->opcode == OP_MOVE) {
14782 NULLIFY_INS (ins);
14783 def_ins = NULL;
14784 } else {
14785 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14786 ins->inst_basereg = var->inst_basereg;
14787 ins->inst_offset = var->inst_offset;
14788 ins->dreg = -1;
14790 spec = INS_INFO (ins->opcode);
14791 } else {
14792 guint32 lvreg;
14794 g_assert (var->opcode == OP_REGOFFSET);
14796 prev_dreg = ins->dreg;
14798 /* Invalidate any previous lvreg for this vreg */
14799 vreg_to_lvreg [ins->dreg] = 0;
14801 lvreg = 0;
14803 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14804 regtype = 'l';
14805 store_opcode = OP_STOREI8_MEMBASE_REG;
14808 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14810 #if SIZEOF_REGISTER != 8
14811 if (regtype == 'l') {
14812 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14813 mono_bblock_insert_after_ins (bb, ins, store_ins);
14814 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14815 mono_bblock_insert_after_ins (bb, ins, store_ins);
14816 def_ins = store_ins;
14818 else
14819 #endif
14821 g_assert (store_opcode != OP_STOREV_MEMBASE);
14823 /* Try to fuse the store into the instruction itself */
14824 /* FIXME: Add more instructions */
14825 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14826 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14827 ins->inst_imm = ins->inst_c0;
14828 ins->inst_destbasereg = var->inst_basereg;
14829 ins->inst_offset = var->inst_offset;
14830 spec = INS_INFO (ins->opcode);
14831 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14832 ins->opcode = store_opcode;
14833 ins->inst_destbasereg = var->inst_basereg;
14834 ins->inst_offset = var->inst_offset;
14836 no_lvreg = TRUE;
14838 tmp_reg = ins->dreg;
14839 ins->dreg = ins->sreg2;
14840 ins->sreg2 = tmp_reg;
14841 store = TRUE;
14843 spec2 [MONO_INST_DEST] = ' ';
14844 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14845 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14846 spec2 [MONO_INST_SRC3] = ' ';
14847 spec = spec2;
14848 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14849 // FIXME: The backends expect the base reg to be in inst_basereg
14850 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14851 ins->dreg = -1;
14852 ins->inst_basereg = var->inst_basereg;
14853 ins->inst_offset = var->inst_offset;
14854 spec = INS_INFO (ins->opcode);
14855 } else {
14856 /* printf ("INS: "); mono_print_ins (ins); */
14857 /* Create a store instruction */
14858 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14860 /* Insert it after the instruction */
14861 mono_bblock_insert_after_ins (bb, ins, store_ins);
14863 def_ins = store_ins;
14866 * We can't assign ins->dreg to var->dreg here, since the
14867 * sregs could use it. So set a flag, and do it after
14868 * the sregs.
14870 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14871 dest_has_lvreg = TRUE;
14876 if (def_ins && !live_range_start [dreg]) {
14877 live_range_start [dreg] = def_ins;
14878 live_range_start_bb [dreg] = bb;
14881 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14882 MonoInst *tmp;
14884 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14885 tmp->inst_c1 = dreg;
14886 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14890 /************/
14891 /* SREGS */
14892 /************/
14893 num_sregs = mono_inst_get_src_registers (ins, sregs);
14894 for (srcindex = 0; srcindex < 3; ++srcindex) {
14895 regtype = spec [MONO_INST_SRC1 + srcindex];
14896 sreg = sregs [srcindex];
14898 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14899 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14900 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14901 MonoInst *use_ins = ins;
14902 MonoInst *load_ins;
14903 guint32 load_opcode;
14905 if (var->opcode == OP_REGVAR) {
14906 sregs [srcindex] = var->dreg;
14907 //mono_inst_set_src_registers (ins, sregs);
14908 live_range_end [sreg] = use_ins;
14909 live_range_end_bb [sreg] = bb;
14911 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14912 MonoInst *tmp;
14914 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14915 /* var->dreg is a hreg */
14916 tmp->inst_c1 = sreg;
14917 mono_bblock_insert_after_ins (bb, ins, tmp);
14920 continue;
14923 g_assert (var->opcode == OP_REGOFFSET);
14925 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14927 g_assert (load_opcode != OP_LOADV_MEMBASE);
14929 if (vreg_to_lvreg [sreg]) {
14930 g_assert (vreg_to_lvreg [sreg] != -1);
14932 /* The variable is already loaded to an lvreg */
14933 if (G_UNLIKELY (cfg->verbose_level > 2))
14934 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14935 sregs [srcindex] = vreg_to_lvreg [sreg];
14936 //mono_inst_set_src_registers (ins, sregs);
14937 continue;
14940 /* Try to fuse the load into the instruction */
14941 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14942 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14943 sregs [0] = var->inst_basereg;
14944 //mono_inst_set_src_registers (ins, sregs);
14945 ins->inst_offset = var->inst_offset;
14946 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14947 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14948 sregs [1] = var->inst_basereg;
14949 //mono_inst_set_src_registers (ins, sregs);
14950 ins->inst_offset = var->inst_offset;
14951 } else {
14952 if (MONO_IS_REAL_MOVE (ins)) {
14953 ins->opcode = OP_NOP;
14954 sreg = ins->dreg;
14955 } else {
14956 //printf ("%d ", srcindex); mono_print_ins (ins);
14958 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14960 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14961 if (var->dreg == prev_dreg) {
14963 * sreg refers to the value loaded by the load
14964 * emitted below, but we need to use ins->dreg
14965 * since it refers to the store emitted earlier.
14967 sreg = ins->dreg;
14969 g_assert (sreg != -1);
14970 vreg_to_lvreg [var->dreg] = sreg;
14971 g_assert (lvregs_len < 1024);
14972 lvregs [lvregs_len ++] = var->dreg;
14976 sregs [srcindex] = sreg;
14977 //mono_inst_set_src_registers (ins, sregs);
14979 #if SIZEOF_REGISTER != 8
14980 if (regtype == 'l') {
14981 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14982 mono_bblock_insert_before_ins (bb, ins, load_ins);
14983 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14984 mono_bblock_insert_before_ins (bb, ins, load_ins);
14985 use_ins = load_ins;
14987 else
14988 #endif
14990 #if SIZEOF_REGISTER == 4
14991 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14992 #endif
14993 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14994 mono_bblock_insert_before_ins (bb, ins, load_ins);
14995 use_ins = load_ins;
14999 if (var->dreg < orig_next_vreg) {
15000 live_range_end [var->dreg] = use_ins;
15001 live_range_end_bb [var->dreg] = bb;
15004 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
15005 MonoInst *tmp;
15007 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
15008 tmp->inst_c1 = var->dreg;
15009 mono_bblock_insert_after_ins (bb, ins, tmp);
15013 mono_inst_set_src_registers (ins, sregs);
15015 if (dest_has_lvreg) {
15016 g_assert (ins->dreg != -1);
15017 vreg_to_lvreg [prev_dreg] = ins->dreg;
15018 g_assert (lvregs_len < 1024);
15019 lvregs [lvregs_len ++] = prev_dreg;
15020 dest_has_lvreg = FALSE;
15023 if (store) {
15024 tmp_reg = ins->dreg;
15025 ins->dreg = ins->sreg2;
15026 ins->sreg2 = tmp_reg;
15029 if (MONO_IS_CALL (ins)) {
15030 /* Clear vreg_to_lvreg array */
15031 for (i = 0; i < lvregs_len; i++)
15032 vreg_to_lvreg [lvregs [i]] = 0;
15033 lvregs_len = 0;
15034 } else if (ins->opcode == OP_NOP) {
15035 ins->dreg = -1;
15036 MONO_INST_NULLIFY_SREGS (ins);
15039 if (cfg->verbose_level > 2)
15040 mono_print_ins_index (1, ins);
15043 /* Extend the live range based on the liveness info */
15044 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
15045 for (i = 0; i < cfg->num_varinfo; i ++) {
15046 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
15048 if (vreg_is_volatile (cfg, vi->vreg))
15049 /* The liveness info is incomplete */
15050 continue;
15052 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
15053 /* Live from at least the first ins of this bb */
15054 live_range_start [vi->vreg] = bb->code;
15055 live_range_start_bb [vi->vreg] = bb;
15058 if (mono_bitset_test_fast (bb->live_out_set, i)) {
15059 /* Live at least until the last ins of this bb */
15060 live_range_end [vi->vreg] = bb->last_ins;
15061 live_range_end_bb [vi->vreg] = bb;
15068 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
15069 * by storing the current native offset into MonoMethodVar->live_range_start/end.
15071 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
15072 for (i = 0; i < cfg->num_varinfo; ++i) {
15073 int vreg = MONO_VARINFO (cfg, i)->vreg;
15074 MonoInst *ins;
15076 if (live_range_start [vreg]) {
15077 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
15078 ins->inst_c0 = i;
15079 ins->inst_c1 = vreg;
15080 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
15082 if (live_range_end [vreg]) {
15083 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
15084 ins->inst_c0 = i;
15085 ins->inst_c1 = vreg;
15086 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
15087 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
15088 else
15089 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15094 if (cfg->gsharedvt_locals_var_ins) {
15095 /* Nullify if unused */
15096 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15097 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15100 g_free (live_range_start);
15101 g_free (live_range_end);
15102 g_free (live_range_start_bb);
15103 g_free (live_range_end_bb);
15106 static void
15107 mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
15109 MonoInst *ret, *move, *source;
15110 MonoClass *klass = ins->klass;
15111 int context_used = mini_class_check_context_used (cfg, klass);
15112 int is_isinst = ins->opcode == OP_ISINST;
15113 g_assert (is_isinst || ins->opcode == OP_CASTCLASS);
15114 source = get_vreg_to_inst (cfg, ins->sreg1);
15115 if (!source || source == (MonoInst *) -1)
15116 source = mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, ins->sreg1);
15117 g_assert (source && source != (MonoInst *) -1);
15119 MonoBasicBlock *first_bb;
15120 NEW_BBLOCK (cfg, first_bb);
15121 cfg->cbb = first_bb;
15123 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
15124 if (is_isinst)
15125 ret = emit_isinst_with_cache_nonshared (cfg, source, klass);
15126 else
15127 ret = emit_castclass_with_cache_nonshared (cfg, source, klass);
15128 } else if (!context_used && (mono_class_is_marshalbyref (klass) || mono_class_is_interface (klass))) {
15129 MonoInst *iargs [1];
15130 int costs;
15132 iargs [0] = source;
15133 if (is_isinst) {
15134 MonoMethod *wrapper = mono_marshal_get_isinst (klass);
15135 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15136 } else {
15137 MonoMethod *wrapper = mono_marshal_get_castclass (klass);
15138 save_cast_details (cfg, klass, source->dreg, TRUE);
15139 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15140 reset_cast_details (cfg);
15142 g_assert (costs > 0);
15143 ret = iargs [0];
15144 } else {
15145 if (is_isinst)
15146 ret = handle_isinst (cfg, klass, source, context_used);
15147 else
15148 ret = handle_castclass (cfg, klass, source, context_used);
15150 EMIT_NEW_UNALU (cfg, move, OP_MOVE, ins->dreg, ret->dreg);
15152 g_assert (cfg->cbb->code || first_bb->code);
15153 MonoInst *prev = ins->prev;
15154 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
15157 void
15158 mono_decompose_typechecks (MonoCompile *cfg)
15160 for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
15161 MonoInst *ins;
15162 MONO_BB_FOR_EACH_INS (bb, ins) {
15163 switch (ins->opcode) {
15164 case OP_ISINST:
15165 case OP_CASTCLASS:
15166 mono_decompose_typecheck (cfg, bb, ins);
15167 break;
15175 * FIXME:
15176 * - use 'iadd' instead of 'int_add'
15177 * - handling ovf opcodes: decompose in method_to_ir.
15178 * - unify iregs/fregs
15179 * -> partly done, the missing parts are:
15180 * - a more complete unification would involve unifying the hregs as well, so
15181 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15182 * would no longer map to the machine hregs, so the code generators would need to
15183 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15184 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15185 * fp/non-fp branches speeds it up by about 15%.
15186 * - use sext/zext opcodes instead of shifts
15187 * - add OP_ICALL
15188 * - get rid of TEMPLOADs if possible and use vregs instead
15189 * - clean up usage of OP_P/OP_ opcodes
15190 * - cleanup usage of DUMMY_USE
15191 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15192 * stack
15193 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15194 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15195 * - make sure handle_stack_args () is called before the branch is emitted
15196 * - when the new IR is done, get rid of all unused stuff
15197 * - COMPARE/BEQ as separate instructions or unify them ?
15198 * - keeping them separate allows specialized compare instructions like
15199 * compare_imm, compare_membase
15200 * - most back ends unify fp compare+branch, fp compare+ceq
15201 * - integrate mono_save_args into inline_method
15202  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15203 * - handle long shift opts on 32 bit platforms somehow: they require
15204 * 3 sregs (2 for arg1 and 1 for arg2)
15205 * - make byref a 'normal' type.
15206 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15207 * variable if needed.
15208 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15209 * like inline_method.
15210 * - remove inlining restrictions
15211 * - fix LNEG and enable cfold of INEG
15212 * - generalize x86 optimizations like ldelema as a peephole optimization
15213 * - add store_mem_imm for amd64
15214 * - optimize the loading of the interruption flag in the managed->native wrappers
15215 * - avoid special handling of OP_NOP in passes
15216 * - move code inserting instructions into one function/macro.
15217 * - try a coalescing phase after liveness analysis
15218 * - add float -> vreg conversion + local optimizations on !x86
15219 * - figure out how to handle decomposed branches during optimizations, ie.
15220 * compare+branch, op_jump_table+op_br etc.
15221 * - promote RuntimeXHandles to vregs
15222 * - vtype cleanups:
15223 * - add a NEW_VARLOADA_VREG macro
15224 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15225 * accessing vtype fields.
15226 * - get rid of I8CONST on 64 bit platforms
15227 * - dealing with the increase in code size due to branches created during opcode
15228 * decomposition:
15229 * - use extended basic blocks
15230 * - all parts of the JIT
15231 * - handle_global_vregs () && local regalloc
15232 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15233 * - sources of increase in code size:
15234 * - vtypes
15235 * - long compares
15236 * - isinst and castclass
15237 * - lvregs not allocated to global registers even if used multiple times
15238 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15239 * meaningful.
15240 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15241 * - add all micro optimizations from the old JIT
15242 * - put tree optimizations into the deadce pass
15243 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15244 * specific function.
15245 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15246 * fcompare + branchCC.
15247 * - create a helper function for allocating a stack slot, taking into account
15248 * MONO_CFG_HAS_SPILLUP.
15249 * - merge r68207.
15250 * - merge the ia64 switch changes.
15251 * - optimize mono_regstate2_alloc_int/float.
15252 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15253 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15254 * parts of the tree could be separated by other instructions, killing the tree
15255 * arguments, or stores killing loads etc. Also, should we fold loads into other
15256 * instructions if the result of the load is used multiple times ?
15257 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15258 * - LAST MERGE: 108395.
15259 * - when returning vtypes in registers, generate IR and append it to the end of the
15260 * last bb instead of doing it in the epilog.
15261 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15266 NOTES
15267 -----
15269 - When to decompose opcodes:
15270 - earlier: this makes some optimizations hard to implement, since the low level IR
15271 no longer contains the necessary information. But it is easier to do.
15272 - later: harder to implement, enables more optimizations.
15273 - Branches inside bblocks:
15274 - created when decomposing complex opcodes.
15275 - branches to another bblock: harmless, but not tracked by the branch
15276 optimizations, so need to branch to a label at the start of the bblock.
15277 - branches to inside the same bblock: very problematic, trips up the local
15278 reg allocator. Can be fixed by splitting the current bblock, but that is a
15279 complex operation, since some local vregs can become global vregs etc.
15280 - Local/global vregs:
15281 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15282 local register allocator.
15283 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15284 structure, created by mono_create_var (). Assigned to hregs or the stack by
15285 the global register allocator.
15286 - When to do optimizations like alu->alu_imm:
15287 - earlier -> saves work later on since the IR will be smaller/simpler
15288 - later -> can work on more instructions
15289 - Handling of valuetypes:
15290 - When a vtype is pushed on the stack, a new temporary is created, an
15291 instruction computing its address (LDADDR) is emitted and pushed on
15292 the stack. Need to optimize cases when the vtype is used immediately as in
15293 argument passing, stloc etc.
15294 - Instead of the to_end stuff in the old JIT, simply call the function handling
15295 the values on the stack before emitting the last instruction of the bb.
15298 #endif /* DISABLE_JIT */