/*
 * (web-viewer residue from the scrape — kept for provenance)
 * update rx (mobile builds).
 * [mono-project.git] / mono / mini / method-to-ir.c
 * blob 491a25364e35b3da3b49b5d3e8d6fe59cf7e5945
 */
/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
13 #include <config.h>
14 #include <signal.h>
16 #ifdef HAVE_UNISTD_H
17 #include <unistd.h>
18 #endif
20 #include <math.h>
21 #include <string.h>
22 #include <ctype.h>
24 #ifdef HAVE_SYS_TIME_H
25 #include <sys/time.h>
26 #endif
28 #ifdef HAVE_ALLOCA_H
29 #include <alloca.h>
30 #endif
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/metadata/debug-mono-symfile.h>
56 #include <mono/utils/mono-compiler.h>
57 #include <mono/utils/mono-memory-model.h>
58 #include <mono/metadata/mono-basic-block.h>
60 #include "mini.h"
61 #include "trace.h"
63 #include "ir-emit.h"
65 #include "jit-icalls.h"
66 #include "jit.h"
67 #include "debugger-agent.h"
69 #define BRANCH_COST 10
70 #define INLINE_LENGTH_LIMIT 20
71 #define INLINE_FAILURE(msg) do { \
72 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
73 if (cfg->verbose_level >= 2) \
74 printf ("inline failed: %s\n", msg); \
75 goto inline_failure; \
76 } \
77 } while (0)
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
80 goto exception_exit;\
81 } while (0)
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
90 } while (0)
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
99 } while (0)
/*
 * GENERIC_SHARING_FAILURE:
 * If the method is being compiled with generic sharing, abort the current
 * compilation with MONO_EXCEPTION_GENERIC_SHARING_FAILED so it gets recompiled
 * as a normal instantiation. Outside a shared context it is a no-op.
 * NOTE: the closing '}' of the inner if-block was dropped by the extraction;
 * restored here.
 */
#define GENERIC_SHARING_FAILURE(opcode) do { \
		if (cfg->generic_sharing_context) { \
			if (cfg->verbose_level > 2) \
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
			goto exception_exit; \
		} \
	} while (0)
/*
 * GSHAREDVT_FAILURE:
 * Abort a gsharedvt (generic sharing over valuetypes) compilation, recording a
 * descriptive message in cfg->exception_message, so the method is compiled as
 * a concrete instantiation instead. No-op when cfg->gsharedvt is not set.
 * NOTE: the closing '}' of the if-block was dropped by the extraction; restored.
 */
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
		if (cfg->verbose_level >= 2) \
			printf ("%s\n", cfg->exception_message); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	} while (0)
117 #define OUT_OF_MEMORY_FAILURE do { \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
119 goto exception_exit; \
120 } while (0)
121 /* Determine whenever 'ins' represents a load of the 'this' argument */
122 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
124 static int ldind_to_load_membase (int opcode);
125 static int stind_to_store_membase (int opcode);
127 int mono_op_to_op_imm (int opcode);
128 int mono_op_to_op_imm_noemul (int opcode);
130 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
132 /* helper methods signatures */
133 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
134 static MonoMethodSignature *helper_sig_domain_get = NULL;
135 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
136 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
137 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
138 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
142 * Instruction metadata
144 #ifdef MINI_OP
145 #undef MINI_OP
146 #endif
147 #ifdef MINI_OP3
148 #undef MINI_OP3
149 #endif
150 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
151 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
152 #define NONE ' '
153 #define IREG 'i'
154 #define FREG 'f'
155 #define VREG 'v'
156 #define XREG 'x'
157 #if SIZEOF_REGISTER == 8
158 #define LREG IREG
159 #else
160 #define LREG 'l'
161 #endif
162 /* keep in sync with the enum in mini.h */
163 const char
164 ins_info[] = {
165 #include "mini-ops.h"
167 #undef MINI_OP
168 #undef MINI_OP3
170 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
171 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
173 * This should contain the index of the last sreg + 1. This is not the same
174 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
176 const gint8 ins_sreg_counts[] = {
177 #include "mini-ops.h"
179 #undef MINI_OP
180 #undef MINI_OP3
182 #define MONO_INIT_VARINFO(vi,id) do { \
183 (vi)->range.first_use.pos.bid = 0xffff; \
184 (vi)->reg = -1; \
185 (vi)->idx = (id); \
186 } while (0)
188 void
189 mono_inst_set_src_registers (MonoInst *ins, int *regs)
191 ins->sreg1 = regs [0];
192 ins->sreg2 = regs [1];
193 ins->sreg3 = regs [2];
196 guint32
197 mono_alloc_ireg (MonoCompile *cfg)
199 return alloc_ireg (cfg);
202 guint32
203 mono_alloc_freg (MonoCompile *cfg)
205 return alloc_freg (cfg);
208 guint32
209 mono_alloc_preg (MonoCompile *cfg)
211 return alloc_preg (cfg);
214 guint32
215 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
217 return alloc_dreg (cfg, stack_type);
221 * mono_alloc_ireg_ref:
223 * Allocate an IREG, and mark it as holding a GC ref.
225 guint32
226 mono_alloc_ireg_ref (MonoCompile *cfg)
228 return alloc_ireg_ref (cfg);
232 * mono_alloc_ireg_mp:
234 * Allocate an IREG, and mark it as holding a managed pointer.
236 guint32
237 mono_alloc_ireg_mp (MonoCompile *cfg)
239 return alloc_ireg_mp (cfg);
243 * mono_alloc_ireg_copy:
245 * Allocate an IREG with the same GC type as VREG.
247 guint32
248 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
250 if (vreg_is_ref (cfg, vreg))
251 return alloc_ireg_ref (cfg);
252 else if (vreg_is_mp (cfg, vreg))
253 return alloc_ireg_mp (cfg);
254 else
255 return alloc_ireg (cfg);
258 guint
259 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
261 if (type->byref)
262 return OP_MOVE;
264 handle_enum:
265 switch (type->type) {
266 case MONO_TYPE_I1:
267 case MONO_TYPE_U1:
268 case MONO_TYPE_BOOLEAN:
269 return OP_MOVE;
270 case MONO_TYPE_I2:
271 case MONO_TYPE_U2:
272 case MONO_TYPE_CHAR:
273 return OP_MOVE;
274 case MONO_TYPE_I4:
275 case MONO_TYPE_U4:
276 return OP_MOVE;
277 case MONO_TYPE_I:
278 case MONO_TYPE_U:
279 case MONO_TYPE_PTR:
280 case MONO_TYPE_FNPTR:
281 return OP_MOVE;
282 case MONO_TYPE_CLASS:
283 case MONO_TYPE_STRING:
284 case MONO_TYPE_OBJECT:
285 case MONO_TYPE_SZARRAY:
286 case MONO_TYPE_ARRAY:
287 return OP_MOVE;
288 case MONO_TYPE_I8:
289 case MONO_TYPE_U8:
290 #if SIZEOF_REGISTER == 8
291 return OP_MOVE;
292 #else
293 return OP_LMOVE;
294 #endif
295 case MONO_TYPE_R4:
296 return OP_FMOVE;
297 case MONO_TYPE_R8:
298 return OP_FMOVE;
299 case MONO_TYPE_VALUETYPE:
300 if (type->data.klass->enumtype) {
301 type = mono_class_enum_basetype (type->data.klass);
302 goto handle_enum;
304 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
305 return OP_XMOVE;
306 return OP_VMOVE;
307 case MONO_TYPE_TYPEDBYREF:
308 return OP_VMOVE;
309 case MONO_TYPE_GENERICINST:
310 type = &type->data.generic_class->container_class->byval_arg;
311 goto handle_enum;
312 case MONO_TYPE_VAR:
313 case MONO_TYPE_MVAR:
314 g_assert (cfg->generic_sharing_context);
315 if (mini_type_var_is_vt (cfg, type))
316 return OP_VMOVE;
317 else
318 return OP_MOVE;
319 default:
320 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
322 return -1;
325 void
326 mono_print_bb (MonoBasicBlock *bb, const char *msg)
328 int i;
329 MonoInst *tree;
331 printf ("\n%s %d: [IN: ", msg, bb->block_num);
332 for (i = 0; i < bb->in_count; ++i)
333 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
334 printf (", OUT: ");
335 for (i = 0; i < bb->out_count; ++i)
336 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
337 printf (" ]\n");
338 for (tree = bb->code; tree; tree = tree->next)
339 mono_print_ins_index (-1, tree);
342 void
343 mono_create_helper_signatures (void)
345 helper_sig_domain_get = mono_create_icall_signature ("ptr");
346 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
347 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
348 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
349 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
350 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
351 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
/*
 * Can't put this at the beginning, since other files reference stuff from this
 * file.
 */
358 #ifndef DISABLE_JIT
/*
 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 */
/*
 * UNVERIFIED:
 * Handle IL that fails verification. Under gsharedvt, fall back to compiling
 * a concrete instantiation (some instantiations verify, some do not);
 * otherwise either break into the debugger (break_on_unverified) or jump to
 * the unverified label. NOTE: the '}' closing the gsharedvt if-block was
 * dropped by the extraction; restored.
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2) \
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	if (mini_get_debug_options ()->break_on_unverified) \
		G_BREAKPOINT (); \
	else \
		goto unverified; \
} while (0)
377 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
379 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/*
 * GET_BBLOCK:
 * Look up (or lazily create and register) the basic block starting at IL
 * offset IP, via the cil_offset_to_bb map. An IP outside [header->code, end)
 * is unverifiable. NOTE: the '}' closing the if-block was dropped by the
 * extraction; restored.
 */
#define GET_BBLOCK(cfg,tblock,ip) do { \
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) { \
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock)); \
			(tblock)->cil_code = (ip); \
			ADD_BBLOCK (cfg, (tblock)); \
		} \
	} while (0)
391 #if defined(TARGET_X86) || defined(TARGET_AMD64)
392 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
393 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
394 (dest)->dreg = alloc_ireg_mp ((cfg)); \
395 (dest)->sreg1 = (sr1); \
396 (dest)->sreg2 = (sr2); \
397 (dest)->inst_imm = (imm); \
398 (dest)->backend.shift_amount = (shift); \
399 MONO_ADD_INS ((cfg)->cbb, (dest)); \
400 } while (0)
401 #endif
#if SIZEOF_REGISTER == 8
/*
 * ADD_WIDEN_OP:
 * On 64-bit targets, sign-extend a 32-bit second operand (STACK_I4) to
 * pointer width when it is combined with a native-int first operand
 * (STACK_PTR), rewiring ins->sreg2 to the widened vreg. No-op on 32-bit.
 * NOTE: the '}' closing the if-block was dropped by the extraction; restored.
 */
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
			MonoInst *widen; \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
		} \
	} while (0)
#else
#define ADD_WIDEN_OP(ins, arg1, arg2)
#endif
417 #define ADD_BINOP(op) do { \
418 MONO_INST_NEW (cfg, ins, (op)); \
419 sp -= 2; \
420 ins->sreg1 = sp [0]->dreg; \
421 ins->sreg2 = sp [1]->dreg; \
422 type_from_op (ins, sp [0], sp [1]); \
423 CHECK_TYPE (ins); \
424 /* Have to insert a widening op */ \
425 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
426 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
427 MONO_ADD_INS ((cfg)->cbb, (ins)); \
428 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
429 } while (0)
431 #define ADD_UNOP(op) do { \
432 MONO_INST_NEW (cfg, ins, (op)); \
433 sp--; \
434 ins->sreg1 = sp [0]->dreg; \
435 type_from_op (ins, sp [0], NULL); \
436 CHECK_TYPE (ins); \
437 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
438 MONO_ADD_INS ((cfg)->cbb, (ins)); \
439 *sp++ = mono_decompose_opcode (cfg, ins); \
440 } while (0)
/*
 * ADD_BINCOND:
 * Emit a two-way conditional branch: pop two operands, emit an OP_COMPARE
 * typed from them, link the true target (resolved from 'target') and the
 * false target (NEXT_BLOCK if supplied, else the block at the next IL
 * offset), spill the remaining eval stack if non-empty, then append the
 * compare and branch to the current bblock. NOTE: the two '}' lines closing
 * the else-block and the stack-spill if-block were dropped by the
 * extraction; restored.
 */
#define ADD_BINCOND(next_block) do { \
		MonoInst *cmp; \
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
		cmp->sreg1 = sp [0]->dreg; \
		cmp->sreg2 = sp [1]->dreg; \
		type_from_op (cmp, sp [0], sp [1]); \
		CHECK_TYPE (cmp); \
		type_from_op (ins, sp [0], sp [1]); \
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
		GET_BBLOCK (cfg, tblock, target); \
		link_bblock (cfg, bblock, tblock); \
		ins->inst_true_bb = tblock; \
		if ((next_block)) { \
			link_bblock (cfg, bblock, (next_block)); \
			ins->inst_false_bb = (next_block); \
			start_new_bblock = 1; \
		} else { \
			GET_BBLOCK (cfg, tblock, ip); \
			link_bblock (cfg, bblock, tblock); \
			ins->inst_false_bb = tblock; \
			start_new_bblock = 2; \
		} \
		if (sp != stack_start) { \
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins); \
	} while (0)
473 /* *
474 * link_bblock: Links two basic blocks
476 * links two basic blocks in the control flow graph, the 'from'
477 * argument is the starting block and the 'to' argument is the block
478 * the control flow ends to after 'from'.
480 static void
481 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
483 MonoBasicBlock **newa;
484 int i, found;
486 #if 0
487 if (from->cil_code) {
488 if (to->cil_code)
489 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
490 else
491 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
492 } else {
493 if (to->cil_code)
494 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
495 else
496 printf ("edge from entry to exit\n");
498 #endif
500 found = FALSE;
501 for (i = 0; i < from->out_count; ++i) {
502 if (to == from->out_bb [i]) {
503 found = TRUE;
504 break;
507 if (!found) {
508 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
509 for (i = 0; i < from->out_count; ++i) {
510 newa [i] = from->out_bb [i];
512 newa [i] = to;
513 from->out_count++;
514 from->out_bb = newa;
517 found = FALSE;
518 for (i = 0; i < to->in_count; ++i) {
519 if (from == to->in_bb [i]) {
520 found = TRUE;
521 break;
524 if (!found) {
525 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
526 for (i = 0; i < to->in_count; ++i) {
527 newa [i] = to->in_bb [i];
529 newa [i] = from;
530 to->in_count++;
531 to->in_bb = newa;
535 void
536 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
538 link_bblock (cfg, from, to);
542 * mono_find_block_region:
544 * We mark each basic block with a region ID. We use that to avoid BB
545 * optimizations when blocks are in different regions.
547 * Returns:
548 * A region token that encodes where this region is, and information
549 * about the clause owner for this block.
551 * The region encodes the try/catch/filter clause that owns this block
552 * as well as the type. -1 is a special value that represents a block
553 * that is in none of try/catch/filter.
555 static int
556 mono_find_block_region (MonoCompile *cfg, int offset)
558 MonoMethodHeader *header = cfg->header;
559 MonoExceptionClause *clause;
560 int i;
562 for (i = 0; i < header->num_clauses; ++i) {
563 clause = &header->clauses [i];
564 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
565 (offset < (clause->handler_offset)))
566 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
568 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
569 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
570 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
571 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
572 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
573 else
574 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
577 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
578 return ((i + 1) << 8) | clause->flags;
581 return -1;
584 static GList*
585 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
587 MonoMethodHeader *header = cfg->header;
588 MonoExceptionClause *clause;
589 int i;
590 GList *res = NULL;
592 for (i = 0; i < header->num_clauses; ++i) {
593 clause = &header->clauses [i];
594 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
595 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
596 if (clause->flags == type)
597 res = g_list_append (res, clause);
600 return res;
603 static void
604 mono_create_spvar_for_region (MonoCompile *cfg, int region)
606 MonoInst *var;
608 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
609 if (var)
610 return;
612 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
613 /* prevent it from being register allocated */
614 var->flags |= MONO_INST_INDIRECT;
616 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
619 MonoInst *
620 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
622 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
625 static MonoInst*
626 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
628 MonoInst *var;
630 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
631 if (var)
632 return var;
634 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
635 /* prevent it from being register allocated */
636 var->flags |= MONO_INST_INDIRECT;
638 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
640 return var;
644 * Returns the type used in the eval stack when @type is loaded.
645 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
647 void
648 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
650 MonoClass *klass;
652 inst->klass = klass = mono_class_from_mono_type (type);
653 if (type->byref) {
654 inst->type = STACK_MP;
655 return;
658 handle_enum:
659 switch (type->type) {
660 case MONO_TYPE_VOID:
661 inst->type = STACK_INV;
662 return;
663 case MONO_TYPE_I1:
664 case MONO_TYPE_U1:
665 case MONO_TYPE_BOOLEAN:
666 case MONO_TYPE_I2:
667 case MONO_TYPE_U2:
668 case MONO_TYPE_CHAR:
669 case MONO_TYPE_I4:
670 case MONO_TYPE_U4:
671 inst->type = STACK_I4;
672 return;
673 case MONO_TYPE_I:
674 case MONO_TYPE_U:
675 case MONO_TYPE_PTR:
676 case MONO_TYPE_FNPTR:
677 inst->type = STACK_PTR;
678 return;
679 case MONO_TYPE_CLASS:
680 case MONO_TYPE_STRING:
681 case MONO_TYPE_OBJECT:
682 case MONO_TYPE_SZARRAY:
683 case MONO_TYPE_ARRAY:
684 inst->type = STACK_OBJ;
685 return;
686 case MONO_TYPE_I8:
687 case MONO_TYPE_U8:
688 inst->type = STACK_I8;
689 return;
690 case MONO_TYPE_R4:
691 case MONO_TYPE_R8:
692 inst->type = STACK_R8;
693 return;
694 case MONO_TYPE_VALUETYPE:
695 if (type->data.klass->enumtype) {
696 type = mono_class_enum_basetype (type->data.klass);
697 goto handle_enum;
698 } else {
699 inst->klass = klass;
700 inst->type = STACK_VTYPE;
701 return;
703 case MONO_TYPE_TYPEDBYREF:
704 inst->klass = mono_defaults.typed_reference_class;
705 inst->type = STACK_VTYPE;
706 return;
707 case MONO_TYPE_GENERICINST:
708 type = &type->data.generic_class->container_class->byval_arg;
709 goto handle_enum;
710 case MONO_TYPE_VAR:
711 case MONO_TYPE_MVAR:
712 g_assert (cfg->generic_sharing_context);
713 if (mini_is_gsharedvt_type (cfg, type)) {
714 g_assert (cfg->gsharedvt);
715 inst->type = STACK_VTYPE;
716 } else {
717 inst->type = STACK_OBJ;
719 return;
720 default:
721 g_error ("unknown type 0x%02x in eval stack type", type->type);
726 * The following tables are used to quickly validate the IL code in type_from_op ().
728 static const char
729 bin_num_table [STACK_MAX] [STACK_MAX] = {
730 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
735 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
740 static const char
741 neg_table [] = {
742 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
745 /* reduce the size of this table */
746 static const char
747 bin_int_table [STACK_MAX] [STACK_MAX] = {
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
758 static const char
759 bin_comp_table [STACK_MAX] [STACK_MAX] = {
760 /* Inv i L p F & O vt */
761 {0},
762 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
763 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
764 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
765 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
766 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
767 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
768 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
771 /* reduce the size of this table */
772 static const char
773 shift_table [STACK_MAX] [STACK_MAX] = {
774 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
785 * Tables to map from the non-specific opcode to the matching
786 * type-specific opcode.
788 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
789 static const guint16
790 binops_op_map [STACK_MAX] = {
791 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
794 /* handles from CEE_NEG to CEE_CONV_U8 */
795 static const guint16
796 unops_op_map [STACK_MAX] = {
797 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
800 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
801 static const guint16
802 ovfops_op_map [STACK_MAX] = {
803 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
806 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
807 static const guint16
808 ovf2ops_op_map [STACK_MAX] = {
809 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
812 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
813 static const guint16
814 ovf3ops_op_map [STACK_MAX] = {
815 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
818 /* handles from CEE_BEQ to CEE_BLT_UN */
819 static const guint16
820 beqops_op_map [STACK_MAX] = {
821 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
824 /* handles from CEE_CEQ to CEE_CLT_UN */
825 static const guint16
826 ceqops_op_map [STACK_MAX] = {
827 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
831 * Sets ins->type (the type on the eval stack) according to the
832 * type of the opcode and the arguments to it.
833 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
835 * FIXME: this function sets ins->type unconditionally in some cases, but
836 * it should set it to invalid for some types (a conv.x on an object)
838 static void
839 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
841 switch (ins->opcode) {
842 /* binops */
843 case CEE_ADD:
844 case CEE_SUB:
845 case CEE_MUL:
846 case CEE_DIV:
847 case CEE_REM:
848 /* FIXME: check unverifiable args for STACK_MP */
849 ins->type = bin_num_table [src1->type] [src2->type];
850 ins->opcode += binops_op_map [ins->type];
851 break;
852 case CEE_DIV_UN:
853 case CEE_REM_UN:
854 case CEE_AND:
855 case CEE_OR:
856 case CEE_XOR:
857 ins->type = bin_int_table [src1->type] [src2->type];
858 ins->opcode += binops_op_map [ins->type];
859 break;
860 case CEE_SHL:
861 case CEE_SHR:
862 case CEE_SHR_UN:
863 ins->type = shift_table [src1->type] [src2->type];
864 ins->opcode += binops_op_map [ins->type];
865 break;
866 case OP_COMPARE:
867 case OP_LCOMPARE:
868 case OP_ICOMPARE:
869 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
870 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
871 ins->opcode = OP_LCOMPARE;
872 else if (src1->type == STACK_R8)
873 ins->opcode = OP_FCOMPARE;
874 else
875 ins->opcode = OP_ICOMPARE;
876 break;
877 case OP_ICOMPARE_IMM:
878 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
879 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
880 ins->opcode = OP_LCOMPARE_IMM;
881 break;
882 case CEE_BEQ:
883 case CEE_BGE:
884 case CEE_BGT:
885 case CEE_BLE:
886 case CEE_BLT:
887 case CEE_BNE_UN:
888 case CEE_BGE_UN:
889 case CEE_BGT_UN:
890 case CEE_BLE_UN:
891 case CEE_BLT_UN:
892 ins->opcode += beqops_op_map [src1->type];
893 break;
894 case OP_CEQ:
895 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
896 ins->opcode += ceqops_op_map [src1->type];
897 break;
898 case OP_CGT:
899 case OP_CGT_UN:
900 case OP_CLT:
901 case OP_CLT_UN:
902 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
903 ins->opcode += ceqops_op_map [src1->type];
904 break;
905 /* unops */
906 case CEE_NEG:
907 ins->type = neg_table [src1->type];
908 ins->opcode += unops_op_map [ins->type];
909 break;
910 case CEE_NOT:
911 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
912 ins->type = src1->type;
913 else
914 ins->type = STACK_INV;
915 ins->opcode += unops_op_map [ins->type];
916 break;
917 case CEE_CONV_I1:
918 case CEE_CONV_I2:
919 case CEE_CONV_I4:
920 case CEE_CONV_U4:
921 ins->type = STACK_I4;
922 ins->opcode += unops_op_map [src1->type];
923 break;
924 case CEE_CONV_R_UN:
925 ins->type = STACK_R8;
926 switch (src1->type) {
927 case STACK_I4:
928 case STACK_PTR:
929 ins->opcode = OP_ICONV_TO_R_UN;
930 break;
931 case STACK_I8:
932 ins->opcode = OP_LCONV_TO_R_UN;
933 break;
935 break;
936 case CEE_CONV_OVF_I1:
937 case CEE_CONV_OVF_U1:
938 case CEE_CONV_OVF_I2:
939 case CEE_CONV_OVF_U2:
940 case CEE_CONV_OVF_I4:
941 case CEE_CONV_OVF_U4:
942 ins->type = STACK_I4;
943 ins->opcode += ovf3ops_op_map [src1->type];
944 break;
945 case CEE_CONV_OVF_I_UN:
946 case CEE_CONV_OVF_U_UN:
947 ins->type = STACK_PTR;
948 ins->opcode += ovf2ops_op_map [src1->type];
949 break;
950 case CEE_CONV_OVF_I1_UN:
951 case CEE_CONV_OVF_I2_UN:
952 case CEE_CONV_OVF_I4_UN:
953 case CEE_CONV_OVF_U1_UN:
954 case CEE_CONV_OVF_U2_UN:
955 case CEE_CONV_OVF_U4_UN:
956 ins->type = STACK_I4;
957 ins->opcode += ovf2ops_op_map [src1->type];
958 break;
959 case CEE_CONV_U:
960 ins->type = STACK_PTR;
961 switch (src1->type) {
962 case STACK_I4:
963 ins->opcode = OP_ICONV_TO_U;
964 break;
965 case STACK_PTR:
966 case STACK_MP:
967 #if SIZEOF_VOID_P == 8
968 ins->opcode = OP_LCONV_TO_U;
969 #else
970 ins->opcode = OP_MOVE;
971 #endif
972 break;
973 case STACK_I8:
974 ins->opcode = OP_LCONV_TO_U;
975 break;
976 case STACK_R8:
977 ins->opcode = OP_FCONV_TO_U;
978 break;
980 break;
981 case CEE_CONV_I8:
982 case CEE_CONV_U8:
983 ins->type = STACK_I8;
984 ins->opcode += unops_op_map [src1->type];
985 break;
986 case CEE_CONV_OVF_I8:
987 case CEE_CONV_OVF_U8:
988 ins->type = STACK_I8;
989 ins->opcode += ovf3ops_op_map [src1->type];
990 break;
991 case CEE_CONV_OVF_U8_UN:
992 case CEE_CONV_OVF_I8_UN:
993 ins->type = STACK_I8;
994 ins->opcode += ovf2ops_op_map [src1->type];
995 break;
996 case CEE_CONV_R4:
997 case CEE_CONV_R8:
998 ins->type = STACK_R8;
999 ins->opcode += unops_op_map [src1->type];
1000 break;
1001 case OP_CKFINITE:
1002 ins->type = STACK_R8;
1003 break;
1004 case CEE_CONV_U2:
1005 case CEE_CONV_U1:
1006 ins->type = STACK_I4;
1007 ins->opcode += ovfops_op_map [src1->type];
1008 break;
1009 case CEE_CONV_I:
1010 case CEE_CONV_OVF_I:
1011 case CEE_CONV_OVF_U:
1012 ins->type = STACK_PTR;
1013 ins->opcode += ovfops_op_map [src1->type];
1014 break;
1015 case CEE_ADD_OVF:
1016 case CEE_ADD_OVF_UN:
1017 case CEE_MUL_OVF:
1018 case CEE_MUL_OVF_UN:
1019 case CEE_SUB_OVF:
1020 case CEE_SUB_OVF_UN:
1021 ins->type = bin_num_table [src1->type] [src2->type];
1022 ins->opcode += ovfops_op_map [src1->type];
1023 if (ins->type == STACK_R8)
1024 ins->type = STACK_INV;
1025 break;
1026 case OP_LOAD_MEMBASE:
1027 ins->type = STACK_PTR;
1028 break;
1029 case OP_LOADI1_MEMBASE:
1030 case OP_LOADU1_MEMBASE:
1031 case OP_LOADI2_MEMBASE:
1032 case OP_LOADU2_MEMBASE:
1033 case OP_LOADI4_MEMBASE:
1034 case OP_LOADU4_MEMBASE:
1035 ins->type = STACK_PTR;
1036 break;
1037 case OP_LOADI8_MEMBASE:
1038 ins->type = STACK_I8;
1039 break;
1040 case OP_LOADR4_MEMBASE:
1041 case OP_LOADR8_MEMBASE:
1042 ins->type = STACK_R8;
1043 break;
1044 default:
1045 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1046 break;
1049 if (ins->type == STACK_MP)
1050 ins->klass = mono_defaults.object_class;
/* Evaluation-stack type pushed by each ldind variant; presumably indexed
 * by (ldind opcode - CEE_LDIND_I1) -- TODO confirm against callers. */
static const char
ldind_type [] = {
	STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
};
#if 0
/*
 * NOTE: everything up to the matching #endif is disabled (dead) code kept
 * for reference: an unfinished check that the stack values match a call
 * signature.  It is never compiled.
 */
static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1122 * When we need a pointer to the current domain many times in a method, we
1123 * call mono_domain_get() once and we store the result in a local variable.
1124 * This function returns the variable that represents the MonoDomain*.
1126 inline static MonoInst *
1127 mono_get_domainvar (MonoCompile *cfg)
1129 if (!cfg->domainvar)
1130 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1131 return cfg->domainvar;
1135 * The got_var contains the address of the Global Offset Table when AOT
1136 * compiling.
1138 MonoInst *
1139 mono_get_got_var (MonoCompile *cfg)
1141 #ifdef MONO_ARCH_NEED_GOT_VAR
1142 if (!cfg->compile_aot)
1143 return NULL;
1144 if (!cfg->got_var) {
1145 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1147 return cfg->got_var;
1148 #else
1149 return NULL;
1150 #endif
1153 static MonoInst *
1154 mono_get_vtable_var (MonoCompile *cfg)
1156 g_assert (cfg->generic_sharing_context);
1158 if (!cfg->rgctx_var) {
1159 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1160 /* force the var to be stack allocated */
1161 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1164 return cfg->rgctx_var;
1167 static MonoType*
1168 type_from_stack_type (MonoInst *ins) {
1169 switch (ins->type) {
1170 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1171 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1172 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1173 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1174 case STACK_MP:
1175 return &ins->klass->this_arg;
1176 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1177 case STACK_VTYPE: return &ins->klass->byval_arg;
1178 default:
1179 g_error ("stack type %d to monotype not handled\n", ins->type);
1181 return NULL;
/*
 * type_to_stack_type:
 *
 *   Return the STACK_* evaluation stack type used to represent values of
 * type T.  Enums and custom modifiers are stripped first via
 * mono_type_get_underlying_type ().
 */
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
{
	t = mono_type_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return STACK_I4;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return STACK_OBJ;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return STACK_I8;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return STACK_R8;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		return STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		/* generic value types live on the stack as vtypes, others as refs */
		if (mono_type_generic_inst_is_valuetype (t))
			return STACK_VTYPE;
		else
			return STACK_OBJ;
		break;
	default:
		g_assert_not_reached ();
	}

	return -1;
}
/*
 * array_access_to_klass:
 *
 *   Return the element class accessed by the given CEE_LDELEM_* or
 * CEE_STELEM_* opcode.
 */
static MonoClass*
array_access_to_klass (int opcode)
{
	switch (opcode) {
	case CEE_LDELEM_U1:
		return mono_defaults.byte_class;
	case CEE_LDELEM_U2:
		return mono_defaults.uint16_class;
	case CEE_LDELEM_I:
	case CEE_STELEM_I:
		return mono_defaults.int_class;
	case CEE_LDELEM_I1:
	case CEE_STELEM_I1:
		return mono_defaults.sbyte_class;
	case CEE_LDELEM_I2:
	case CEE_STELEM_I2:
		return mono_defaults.int16_class;
	case CEE_LDELEM_I4:
	case CEE_STELEM_I4:
		return mono_defaults.int32_class;
	case CEE_LDELEM_U4:
		return mono_defaults.uint32_class;
	case CEE_LDELEM_I8:
	case CEE_STELEM_I8:
		return mono_defaults.int64_class;
	case CEE_LDELEM_R4:
	case CEE_STELEM_R4:
		return mono_defaults.single_class;
	case CEE_LDELEM_R8:
	case CEE_STELEM_R8:
		return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * We try to share variables when possible
 *
 * Returns a local variable for the value INS occupying stack slot SLOT at
 * a basic-block boundary, reusing a previously created var for the same
 * (slot, stack type) pair when one exists.
 */
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;

	/* inlining can result in deeper stacks */
	if (slot >= cfg->header->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	/* one cache entry per (stack type, slot) pair */
	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		/* not cacheable (e.g. vtypes): always create a fresh var */
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
	}
	return res;
}
/*
 * mono_save_token_info:
 *
 *   Under AOT compilation, record the image+token pair which produced KEY
 * so the AOT compiler can later map the runtime object back to metadata.
 */
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
	}
}
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		/* Prefer sharing the in_stack of an already-processed successor */
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpose, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate our out_stack to successors that have no in_stack yet */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				/* stack height mismatch at a join point */
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
 */
static void
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
{
	if (cfg->compile_aot) {
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);

		/* NOTE(review): MONO_PATCH_INFO_ADJUSTED_IID presumably resolves to
		 * the (negative) byte offset of the slot relative to the vtable, so
		 * a plain add + load reaches the entry -- confirm in the AOT compiler. */
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
	}
	else {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
	}
}
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit code which loads into intf_bit_reg a nonzero value iff the bit for
 * klass->interface_id is set in the interface bitmap located at
 * base_reg + offset.
 */
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	/* Compressed bitmaps are decoded by an icall instead of inline code */
	MonoInst *args [2];
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	args [0] = ins;
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
	else
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		/* interface_id is unknown at JIT time: compute byte index (iid >> 3)
		 * and bit mask (1 << (iid & 7)) in registers */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* interface_id known: byte index and mask fold into immediates */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
#endif
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
}
1529 * Emit code which checks whenever the interface id of @klass is smaller than
1530 * than the value given by max_iid_reg.
1532 static void
1533 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1534 MonoBasicBlock *false_target)
1536 if (cfg->compile_aot) {
1537 int iid_reg = alloc_preg (cfg);
1538 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1539 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1541 else
1542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1543 if (false_target)
1544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1545 else
1546 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same as above, but obtains max_iid from a vtable */
static void
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
								MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/* Same as above, but obtains max_iid from a klass */
static void
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
							   MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subtype test: branch to true_target when the class
 * in klass_reg has KLASS among its supertypes, to false_target when its
 * inheritance depth is too small for that to be possible.  When klass_ins
 * is non-NULL it supplies the class pointer at runtime (shared generics);
 * otherwise KLASS is baked in as a constant.
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	mono_class_setup_supertypes (klass);

	/* Only check idepth when it can exceed the statically sized supertable */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	/* Load supertypes [klass->idepth - 1] and compare it against KLASS */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
/* Non-shared-generics variant of mini_emit_isninst_cast_inst () (constant KLASS) */
static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
}
1605 static void
1606 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1608 int intf_reg = alloc_preg (cfg);
1610 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1611 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1613 if (true_target)
1614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1615 else
1616 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1620 * Variant of the above that takes a register to the class, not the vtable.
1622 static void
1623 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1625 int intf_bit_reg = alloc_preg (cfg);
1627 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1628 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1630 if (true_target)
1631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1632 else
1633 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit code which throws an InvalidCastException unless the class in
 * klass_reg equals KLASS.  klass_inst, when non-NULL, supplies the class
 * pointer at runtime (shared generics); under AOT a patched constant is
 * used instead of an immediate.
 */
static inline void
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
{
	if (klass_inst) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}
/* Exact-class check with a constant KLASS (throws InvalidCastException on mismatch) */
static inline void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
	mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
}
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the class in klass_reg against the constant KLASS and branch
 * to TARGET using BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN).
 */
static inline void
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
{
	if (cfg->compile_aot) {
		/* class pointer is not an immediate under AOT */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
}
/* Forward declaration: mutually recursive with mini_emit_castclass_inst () below */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in klass_reg against KLASS,
 * throwing InvalidCastException on failure.  Arrays check rank and then
 * recursively check the element (cast) class; non-arrays do a supertype
 * table lookup.  klass_inst, when non-NULL, supplies the class pointer at
 * runtime (shared generics).
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		/* rank must match exactly */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		/* Dispatch on the target's element (cast) class */
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		mono_class_setup_supertypes (klass);

		/* idepth only needs a runtime check beyond the static supertable size */
		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}
/* Non-shared-generics variant of mini_emit_castclass_inst () (constant KLASS) */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
	mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
}
/*
 * mini_emit_memset:
 *
 *   Emit IR which sets SIZE bytes starting at destreg+offset to VAL.
 * Only val == 0 is supported (asserted).  ALIGN (0 means 4) determines
 * whether wide stores may be used; poorly aligned destinations fall back
 * to byte stores.
 */
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	g_assert (val == 0);

	if (align == 0)
		align = 4;

	/* Small, sufficiently aligned blocks: a single store-immediate */
	if ((size <= 4) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			/* NOTE(review): unreachable while the guard above is "size <= 4";
			 * confirm whether the guard was meant to be "size <= SIZEOF_REGISTER". */
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
			offset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* Bring offset to an 8-byte boundary, then use 8-byte stores */
		if (offset % 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			offset += 4;
			size -= 4;
		}
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}
#endif

	/* Remaining tail: 4-, then 2-, then 1-byte stores */
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
/*
 * mini_emit_memcpy:
 *
 *   Emit IR which copies SIZE bytes from srcreg+soffset to destreg+doffset.
 * ALIGN (0 means 4) determines whether wide loads/stores may be used; an
 * alignment below 4 forces a byte-by-byte copy.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* copy in register-sized (8-byte) chunks first */
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* Remaining tail: 4-, then 2-, then 1-byte copies */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
1874 static int
1875 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1877 if (type->byref)
1878 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1880 handle_enum:
1881 type = mini_get_basic_type_from_generic (gsctx, type);
1882 switch (type->type) {
1883 case MONO_TYPE_VOID:
1884 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1885 case MONO_TYPE_I1:
1886 case MONO_TYPE_U1:
1887 case MONO_TYPE_BOOLEAN:
1888 case MONO_TYPE_I2:
1889 case MONO_TYPE_U2:
1890 case MONO_TYPE_CHAR:
1891 case MONO_TYPE_I4:
1892 case MONO_TYPE_U4:
1893 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1894 case MONO_TYPE_I:
1895 case MONO_TYPE_U:
1896 case MONO_TYPE_PTR:
1897 case MONO_TYPE_FNPTR:
1898 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1899 case MONO_TYPE_CLASS:
1900 case MONO_TYPE_STRING:
1901 case MONO_TYPE_OBJECT:
1902 case MONO_TYPE_SZARRAY:
1903 case MONO_TYPE_ARRAY:
1904 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1905 case MONO_TYPE_I8:
1906 case MONO_TYPE_U8:
1907 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1908 case MONO_TYPE_R4:
1909 case MONO_TYPE_R8:
1910 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1911 case MONO_TYPE_VALUETYPE:
1912 if (type->data.klass->enumtype) {
1913 type = mono_class_enum_basetype (type->data.klass);
1914 goto handle_enum;
1915 } else
1916 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1917 case MONO_TYPE_TYPEDBYREF:
1918 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1919 case MONO_TYPE_GENERICINST:
1920 type = &type->data.generic_class->container_class->byval_arg;
1921 goto handle_enum;
1922 default:
1923 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1925 return -1;
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		/*
		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
		 * in native int. (#688008).
		 */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* value types must match exactly */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* type variables only occur under generic sharing */
		g_assert (cfg->generic_sharing_context);
		if (mini_type_var_is_vt (cfg, simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
		}
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
2049 * Prepare arguments for passing to a function call.
2050 * Return a non-zero value if the arguments can't be passed to the given
2051 * signature.
2052 * The type checks are not yet complete and some conversions may need
2053 * casts on 32 or 64 bit architectures.
2055 * FIXME: implement this using target_type_is_incompatible ()
2057 static int
2058 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2060 MonoType *simple_type;
2061 int i;
2063 if (sig->hasthis) {
2064 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2065 return 1;
2066 args++;
2068 for (i = 0; i < sig->param_count; ++i) {
2069 if (sig->params [i]->byref) {
2070 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2071 return 1;
2072 continue;
2074 simple_type = sig->params [i];
2075 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2076 handle_enum:
2077 switch (simple_type->type) {
2078 case MONO_TYPE_VOID:
2079 return 1;
2080 continue;
2081 case MONO_TYPE_I1:
2082 case MONO_TYPE_U1:
2083 case MONO_TYPE_BOOLEAN:
2084 case MONO_TYPE_I2:
2085 case MONO_TYPE_U2:
2086 case MONO_TYPE_CHAR:
2087 case MONO_TYPE_I4:
2088 case MONO_TYPE_U4:
2089 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2090 return 1;
2091 continue;
2092 case MONO_TYPE_I:
2093 case MONO_TYPE_U:
2094 case MONO_TYPE_PTR:
2095 case MONO_TYPE_FNPTR:
2096 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2097 return 1;
2098 continue;
2099 case MONO_TYPE_CLASS:
2100 case MONO_TYPE_STRING:
2101 case MONO_TYPE_OBJECT:
2102 case MONO_TYPE_SZARRAY:
2103 case MONO_TYPE_ARRAY:
2104 if (args [i]->type != STACK_OBJ)
2105 return 1;
2106 continue;
2107 case MONO_TYPE_I8:
2108 case MONO_TYPE_U8:
2109 if (args [i]->type != STACK_I8)
2110 return 1;
2111 continue;
2112 case MONO_TYPE_R4:
2113 case MONO_TYPE_R8:
2114 if (args [i]->type != STACK_R8)
2115 return 1;
2116 continue;
2117 case MONO_TYPE_VALUETYPE:
2118 if (simple_type->data.klass->enumtype) {
2119 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2120 goto handle_enum;
2122 if (args [i]->type != STACK_VTYPE)
2123 return 1;
2124 continue;
2125 case MONO_TYPE_TYPEDBYREF:
2126 if (args [i]->type != STACK_VTYPE)
2127 return 1;
2128 continue;
2129 case MONO_TYPE_GENERICINST:
2130 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2131 goto handle_enum;
2133 default:
2134 g_error ("unknown type 0x%02x in check_call_signature",
2135 simple_type->type);
2138 return 0;
2141 static int
2142 callvirt_to_call (int opcode)
2144 switch (opcode) {
2145 case OP_CALLVIRT:
2146 return OP_CALL;
2147 case OP_VOIDCALLVIRT:
2148 return OP_VOIDCALL;
2149 case OP_FCALLVIRT:
2150 return OP_FCALL;
2151 case OP_VCALLVIRT:
2152 return OP_VCALL;
2153 case OP_LCALLVIRT:
2154 return OP_LCALL;
2155 default:
2156 g_assert_not_reached ();
2159 return -1;
2162 static int
2163 callvirt_to_call_membase (int opcode)
2165 switch (opcode) {
2166 case OP_CALLVIRT:
2167 return OP_CALL_MEMBASE;
2168 case OP_VOIDCALLVIRT:
2169 return OP_VOIDCALL_MEMBASE;
2170 case OP_FCALLVIRT:
2171 return OP_FCALL_MEMBASE;
2172 case OP_LCALLVIRT:
2173 return OP_LCALL_MEMBASE;
2174 case OP_VCALLVIRT:
2175 return OP_VCALL_MEMBASE;
2176 default:
2177 g_assert_not_reached ();
2180 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Load the IMT/interface argument for CALL. If IMT_ARG is non-NULL its
 * value is used, otherwise METHOD is materialized as a constant (AOT-aware).
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
{
	int method_reg;

	if (COMPILE_LLVM (cfg)) {
		method_reg = alloc_preg (cfg);

		if (imt_arg) {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
		} else {
			MonoInst *ins;
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

#ifdef ENABLE_LLVM
		call->imt_arg_reg = method_reg;
#endif
#ifdef MONO_ARCH_IMT_REG
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
		/* Need this to keep the IMT arg alive */
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#endif
		return;
	}

#ifdef MONO_ARCH_IMT_REG
	method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* No dedicated IMT register: let the backend decide how to pass it. */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2238 static MonoJumpInfo *
2239 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2241 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2243 ji->ip.i = ip;
2244 ji->type = type;
2245 ji->data.target = target;
2247 return ji;
2250 inline static MonoCallInst *
2251 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2252 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2254 MonoCallInst *call;
2255 #ifdef MONO_ARCH_SOFT_FLOAT
2256 int i;
2257 #endif
2259 if (tail)
2260 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2261 else
2262 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2264 call->args = args;
2265 call->signature = sig;
2266 call->rgctx_reg = rgctx;
2268 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2270 if (tail) {
2271 if (mini_type_is_vtype (cfg, sig->ret)) {
2272 call->vret_var = cfg->vret_addr;
2273 //g_assert_not_reached ();
2275 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2276 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2277 MonoInst *loada;
2279 temp->backend.is_pinvoke = sig->pinvoke;
2282 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2283 * address of return value to increase optimization opportunities.
2284 * Before vtype decomposition, the dreg of the call ins itself represents the
2285 * fact the call modifies the return value. After decomposition, the call will
2286 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2287 * will be transformed into an LDADDR.
2289 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2290 loada->dreg = alloc_preg (cfg);
2291 loada->inst_p0 = temp;
2292 /* We reference the call too since call->dreg could change during optimization */
2293 loada->inst_p1 = call;
2294 MONO_ADD_INS (cfg->cbb, loada);
2296 call->inst.dreg = temp->dreg;
2298 call->vret_var = loada;
2299 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2300 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2302 #ifdef MONO_ARCH_SOFT_FLOAT
2303 if (COMPILE_SOFT_FLOAT (cfg)) {
2305 * If the call has a float argument, we would need to do an r8->r4 conversion using
2306 * an icall, but that cannot be done during the call sequence since it would clobber
2307 * the call registers + the stack. So we do it before emitting the call.
2309 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2310 MonoType *t;
2311 MonoInst *in = call->args [i];
2313 if (i >= sig->hasthis)
2314 t = sig->params [i - sig->hasthis];
2315 else
2316 t = &mono_defaults.int_class->byval_arg;
2317 t = mono_type_get_underlying_type (t);
2319 if (!t->byref && t->type == MONO_TYPE_R4) {
2320 MonoInst *iargs [1];
2321 MonoInst *conv;
2323 iargs [0] = in;
2324 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2326 /* The result will be in an int vreg */
2327 call->args [i] = conv;
2331 #endif
2333 call->need_unbox_trampoline = unbox_trampoline;
2335 #ifdef ENABLE_LLVM
2336 if (COMPILE_LLVM (cfg))
2337 mono_llvm_emit_call (cfg, call);
2338 else
2339 mono_arch_emit_call (cfg, call);
2340 #else
2341 mono_arch_emit_call (cfg, call);
2342 #endif
2344 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2345 cfg->flags |= MONO_CFG_HAS_CALLS;
2347 return call;
2350 static void
2351 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2353 #ifdef MONO_ARCH_RGCTX_REG
2354 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2355 cfg->uses_rgctx_reg = TRUE;
2356 call->rgctx_reg = TRUE;
2357 #ifdef ENABLE_LLVM
2358 call->rgctx_arg_reg = rgctx_reg;
2359 #endif
2360 #else
2361 NOT_IMPLEMENTED;
2362 #endif
2365 inline static MonoInst*
2366 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2368 MonoCallInst *call;
2369 int rgctx_reg = -1;
2371 if (rgctx_arg) {
2372 rgctx_reg = mono_alloc_preg (cfg);
2373 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2376 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2378 call->inst.sreg1 = addr->dreg;
2380 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2382 if (rgctx_arg)
2383 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2385 return (MonoInst*)call;
2388 /* This is like calli, but we pass rgctx/imt arguments as well */
2389 static MonoInst*
2390 emit_gsharedvt_call (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoMethod *method, MonoInst *imt_arg, MonoInst *rgctx_arg)
2392 MonoCallInst *call;
2393 int rgctx_reg = -1;
2395 if (rgctx_arg) {
2396 rgctx_reg = mono_alloc_preg (cfg);
2397 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2400 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2402 call->inst.sreg1 = addr->dreg;
2404 if (imt_arg)
2405 emit_imt_argument (cfg, call, method, imt_arg);
2407 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2409 if (rgctx_arg)
2410 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2412 return (MonoInst*)call;
2415 static MonoInst*
2416 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2417 static MonoInst*
2418 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2420 static MonoInst*
2421 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2422 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2424 gboolean might_be_remote;
2425 gboolean virtual = this != NULL;
2426 gboolean enable_for_aot = TRUE;
2427 int context_used;
2428 MonoCallInst *call;
2429 int rgctx_reg = 0;
2430 gboolean need_unbox_trampoline;
2432 if (rgctx_arg) {
2433 rgctx_reg = mono_alloc_preg (cfg);
2434 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2437 if (method->string_ctor) {
2438 /* Create the real signature */
2439 /* FIXME: Cache these */
2440 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2441 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2443 sig = ctor_sig;
2446 context_used = mono_method_check_context_used (method);
2448 might_be_remote = this && sig->hasthis &&
2449 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2450 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2452 if (might_be_remote && context_used) {
2453 MonoInst *addr;
2455 g_assert (cfg->generic_sharing_context);
2457 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2459 return mono_emit_calli (cfg, sig, args, addr, NULL);
2462 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2464 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2466 if (might_be_remote)
2467 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2468 else
2469 call->method = method;
2470 call->inst.flags |= MONO_INST_HAS_METHOD;
2471 call->inst.inst_left = this;
2473 if (virtual) {
2474 int vtable_reg, slot_reg, this_reg;
2476 this_reg = this->dreg;
2478 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2479 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2480 MonoInst *dummy_use;
2482 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2484 /* Make a call to delegate->invoke_impl */
2485 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2486 call->inst.inst_basereg = this_reg;
2487 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2488 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2490 /* We must emit a dummy use here because the delegate trampoline will
2491 replace the 'this' argument with the delegate target making this activation
2492 no longer a root for the delegate.
2493 This is an issue for delegates that target collectible code such as dynamic
2494 methods of GC'able assemblies.
2496 For a test case look into #667921.
2498 FIXME: a dummy use is not the best way to do it as the local register allocator
2499 will put it on a caller save register and spil it around the call.
2500 Ideally, we would either put it on a callee save register or only do the store part.
2502 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2504 return (MonoInst*)call;
2506 #endif
2508 if ((!cfg->compile_aot || enable_for_aot) &&
2509 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2510 (MONO_METHOD_IS_FINAL (method) &&
2511 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2512 !(method->klass->marshalbyref && context_used)) {
2514 * the method is not virtual, we just need to ensure this is not null
2515 * and then we can call the method directly.
2517 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2519 * The check above ensures method is not gshared, this is needed since
2520 * gshared methods can't have wrappers.
2522 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2525 if (!method->string_ctor)
2526 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2528 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2529 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2531 * the method is virtual, but we can statically dispatch since either
2532 * it's class or the method itself are sealed.
2533 * But first we need to ensure it's not a null reference.
2535 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2537 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2538 } else {
2539 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2541 vtable_reg = alloc_preg (cfg);
2542 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2543 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2544 slot_reg = -1;
2545 #ifdef MONO_ARCH_HAVE_IMT
2546 if (mono_use_imt) {
2547 guint32 imt_slot = mono_method_get_imt_slot (method);
2548 emit_imt_argument (cfg, call, call->method, imt_arg);
2549 slot_reg = vtable_reg;
2550 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2552 #endif
2553 if (slot_reg == -1) {
2554 slot_reg = alloc_preg (cfg);
2555 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2556 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2558 } else {
2559 slot_reg = vtable_reg;
2560 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2561 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2562 #ifdef MONO_ARCH_HAVE_IMT
2563 if (imt_arg) {
2564 g_assert (mono_method_signature (method)->generic_param_count);
2565 emit_imt_argument (cfg, call, call->method, imt_arg);
2567 #endif
2570 call->inst.sreg1 = slot_reg;
2571 call->virtual = TRUE;
2575 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2577 if (rgctx_arg)
2578 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2580 return (MonoInst*)call;
2583 MonoInst*
2584 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2586 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
2589 MonoInst*
2590 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2591 MonoInst **args)
2593 MonoCallInst *call;
2595 g_assert (sig);
2597 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2598 call->fptr = func;
2600 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2602 return (MonoInst*)call;
2605 MonoInst*
2606 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2608 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2610 g_assert (info);
2612 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2616 * mono_emit_abs_call:
2618 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2620 inline static MonoInst*
2621 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2622 MonoMethodSignature *sig, MonoInst **args)
2624 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2625 MonoInst *ins;
2628 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2629 * handle it.
2631 if (cfg->abs_patches == NULL)
2632 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2633 g_hash_table_insert (cfg->abs_patches, ji, ji);
2634 ins = mono_emit_native_call (cfg, ji, sig, args);
2635 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2636 return ins;
2639 static MonoInst*
2640 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2642 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2643 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2644 int widen_op = -1;
2647 * Native code might return non register sized integers
2648 * without initializing the upper bits.
2650 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2651 case OP_LOADI1_MEMBASE:
2652 widen_op = OP_ICONV_TO_I1;
2653 break;
2654 case OP_LOADU1_MEMBASE:
2655 widen_op = OP_ICONV_TO_U1;
2656 break;
2657 case OP_LOADI2_MEMBASE:
2658 widen_op = OP_ICONV_TO_I2;
2659 break;
2660 case OP_LOADU2_MEMBASE:
2661 widen_op = OP_ICONV_TO_U2;
2662 break;
2663 default:
2664 break;
2667 if (widen_op != -1) {
2668 int dreg = alloc_preg (cfg);
2669 MonoInst *widen;
2671 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2672 widen->type = ins->type;
2673 ins = widen;
2678 return ins;
2681 static MonoMethod*
2682 get_memcpy_method (void)
2684 static MonoMethod *memcpy_method = NULL;
2685 if (!memcpy_method) {
2686 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2687 if (!memcpy_method)
2688 g_error ("Old corlib found. Install a new one");
2690 return memcpy_method;
2693 static void
2694 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2696 MonoClassField *field;
2697 gpointer iter = NULL;
2699 while ((field = mono_class_get_fields (klass, &iter))) {
2700 int foffset;
2702 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2703 continue;
2704 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2705 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2706 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2707 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2708 } else {
2709 MonoClass *field_class = mono_class_from_mono_type (field->type);
2710 if (field_class->has_references)
2711 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
2716 static void
2717 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2719 int card_table_shift_bits;
2720 gpointer card_table_mask;
2721 guint8 *card_table;
2722 MonoInst *dummy_use;
2723 int nursery_shift_bits;
2724 size_t nursery_size;
2725 gboolean has_card_table_wb = FALSE;
2727 if (!cfg->gen_write_barriers)
2728 return;
2730 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2732 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2734 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2735 has_card_table_wb = TRUE;
2736 #endif
2738 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2739 MonoInst *wbarrier;
2741 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2742 wbarrier->sreg1 = ptr->dreg;
2743 if (value)
2744 wbarrier->sreg2 = value->dreg;
2745 else
2746 wbarrier->sreg2 = value_reg;
2747 MONO_ADD_INS (cfg->cbb, wbarrier);
2748 } else if (card_table) {
2749 int offset_reg = alloc_preg (cfg);
2750 int card_reg = alloc_preg (cfg);
2751 MonoInst *ins;
2753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2754 if (card_table_mask)
2755 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2757 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2758 * IMM's larger than 32bits.
2760 if (cfg->compile_aot) {
2761 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2762 } else {
2763 MONO_INST_NEW (cfg, ins, OP_PCONST);
2764 ins->inst_p0 = card_table;
2765 ins->dreg = card_reg;
2766 MONO_ADD_INS (cfg->cbb, ins);
2769 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2770 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2771 } else {
2772 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2773 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2776 if (value) {
2777 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2778 } else {
2779 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2780 dummy_use->sreg1 = value_reg;
2781 MONO_ADD_INS (cfg->cbb, dummy_use);
2785 static gboolean
2786 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2788 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2789 unsigned need_wb = 0;
2791 if (align == 0)
2792 align = 4;
2794 /*types with references can't have alignment smaller than sizeof(void*) */
2795 if (align < SIZEOF_VOID_P)
2796 return FALSE;
2798 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2799 if (size > 32 * SIZEOF_VOID_P)
2800 return FALSE;
2802 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2804 /* We don't unroll more than 5 stores to avoid code bloat. */
2805 if (size > 5 * SIZEOF_VOID_P) {
2806 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2807 size += (SIZEOF_VOID_P - 1);
2808 size &= ~(SIZEOF_VOID_P - 1);
2810 EMIT_NEW_ICONST (cfg, iargs [2], size);
2811 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2812 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2813 return TRUE;
2816 destreg = iargs [0]->dreg;
2817 srcreg = iargs [1]->dreg;
2818 offset = 0;
2820 dest_ptr_reg = alloc_preg (cfg);
2821 tmp_reg = alloc_preg (cfg);
2823 /*tmp = dreg*/
2824 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2826 while (size >= SIZEOF_VOID_P) {
2827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2830 if (need_wb & 0x1)
2831 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2833 offset += SIZEOF_VOID_P;
2834 size -= SIZEOF_VOID_P;
2835 need_wb >>= 1;
2837 /*tmp += sizeof (void*)*/
2838 if (size >= SIZEOF_VOID_P) {
2839 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2840 MONO_ADD_INS (cfg->cbb, iargs [0]);
2844 /* Those cannot be references since size < sizeof (void*) */
2845 while (size >= 4) {
2846 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2847 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2848 offset += 4;
2849 size -= 4;
2852 while (size >= 2) {
2853 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2854 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2855 offset += 2;
2856 size -= 2;
2859 while (size >= 1) {
2860 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2862 offset += 1;
2863 size -= 1;
2866 return TRUE;
2870 * Emit code to copy a valuetype of type @klass whose address is stored in
2871 * @src->dreg to memory whose address is stored at @dest->dreg.
2873 void
2874 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2876 MonoInst *iargs [4];
2877 int context_used, n;
2878 guint32 align = 0;
2879 MonoMethod *memcpy_method;
2880 MonoInst *size_ins = NULL;
2882 g_assert (klass);
2884 * This check breaks with spilled vars... need to handle it during verification anyway.
2885 * g_assert (klass && klass == src->klass && klass == dest->klass);
2888 if (mini_is_gsharedvt_klass (cfg, klass)) {
2889 g_assert (!native);
2890 context_used = mono_class_check_context_used (klass);
2891 size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2894 if (native)
2895 n = mono_class_native_size (klass, &align);
2896 else
2897 n = mono_class_value_size (klass, &align);
2899 /* if native is true there should be no references in the struct */
2900 if (cfg->gen_write_barriers && klass->has_references && !native) {
2901 /* Avoid barriers when storing to the stack */
2902 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2903 (dest->opcode == OP_LDADDR))) {
2904 int context_used = 0;
2906 iargs [0] = dest;
2907 iargs [1] = src;
2909 if (cfg->generic_sharing_context)
2910 context_used = mono_class_check_context_used (klass);
2912 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2913 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2914 return;
2915 } else if (context_used) {
2916 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2917 } else {
2918 if (cfg->compile_aot) {
2919 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2920 } else {
2921 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2922 mono_class_compute_gc_descriptor (klass);
2926 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2927 return;
2931 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2932 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2933 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2934 } else {
2935 iargs [0] = dest;
2936 iargs [1] = src;
2937 if (size_ins)
2938 iargs [2] = size_ins;
2939 else
2940 EMIT_NEW_ICONST (cfg, iargs [2], n);
2942 memcpy_method = get_memcpy_method ();
2943 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2947 static MonoMethod*
2948 get_memset_method (void)
2950 static MonoMethod *memset_method = NULL;
2951 if (!memset_method) {
2952 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2953 if (!memset_method)
2954 g_error ("Old corlib found. Install a new one");
2956 return memset_method;
2959 void
2960 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2962 MonoInst *iargs [3];
2963 int n, context_used;
2964 guint32 align;
2965 MonoMethod *memset_method;
2966 MonoInst *size_ins = NULL;
2968 /* FIXME: Optimize this for the case when dest is an LDADDR */
2970 mono_class_init (klass);
2971 if (mini_is_gsharedvt_klass (cfg, klass)) {
2972 context_used = mono_class_check_context_used (klass);
2973 size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2974 n = -1;
2975 } else {
2976 n = mono_class_value_size (klass, &align);
2979 if (!size_ins && n <= sizeof (gpointer) * 5) {
2980 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2982 else {
2983 memset_method = get_memset_method ();
2984 iargs [0] = dest;
2985 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2986 if (size_ins)
2987 iargs [2] = size_ins;
2988 else
2989 EMIT_NEW_ICONST (cfg, iargs [2], n);
2990 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2994 static MonoInst*
2995 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2997 MonoInst *this = NULL;
2999 g_assert (cfg->generic_sharing_context);
3001 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3002 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3003 !method->klass->valuetype)
3004 EMIT_NEW_ARGLOAD (cfg, this, 0);
3006 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3007 MonoInst *mrgctx_loc, *mrgctx_var;
3009 g_assert (!this);
3010 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3012 mrgctx_loc = mono_get_vtable_var (cfg);
3013 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3015 return mrgctx_var;
3016 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3017 MonoInst *vtable_loc, *vtable_var;
3019 g_assert (!this);
3021 vtable_loc = mono_get_vtable_var (cfg);
3022 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3024 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3025 MonoInst *mrgctx_var = vtable_var;
3026 int vtable_reg;
3028 vtable_reg = alloc_preg (cfg);
3029 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3030 vtable_var->type = STACK_PTR;
3033 return vtable_var;
3034 } else {
3035 MonoInst *ins;
3036 int vtable_reg;
3038 vtable_reg = alloc_preg (cfg);
3039 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3040 return ins;
3044 static MonoJumpInfoRgctxEntry *
3045 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3047 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3048 res->method = method;
3049 res->in_mrgctx = in_mrgctx;
3050 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3051 res->data->type = patch_type;
3052 res->data->data.target = patch_data;
3053 res->info_type = info_type;
3055 return res;
3058 static inline MonoInst*
3059 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3061 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3064 static MonoInst*
3065 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3066 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3068 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3069 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3071 return emit_rgctx_fetch (cfg, rgctx, entry);
3075 * emit_get_rgctx_method:
3077 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3078 * normal constants, else emit a load from the rgctx.
3080 static MonoInst*
3081 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3082 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3084 if (!context_used) {
3085 MonoInst *ins;
3087 switch (rgctx_type) {
3088 case MONO_RGCTX_INFO_METHOD:
3089 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3090 return ins;
3091 case MONO_RGCTX_INFO_METHOD_RGCTX:
3092 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3093 return ins;
3094 default:
3095 g_assert_not_reached ();
3097 } else {
3098 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3099 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3101 return emit_rgctx_fetch (cfg, rgctx, entry);
3105 static MonoInst*
3106 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3107 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3109 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3110 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3112 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing its
 * vtable (a constant, or an rgctx fetch under generic sharing) in
 * MONO_ARCH_VTABLE_REG.
 * On return the caller must check @klass for load errors.
 */
static void
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	MonoCallInst *call;
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (klass);

	if (context_used) {
		/* Shared code: the vtable must come from the rgctx at runtime */
		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
			klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		/* NULL vtable means a load error; the caller detects it on @klass */
		if (!vtable)
			return;
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	/* LLVM uses a trampoline with a different signature */
	if (COMPILE_LLVM (cfg))
		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
	else
		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
#ifdef MONO_ARCH_VTABLE_REG
	/* The trampoline expects the vtable in a fixed architecture register */
	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
	cfg->uses_vtable_reg = TRUE;
#else
	NOT_IMPLEMENTED;
#endif
}
3151 static void
3152 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3154 MonoInst *ins;
3156 if (cfg->gen_seq_points && cfg->method == method) {
3157 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3158 MONO_ADD_INS (cfg->cbb, ins);
3162 static void
3163 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3165 if (mini_get_debug_options ()->better_cast_details) {
3166 int to_klass_reg = alloc_preg (cfg);
3167 int vtable_reg = alloc_preg (cfg);
3168 int klass_reg = alloc_preg (cfg);
3169 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3171 if (!tls_get) {
3172 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3173 exit (1);
3176 MONO_ADD_INS (cfg->cbb, tls_get);
3177 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3178 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3180 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3181 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3182 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3186 static void
3187 reset_cast_details (MonoCompile *cfg)
3189 /* Reset the variables holding the cast details */
3190 if (mini_get_debug_options ()->better_cast_details) {
3191 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3193 MONO_ADD_INS (cfg->cbb, tls_get);
3194 /* It is enough to reset the from field */
3195 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a check that the dynamic type of OBJ matches ARRAY_CLASS, throwing
 * ArrayTypeMismatchException on failure (used e.g. for stelem checks).
 * On return the caller must check @array_class for load errors
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (array_class);

	save_cast_details (cfg, array_class, obj->dreg);

	/* The faulting load doubles as the null check on OBJ */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code cannot embed a vtable pointer: compare classes instead */
		int class_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		if (cfg->compile_aot) {
			/* Under AOT the class pointer must come from a patchable constant */
			int klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
		} else {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
		}
	} else if (context_used) {
		/* Shared generic code: fetch the expected vtable from the rgctx */
		MonoInst *vtable_ins;

		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			int vt_reg;
			MonoVTable *vtable;

			/* NULL vtable = load error; caller checks @array_class */
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MonoVTable *vtable;
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			/* JIT mode: the vtable pointer can be embedded as an immediate */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");

	reset_cast_details (cfg);
}
3254 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3255 * generic code is generated.
3257 static MonoInst*
3258 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3260 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3262 if (context_used) {
3263 MonoInst *rgctx, *addr;
3265 /* FIXME: What if the class is shared? We might not
3266 have to get the address of the method from the
3267 RGCTX. */
3268 addr = emit_get_rgctx_method (cfg, context_used, method,
3269 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3271 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3273 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3274 } else {
3275 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes the object in sp [0] to a value of type KLASS and
 * returns an instruction whose dreg holds the address of the unboxed data
 * (just past the MonoObject header). Throws InvalidCastException at runtime
 * when the object's element class does not match KLASS.
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* The faulting load doubles as the null check */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		/* Shared code: the expected element class comes from the rgctx */
		element_class = emit_get_rgctx_klass (cfg, context_used,
			klass->element_class, MONO_RGCTX_INFO_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		save_cast_details (cfg, klass->element_class, obj_reg);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	/* The unboxed value lives immediately after the object header */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
/*
 * handle_alloc:
 *
 *   Emit IR which allocates (but does not initialize) an object of class
 * KLASS. FOR_BOX selects the allocator variant used when boxing.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
	MonoInst *iargs [2];
	void *alloc_ftn;

	if (context_used) {
		MonoInst *data;
		int rgctx_info;
		MonoInst *iargs [2];

		/*
		FIXME: we cannot get managed_alloc here because we can't get
		the class's vtable (because it's not a closed class)

		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
		*/

		/* With MONO_OPT_SHARED the helper takes (domain, klass); otherwise it
		 * takes the vtable directly, so fetch the matching rgctx item. */
		if (cfg->opt & MONO_OPT_SHARED)
			rgctx_info = MONO_RGCTX_INFO_KLASS;
		else
			rgctx_info = MONO_RGCTX_INFO_VTABLE;
		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);

		if (cfg->opt & MONO_OPT_SHARED) {
			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
			iargs [1] = data;
			alloc_ftn = mono_object_new;
		} else {
			iargs [0] = data;
			alloc_ftn = mono_object_new_specific;
		}

		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
	}

	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = mono_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = NULL;
		gboolean pass_lw;

		if (!vtable) {
			/* Class failed to load: record it on the cfg for the caller */
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
			cfg->exception_ptr = klass;
			return NULL;
		}

#ifndef MONO_CROSS_COMPILE
		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
#endif

		if (managed_alloc) {
			/* The GC supplies an inlined managed allocator method */
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		if (pass_lw) {
			/* This allocator variant also wants the instance size in pointer words */
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
		}
		else {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
		}
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
3413 * Returns NULL and set the cfg exception on error.
3415 static MonoInst*
3416 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3418 MonoInst *alloc, *ins;
3420 if (mono_class_is_nullable (klass)) {
3421 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3423 if (context_used) {
3424 /* FIXME: What if the class is shared? We might not
3425 have to get the method address from the RGCTX. */
3426 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3427 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3428 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3430 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3431 } else {
3432 return mono_emit_method_call (cfg, method, &val, NULL);
3436 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3437 if (!alloc)
3438 return NULL;
3440 if (mini_is_gsharedvt_klass (cfg, klass)) {
3441 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3442 ins->opcode = OP_STOREV_MEMBASE;
3443 } else {
3444 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3447 return alloc;
3451 static gboolean
3452 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3454 int i;
3455 MonoGenericContainer *container;
3456 MonoGenericInst *ginst;
3458 if (klass->generic_class) {
3459 container = klass->generic_class->container_class->generic_container;
3460 ginst = klass->generic_class->context.class_inst;
3461 } else if (klass->generic_container && context_used) {
3462 container = klass->generic_container;
3463 ginst = container->context.class_inst;
3464 } else {
3465 return FALSE;
3468 for (i = 0; i < container->type_argc; ++i) {
3469 MonoType *type;
3470 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3471 continue;
3472 type = ginst->type_argv [i];
3473 if (mini_type_is_reference (cfg, type))
3474 return TRUE;
3476 return FALSE;
/*
 * A "complex" isinst/castclass is one that cannot be decided by a simple
 * inline vtable/class comparison and must go through an icall instead.
 * The leading TRUE currently forces EVERY check down the complex path,
 * disabling the inline fast path.
 */
// FIXME: This doesn't work yet (class libs tests fail?)
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit IR implementing the 'castclass' opcode: checks that SRC (or null)
 * is an instance of KLASS and throws InvalidCastException otherwise.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [3];

		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
			/* Variance-aware cast through the managed helper plus a cast cache */
			MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
			MonoInst *cache_ins;

			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

			/* obj */
			args [0] = src;

			/* klass - it's the second element of the cache entry*/
			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

			/* cache */
			args [2] = cache_ins;

			return mono_emit_method_call (cfg, mono_castclass, args, NULL);
		}

		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
		} else {
			/* Simple case, handled by the code below */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);

	/* A null reference always casts successfully */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* Sealed non-array class: a single pointer comparison suffices */
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				if (!vt) {
					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
					cfg->exception_ptr = klass;
					return NULL;
				}
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' opcode: the result is SRC if it is an
 * instance of KLASS (or null), and null otherwise.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoInst *ins;
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_ireg_ref (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [3];

		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
			/* Variance-aware check through the managed helper plus a cast cache */
			MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
			MonoInst *cache_ins;

			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

			/* obj */
			args [0] = src;

			/* klass - it's the second element of the cache entry*/
			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

			/* cache */
			args [2] = cache_ins;

			return mono_emit_method_call (cfg, mono_isinst, args, NULL);
		}

		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_isinst, args);
		} else {
			/* Simple case, the code below can handle it */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	ins->klass = klass;

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
	} else {
		int klass_reg = alloc_preg (cfg);

		if (klass->rank) {
			/* Array case: the object's rank must match before comparing element classes */
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);

			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
			/* NOTE(review): the next three branches appear to encode the special
			 * array-covariance rules involving System.Enum and enum base types —
			 * confirm against the runtime's array cast semantics. */
			if (klass->cast_class == mono_defaults.object_class) {
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			} else {
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				}

				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			}
		} else if (mono_class_is_nullable (klass)) {
			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		} else {
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
				g_assert (!context_used);
				/* Sealed class: a single pointer comparison decides the result */
				/* the remoting code is broken, access the class for now */
				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					if (!vt) {
						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
						cfg->exception_ptr = klass;
						return NULL;
					}
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				} else {
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				}
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
			}
		}
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);

	return ins;
}
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst check (the CEE_MONO_CISINST
 * opcode), which also distinguishes transparent proxies.
 */
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	/* A null reference is never an instance */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Interface not implemented: only a transparent proxy can still be undecided */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
	} else {
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		/* For a transparent proxy, test against its remote proxy_class instead */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		/* Proxy with custom type info: a failed check means "undetermined" (2) */
		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass check (the CEE_MONO_CCASTCLASS
 * opcode), which also distinguishes transparent proxies.
 */
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwhise*/

	MonoInst *ins;
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* A null reference always casts successfully */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Interface not implemented: only a transparent proxy may avoid throwing */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		/* Proxy whose type cannot be determined here: result 1 */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	} else {
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* For a transparent proxy, test against its remote proxy_class instead */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Not a proxy: an ordinary castclass which throws on mismatch */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_delegate_ctor:
 *
 *   Emit IR which creates a delegate of class KLASS bound to TARGET and
 * METHOD, inlining the work of mono_delegate_ctor ().
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
{
	MonoInst *ptr;
	int dreg;
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	obj = handle_alloc (cfg, klass, FALSE, 0);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		if (cfg->gen_write_barriers) {
			/* The store writes a reference into the heap, so a barrier is needed */
			dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
			emit_write_barrier (cfg, ptr, target, 0);
		}
	}

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
	if (cfg->gen_write_barriers) {
		dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
		emit_write_barrier (cfg, ptr, method_ins, 0);
	}
	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
		MonoInst *code_slot_ins;

		if (context_used) {
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			domain = mono_domain_get ();
			/* The hash table maps method -> code slot; guarded by the domain lock */
			mono_domain_lock (domain);
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
	} else {
		trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
3976 static MonoInst*
3977 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3979 MonoJitICallInfo *info;
3981 /* Need to register the icall so it gets an icall wrapper */
3982 info = mono_get_array_new_va_icall (rank);
3984 cfg->flags |= MONO_CFG_HAS_VARARGS;
3986 /* mono_array_new_va () needs a vararg calling convention */
3987 cfg->disable_llvm = TRUE;
3989 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3990 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry bblock, storing the
 * GOT address into cfg->got_var, and a dummy use in the exit bblock to keep
 * the variable live for the whole method. No-op when there is no got_var or
 * it was already allocated.
 */
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)
		return;

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->cil_code = cfg->header->code;
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		/* Splice before the existing first instruction */
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
	}
	else
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
/* Cached inline size limit (MONO_INLINELIMIT env var or INLINE_LENGTH_LIMIT);
 * lazily initialized by mono_method_check_inlining (). */
static int inline_limit;
static gboolean inline_limit_inited;
4031 static gboolean
4032 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4034 MonoMethodHeaderSummary header;
4035 MonoVTable *vtable;
4036 #ifdef MONO_ARCH_SOFT_FLOAT
4037 MonoMethodSignature *sig = mono_method_signature (method);
4038 int i;
4039 #endif
4041 if (cfg->generic_sharing_context)
4042 return FALSE;
4044 if (cfg->inline_depth > 10)
4045 return FALSE;
4047 #ifdef MONO_ARCH_HAVE_LMF_OPS
4048 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4049 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4050 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4051 return TRUE;
4052 #endif
4055 if (!mono_method_get_header_summary (method, &header))
4056 return FALSE;
4058 /*runtime, icall and pinvoke are checked by summary call*/
4059 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4060 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4061 (method->klass->marshalbyref) ||
4062 header.has_clauses)
4063 return FALSE;
4065 /* also consider num_locals? */
4066 /* Do the size check early to avoid creating vtables */
4067 if (!inline_limit_inited) {
4068 if (getenv ("MONO_INLINELIMIT"))
4069 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
4070 else
4071 inline_limit = INLINE_LENGTH_LIMIT;
4072 inline_limit_inited = TRUE;
4074 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4075 return FALSE;
4078 * if we can initialize the class of the method right away, we do,
4079 * otherwise we don't allow inlining if the class needs initialization,
4080 * since it would mean inserting a call to mono_runtime_class_init()
4081 * inside the inlined code
4083 if (!(cfg->opt & MONO_OPT_SHARED)) {
4084 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4085 if (cfg->run_cctors && method->klass->has_cctor) {
4086 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4087 if (!method->klass->runtime_info)
4088 /* No vtable created yet */
4089 return FALSE;
4090 vtable = mono_class_vtable (cfg->domain, method->klass);
4091 if (!vtable)
4092 return FALSE;
4093 /* This makes so that inline cannot trigger */
4094 /* .cctors: too many apps depend on them */
4095 /* running with a specific order... */
4096 if (! vtable->initialized)
4097 return FALSE;
4098 mono_runtime_class_init (vtable);
4100 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4101 if (!method->klass->runtime_info)
4102 /* No vtable created yet */
4103 return FALSE;
4104 vtable = mono_class_vtable (cfg->domain, method->klass);
4105 if (!vtable)
4106 return FALSE;
4107 if (!vtable->initialized)
4108 return FALSE;
4110 } else {
4112 * If we're compiling for shared code
4113 * the cctor will need to be run at aot method load time, for example,
4114 * or at the end of the compilation of the inlining method.
4116 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4117 return FALSE;
4121 * CAS - do not inline methods with declarative security
4122 * Note: this has to be before any possible return TRUE;
4124 if (mono_method_has_declsec (method))
4125 return FALSE;
4127 #ifdef MONO_ARCH_SOFT_FLOAT
4128 /* FIXME: */
4129 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4130 return FALSE;
4131 for (i = 0; i < sig->param_count; ++i)
4132 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4133 return FALSE;
4134 #endif
4136 return TRUE;
4139 static gboolean
4140 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4142 if (vtable->initialized && !cfg->compile_aot)
4143 return FALSE;
4145 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4146 return FALSE;
4148 if (!mono_class_needs_cctor_run (vtable->klass, method))
4149 return FALSE;
4151 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4152 /* The initialization is already done before the method is called */
4153 return FALSE;
4155 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the rank-1 array ARR
 * whose element class is KLASS; emits a bounds check when BCHECK is set.
 * For gsharedvt element classes the element size is fetched from the rgctx
 * at runtime. Returns a STACK_MP instruction holding the address.
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;
	int context_used;

	if (mini_is_gsharedvt_klass (cfg, klass)) {
		/* Element size only known at runtime; -1 is the marker checked below */
		size = -1;
	} else {
		mono_class_init (klass);
		size = mono_class_array_element_size (klass);
	}

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		/* LEA can scale by 1/2/4/8 directly; fast_log2 maps size -> shift */
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

		return ins;
	}
#endif

	add_reg = alloc_ireg_mp (cfg);

	if (size == -1) {
		MonoInst *rgctx_ins;

		/* gsharedvt */
		g_assert (cfg->generic_sharing_context);
		context_used = mono_class_check_context_used (klass);
		g_assert (context_used);
		rgctx_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	}
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [index1, index2] of the rank-2 array ARR,
 * including lower-bound adjustment and range checks against the array's
 * bounds info. Requires native multiply opcodes (hence the #ifndef guard).
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2, tmpreg;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
	} else {
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
		index1 = tmpreg;
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
		index2 = tmpreg;
	}
#else
	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
	tmpreg = -1;
#endif

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* realidx = index - lower_bound; unsigned compare rejects both under- and overflow */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Second dimension: bounds entry at bounds_reg + sizeof (MonoArrayBounds) */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * dim2_length + realidx2) * size + vector offset */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
4309 static MonoInst*
4310 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4312 int rank;
4313 MonoInst *addr;
4314 MonoMethod *addr_method;
4315 int element_size;
4317 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4319 if (rank == 1)
4320 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4322 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4323 /* emit_ldelema_2 depends on OP_LMUL */
4324 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4325 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4327 #endif
4329 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4330 addr_method = mono_marshal_get_array_address (rank, element_size);
4331 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4333 return addr;
/* Default break policy: honor every IL break / Debugger.Break () call. */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
{
	return MONO_BREAK_POLICY_ALWAYS;
}

/* Embedder-overridable policy callback; see mono_set_break_policy (). */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4345 * mono_set_break_policy:
4346 * policy_callback: the new callback function
4348 * Allow embedders to decide wherther to actually obey breakpoint instructions
4349 * (both break IL instructions and Debugger.Break () method calls), for example
4350 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4351 * untrusted or semi-trusted code.
4353 * @policy_callback will be called every time a break point instruction needs to
4354 * be inserted with the method argument being the method that calls Debugger.Break()
4355 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4356 * if it wants the breakpoint to not be effective in the given method.
4357 * #MONO_BREAK_POLICY_ALWAYS is the default.
4359 void
4360 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4362 if (policy_callback)
4363 break_policy_func = policy_callback;
4364 else
4365 break_policy_func = always_insert_breakpoint;
4368 static gboolean
4369 should_insert_brekpoint (MonoMethod *method) {
4370 switch (break_policy_func (method)) {
4371 case MONO_BREAK_POLICY_ALWAYS:
4372 return TRUE;
4373 case MONO_BREAK_POLICY_NEVER:
4374 return FALSE;
4375 case MONO_BREAK_POLICY_ON_DBG:
4376 return mono_debug_using_mono_debugger ();
4377 default:
4378 g_warning ("Incorrect value returned from break policy callback");
4379 return FALSE;
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoInst *addr, *store, *load;
	/* fsig->params [2] is the by-ref value parameter; its element type
	 * determines the load/store width */
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	if (is_set) {
		/* Load the new value through the args [2] pointer, store into the array */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
		if (mini_type_is_reference (cfg, fsig->params [2]))
			emit_write_barrier (cfg, addr, load, -1);
	} else {
		/* Load the element, store it through the args [2] pointer */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	}
	return store;
}
4405 static gboolean
4406 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4408 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a stelem: sp [0] = array, sp [1] = index, sp [2] = value. For
 * reference-type elements with a possibly non-null value and safety checks
 * enabled, the element-type check is done by calling the virtual stelemref
 * helper; otherwise the store is emitted inline (with a bounds check when
 * SAFETY_CHECKS is set). Returns NULL if the stack types are unexpected.
 */
static MonoInst*
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
{
	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
		MonoInst *iargs [3];

		if (!helper->slot)
			mono_class_setup_vtable (obj_array);
		g_assert (helper->slot);

		if (sp [0]->type != STACK_OBJ)
			return NULL;
		if (sp [2]->type != STACK_OBJ)
			return NULL;

		iargs [2] = sp [2];
		iargs [1] = sp [1];
		iargs [0] = sp [0];

		/* Virtual call so the helper matching the array's element type runs */
		return mono_emit_method_call (cfg, helper, iargs, sp [0]);
	} else {
		MonoInst *ins;

		if (mini_is_gsharedvt_klass (cfg, klass)) {
			MonoInst *addr;

			// FIXME-VT: OP_ICONST optimization
			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			ins->opcode = OP_STOREV_MEMBASE;
		} else if (sp [1]->opcode == OP_ICONST) {
			/* Constant index: fold the address computation into the store offset */
			int array_reg = sp [0]->dreg;
			int index_reg = sp [1]->dreg;
			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);

			if (safety_checks)
				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
		} else {
			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			if (generic_class_is_reference_type (cfg, klass))
				emit_write_barrier (cfg, addr, sp [2], -1);
		}
		return ins;
	}
}
4462 static MonoInst*
4463 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4465 MonoClass *eklass;
4467 if (is_set)
4468 eklass = mono_class_from_mono_type (fsig->params [2]);
4469 else
4470 eklass = mono_class_from_mono_type (fsig->ret);
4473 if (is_set) {
4474 return emit_array_store (cfg, eklass, args, FALSE);
4475 } else {
4476 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4477 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
4478 return ins;
4482 static MonoInst*
4483 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4485 MonoInst *ins = NULL;
4486 #ifdef MONO_ARCH_SIMD_INTRINSICS
4487 if (cfg->opt & MONO_OPT_SIMD) {
4488 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4489 if (ins)
4490 return ins;
4492 #endif
4494 return ins;
4497 static MonoInst*
4498 emit_memory_barrier (MonoCompile *cfg, int kind)
4500 MonoInst *ins = NULL;
4501 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4502 MONO_ADD_INS (cfg->cbb, ins);
4503 ins->backend.memory_barrier_kind = kind;
4505 return ins;
4508 static MonoInst*
4509 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4511 MonoInst *ins = NULL;
4512 int opcode = 0;
4514 /* The LLVM backend supports these intrinsics */
4515 if (cmethod->klass == mono_defaults.math_class) {
4516 if (strcmp (cmethod->name, "Sin") == 0) {
4517 opcode = OP_SIN;
4518 } else if (strcmp (cmethod->name, "Cos") == 0) {
4519 opcode = OP_COS;
4520 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4521 opcode = OP_SQRT;
4522 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
4523 opcode = OP_ABS;
4526 if (opcode) {
4527 MONO_INST_NEW (cfg, ins, opcode);
4528 ins->type = STACK_R8;
4529 ins->dreg = mono_alloc_freg (cfg);
4530 ins->sreg1 = args [0]->dreg;
4531 MONO_ADD_INS (cfg->cbb, ins);
4534 opcode = 0;
4535 if (cfg->opt & MONO_OPT_CMOV) {
4536 if (strcmp (cmethod->name, "Min") == 0) {
4537 if (fsig->params [0]->type == MONO_TYPE_I4)
4538 opcode = OP_IMIN;
4539 if (fsig->params [0]->type == MONO_TYPE_U4)
4540 opcode = OP_IMIN_UN;
4541 else if (fsig->params [0]->type == MONO_TYPE_I8)
4542 opcode = OP_LMIN;
4543 else if (fsig->params [0]->type == MONO_TYPE_U8)
4544 opcode = OP_LMIN_UN;
4545 } else if (strcmp (cmethod->name, "Max") == 0) {
4546 if (fsig->params [0]->type == MONO_TYPE_I4)
4547 opcode = OP_IMAX;
4548 if (fsig->params [0]->type == MONO_TYPE_U4)
4549 opcode = OP_IMAX_UN;
4550 else if (fsig->params [0]->type == MONO_TYPE_I8)
4551 opcode = OP_LMAX;
4552 else if (fsig->params [0]->type == MONO_TYPE_U8)
4553 opcode = OP_LMAX_UN;
4557 if (opcode) {
4558 MONO_INST_NEW (cfg, ins, opcode);
4559 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4560 ins->dreg = mono_alloc_ireg (cfg);
4561 ins->sreg1 = args [0]->dreg;
4562 ins->sreg2 = args [1]->dreg;
4563 MONO_ADD_INS (cfg->cbb, ins);
4567 return ins;
4570 static MonoInst*
4571 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4573 if (cmethod->klass == mono_defaults.array_class) {
4574 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4575 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4576 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4577 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
4580 return NULL;
4583 static MonoInst*
4584 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4586 MonoInst *ins = NULL;
4588 static MonoClass *runtime_helpers_class = NULL;
4589 if (! runtime_helpers_class)
4590 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4591 "System.Runtime.CompilerServices", "RuntimeHelpers");
4593 if (cmethod->klass == mono_defaults.string_class) {
4594 if (strcmp (cmethod->name, "get_Chars") == 0) {
4595 int dreg = alloc_ireg (cfg);
4596 int index_reg = alloc_preg (cfg);
4597 int mult_reg = alloc_preg (cfg);
4598 int add_reg = alloc_preg (cfg);
4600 #if SIZEOF_REGISTER == 8
4601 /* The array reg is 64 bits but the index reg is only 32 */
4602 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4603 #else
4604 index_reg = args [1]->dreg;
4605 #endif
4606 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4608 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4609 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4610 add_reg = ins->dreg;
4611 /* Avoid a warning */
4612 mult_reg = 0;
4613 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4614 add_reg, 0);
4615 #else
4616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4617 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4618 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4619 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4620 #endif
4621 type_from_op (ins, NULL, NULL);
4622 return ins;
4623 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4624 int dreg = alloc_ireg (cfg);
4625 /* Decompose later to allow more optimizations */
4626 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4627 ins->type = STACK_I4;
4628 ins->flags |= MONO_INST_FAULT;
4629 cfg->cbb->has_array_access = TRUE;
4630 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4632 return ins;
4633 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4634 int mult_reg = alloc_preg (cfg);
4635 int add_reg = alloc_preg (cfg);
4637 /* The corlib functions check for oob already. */
4638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4639 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4640 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4641 return cfg->cbb->last_ins;
4642 } else
4643 return NULL;
4644 } else if (cmethod->klass == mono_defaults.object_class) {
4646 if (strcmp (cmethod->name, "GetType") == 0) {
4647 int dreg = alloc_ireg_ref (cfg);
4648 int vt_reg = alloc_preg (cfg);
4649 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4650 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4651 type_from_op (ins, NULL, NULL);
4653 return ins;
4654 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4655 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4656 int dreg = alloc_ireg (cfg);
4657 int t1 = alloc_ireg (cfg);
4659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4660 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4661 ins->type = STACK_I4;
4663 return ins;
4664 #endif
4665 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4666 MONO_INST_NEW (cfg, ins, OP_NOP);
4667 MONO_ADD_INS (cfg->cbb, ins);
4668 return ins;
4669 } else
4670 return NULL;
4671 } else if (cmethod->klass == mono_defaults.array_class) {
4672 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4673 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4675 #ifndef MONO_BIG_ARRAYS
4677 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4678 * Array methods.
4680 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4681 int dreg = alloc_ireg (cfg);
4682 int bounds_reg = alloc_ireg_mp (cfg);
4683 MonoBasicBlock *end_bb, *szarray_bb;
4684 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4686 NEW_BBLOCK (cfg, end_bb);
4687 NEW_BBLOCK (cfg, szarray_bb);
4689 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4690 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4693 /* Non-szarray case */
4694 if (get_length)
4695 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4696 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4697 else
4698 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4699 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4701 MONO_START_BB (cfg, szarray_bb);
4702 /* Szarray case */
4703 if (get_length)
4704 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4705 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4706 else
4707 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4708 MONO_START_BB (cfg, end_bb);
4710 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4711 ins->type = STACK_I4;
4713 return ins;
4715 #endif
4717 if (cmethod->name [0] != 'g')
4718 return NULL;
4720 if (strcmp (cmethod->name, "get_Rank") == 0) {
4721 int dreg = alloc_ireg (cfg);
4722 int vtable_reg = alloc_preg (cfg);
4723 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4724 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4725 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4726 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4727 type_from_op (ins, NULL, NULL);
4729 return ins;
4730 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4731 int dreg = alloc_ireg (cfg);
4733 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4734 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4735 type_from_op (ins, NULL, NULL);
4737 return ins;
4738 } else
4739 return NULL;
4740 } else if (cmethod->klass == runtime_helpers_class) {
4742 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4743 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4744 return ins;
4745 } else
4746 return NULL;
4747 } else if (cmethod->klass == mono_defaults.thread_class) {
4748 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4749 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4750 MONO_ADD_INS (cfg->cbb, ins);
4751 return ins;
4752 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4753 return emit_memory_barrier (cfg, FullBarrier);
4755 } else if (cmethod->klass == mono_defaults.monitor_class) {
4757 /* FIXME this should be integrated to the check below once we support the trampoline version */
4758 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4759 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4760 MonoMethod *fast_method = NULL;
4762 /* Avoid infinite recursion */
4763 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4764 return NULL;
4766 fast_method = mono_monitor_get_fast_path (cmethod);
4767 if (!fast_method)
4768 return NULL;
4770 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4772 #endif
4774 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4775 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4776 MonoCallInst *call;
4778 if (COMPILE_LLVM (cfg)) {
4780 * Pass the argument normally, the LLVM backend will handle the
4781 * calling convention problems.
4783 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4784 } else {
4785 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4786 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4787 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4788 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4791 return (MonoInst*)call;
4792 } else if (strcmp (cmethod->name, "Exit") == 0) {
4793 MonoCallInst *call;
4795 if (COMPILE_LLVM (cfg)) {
4796 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4797 } else {
4798 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4799 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4800 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4801 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4804 return (MonoInst*)call;
4806 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4808 MonoMethod *fast_method = NULL;
4810 /* Avoid infinite recursion */
4811 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4812 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4813 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4814 return NULL;
4816 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4817 strcmp (cmethod->name, "Exit") == 0)
4818 fast_method = mono_monitor_get_fast_path (cmethod);
4819 if (!fast_method)
4820 return NULL;
4822 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4824 #endif
4825 } else if (cmethod->klass->image == mono_defaults.corlib &&
4826 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4827 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4828 ins = NULL;
4830 #if SIZEOF_REGISTER == 8
4831 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4832 /* 64 bit reads are already atomic */
4833 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4834 ins->dreg = mono_alloc_preg (cfg);
4835 ins->inst_basereg = args [0]->dreg;
4836 ins->inst_offset = 0;
4837 MONO_ADD_INS (cfg->cbb, ins);
4839 #endif
4841 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4842 if (strcmp (cmethod->name, "Increment") == 0) {
4843 MonoInst *ins_iconst;
4844 guint32 opcode = 0;
4846 if (fsig->params [0]->type == MONO_TYPE_I4)
4847 opcode = OP_ATOMIC_ADD_NEW_I4;
4848 #if SIZEOF_REGISTER == 8
4849 else if (fsig->params [0]->type == MONO_TYPE_I8)
4850 opcode = OP_ATOMIC_ADD_NEW_I8;
4851 #endif
4852 if (opcode) {
4853 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4854 ins_iconst->inst_c0 = 1;
4855 ins_iconst->dreg = mono_alloc_ireg (cfg);
4856 MONO_ADD_INS (cfg->cbb, ins_iconst);
4858 MONO_INST_NEW (cfg, ins, opcode);
4859 ins->dreg = mono_alloc_ireg (cfg);
4860 ins->inst_basereg = args [0]->dreg;
4861 ins->inst_offset = 0;
4862 ins->sreg2 = ins_iconst->dreg;
4863 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4864 MONO_ADD_INS (cfg->cbb, ins);
4866 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4867 MonoInst *ins_iconst;
4868 guint32 opcode = 0;
4870 if (fsig->params [0]->type == MONO_TYPE_I4)
4871 opcode = OP_ATOMIC_ADD_NEW_I4;
4872 #if SIZEOF_REGISTER == 8
4873 else if (fsig->params [0]->type == MONO_TYPE_I8)
4874 opcode = OP_ATOMIC_ADD_NEW_I8;
4875 #endif
4876 if (opcode) {
4877 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4878 ins_iconst->inst_c0 = -1;
4879 ins_iconst->dreg = mono_alloc_ireg (cfg);
4880 MONO_ADD_INS (cfg->cbb, ins_iconst);
4882 MONO_INST_NEW (cfg, ins, opcode);
4883 ins->dreg = mono_alloc_ireg (cfg);
4884 ins->inst_basereg = args [0]->dreg;
4885 ins->inst_offset = 0;
4886 ins->sreg2 = ins_iconst->dreg;
4887 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4888 MONO_ADD_INS (cfg->cbb, ins);
4890 } else if (strcmp (cmethod->name, "Add") == 0) {
4891 guint32 opcode = 0;
4893 if (fsig->params [0]->type == MONO_TYPE_I4)
4894 opcode = OP_ATOMIC_ADD_NEW_I4;
4895 #if SIZEOF_REGISTER == 8
4896 else if (fsig->params [0]->type == MONO_TYPE_I8)
4897 opcode = OP_ATOMIC_ADD_NEW_I8;
4898 #endif
4900 if (opcode) {
4901 MONO_INST_NEW (cfg, ins, opcode);
4902 ins->dreg = mono_alloc_ireg (cfg);
4903 ins->inst_basereg = args [0]->dreg;
4904 ins->inst_offset = 0;
4905 ins->sreg2 = args [1]->dreg;
4906 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4907 MONO_ADD_INS (cfg->cbb, ins);
4910 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4912 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4913 if (strcmp (cmethod->name, "Exchange") == 0) {
4914 guint32 opcode;
4915 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4917 if (fsig->params [0]->type == MONO_TYPE_I4)
4918 opcode = OP_ATOMIC_EXCHANGE_I4;
4919 #if SIZEOF_REGISTER == 8
4920 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4921 (fsig->params [0]->type == MONO_TYPE_I))
4922 opcode = OP_ATOMIC_EXCHANGE_I8;
4923 #else
4924 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4925 opcode = OP_ATOMIC_EXCHANGE_I4;
4926 #endif
4927 else
4928 return NULL;
4930 MONO_INST_NEW (cfg, ins, opcode);
4931 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4932 ins->inst_basereg = args [0]->dreg;
4933 ins->inst_offset = 0;
4934 ins->sreg2 = args [1]->dreg;
4935 MONO_ADD_INS (cfg->cbb, ins);
4937 switch (fsig->params [0]->type) {
4938 case MONO_TYPE_I4:
4939 ins->type = STACK_I4;
4940 break;
4941 case MONO_TYPE_I8:
4942 case MONO_TYPE_I:
4943 ins->type = STACK_I8;
4944 break;
4945 case MONO_TYPE_OBJECT:
4946 ins->type = STACK_OBJ;
4947 break;
4948 default:
4949 g_assert_not_reached ();
4952 if (cfg->gen_write_barriers && is_ref)
4953 emit_write_barrier (cfg, args [0], args [1], -1);
4955 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4957 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4958 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4959 int size = 0;
4960 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4961 if (fsig->params [1]->type == MONO_TYPE_I4)
4962 size = 4;
4963 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4964 size = sizeof (gpointer);
4965 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4966 size = 8;
4967 if (size == 4) {
4968 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4969 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4970 ins->sreg1 = args [0]->dreg;
4971 ins->sreg2 = args [1]->dreg;
4972 ins->sreg3 = args [2]->dreg;
4973 ins->type = STACK_I4;
4974 MONO_ADD_INS (cfg->cbb, ins);
4975 } else if (size == 8) {
4976 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4977 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4978 ins->sreg1 = args [0]->dreg;
4979 ins->sreg2 = args [1]->dreg;
4980 ins->sreg3 = args [2]->dreg;
4981 ins->type = STACK_I8;
4982 MONO_ADD_INS (cfg->cbb, ins);
4983 } else {
4984 /* g_assert_not_reached (); */
4986 if (cfg->gen_write_barriers && is_ref)
4987 emit_write_barrier (cfg, args [0], args [1], -1);
4989 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4991 if (ins)
4992 return ins;
4993 } else if (cmethod->klass->image == mono_defaults.corlib) {
4994 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4995 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4996 if (should_insert_brekpoint (cfg->method)) {
4997 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
4998 } else {
4999 MONO_INST_NEW (cfg, ins, OP_NOP);
5000 MONO_ADD_INS (cfg->cbb, ins);
5002 return ins;
5004 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5005 && strcmp (cmethod->klass->name, "Environment") == 0) {
5006 #ifdef TARGET_WIN32
5007 EMIT_NEW_ICONST (cfg, ins, 1);
5008 #else
5009 EMIT_NEW_ICONST (cfg, ins, 0);
5010 #endif
5011 return ins;
5013 } else if (cmethod->klass == mono_defaults.math_class) {
5015 * There is general branches code for Min/Max, but it does not work for
5016 * all inputs:
5017 * http://everything2.com/?node_id=1051618
5021 #ifdef MONO_ARCH_SIMD_INTRINSICS
5022 if (cfg->opt & MONO_OPT_SIMD) {
5023 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5024 if (ins)
5025 return ins;
5027 #endif
5029 if (COMPILE_LLVM (cfg)) {
5030 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5031 if (ins)
5032 return ins;
5035 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5039 * This entry point could be used later for arbitrary method
5040 * redirection.
5042 inline static MonoInst*
5043 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5044 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5046 if (method->klass == mono_defaults.string_class) {
5047 /* managed string allocation support */
5048 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5049 MonoInst *iargs [2];
5050 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5051 MonoMethod *managed_alloc = NULL;
5053 g_assert (vtable); /*Should not fail since it System.String*/
5054 #ifndef MONO_CROSS_COMPILE
5055 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
5056 #endif
5057 if (!managed_alloc)
5058 return NULL;
5059 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5060 iargs [1] = args [0];
5061 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
5064 return NULL;
5067 static void
5068 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5070 MonoInst *store, *temp;
5071 int i;
5073 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5074 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5077 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5078 * would be different than the MonoInst's used to represent arguments, and
5079 * the ldelema implementation can't deal with that.
5080 * Solution: When ldelema is used on an inline argument, create a var for
5081 * it, emit ldelema on that var, and emit the saving code below in
5082 * inline_method () if needed.
5084 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5085 cfg->args [i] = temp;
5086 /* This uses cfg->args [i] which is set by the preceeding line */
5087 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5088 store->cil_code = sp [0]->cil_code;
5089 sp++;
5093 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5094 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only allow inlining of methods whose full name starts with the
 * prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.
 * Returns TRUE when no limit is set or the name matches.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static char *limit = NULL;
	char *called_method_name;
	gboolean matches;

	/* Read the environment variable once and cache it */
	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string != NULL ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	called_method_name = mono_method_full_name (called_method, TRUE);
	/* Prefix match against the configured limit */
	matches = strncmp (called_method_name, limit, strlen (limit)) == 0;
	g_free (called_method_name);

	return matches;
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug aid: only allow inlining inside callers whose full name starts with
 * the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 * Returns TRUE when no limit is set or the name matches.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;
	char *caller_method_name;
	gboolean matches;

	/* Read the environment variable once and cache it */
	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string != NULL ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	caller_method_name = mono_method_full_name (caller_method, TRUE);
	/* Prefix match against the configured limit */
	matches = strncmp (caller_method_name, limit, strlen (limit)) == 0;
	g_free (caller_method_name);

	return matches;
}
#endif
5156 static void
5157 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
5159 static double r8_0 = 0.0;
5160 MonoInst *ins;
5162 switch (rvar->type) {
5163 case STACK_I4:
5164 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5165 break;
5166 case STACK_I8:
5167 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5168 break;
5169 case STACK_PTR:
5170 case STACK_MP:
5171 case STACK_OBJ:
5172 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5173 break;
5174 case STACK_R8:
5175 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5176 ins->type = STACK_R8;
5177 ins->inst_p0 = (void*)&r8_0;
5178 ins->dreg = rvar->dreg;
5179 MONO_ADD_INS (cfg->cbb, ins);
5180 break;
5181 case STACK_VTYPE:
5182 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5183 break;
5184 default:
5185 g_assert_not_reached ();
/*
 * inline_method:
 *
 *   Inline the body of CMETHOD into the current compilation at IP, with the
 * call arguments in SP. REAL_OFFSET is the IL offset of the call site in the
 * outermost method. When INLINE_ALWAYS is set, the name-limit filters are
 * skipped and loader errors are surfaced as compile exceptions.
 * Returns the inline cost + 1 on success, 0 when inlining was aborted.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		cfg->stat_inlineable_methods++;
		cmethod->inline_info = 1;
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	if (cheader == NULL || mono_loader_get_last_error ()) {
		MonoLoaderError *error = mono_loader_get_last_error ();

		if (cheader)
			mono_metadata_free_mh (cheader);
		/* Only surface the loader error when the caller insists on inlining */
		if (inline_always && error)
			mono_cfg_set_exception (cfg, error->exception_type);

		mono_loader_clear_error ();
		return 0;
	}

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);
		return 0;
	}

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Save the cfg state which mono_method_to_ir () will overwrite */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
		virtual = TRUE;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);

	ret_var_set = cfg->ret_var_set;

	/* Restore the saved state */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	if ((costs >= 0 && costs < 60) || inline_always) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		cfg->stat_inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			/*
			 * It's possible that the rvar is set in some prev bblock, but not in others.
			 * (#1835).
			 */
			if (rvar) {
				MonoBasicBlock *bb;

				for (i = 0; i < ebblock->in_count; ++i) {
					bb = ebblock->in_bb [i];

					if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
						cfg->cbb = bb;

						emit_init_rvar (cfg, rvar, fsig->ret);
					}
				}
			}

			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set)
				emit_init_rvar (cfg, rvar, fsig->ret);

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}
5385 * Some of these comments may well be out-of-date.
5386 * Design decisions: we do a single pass over the IL code (and we do bblock
5387 * splitting/merging in the few cases when it's required: a back jump to an IL
5388 * address that was not already seen as bblock starting point).
5389 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5390 * Complex operations are decomposed in simpler ones right away. We need to let the
5391 * arch-specific code peek and poke inside this process somehow (except when the
5392 * optimizations can take advantage of the full semantic info of coarse opcodes).
5393 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5394 * MonoInst->opcode initially is the IL opcode or some simplification of that
5395 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5396 * opcode with value bigger than OP_LAST.
5397 * At this point the IR can be handed over to an interpreter, a dumb code generator
5398 * or to the optimizing code generator that will translate it to SSA form.
5400 * Profiling directed optimizations.
5401 * We may compile by default with few or no optimizations and instrument the code
5402 * or the user may indicate what methods to optimize the most either in a config file
5403 * or through repeated runs where the compiler applies offline the optimizations to
5404 * each method and then decides if it was worth it.
5407 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5408 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5409 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5410 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5411 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5412 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5413 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5414 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5416 /* offset from br.s -> br like opcodes */
5417 #define BIG_BRANCH_OFFSET 13
5419 static gboolean
5420 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5422 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5424 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Prescan the IL in [START, END) and create a MonoBasicBlock (via
 * GET_BBLOCK) at every branch target and at the instruction following each
 * branch, so the main IL-to-IR pass knows where blocks begin. Blocks ending
 * in CEE_THROW are flagged out_of_line.
 * Returns 0 on success, 1 on invalid IL (*POS then points past the bad spot).
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance past the operand; branch operands also create bblocks */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1-byte signed branch displacement */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 4-byte signed branch displacement */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* Switch targets are relative to the end of the jump table */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
exception_exit:
	*pos = ip;
	return 1;
}
5521 static inline MonoMethod *
5522 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5524 MonoMethod *method;
5526 if (m->wrapper_type != MONO_WRAPPER_NONE)
5527 return mono_method_get_wrapper_data (m, token);
5529 method = mono_get_method_full (m->klass->image, token, klass, context);
5531 return method;
5534 static inline MonoMethod *
5535 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5537 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5539 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
5540 return NULL;
5542 return method;
5545 static inline MonoClass*
5546 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5548 MonoClass *klass;
5550 if (method->wrapper_type != MONO_WRAPPER_NONE)
5551 klass = mono_method_get_wrapper_data (method, token);
5552 else
5553 klass = mono_class_get_full (method->klass->image, token, context);
5554 if (klass)
5555 mono_class_init (klass);
5556 return klass;
5560 * Returns TRUE if the JIT should abort inlining because "callee"
5561 * is influenced by security attributes.
5563 static
5564 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5566 guint32 result;
5568 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5569 return TRUE;
5572 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5573 if (result == MONO_JIT_SECURITY_OK)
5574 return FALSE;
5576 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5577 /* Generate code to throw a SecurityException before the actual call/link */
5578 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5579 MonoInst *args [2];
5581 NEW_ICONST (cfg, args [0], 4);
5582 NEW_METHODCONST (cfg, args [1], caller);
5583 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5584 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5585 /* don't hide previous results */
5586 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5587 cfg->exception_data = result;
5588 return TRUE;
5591 return FALSE;
5594 static MonoMethod*
5595 throw_exception (void)
5597 static MonoMethod *method = NULL;
5599 if (!method) {
5600 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5601 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
5603 g_assert (method);
5604 return method;
5607 static void
5608 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5610 MonoMethod *thrower = throw_exception ();
5611 MonoInst *args [1];
5613 EMIT_NEW_PCONST (cfg, args [0], ex);
5614 mono_emit_method_call (cfg, thrower, args, NULL);
5618 * Return the original method is a wrapper is specified. We can only access
5619 * the custom attributes from the original method.
5621 static MonoMethod*
5622 get_original_method (MonoMethod *method)
5624 if (method->wrapper_type == MONO_WRAPPER_NONE)
5625 return method;
5627 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5628 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5629 return NULL;
5631 /* in other cases we need to find the original method */
5632 return mono_marshal_method_from_wrapper (method);
5635 static void
5636 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5637 MonoBasicBlock *bblock, unsigned char *ip)
5639 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5640 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5641 if (ex)
5642 emit_throw_exception (cfg, ex);
5645 static void
5646 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5647 MonoBasicBlock *bblock, unsigned char *ip)
5649 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5650 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5651 if (ex)
5652 emit_throw_exception (cfg, ex);
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		/* Confirm the call really targets RuntimeHelpers.InitializeArray */
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		/* Element size by array element type; multi-byte types only on LE */
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		/* fallthrough (R8 shares the 8-byte path when not ARM FPA) */
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The field's RVA blob must be large enough for the whole array */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
5739 static void
5740 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5742 char *method_fname = mono_method_full_name (method, TRUE);
5743 char *method_code;
5744 MonoMethodHeader *header = mono_method_get_header (method);
5746 if (header->code_size == 0)
5747 method_code = g_strdup ("method body is empty.");
5748 else
5749 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5750 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5751 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5752 g_free (method_fname);
5753 g_free (method_code);
5754 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Mark the compilation as failed with the preallocated EXCEPTION object.
 * cfg->exception_ptr is registered as a GC root before storing the object.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local N. If the value on top of the stack is a
 * constant that is also the last emitted instruction, redirect its dreg to
 * the local instead of emitting a move.
 */
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases. Recognizes "ldloca N; initobj T" and zeroes the local directly.
 * Returns the IP past the initobj when the pattern was optimized, NULL
 * otherwise (including on type-load failure, via CHECK_TYPELOAD).
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	/* Decode the local index: ldloca.s carries 1 byte, ldloca 2 bytes */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	/* initobj must follow in the same basic block */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (mini_type_is_reference (cfg, &klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
5823 static gboolean
5824 is_exception_class (MonoClass *class)
5826 while (class) {
5827 if (class == mono_defaults.exception_class)
5828 return TRUE;
5829 class = class->parent;
5831 return FALSE;
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whenever M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	static MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	/* Result is cached per-assembly; the barriers below order the value
	 * before the inited flag for concurrent readers. */
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly (ass);
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			int len;
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			len = attr->data_size;
			p = (const char*)attr->data;
			/* custom attribute blob prolog */
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments; the second one is read here
			 * (presumably IsJITOptimizerDisabled — matches the ctor shape) */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
5897 static gboolean
5898 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5900 gboolean supported_tail_call;
5901 int i;
5903 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5904 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5905 #else
5906 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5907 #endif
5909 for (i = 0; i < fsig->param_count; ++i) {
5910 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5911 /* These can point to the current method's stack */
5912 supported_tail_call = FALSE;
5914 if (fsig->hasthis && cmethod->klass->valuetype)
5915 /* this might point to the current method's stack */
5916 supported_tail_call = FALSE;
5917 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5918 supported_tail_call = FALSE;
5919 if (cfg->method->save_lmf)
5920 supported_tail_call = FALSE;
5921 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5922 supported_tail_call = FALSE;
5924 /* Debugging support */
5925 #if 0
5926 if (supported_tail_call) {
5927 if (!mono_debug_count ())
5928 supported_tail_call = FALSE;
5930 #endif
5932 return supported_tail_call;
5935 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5936 * it to the thread local value based on the tls_offset field. Every other kind of access to
5937 * the field causes an assert.
5939 static gboolean
5940 is_magic_tls_access (MonoClassField *field)
5942 if (strcmp (field->name, "tlsdata"))
5943 return FALSE;
5944 if (strcmp (field->parent->name, "ThreadLocal`1"))
5945 return FALSE;
5946 return field->parent->image == mono_defaults.corlib;
5949 /* emits the code needed to access a managed tls var (like ThreadStatic)
5950 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5951 * pointer for the current thread.
5952 * Returns the MonoInst* representing the address of the tls var.
5954 static MonoInst*
5955 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5957 MonoInst *addr;
5958 int static_data_reg, array_reg, dreg;
5959 int offset2_reg, idx_reg;
5960 // inlined access to the tls data
5961 // idx = (offset >> 24) - 1;
5962 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
5963 static_data_reg = alloc_ireg (cfg);
5964 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
5965 idx_reg = alloc_ireg (cfg);
5966 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5967 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
5968 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5969 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
5970 array_reg = alloc_ireg (cfg);
5971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
5972 offset2_reg = alloc_ireg (cfg);
5973 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
5974 dreg = alloc_ireg (cfg);
5975 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5976 return addr;
5980 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5981 * this address is cached per-method in cached_tls_addr.
 */
5983 static MonoInst*
5984 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5986 MonoInst *load, *addr, *temp, *store, *thread_ins;
5987 MonoClassField *offset_field;
/* Fast path: this method already computed the address once — reload the cached temp. */
5989 if (*cached_tls_addr) {
5990 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
5991 return addr;
5993 thread_ins = mono_get_thread_intrinsic (cfg);
5994 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the tls_offset field out of the ThreadLocal`1 instance (thread_local). */
5996 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
/* Obtain the current MonoInternalThread: prefer the arch intrinsic, otherwise
 * fall back to calling Thread.CurrentInternalThread_internal. */
5997 if (thread_ins) {
5998 MONO_ADD_INS (cfg->cbb, thread_ins);
5999 } else {
6000 MonoMethod *thread_method;
6001 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6002 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Translate (thread, tls_offset) into the address of the tls slot. */
6004 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6005 addr->klass = mono_class_from_mono_type (tls_field->type);
6006 addr->type = STACK_MP;
/* Cache the address in a compiler-created local so later accesses in this
 * method only need a temp load (see fast path above). */
6007 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6008 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6010 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6011 return addr;
6015 * mono_method_to_ir:
6017 * Translate the .net IL into linear IR.
6020 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6021 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6022 guint inline_offset, gboolean is_virtual_call)
6024 MonoError error;
6025 MonoInst *ins, **sp, **stack_start;
6026 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6027 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6028 MonoMethod *cmethod, *method_definition;
6029 MonoInst **arg_array;
6030 MonoMethodHeader *header;
6031 MonoImage *image;
6032 guint32 token, ins_flag;
6033 MonoClass *klass;
6034 MonoClass *constrained_call = NULL;
6035 unsigned char *ip, *end, *target, *err_pos;
6036 static double r8_0 = 0.0;
6037 MonoMethodSignature *sig;
6038 MonoGenericContext *generic_context = NULL;
6039 MonoGenericContainer *generic_container = NULL;
6040 MonoType **param_types;
6041 int i, n, start_new_bblock, dreg;
6042 int num_calls = 0, inline_costs = 0;
6043 int breakpoint_id = 0;
6044 guint num_args;
6045 MonoBoolean security, pinvoke;
6046 MonoSecurityManager* secman = NULL;
6047 MonoDeclSecurityActions actions;
6048 GSList *class_inits = NULL;
6049 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6050 int context_used;
6051 gboolean init_locals, seq_points, skip_dead_blocks;
6052 gboolean disable_inline, sym_seq_points = FALSE;
6053 MonoInst *cached_tls_addr = NULL;
6054 MonoDebugMethodInfo *minfo;
6055 MonoBitSet *seq_point_locs = NULL;
6057 disable_inline = is_jit_optimizer_disabled (method);
6059 /* serialization and xdomain stuff may need access to private fields and methods */
6060 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6061 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6062 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6063 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6064 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6065 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6067 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
6069 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6070 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6071 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6072 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6073 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6075 image = method->klass->image;
6076 header = mono_method_get_header (method);
6077 if (!header) {
6078 MonoLoaderError *error;
6080 if ((error = mono_loader_get_last_error ())) {
6081 mono_cfg_set_exception (cfg, error->exception_type);
6082 } else {
6083 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6084 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6086 goto exception_exit;
6088 generic_container = mono_method_get_generic_container (method);
6089 sig = mono_method_signature (method);
6090 num_args = sig->hasthis + sig->param_count;
6091 ip = (unsigned char*)header->code;
6092 cfg->cil_start = ip;
6093 end = ip + header->code_size;
6094 cfg->stat_cil_code_size += header->code_size;
6095 init_locals = header->init_locals;
6097 seq_points = cfg->gen_seq_points && cfg->method == method;
6098 #ifdef PLATFORM_ANDROID
6099 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6100 #endif
6102 if (cfg->gen_seq_points && cfg->method == method) {
6103 minfo = mono_debug_lookup_method (method);
6104 if (minfo) {
6105 int i, n_il_offsets;
6106 int *il_offsets;
6107 int *line_numbers;
6109 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6110 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6111 sym_seq_points = TRUE;
6112 for (i = 0; i < n_il_offsets; ++i) {
6113 if (il_offsets [i] < header->code_size)
6114 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6120 * Methods without init_locals set could cause asserts in various passes
6121 * (#497220).
6123 init_locals = TRUE;
6125 method_definition = method;
6126 while (method_definition->is_inflated) {
6127 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6128 method_definition = imethod->declaring;
6131 /* SkipVerification is not allowed if core-clr is enabled */
6132 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6133 dont_verify = TRUE;
6134 dont_verify_stloc = TRUE;
6137 if (mono_debug_using_mono_debugger ())
6138 cfg->keep_cil_nops = TRUE;
6140 if (sig->is_inflated)
6141 generic_context = mono_method_get_context (method);
6142 else if (generic_container)
6143 generic_context = &generic_container->context;
6144 cfg->generic_context = generic_context;
6146 if (!cfg->generic_sharing_context)
6147 g_assert (!sig->has_type_parameters);
6149 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6150 g_assert (method->is_inflated);
6151 g_assert (mono_method_get_context (method)->method_inst);
6153 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6154 g_assert (sig->generic_param_count);
6156 if (cfg->method == method) {
6157 cfg->real_offset = 0;
6158 } else {
6159 cfg->real_offset = inline_offset;
6162 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6163 cfg->cil_offset_to_bb_len = header->code_size;
6165 cfg->current_method = method;
6167 if (cfg->verbose_level > 2)
6168 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6170 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6171 if (sig->hasthis)
6172 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6173 for (n = 0; n < sig->param_count; ++n)
6174 param_types [n + sig->hasthis] = sig->params [n];
6175 cfg->arg_types = param_types;
6177 dont_inline = g_list_prepend (dont_inline, method);
6178 if (cfg->method == method) {
6180 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6181 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6183 /* ENTRY BLOCK */
6184 NEW_BBLOCK (cfg, start_bblock);
6185 cfg->bb_entry = start_bblock;
6186 start_bblock->cil_code = NULL;
6187 start_bblock->cil_length = 0;
6188 #if defined(__native_client_codegen__)
6189 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6190 ins->dreg = alloc_dreg (cfg, STACK_I4);
6191 MONO_ADD_INS (start_bblock, ins);
6192 #endif
6194 /* EXIT BLOCK */
6195 NEW_BBLOCK (cfg, end_bblock);
6196 cfg->bb_exit = end_bblock;
6197 end_bblock->cil_code = NULL;
6198 end_bblock->cil_length = 0;
6199 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6200 g_assert (cfg->num_bblocks == 2);
6202 arg_array = cfg->args;
6204 if (header->num_clauses) {
6205 cfg->spvars = g_hash_table_new (NULL, NULL);
6206 cfg->exvars = g_hash_table_new (NULL, NULL);
6208 /* handle exception clauses */
6209 for (i = 0; i < header->num_clauses; ++i) {
6210 MonoBasicBlock *try_bb;
6211 MonoExceptionClause *clause = &header->clauses [i];
6212 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6213 try_bb->real_offset = clause->try_offset;
6214 try_bb->try_start = TRUE;
6215 try_bb->region = ((i + 1) << 8) | clause->flags;
6216 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6217 tblock->real_offset = clause->handler_offset;
6218 tblock->flags |= BB_EXCEPTION_HANDLER;
6220 link_bblock (cfg, try_bb, tblock);
6222 if (*(ip + clause->handler_offset) == CEE_POP)
6223 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6225 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6226 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6227 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6228 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6229 MONO_ADD_INS (tblock, ins);
6231 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6232 /* finally clauses already have a seq point */
6233 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6234 MONO_ADD_INS (tblock, ins);
6237 /* todo: is a fault block unsafe to optimize? */
6238 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6239 tblock->flags |= BB_EXCEPTION_UNSAFE;
6243 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6244 while (p < end) {
6245 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6247 /* catch and filter blocks get the exception object on the stack */
6248 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6249 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6250 MonoInst *dummy_use;
6252 /* mostly like handle_stack_args (), but just sets the input args */
6253 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6254 tblock->in_scount = 1;
6255 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6256 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6259 * Add a dummy use for the exvar so its liveness info will be
6260 * correct.
6262 cfg->cbb = tblock;
6263 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6265 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6266 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6267 tblock->flags |= BB_EXCEPTION_HANDLER;
6268 tblock->real_offset = clause->data.filter_offset;
6269 tblock->in_scount = 1;
6270 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6271 /* The filter block shares the exvar with the handler block */
6272 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6273 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6274 MONO_ADD_INS (tblock, ins);
6278 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6279 clause->data.catch_class &&
6280 cfg->generic_sharing_context &&
6281 mono_class_check_context_used (clause->data.catch_class)) {
6283 * In shared generic code with catch
6284 * clauses containing type variables
6285 * the exception handling code has to
6286 * be able to get to the rgctx.
6287 * Therefore we have to make sure that
6288 * the vtable/mrgctx argument (for
6289 * static or generic methods) or the
6290 * "this" argument (for non-static
6291 * methods) are live.
6293 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6294 mini_method_get_context (method)->method_inst ||
6295 method->klass->valuetype) {
6296 mono_get_vtable_var (cfg);
6297 } else {
6298 MonoInst *dummy_use;
6300 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6304 } else {
6305 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6306 cfg->cbb = start_bblock;
6307 cfg->args = arg_array;
6308 mono_save_args (cfg, sig, inline_args);
6311 /* FIRST CODE BLOCK */
6312 NEW_BBLOCK (cfg, bblock);
6313 bblock->cil_code = ip;
6314 cfg->cbb = bblock;
6315 cfg->ip = ip;
6317 ADD_BBLOCK (cfg, bblock);
6319 if (cfg->method == method) {
6320 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6321 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6322 MONO_INST_NEW (cfg, ins, OP_BREAK);
6323 MONO_ADD_INS (bblock, ins);
6327 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6328 secman = mono_security_manager_get_methods ();
6330 security = (secman && mono_method_has_declsec (method));
6331 /* at this point having security doesn't mean we have any code to generate */
6332 if (security && (cfg->method == method)) {
6333 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6334 * And we do not want to enter the next section (with allocation) if we
6335 * have nothing to generate */
6336 security = mono_declsec_get_demands (method, &actions);
6339 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6340 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6341 if (pinvoke) {
6342 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6343 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6344 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6346 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6347 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6348 pinvoke = FALSE;
6350 if (custom)
6351 mono_custom_attrs_free (custom);
6353 if (pinvoke) {
6354 custom = mono_custom_attrs_from_class (wrapped->klass);
6355 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6356 pinvoke = FALSE;
6358 if (custom)
6359 mono_custom_attrs_free (custom);
6361 } else {
6362 /* not a P/Invoke after all */
6363 pinvoke = FALSE;
6367 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6368 /* we use a separate basic block for the initialization code */
6369 NEW_BBLOCK (cfg, init_localsbb);
6370 cfg->bb_init = init_localsbb;
6371 init_localsbb->real_offset = cfg->real_offset;
6372 start_bblock->next_bb = init_localsbb;
6373 init_localsbb->next_bb = bblock;
6374 link_bblock (cfg, start_bblock, init_localsbb);
6375 link_bblock (cfg, init_localsbb, bblock);
6377 cfg->cbb = init_localsbb;
6378 } else {
6379 start_bblock->next_bb = bblock;
6380 link_bblock (cfg, start_bblock, bblock);
6383 /* at this point we know, if security is TRUE, that some code needs to be generated */
6384 if (security && (cfg->method == method)) {
6385 MonoInst *args [2];
6387 cfg->stat_cas_demand_generation++;
6389 if (actions.demand.blob) {
6390 /* Add code for SecurityAction.Demand */
6391 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6392 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6393 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6394 mono_emit_method_call (cfg, secman->demand, args, NULL);
6396 if (actions.noncasdemand.blob) {
6397 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6398 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6399 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6400 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6401 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6402 mono_emit_method_call (cfg, secman->demand, args, NULL);
6404 if (actions.demandchoice.blob) {
6405 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6406 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6407 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6408 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6409 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6413 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6414 if (pinvoke) {
6415 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6418 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6419 /* check if this is native code, e.g. an icall or a p/invoke */
6420 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6421 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6422 if (wrapped) {
6423 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6424 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6426 /* if this ia a native call then it can only be JITted from platform code */
6427 if ((icall || pinvk) && method->klass && method->klass->image) {
6428 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6429 MonoException *ex = icall ? mono_get_exception_security () :
6430 mono_get_exception_method_access ();
6431 emit_throw_exception (cfg, ex);
6438 if (header->code_size == 0)
6439 UNVERIFIED;
6441 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6442 ip = err_pos;
6443 UNVERIFIED;
6446 if (cfg->method == method)
6447 mono_debug_init_method (cfg, bblock, breakpoint_id);
6449 for (n = 0; n < header->num_locals; ++n) {
6450 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6451 UNVERIFIED;
6453 class_inits = NULL;
6455 /* We force the vtable variable here for all shared methods
6456 for the possibility that they might show up in a stack
6457 trace where their exact instantiation is needed. */
6458 if (cfg->generic_sharing_context && method == cfg->method) {
6459 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6460 mini_method_get_context (method)->method_inst ||
6461 method->klass->valuetype) {
6462 mono_get_vtable_var (cfg);
6463 } else {
6464 /* FIXME: Is there a better way to do this?
6465 We need the variable live for the duration
6466 of the whole method. */
6467 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6471 /* add a check for this != NULL to inlined methods */
6472 if (is_virtual_call) {
6473 MonoInst *arg_ins;
6475 NEW_ARGLOAD (cfg, arg_ins, 0);
6476 MONO_ADD_INS (cfg->cbb, arg_ins);
6477 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6480 skip_dead_blocks = !dont_verify;
6481 if (skip_dead_blocks) {
6482 original_bb = bb = mono_basic_block_split (method, &error);
6483 if (!mono_error_ok (&error)) {
6484 mono_error_cleanup (&error);
6485 UNVERIFIED;
6487 g_assert (bb);
6490 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6491 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6493 ins_flag = 0;
6494 start_new_bblock = 0;
6495 cfg->cbb = bblock;
6496 while (ip < end) {
6497 if (cfg->method == method)
6498 cfg->real_offset = ip - header->code;
6499 else
6500 cfg->real_offset = inline_offset;
6501 cfg->ip = ip;
6503 context_used = 0;
6505 if (start_new_bblock) {
6506 bblock->cil_length = ip - bblock->cil_code;
6507 if (start_new_bblock == 2) {
6508 g_assert (ip == tblock->cil_code);
6509 } else {
6510 GET_BBLOCK (cfg, tblock, ip);
6512 bblock->next_bb = tblock;
6513 bblock = tblock;
6514 cfg->cbb = bblock;
6515 start_new_bblock = 0;
6516 for (i = 0; i < bblock->in_scount; ++i) {
6517 if (cfg->verbose_level > 3)
6518 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6519 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6520 *sp++ = ins;
6522 if (class_inits)
6523 g_slist_free (class_inits);
6524 class_inits = NULL;
6525 } else {
6526 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6527 link_bblock (cfg, bblock, tblock);
6528 if (sp != stack_start) {
6529 handle_stack_args (cfg, stack_start, sp - stack_start);
6530 sp = stack_start;
6531 CHECK_UNVERIFIABLE (cfg);
6533 bblock->next_bb = tblock;
6534 bblock = tblock;
6535 cfg->cbb = bblock;
6536 for (i = 0; i < bblock->in_scount; ++i) {
6537 if (cfg->verbose_level > 3)
6538 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6539 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6540 *sp++ = ins;
6542 g_slist_free (class_inits);
6543 class_inits = NULL;
6547 if (skip_dead_blocks) {
6548 int ip_offset = ip - header->code;
6550 if (ip_offset == bb->end)
6551 bb = bb->next;
6553 if (bb->dead) {
6554 int op_size = mono_opcode_size (ip, end);
6555 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6557 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6559 if (ip_offset + op_size == bb->end) {
6560 MONO_INST_NEW (cfg, ins, OP_NOP);
6561 MONO_ADD_INS (bblock, ins);
6562 start_new_bblock = 1;
6565 ip += op_size;
6566 continue;
6570 * Sequence points are points where the debugger can place a breakpoint.
6571 * Currently, we generate these automatically at points where the IL
6572 * stack is empty.
6574 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6576 * Make methods interruptable at the beginning, and at the targets of
6577 * backward branches.
6578 * Also, do this at the start of every bblock in methods with clauses too,
6579 * to be able to handle instructions with inprecise control flow like
6580 * throw/endfinally.
6581 * Backward branches are handled at the end of method-to-ir ().
6583 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6585 /* Avoid sequence points on empty IL like .volatile */
6586 // FIXME: Enable this
6587 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6588 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6589 MONO_ADD_INS (cfg->cbb, ins);
6592 bblock->real_offset = cfg->real_offset;
6594 if ((cfg->method == method) && cfg->coverage_info) {
6595 guint32 cil_offset = ip - header->code;
6596 cfg->coverage_info->data [cil_offset].cil_code = ip;
6598 /* TODO: Use an increment here */
6599 #if defined(TARGET_X86)
6600 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6601 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6602 ins->inst_imm = 1;
6603 MONO_ADD_INS (cfg->cbb, ins);
6604 #else
6605 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6606 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6607 #endif
6610 if (cfg->verbose_level > 3)
6611 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6613 switch (*ip) {
6614 case CEE_NOP:
6615 if (seq_points && !sym_seq_points && sp != stack_start) {
6617 * The C# compiler uses these nops to notify the JIT that it should
6618 * insert seq points.
6620 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6621 MONO_ADD_INS (cfg->cbb, ins);
6623 if (cfg->keep_cil_nops)
6624 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6625 else
6626 MONO_INST_NEW (cfg, ins, OP_NOP);
6627 ip++;
6628 MONO_ADD_INS (bblock, ins);
6629 break;
6630 case CEE_BREAK:
6631 if (should_insert_brekpoint (cfg->method)) {
6632 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6633 } else {
6634 MONO_INST_NEW (cfg, ins, OP_NOP);
6636 ip++;
6637 MONO_ADD_INS (bblock, ins);
6638 break;
6639 case CEE_LDARG_0:
6640 case CEE_LDARG_1:
6641 case CEE_LDARG_2:
6642 case CEE_LDARG_3:
6643 CHECK_STACK_OVF (1);
6644 n = (*ip)-CEE_LDARG_0;
6645 CHECK_ARG (n);
6646 EMIT_NEW_ARGLOAD (cfg, ins, n);
6647 ip++;
6648 *sp++ = ins;
6649 break;
6650 case CEE_LDLOC_0:
6651 case CEE_LDLOC_1:
6652 case CEE_LDLOC_2:
6653 case CEE_LDLOC_3:
6654 CHECK_STACK_OVF (1);
6655 n = (*ip)-CEE_LDLOC_0;
6656 CHECK_LOCAL (n);
6657 EMIT_NEW_LOCLOAD (cfg, ins, n);
6658 ip++;
6659 *sp++ = ins;
6660 break;
6661 case CEE_STLOC_0:
6662 case CEE_STLOC_1:
6663 case CEE_STLOC_2:
6664 case CEE_STLOC_3: {
6665 CHECK_STACK (1);
6666 n = (*ip)-CEE_STLOC_0;
6667 CHECK_LOCAL (n);
6668 --sp;
6669 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6670 UNVERIFIED;
6671 emit_stloc_ir (cfg, sp, header, n);
6672 ++ip;
6673 inline_costs += 1;
6674 break;
6676 case CEE_LDARG_S:
6677 CHECK_OPSIZE (2);
6678 CHECK_STACK_OVF (1);
6679 n = ip [1];
6680 CHECK_ARG (n);
6681 EMIT_NEW_ARGLOAD (cfg, ins, n);
6682 *sp++ = ins;
6683 ip += 2;
6684 break;
6685 case CEE_LDARGA_S:
6686 CHECK_OPSIZE (2);
6687 CHECK_STACK_OVF (1);
6688 n = ip [1];
6689 CHECK_ARG (n);
6690 NEW_ARGLOADA (cfg, ins, n);
6691 MONO_ADD_INS (cfg->cbb, ins);
6692 *sp++ = ins;
6693 ip += 2;
6694 break;
6695 case CEE_STARG_S:
6696 CHECK_OPSIZE (2);
6697 CHECK_STACK (1);
6698 --sp;
6699 n = ip [1];
6700 CHECK_ARG (n);
6701 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6702 UNVERIFIED;
6703 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6704 ip += 2;
6705 break;
6706 case CEE_LDLOC_S:
6707 CHECK_OPSIZE (2);
6708 CHECK_STACK_OVF (1);
6709 n = ip [1];
6710 CHECK_LOCAL (n);
6711 EMIT_NEW_LOCLOAD (cfg, ins, n);
6712 *sp++ = ins;
6713 ip += 2;
6714 break;
6715 case CEE_LDLOCA_S: {
6716 unsigned char *tmp_ip;
6717 CHECK_OPSIZE (2);
6718 CHECK_STACK_OVF (1);
6719 CHECK_LOCAL (ip [1]);
6721 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6722 ip = tmp_ip;
6723 inline_costs += 1;
6724 break;
6727 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6728 *sp++ = ins;
6729 ip += 2;
6730 break;
6732 case CEE_STLOC_S:
6733 CHECK_OPSIZE (2);
6734 CHECK_STACK (1);
6735 --sp;
6736 CHECK_LOCAL (ip [1]);
6737 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6738 UNVERIFIED;
6739 emit_stloc_ir (cfg, sp, header, ip [1]);
6740 ip += 2;
6741 inline_costs += 1;
6742 break;
6743 case CEE_LDNULL:
6744 CHECK_STACK_OVF (1);
6745 EMIT_NEW_PCONST (cfg, ins, NULL);
6746 ins->type = STACK_OBJ;
6747 ++ip;
6748 *sp++ = ins;
6749 break;
6750 case CEE_LDC_I4_M1:
6751 CHECK_STACK_OVF (1);
6752 EMIT_NEW_ICONST (cfg, ins, -1);
6753 ++ip;
6754 *sp++ = ins;
6755 break;
6756 case CEE_LDC_I4_0:
6757 case CEE_LDC_I4_1:
6758 case CEE_LDC_I4_2:
6759 case CEE_LDC_I4_3:
6760 case CEE_LDC_I4_4:
6761 case CEE_LDC_I4_5:
6762 case CEE_LDC_I4_6:
6763 case CEE_LDC_I4_7:
6764 case CEE_LDC_I4_8:
6765 CHECK_STACK_OVF (1);
6766 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6767 ++ip;
6768 *sp++ = ins;
6769 break;
6770 case CEE_LDC_I4_S:
6771 CHECK_OPSIZE (2);
6772 CHECK_STACK_OVF (1);
6773 ++ip;
6774 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6775 ++ip;
6776 *sp++ = ins;
6777 break;
6778 case CEE_LDC_I4:
6779 CHECK_OPSIZE (5);
6780 CHECK_STACK_OVF (1);
6781 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6782 ip += 5;
6783 *sp++ = ins;
6784 break;
6785 case CEE_LDC_I8:
6786 CHECK_OPSIZE (9);
6787 CHECK_STACK_OVF (1);
6788 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6789 ins->type = STACK_I8;
6790 ins->dreg = alloc_dreg (cfg, STACK_I8);
6791 ++ip;
6792 ins->inst_l = (gint64)read64 (ip);
6793 MONO_ADD_INS (bblock, ins);
6794 ip += 8;
6795 *sp++ = ins;
6796 break;
6797 case CEE_LDC_R4: {
6798 float *f;
6799 gboolean use_aotconst = FALSE;
6801 #ifdef TARGET_POWERPC
6802 /* FIXME: Clean this up */
6803 if (cfg->compile_aot)
6804 use_aotconst = TRUE;
6805 #endif
6807 /* FIXME: we should really allocate this only late in the compilation process */
6808 f = mono_domain_alloc (cfg->domain, sizeof (float));
6809 CHECK_OPSIZE (5);
6810 CHECK_STACK_OVF (1);
6812 if (use_aotconst) {
6813 MonoInst *cons;
6814 int dreg;
6816 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6818 dreg = alloc_freg (cfg);
6819 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6820 ins->type = STACK_R8;
6821 } else {
6822 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6823 ins->type = STACK_R8;
6824 ins->dreg = alloc_dreg (cfg, STACK_R8);
6825 ins->inst_p0 = f;
6826 MONO_ADD_INS (bblock, ins);
6828 ++ip;
6829 readr4 (ip, f);
6830 ip += 4;
6831 *sp++ = ins;
6832 break;
6834 case CEE_LDC_R8: {
6835 double *d;
6836 gboolean use_aotconst = FALSE;
6838 #ifdef TARGET_POWERPC
6839 /* FIXME: Clean this up */
6840 if (cfg->compile_aot)
6841 use_aotconst = TRUE;
6842 #endif
6844 /* FIXME: we should really allocate this only late in the compilation process */
6845 d = mono_domain_alloc (cfg->domain, sizeof (double));
6846 CHECK_OPSIZE (9);
6847 CHECK_STACK_OVF (1);
6849 if (use_aotconst) {
6850 MonoInst *cons;
6851 int dreg;
6853 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6855 dreg = alloc_freg (cfg);
6856 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6857 ins->type = STACK_R8;
6858 } else {
6859 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6860 ins->type = STACK_R8;
6861 ins->dreg = alloc_dreg (cfg, STACK_R8);
6862 ins->inst_p0 = d;
6863 MONO_ADD_INS (bblock, ins);
6865 ++ip;
6866 readr8 (ip, d);
6867 ip += 8;
6868 *sp++ = ins;
6869 break;
6871 case CEE_DUP: {
6872 MonoInst *temp, *store;
6873 CHECK_STACK (1);
6874 CHECK_STACK_OVF (1);
6875 sp--;
6876 ins = *sp;
6878 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6879 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6881 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6882 *sp++ = ins;
6884 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6885 *sp++ = ins;
6887 ++ip;
6888 inline_costs += 2;
6889 break;
6891 case CEE_POP:
6892 CHECK_STACK (1);
6893 ip++;
6894 --sp;
6896 #ifdef TARGET_X86
6897 if (sp [0]->type == STACK_R8)
6898 /* we need to pop the value from the x86 FP stack */
6899 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6900 #endif
6901 break;
6902 case CEE_JMP: {
6903 MonoCallInst *call;
6905 INLINE_FAILURE ("jmp");
6906 GSHAREDVT_FAILURE (*ip);
6908 CHECK_OPSIZE (5);
6909 if (stack_start != sp)
6910 UNVERIFIED;
6911 token = read32 (ip + 1);
6912 /* FIXME: check the signature matches */
6913 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6915 if (!cmethod || mono_loader_get_last_error ())
6916 LOAD_ERROR;
6918 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6919 GENERIC_SHARING_FAILURE (CEE_JMP);
6921 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6922 CHECK_CFG_EXCEPTION;
6924 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6926 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6927 int i, n;
6929 /* Handle tail calls similarly to calls */
6930 n = fsig->param_count + fsig->hasthis;
6932 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6933 call->method = cmethod;
6934 call->tail_call = TRUE;
6935 call->signature = mono_method_signature (cmethod);
6936 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6937 call->inst.inst_p0 = cmethod;
6938 for (i = 0; i < n; ++i)
6939 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6941 mono_arch_emit_call (cfg, call);
6942 MONO_ADD_INS (bblock, (MonoInst*)call);
6944 #else
6945 for (i = 0; i < num_args; ++i)
6946 /* Prevent arguments from being optimized away */
6947 arg_array [i]->flags |= MONO_INST_VOLATILE;
6949 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6950 ins = (MonoInst*)call;
6951 ins->inst_p0 = cmethod;
6952 MONO_ADD_INS (bblock, ins);
6953 #endif
6955 ip += 5;
6956 start_new_bblock = 1;
6957 break;
6959 case CEE_CALLI:
6960 case CEE_CALL:
6961 case CEE_CALLVIRT: {
6962 MonoInst *addr = NULL;
6963 MonoMethodSignature *fsig = NULL;
6964 int array_rank = 0;
6965 int virtual = *ip == CEE_CALLVIRT;
6966 int calli = *ip == CEE_CALLI;
6967 gboolean pass_imt_from_rgctx = FALSE;
6968 MonoInst *imt_arg = NULL;
6969 gboolean pass_vtable = FALSE;
6970 gboolean pass_mrgctx = FALSE;
6971 MonoInst *vtable_arg = NULL;
6972 gboolean check_this = FALSE;
6973 gboolean supported_tail_call = FALSE;
6974 gboolean need_seq_point = FALSE;
6976 CHECK_OPSIZE (5);
6977 token = read32 (ip + 1);
6979 if (calli) {
6980 GSHAREDVT_FAILURE (*ip);
6981 cmethod = NULL;
6982 CHECK_STACK (1);
6983 --sp;
6984 addr = *sp;
6985 if (method->wrapper_type != MONO_WRAPPER_NONE)
6986 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6987 else
6988 fsig = mono_metadata_parse_signature (image, token);
6990 n = fsig->param_count + fsig->hasthis;
6992 if (method->dynamic && fsig->pinvoke) {
6993 MonoInst *args [3];
6996 * This is a call through a function pointer using a pinvoke
6997 * signature. Have to create a wrapper and call that instead.
6998 * FIXME: This is very slow, need to create a wrapper at JIT time
6999 * instead based on the signature.
7001 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7002 EMIT_NEW_PCONST (cfg, args [1], fsig);
7003 args [2] = addr;
7004 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7006 } else {
7007 MonoMethod *cil_method;
7009 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7010 if (constrained_call && cfg->verbose_level > 2)
7011 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7012 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
7013 cil_method = cmethod;
7014 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7015 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7016 cfg->generic_sharing_context)) {
7017 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7019 } else if (constrained_call) {
7020 if (cfg->verbose_level > 2)
7021 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7023 GSHAREDVT_FAILURE (*ip);
7025 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7027 * This is needed since get_method_constrained can't find
7028 * the method in klass representing a type var.
7029 * The type var is guaranteed to be a reference type in this
7030 * case.
7032 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7033 cil_method = cmethod;
7034 g_assert (!cmethod->klass->valuetype);
7035 } else {
7036 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7038 } else {
7039 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7040 cil_method = cmethod;
7043 if (!cmethod || mono_loader_get_last_error ())
7044 LOAD_ERROR;
7045 if (!dont_verify && !cfg->skip_visibility) {
7046 MonoMethod *target_method = cil_method;
7047 if (method->is_inflated) {
7048 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7050 if (!mono_method_can_access_method (method_definition, target_method) &&
7051 !mono_method_can_access_method (method, cil_method))
7052 METHOD_ACCESS_FAILURE;
7055 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7056 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7058 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7059 /* MS.NET seems to silently convert this to a callvirt */
7060 virtual = 1;
7064 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7065 * converts to a callvirt.
7067 * tests/bug-515884.il is an example of this behavior
7069 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7070 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7071 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7072 virtual = 1;
7075 if (!cmethod->klass->inited)
7076 if (!mono_class_init (cmethod->klass))
7077 TYPE_LOAD_ERROR (cmethod->klass);
7079 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7080 mini_class_is_system_array (cmethod->klass)) {
7081 array_rank = cmethod->klass->rank;
7082 fsig = mono_method_signature (cmethod);
7083 } else {
7084 fsig = mono_method_signature (cmethod);
7086 if (!fsig)
7087 LOAD_ERROR;
7089 if (fsig->pinvoke) {
7090 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7091 check_for_pending_exc, FALSE);
7092 fsig = mono_method_signature (wrapper);
7093 } else if (constrained_call) {
7094 fsig = mono_method_signature (cmethod);
7095 } else {
7096 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7100 mono_save_token_info (cfg, image, token, cil_method);
7102 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7104 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7105 * foo (bar (), baz ())
7106 * works correctly. MS does this also:
7107 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7108 * The problem with this approach is that the debugger will stop after all calls returning a value,
7109 * even for simple cases, like:
7110 * int i = foo ();
7112 /* Special case a few common successor opcodes */
7113 if (!(ip + 5 < end && ip [5] == CEE_POP))
7114 need_seq_point = TRUE;
7117 n = fsig->param_count + fsig->hasthis;
7119 /* Don't support calls made using type arguments for now */
7121 if (cfg->gsharedvt) {
7122 if (mini_is_gsharedvt_signature (cfg, fsig))
7123 GSHAREDVT_FAILURE (*ip);
7127 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7128 if (check_linkdemand (cfg, method, cmethod))
7129 INLINE_FAILURE ("linkdemand");
7130 CHECK_CFG_EXCEPTION;
7133 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7134 g_assert_not_reached ();
7137 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7138 UNVERIFIED;
7140 if (!cfg->generic_sharing_context && cmethod)
7141 g_assert (!mono_method_check_context_used (cmethod));
7143 CHECK_STACK (n);
7145 //g_assert (!virtual || fsig->hasthis);
7147 sp -= n;
7149 if (constrained_call) {
7151 * We have the `constrained.' prefix opcode.
7153 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7155 * The type parameter is instantiated as a valuetype,
7156 * but that type doesn't override the method we're
7157 * calling, so we need to box `this'.
7159 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7160 ins->klass = constrained_call;
7161 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7162 CHECK_CFG_EXCEPTION;
7163 } else if (!constrained_call->valuetype) {
7164 int dreg = alloc_ireg_ref (cfg);
7167 * The type parameter is instantiated as a reference
7168 * type. We have a managed pointer on the stack, so
7169 * we need to dereference it here.
7171 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7172 ins->type = STACK_OBJ;
7173 sp [0] = ins;
7174 } else {
7175 if (cmethod->klass->valuetype) {
7176 /* Own method */
7177 } else {
7178 /* Interface method */
7179 int ioffset, slot;
7181 mono_class_setup_vtable (constrained_call);
7182 CHECK_TYPELOAD (constrained_call);
7183 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7184 if (ioffset == -1)
7185 TYPE_LOAD_ERROR (constrained_call);
7186 slot = mono_method_get_vtable_slot (cmethod);
7187 if (slot == -1)
7188 TYPE_LOAD_ERROR (cmethod->klass);
7189 cmethod = constrained_call->vtable [ioffset + slot];
7191 if (cmethod->klass == mono_defaults.enum_class) {
7192 /* Enum implements some interfaces, so treat this as the first case */
7193 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7194 ins->klass = constrained_call;
7195 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7196 CHECK_CFG_EXCEPTION;
7199 virtual = 0;
7201 constrained_call = NULL;
7204 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
7205 UNVERIFIED;
7207 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7208 bblock = cfg->cbb;
7209 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7210 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7211 *sp = ins;
7212 sp++;
7215 CHECK_CFG_EXCEPTION;
7217 ip += 5;
7218 ins_flag = 0;
7219 if (need_seq_point)
7220 emit_seq_point (cfg, method, ip, FALSE);
7221 break;
7225 * If the callee is a shared method, then its static cctor
7226 * might not get called after the call was patched.
7228 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7229 emit_generic_class_init (cfg, cmethod->klass);
7230 CHECK_TYPELOAD (cmethod->klass);
7233 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
7234 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
7235 gboolean sharable = FALSE;
7237 if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7238 sharable = TRUE;
7239 } else {
7240 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7241 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
7242 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7244 sharable = sharing_enabled && context_sharable;
7248 * Pass vtable iff target method might
7249 * be shared, which means that sharing
7250 * is enabled for its class and its
7251 * context is sharable (and it's not a
7252 * generic method).
7254 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
7255 pass_vtable = TRUE;
7258 if (cmethod && mini_method_get_context (cmethod) &&
7259 mini_method_get_context (cmethod)->method_inst) {
7260 g_assert (!pass_vtable);
7262 if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7263 pass_mrgctx = TRUE;
7264 } else {
7265 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7266 MonoGenericContext *context = mini_method_get_context (cmethod);
7267 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7269 if (sharing_enabled && context_sharable)
7270 pass_mrgctx = TRUE;
7271 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
7272 pass_mrgctx = TRUE;
7276 if (cfg->generic_sharing_context && cmethod) {
7277 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7279 context_used = mono_method_check_context_used (cmethod);
7281 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7282 /* Generic method interface
7283 calls are resolved via a
7284 helper function and don't
7285 need an imt. */
7286 if (!cmethod_context || !cmethod_context->method_inst)
7287 pass_imt_from_rgctx = TRUE;
7291 * If a shared method calls another
7292 * shared method then the caller must
7293 * have a generic sharing context
7294 * because the magic trampoline
7295 * requires it. FIXME: We shouldn't
7296 * have to force the vtable/mrgctx
7297 * variable here. Instead there
7298 * should be a flag in the cfg to
7299 * request a generic sharing context.
7301 if (context_used &&
7302 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7303 mono_get_vtable_var (cfg);
7306 if (pass_vtable) {
7307 if (context_used) {
7308 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7309 } else {
7310 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7312 CHECK_TYPELOAD (cmethod->klass);
7313 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7317 if (pass_mrgctx) {
7318 g_assert (!vtable_arg);
7320 if (!cfg->compile_aot) {
7322 * emit_get_rgctx_method () calls mono_class_vtable () so check
7323 * for type load errors before.
7325 mono_class_setup_vtable (cmethod->klass);
7326 CHECK_TYPELOAD (cmethod->klass);
7329 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7331 /* !marshalbyref is needed to properly handle generic methods + remoting */
7332 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7333 MONO_METHOD_IS_FINAL (cmethod)) &&
7334 !cmethod->klass->marshalbyref) {
7335 if (virtual)
7336 check_this = TRUE;
7337 virtual = 0;
7341 if (pass_imt_from_rgctx) {
7342 g_assert (!pass_vtable);
7343 g_assert (cmethod);
7345 imt_arg = emit_get_rgctx_method (cfg, context_used,
7346 cmethod, MONO_RGCTX_INFO_METHOD);
7349 if (check_this)
7350 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7352 /* Calling virtual generic methods */
7353 if (cmethod && virtual &&
7354 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7355 !(MONO_METHOD_IS_FINAL (cmethod) &&
7356 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7357 mono_method_signature (cmethod)->generic_param_count) {
7358 MonoInst *this_temp, *this_arg_temp, *store;
7359 MonoInst *iargs [4];
7361 g_assert (mono_method_signature (cmethod)->is_inflated);
7363 /* Prevent inlining of methods that contain indirect calls */
7364 INLINE_FAILURE ("virtual generic call");
7366 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7367 GSHAREDVT_FAILURE (*ip);
7369 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7370 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
7371 g_assert (!imt_arg);
7372 if (!context_used)
7373 g_assert (cmethod->is_inflated);
7374 imt_arg = emit_get_rgctx_method (cfg, context_used,
7375 cmethod, MONO_RGCTX_INFO_METHOD);
7376 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
7377 } else
7378 #endif
7380 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7381 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7382 MONO_ADD_INS (bblock, store);
7384 /* FIXME: This should be a managed pointer */
7385 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7387 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7388 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7389 cmethod, MONO_RGCTX_INFO_METHOD);
7390 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7391 addr = mono_emit_jit_icall (cfg,
7392 mono_helper_compile_generic_method, iargs);
7394 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7396 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7399 if (!MONO_TYPE_IS_VOID (fsig->ret))
7400 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7402 CHECK_CFG_EXCEPTION;
7404 ip += 5;
7405 ins_flag = 0;
7406 if (need_seq_point)
7407 emit_seq_point (cfg, method, ip, FALSE);
7408 break;
7412 * Implement a workaround for the inherent races involved in locking:
7413 * Monitor.Enter ()
7414 * try {
7415 * } finally {
7416 * Monitor.Exit ()
7418 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7419 * try block, the Exit () won't be executed, see:
7420 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7421 * To work around this, we extend such try blocks to include the last x bytes
7422 * of the Monitor.Enter () call.
7424 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7425 MonoBasicBlock *tbb;
7427 GET_BBLOCK (cfg, tbb, ip + 5);
7429 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7430 * from Monitor.Enter like ArgumentNullException.
7432 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7433 /* Mark this bblock as needing to be extended */
7434 tbb->extend_try_block = TRUE;
7438 /* Conversion to a JIT intrinsic */
7439 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7440 bblock = cfg->cbb;
7441 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7442 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7443 *sp = ins;
7444 sp++;
7447 CHECK_CFG_EXCEPTION;
7449 ip += 5;
7450 ins_flag = 0;
7451 if (need_seq_point)
7452 emit_seq_point (cfg, method, ip, FALSE);
7453 break;
7456 /* Inlining */
7457 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7458 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7459 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7460 !g_list_find (dont_inline, cmethod)) {
7461 int costs;
7462 gboolean always = FALSE;
7464 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7465 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7466 /* Prevent inlining of methods that call wrappers */
7467 INLINE_FAILURE ("wrapper call");
7468 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7469 always = TRUE;
7472 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7473 ip += 5;
7474 cfg->real_offset += 5;
7475 bblock = cfg->cbb;
7477 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7478 /* *sp is already set by inline_method */
7479 sp++;
7482 inline_costs += costs;
7483 ins_flag = 0;
7484 if (need_seq_point)
7485 emit_seq_point (cfg, method, ip, FALSE);
7486 break;
7491 * Making generic calls out of gsharedvt methods.
7493 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7494 MonoInst *addr;
7496 if (virtual) {
7497 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
7498 //GSHAREDVT_FAILURE (*ip);
7499 // disable for possible remoting calls
7500 if (fsig->hasthis && (method->klass->marshalbyref || method->klass == mono_defaults.object_class))
7501 GSHAREDVT_FAILURE (*ip);
7502 // virtual generic calls were disabled earlier
7505 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
7506 /* test_0_multi_dim_arrays () in gshared.cs */
7507 GSHAREDVT_FAILURE (*ip);
7509 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7510 addr = emit_get_rgctx_method (cfg, context_used,
7511 cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT);
7512 else
7513 addr = emit_get_rgctx_method (cfg, context_used,
7514 cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7515 ins = emit_gsharedvt_call (cfg, fsig, sp, addr, cmethod, imt_arg, vtable_arg);
7517 if (!MONO_TYPE_IS_VOID (fsig->ret))
7518 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7520 CHECK_CFG_EXCEPTION;
7522 ip += 5;
7523 ins_flag = 0;
7524 if (need_seq_point)
7525 emit_seq_point (cfg, method, ip, FALSE);
7526 break;
7529 if (virtual && cmethod && cfg->gsharedvt && cmethod->slot == -1) {
7530 mono_class_setup_vtable (cmethod->klass);
7531 if (cmethod->slot == -1)
7532 // FIXME: How can this happen ?
7533 GSHAREDVT_FAILURE (*ip);
7536 inline_costs += 10 * num_calls++;
7538 /* Tail recursion elimination */
7539 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7540 gboolean has_vtargs = FALSE;
7541 int i;
7543 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7544 INLINE_FAILURE ("tail call");
7546 /* keep it simple */
7547 for (i = fsig->param_count - 1; i >= 0; i--) {
7548 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7549 has_vtargs = TRUE;
7552 if (!has_vtargs) {
7553 for (i = 0; i < n; ++i)
7554 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7555 MONO_INST_NEW (cfg, ins, OP_BR);
7556 MONO_ADD_INS (bblock, ins);
7557 tblock = start_bblock->out_bb [0];
7558 link_bblock (cfg, bblock, tblock);
7559 ins->inst_target_bb = tblock;
7560 start_new_bblock = 1;
7562 /* skip the CEE_RET, too */
7563 if (ip_in_bb (cfg, bblock, ip + 5))
7564 ip += 6;
7565 else
7566 ip += 5;
7568 ins_flag = 0;
7569 break;
7573 /* Generic sharing */
7574 /* FIXME: only do this for generic methods if
7575 they are not shared! */
7576 if (context_used && !imt_arg && !array_rank &&
7577 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7578 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7579 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7580 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7581 INLINE_FAILURE ("gshared");
7583 g_assert (cfg->generic_sharing_context && cmethod);
7584 g_assert (!addr);
7587 * We are compiling a call to a
7588 * generic method from shared code,
7589 * which means that we have to look up
7590 * the method in the rgctx and do an
7591 * indirect call.
7593 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7596 /* Indirect calls */
7597 if (addr) {
7598 g_assert (!imt_arg);
7600 if (*ip == CEE_CALL)
7601 g_assert (context_used);
7602 else if (*ip == CEE_CALLI)
7603 g_assert (!vtable_arg);
7604 else
7605 /* FIXME: what the hell is this??? */
7606 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7607 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7609 /* Prevent inlining of methods with indirect calls */
7610 INLINE_FAILURE ("indirect call");
7612 if (vtable_arg) {
7613 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7614 } else {
7615 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7617 * Instead of emitting an indirect call, emit a direct call
7618 * with the contents of the aotconst as the patch info.
7620 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7621 NULLIFY_INS (addr);
7622 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7623 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7624 NULLIFY_INS (addr);
7625 } else {
7626 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7629 if (!MONO_TYPE_IS_VOID (fsig->ret))
7630 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7632 CHECK_CFG_EXCEPTION;
7634 ip += 5;
7635 ins_flag = 0;
7636 if (need_seq_point)
7637 emit_seq_point (cfg, method, ip, FALSE);
7638 break;
7641 /* Array methods */
7642 if (array_rank) {
7643 MonoInst *addr;
7645 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7646 MonoInst *val = sp [fsig->param_count];
7648 if (val->type == STACK_OBJ) {
7649 MonoInst *iargs [2];
7651 iargs [0] = sp [0];
7652 iargs [1] = val;
7654 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7657 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7659 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7660 emit_write_barrier (cfg, addr, val, 0);
7661 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7662 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7664 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7666 *sp++ = ins;
7667 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7668 if (!cmethod->klass->element_class->valuetype && !readonly)
7669 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7670 CHECK_TYPELOAD (cmethod->klass);
7672 readonly = FALSE;
7673 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7674 *sp++ = addr;
7675 } else {
7676 g_assert_not_reached ();
7679 CHECK_CFG_EXCEPTION;
7681 ip += 5;
7682 ins_flag = 0;
7683 emit_seq_point (cfg, method, ip, FALSE);
7684 break;
7687 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7688 if (ins) {
7689 if (!MONO_TYPE_IS_VOID (fsig->ret))
7690 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7692 CHECK_CFG_EXCEPTION;
7694 ip += 5;
7695 ins_flag = 0;
7696 if (need_seq_point)
7697 emit_seq_point (cfg, method, ip, FALSE);
7698 break;
7701 /* Tail prefix / tail call optimization */
7703 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7704 /* FIXME: runtime generic context pointer for jumps? */
7705 /* FIXME: handle this for generic sharing eventually */
7706 supported_tail_call = cmethod &&
7707 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7708 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7709 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7711 if (supported_tail_call) {
7712 MonoCallInst *call;
7714 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7715 INLINE_FAILURE ("tail call");
7717 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7719 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7720 /* Handle tail calls similarly to calls */
7721 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE, FALSE);
7722 #else
7723 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7724 call->tail_call = TRUE;
7725 call->method = cmethod;
7726 call->signature = mono_method_signature (cmethod);
7729 * We implement tail calls by storing the actual arguments into the
7730 * argument variables, then emitting a CEE_JMP.
7732 for (i = 0; i < n; ++i) {
7733 /* Prevent argument from being register allocated */
7734 arg_array [i]->flags |= MONO_INST_VOLATILE;
7735 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7737 #endif
7739 ins = (MonoInst*)call;
7740 ins->inst_p0 = cmethod;
7741 ins->inst_p1 = arg_array [0];
7742 MONO_ADD_INS (bblock, ins);
7743 link_bblock (cfg, bblock, end_bblock);
7744 start_new_bblock = 1;
7746 CHECK_CFG_EXCEPTION;
7748 ip += 5;
7749 ins_flag = 0;
7751 // FIXME: Eliminate unreachable epilogs
7754 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7755 * only reachable from this call.
7757 GET_BBLOCK (cfg, tblock, ip);
7758 if (tblock == bblock || tblock->in_count == 0)
7759 ip += 1;
7760 break;
7764 * Synchronized wrappers.
7765 * Its hard to determine where to replace a method with its synchronized
7766 * wrapper without causing an infinite recursion. The current solution is
7767 * to add the synchronized wrapper in the trampolines, and to
7768 * change the called method to a dummy wrapper, and resolve that wrapper
7769 * to the real method in mono_jit_compile_method ().
7771 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod) {
7772 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7775 /* Common call */
7776 INLINE_FAILURE ("call");
7777 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7778 imt_arg, vtable_arg);
7780 if (!MONO_TYPE_IS_VOID (fsig->ret))
7781 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7783 CHECK_CFG_EXCEPTION;
7785 ip += 5;
7786 ins_flag = 0;
7787 if (need_seq_point)
7788 emit_seq_point (cfg, method, ip, FALSE);
7789 break;
7791 case CEE_RET:
7792 if (cfg->method != method) {
7793 /* return from inlined method */
7795 * If in_count == 0, that means the ret is unreachable due to
7796 * being preceeded by a throw. In that case, inline_method () will
7797 * handle setting the return value
7798 * (test case: test_0_inline_throw ()).
7800 if (return_var && cfg->cbb->in_count) {
7801 MonoType *ret_type = mono_method_signature (method)->ret;
7803 MonoInst *store;
7804 CHECK_STACK (1);
7805 --sp;
7807 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7808 UNVERIFIED;
7810 //g_assert (returnvar != -1);
7811 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7812 cfg->ret_var_set = TRUE;
7814 } else {
7815 if (cfg->ret) {
7816 MonoType *ret_type = mono_method_signature (method)->ret;
7818 if (seq_points && !sym_seq_points) {
7820 * Place a seq point here too even though the IL stack is not
7821 * empty, so a step over on
7822 * call <FOO>
7823 * ret
7824 * will work correctly.
7826 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7827 MONO_ADD_INS (cfg->cbb, ins);
7830 g_assert (!return_var);
7831 CHECK_STACK (1);
7832 --sp;
7834 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7835 UNVERIFIED;
7837 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7838 MonoInst *ret_addr;
7840 if (!cfg->vret_addr) {
7841 MonoInst *ins;
7843 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7844 } else {
7845 EMIT_NEW_RETLOADA (cfg, ret_addr);
7847 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7848 ins->klass = mono_class_from_mono_type (ret_type);
7850 } else {
7851 #ifdef MONO_ARCH_SOFT_FLOAT
7852 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7853 MonoInst *iargs [1];
7854 MonoInst *conv;
7856 iargs [0] = *sp;
7857 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7858 mono_arch_emit_setret (cfg, method, conv);
7859 } else {
7860 mono_arch_emit_setret (cfg, method, *sp);
7862 #else
7863 mono_arch_emit_setret (cfg, method, *sp);
7864 #endif
7868 if (sp != stack_start)
7869 UNVERIFIED;
7870 MONO_INST_NEW (cfg, ins, OP_BR);
7871 ip++;
7872 ins->inst_target_bb = end_bblock;
7873 MONO_ADD_INS (bblock, ins);
7874 link_bblock (cfg, bblock, end_bblock);
7875 start_new_bblock = 1;
7876 break;
7877 case CEE_BR_S:
7878 CHECK_OPSIZE (2);
7879 MONO_INST_NEW (cfg, ins, OP_BR);
7880 ip++;
7881 target = ip + 1 + (signed char)(*ip);
7882 ++ip;
7883 GET_BBLOCK (cfg, tblock, target);
7884 link_bblock (cfg, bblock, tblock);
7885 ins->inst_target_bb = tblock;
7886 if (sp != stack_start) {
7887 handle_stack_args (cfg, stack_start, sp - stack_start);
7888 sp = stack_start;
7889 CHECK_UNVERIFIABLE (cfg);
7891 MONO_ADD_INS (bblock, ins);
7892 start_new_bblock = 1;
7893 inline_costs += BRANCH_COST;
7894 break;
7895 case CEE_BEQ_S:
7896 case CEE_BGE_S:
7897 case CEE_BGT_S:
7898 case CEE_BLE_S:
7899 case CEE_BLT_S:
7900 case CEE_BNE_UN_S:
7901 case CEE_BGE_UN_S:
7902 case CEE_BGT_UN_S:
7903 case CEE_BLE_UN_S:
7904 case CEE_BLT_UN_S:
7905 CHECK_OPSIZE (2);
7906 CHECK_STACK (2);
7907 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7908 ip++;
7909 target = ip + 1 + *(signed char*)ip;
7910 ip++;
7912 ADD_BINCOND (NULL);
7914 sp = stack_start;
7915 inline_costs += BRANCH_COST;
7916 break;
7917 case CEE_BR:
7918 CHECK_OPSIZE (5);
7919 MONO_INST_NEW (cfg, ins, OP_BR);
7920 ip++;
7922 target = ip + 4 + (gint32)read32(ip);
7923 ip += 4;
7924 GET_BBLOCK (cfg, tblock, target);
7925 link_bblock (cfg, bblock, tblock);
7926 ins->inst_target_bb = tblock;
7927 if (sp != stack_start) {
7928 handle_stack_args (cfg, stack_start, sp - stack_start);
7929 sp = stack_start;
7930 CHECK_UNVERIFIABLE (cfg);
7933 MONO_ADD_INS (bblock, ins);
7935 start_new_bblock = 1;
7936 inline_costs += BRANCH_COST;
7937 break;
7938 case CEE_BRFALSE_S:
7939 case CEE_BRTRUE_S:
7940 case CEE_BRFALSE:
7941 case CEE_BRTRUE: {
7942 MonoInst *cmp;
7943 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7944 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7945 guint32 opsize = is_short ? 1 : 4;
7947 CHECK_OPSIZE (opsize);
7948 CHECK_STACK (1);
7949 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7950 UNVERIFIED;
7951 ip ++;
7952 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7953 ip += opsize;
7955 sp--;
7957 GET_BBLOCK (cfg, tblock, target);
7958 link_bblock (cfg, bblock, tblock);
7959 GET_BBLOCK (cfg, tblock, ip);
7960 link_bblock (cfg, bblock, tblock);
7962 if (sp != stack_start) {
7963 handle_stack_args (cfg, stack_start, sp - stack_start);
7964 CHECK_UNVERIFIABLE (cfg);
7967 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7968 cmp->sreg1 = sp [0]->dreg;
7969 type_from_op (cmp, sp [0], NULL);
7970 CHECK_TYPE (cmp);
7972 #if SIZEOF_REGISTER == 4
7973 if (cmp->opcode == OP_LCOMPARE_IMM) {
7974 /* Convert it to OP_LCOMPARE */
7975 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7976 ins->type = STACK_I8;
7977 ins->dreg = alloc_dreg (cfg, STACK_I8);
7978 ins->inst_l = 0;
7979 MONO_ADD_INS (bblock, ins);
7980 cmp->opcode = OP_LCOMPARE;
7981 cmp->sreg2 = ins->dreg;
7983 #endif
7984 MONO_ADD_INS (bblock, cmp);
7986 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7987 type_from_op (ins, sp [0], NULL);
7988 MONO_ADD_INS (bblock, ins);
7989 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7990 GET_BBLOCK (cfg, tblock, target);
7991 ins->inst_true_bb = tblock;
7992 GET_BBLOCK (cfg, tblock, ip);
7993 ins->inst_false_bb = tblock;
7994 start_new_bblock = 2;
7996 sp = stack_start;
7997 inline_costs += BRANCH_COST;
7998 break;
8000 case CEE_BEQ:
8001 case CEE_BGE:
8002 case CEE_BGT:
8003 case CEE_BLE:
8004 case CEE_BLT:
8005 case CEE_BNE_UN:
8006 case CEE_BGE_UN:
8007 case CEE_BGT_UN:
8008 case CEE_BLE_UN:
8009 case CEE_BLT_UN:
8010 CHECK_OPSIZE (5);
8011 CHECK_STACK (2);
8012 MONO_INST_NEW (cfg, ins, *ip);
8013 ip++;
8014 target = ip + 4 + (gint32)read32(ip);
8015 ip += 4;
8017 ADD_BINCOND (NULL);
8019 sp = stack_start;
8020 inline_costs += BRANCH_COST;
8021 break;
8022 case CEE_SWITCH: {
8023 MonoInst *src1;
8024 MonoBasicBlock **targets;
8025 MonoBasicBlock *default_bblock;
8026 MonoJumpInfoBBTable *table;
8027 int offset_reg = alloc_preg (cfg);
8028 int target_reg = alloc_preg (cfg);
8029 int table_reg = alloc_preg (cfg);
8030 int sum_reg = alloc_preg (cfg);
8031 gboolean use_op_switch;
8033 CHECK_OPSIZE (5);
8034 CHECK_STACK (1);
8035 n = read32 (ip + 1);
8036 --sp;
8037 src1 = sp [0];
8038 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8039 UNVERIFIED;
8041 ip += 5;
8042 CHECK_OPSIZE (n * sizeof (guint32));
8043 target = ip + n * sizeof (guint32);
8045 GET_BBLOCK (cfg, default_bblock, target);
8046 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8048 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8049 for (i = 0; i < n; ++i) {
8050 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8051 targets [i] = tblock;
8052 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8053 ip += 4;
8056 if (sp != stack_start) {
8058 * Link the current bb with the targets as well, so handle_stack_args
8059 * will set their in_stack correctly.
8061 link_bblock (cfg, bblock, default_bblock);
8062 for (i = 0; i < n; ++i)
8063 link_bblock (cfg, bblock, targets [i]);
8065 handle_stack_args (cfg, stack_start, sp - stack_start);
8066 sp = stack_start;
8067 CHECK_UNVERIFIABLE (cfg);
8070 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8072 bblock = cfg->cbb;
8074 for (i = 0; i < n; ++i)
8075 link_bblock (cfg, bblock, targets [i]);
8077 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8078 table->table = targets;
8079 table->table_size = n;
8081 use_op_switch = FALSE;
8082 #ifdef TARGET_ARM
8083 /* ARM implements SWITCH statements differently */
8084 /* FIXME: Make it use the generic implementation */
8085 if (!cfg->compile_aot)
8086 use_op_switch = TRUE;
8087 #endif
8089 if (COMPILE_LLVM (cfg))
8090 use_op_switch = TRUE;
8092 cfg->cbb->has_jump_table = 1;
8094 if (use_op_switch) {
8095 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8096 ins->sreg1 = src1->dreg;
8097 ins->inst_p0 = table;
8098 ins->inst_many_bb = targets;
8099 ins->klass = GUINT_TO_POINTER (n);
8100 MONO_ADD_INS (cfg->cbb, ins);
8101 } else {
8102 if (sizeof (gpointer) == 8)
8103 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8104 else
8105 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8107 #if SIZEOF_REGISTER == 8
8108 /* The upper word might not be zero, and we add it to a 64 bit address later */
8109 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8110 #endif
8112 if (cfg->compile_aot) {
8113 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8114 } else {
8115 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8116 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8117 ins->inst_p0 = table;
8118 ins->dreg = table_reg;
8119 MONO_ADD_INS (cfg->cbb, ins);
8122 /* FIXME: Use load_memindex */
8123 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8124 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8125 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8127 start_new_bblock = 1;
8128 inline_costs += (BRANCH_COST * 2);
8129 break;
8131 case CEE_LDIND_I1:
8132 case CEE_LDIND_U1:
8133 case CEE_LDIND_I2:
8134 case CEE_LDIND_U2:
8135 case CEE_LDIND_I4:
8136 case CEE_LDIND_U4:
8137 case CEE_LDIND_I8:
8138 case CEE_LDIND_I:
8139 case CEE_LDIND_R4:
8140 case CEE_LDIND_R8:
8141 case CEE_LDIND_REF:
8142 CHECK_STACK (1);
8143 --sp;
8145 switch (*ip) {
8146 case CEE_LDIND_R4:
8147 case CEE_LDIND_R8:
8148 dreg = alloc_freg (cfg);
8149 break;
8150 case CEE_LDIND_I8:
8151 dreg = alloc_lreg (cfg);
8152 break;
8153 case CEE_LDIND_REF:
8154 dreg = alloc_ireg_ref (cfg);
8155 break;
8156 default:
8157 dreg = alloc_preg (cfg);
8160 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8161 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8162 ins->flags |= ins_flag;
8163 ins_flag = 0;
8164 MONO_ADD_INS (bblock, ins);
8165 *sp++ = ins;
8166 if (ins->flags & MONO_INST_VOLATILE) {
8167 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8168 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8169 emit_memory_barrier (cfg, FullBarrier);
8171 ++ip;
8172 break;
8173 case CEE_STIND_REF:
8174 case CEE_STIND_I1:
8175 case CEE_STIND_I2:
8176 case CEE_STIND_I4:
8177 case CEE_STIND_I8:
8178 case CEE_STIND_R4:
8179 case CEE_STIND_R8:
8180 case CEE_STIND_I:
8181 CHECK_STACK (2);
8182 sp -= 2;
8184 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8185 ins->flags |= ins_flag;
8186 ins_flag = 0;
8188 if (ins->flags & MONO_INST_VOLATILE) {
8189 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8190 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
8191 emit_memory_barrier (cfg, FullBarrier);
8194 MONO_ADD_INS (bblock, ins);
8196 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8197 emit_write_barrier (cfg, sp [0], sp [1], -1);
8199 inline_costs += 1;
8200 ++ip;
8201 break;
8203 case CEE_MUL:
8204 CHECK_STACK (2);
8206 MONO_INST_NEW (cfg, ins, (*ip));
8207 sp -= 2;
8208 ins->sreg1 = sp [0]->dreg;
8209 ins->sreg2 = sp [1]->dreg;
8210 type_from_op (ins, sp [0], sp [1]);
8211 CHECK_TYPE (ins);
8212 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8214 /* Use the immediate opcodes if possible */
8215 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8216 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8217 if (imm_opcode != -1) {
8218 ins->opcode = imm_opcode;
8219 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8220 ins->sreg2 = -1;
8222 sp [1]->opcode = OP_NOP;
8226 MONO_ADD_INS ((cfg)->cbb, (ins));
8228 *sp++ = mono_decompose_opcode (cfg, ins);
8229 ip++;
8230 break;
8231 case CEE_ADD:
8232 case CEE_SUB:
8233 case CEE_DIV:
8234 case CEE_DIV_UN:
8235 case CEE_REM:
8236 case CEE_REM_UN:
8237 case CEE_AND:
8238 case CEE_OR:
8239 case CEE_XOR:
8240 case CEE_SHL:
8241 case CEE_SHR:
8242 case CEE_SHR_UN:
8243 CHECK_STACK (2);
8245 MONO_INST_NEW (cfg, ins, (*ip));
8246 sp -= 2;
8247 ins->sreg1 = sp [0]->dreg;
8248 ins->sreg2 = sp [1]->dreg;
8249 type_from_op (ins, sp [0], sp [1]);
8250 CHECK_TYPE (ins);
8251 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8252 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8254 /* FIXME: Pass opcode to is_inst_imm */
8256 /* Use the immediate opcodes if possible */
8257 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8258 int imm_opcode;
8260 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8261 if (imm_opcode != -1) {
8262 ins->opcode = imm_opcode;
8263 if (sp [1]->opcode == OP_I8CONST) {
8264 #if SIZEOF_REGISTER == 8
8265 ins->inst_imm = sp [1]->inst_l;
8266 #else
8267 ins->inst_ls_word = sp [1]->inst_ls_word;
8268 ins->inst_ms_word = sp [1]->inst_ms_word;
8269 #endif
8271 else
8272 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8273 ins->sreg2 = -1;
8275 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8276 if (sp [1]->next == NULL)
8277 sp [1]->opcode = OP_NOP;
8280 MONO_ADD_INS ((cfg)->cbb, (ins));
8282 *sp++ = mono_decompose_opcode (cfg, ins);
8283 ip++;
8284 break;
8285 case CEE_NEG:
8286 case CEE_NOT:
8287 case CEE_CONV_I1:
8288 case CEE_CONV_I2:
8289 case CEE_CONV_I4:
8290 case CEE_CONV_R4:
8291 case CEE_CONV_R8:
8292 case CEE_CONV_U4:
8293 case CEE_CONV_I8:
8294 case CEE_CONV_U8:
8295 case CEE_CONV_OVF_I8:
8296 case CEE_CONV_OVF_U8:
8297 case CEE_CONV_R_UN:
8298 CHECK_STACK (1);
8300 /* Special case this earlier so we have long constants in the IR */
8301 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8302 int data = sp [-1]->inst_c0;
8303 sp [-1]->opcode = OP_I8CONST;
8304 sp [-1]->type = STACK_I8;
8305 #if SIZEOF_REGISTER == 8
8306 if ((*ip) == CEE_CONV_U8)
8307 sp [-1]->inst_c0 = (guint32)data;
8308 else
8309 sp [-1]->inst_c0 = data;
8310 #else
8311 sp [-1]->inst_ls_word = data;
8312 if ((*ip) == CEE_CONV_U8)
8313 sp [-1]->inst_ms_word = 0;
8314 else
8315 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8316 #endif
8317 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8319 else {
8320 ADD_UNOP (*ip);
8322 ip++;
8323 break;
8324 case CEE_CONV_OVF_I4:
8325 case CEE_CONV_OVF_I1:
8326 case CEE_CONV_OVF_I2:
8327 case CEE_CONV_OVF_I:
8328 case CEE_CONV_OVF_U:
8329 CHECK_STACK (1);
8331 if (sp [-1]->type == STACK_R8) {
8332 ADD_UNOP (CEE_CONV_OVF_I8);
8333 ADD_UNOP (*ip);
8334 } else {
8335 ADD_UNOP (*ip);
8337 ip++;
8338 break;
8339 case CEE_CONV_OVF_U1:
8340 case CEE_CONV_OVF_U2:
8341 case CEE_CONV_OVF_U4:
8342 CHECK_STACK (1);
8344 if (sp [-1]->type == STACK_R8) {
8345 ADD_UNOP (CEE_CONV_OVF_U8);
8346 ADD_UNOP (*ip);
8347 } else {
8348 ADD_UNOP (*ip);
8350 ip++;
8351 break;
8352 case CEE_CONV_OVF_I1_UN:
8353 case CEE_CONV_OVF_I2_UN:
8354 case CEE_CONV_OVF_I4_UN:
8355 case CEE_CONV_OVF_I8_UN:
8356 case CEE_CONV_OVF_U1_UN:
8357 case CEE_CONV_OVF_U2_UN:
8358 case CEE_CONV_OVF_U4_UN:
8359 case CEE_CONV_OVF_U8_UN:
8360 case CEE_CONV_OVF_I_UN:
8361 case CEE_CONV_OVF_U_UN:
8362 case CEE_CONV_U2:
8363 case CEE_CONV_U1:
8364 case CEE_CONV_I:
8365 case CEE_CONV_U:
8366 CHECK_STACK (1);
8367 ADD_UNOP (*ip);
8368 CHECK_CFG_EXCEPTION;
8369 ip++;
8370 break;
8371 case CEE_ADD_OVF:
8372 case CEE_ADD_OVF_UN:
8373 case CEE_MUL_OVF:
8374 case CEE_MUL_OVF_UN:
8375 case CEE_SUB_OVF:
8376 case CEE_SUB_OVF_UN:
8377 CHECK_STACK (2);
8378 ADD_BINOP (*ip);
8379 ip++;
8380 break;
8381 case CEE_CPOBJ:
8382 GSHAREDVT_FAILURE (*ip);
8383 CHECK_OPSIZE (5);
8384 CHECK_STACK (2);
8385 token = read32 (ip + 1);
8386 klass = mini_get_class (method, token, generic_context);
8387 CHECK_TYPELOAD (klass);
8388 sp -= 2;
8389 if (generic_class_is_reference_type (cfg, klass)) {
8390 MonoInst *store, *load;
8391 int dreg = alloc_ireg_ref (cfg);
8393 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8394 load->flags |= ins_flag;
8395 MONO_ADD_INS (cfg->cbb, load);
8397 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8398 store->flags |= ins_flag;
8399 MONO_ADD_INS (cfg->cbb, store);
8401 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8402 emit_write_barrier (cfg, sp [0], sp [1], -1);
8403 } else {
8404 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8406 ins_flag = 0;
8407 ip += 5;
8408 break;
8409 case CEE_LDOBJ: {
8410 int loc_index = -1;
8411 int stloc_len = 0;
8413 CHECK_OPSIZE (5);
8414 CHECK_STACK (1);
8415 --sp;
8416 token = read32 (ip + 1);
8417 klass = mini_get_class (method, token, generic_context);
8418 CHECK_TYPELOAD (klass);
8420 /* Optimize the common ldobj+stloc combination */
8421 switch (ip [5]) {
8422 case CEE_STLOC_S:
8423 loc_index = ip [6];
8424 stloc_len = 2;
8425 break;
8426 case CEE_STLOC_0:
8427 case CEE_STLOC_1:
8428 case CEE_STLOC_2:
8429 case CEE_STLOC_3:
8430 loc_index = ip [5] - CEE_STLOC_0;
8431 stloc_len = 1;
8432 break;
8433 default:
8434 break;
8437 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8438 CHECK_LOCAL (loc_index);
8440 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8441 ins->dreg = cfg->locals [loc_index]->dreg;
8442 ip += 5;
8443 ip += stloc_len;
8444 break;
8447 /* Optimize the ldobj+stobj combination */
8448 /* The reference case ends up being a load+store anyway */
8449 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8450 CHECK_STACK (1);
8452 sp --;
8454 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8456 ip += 5 + 5;
8457 ins_flag = 0;
8458 break;
8461 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8462 *sp++ = ins;
8464 ip += 5;
8465 ins_flag = 0;
8466 inline_costs += 1;
8467 break;
8469 case CEE_LDSTR:
8470 CHECK_STACK_OVF (1);
8471 CHECK_OPSIZE (5);
8472 n = read32 (ip + 1);
8474 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8475 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8476 ins->type = STACK_OBJ;
8477 *sp = ins;
8479 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8480 MonoInst *iargs [1];
8482 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8483 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8484 } else {
8485 if (cfg->opt & MONO_OPT_SHARED) {
8486 MonoInst *iargs [3];
8488 if (cfg->compile_aot) {
8489 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8491 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8492 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8493 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8494 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8495 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8496 } else {
8497 if (bblock->out_of_line) {
8498 MonoInst *iargs [2];
8500 if (image == mono_defaults.corlib) {
8502 * Avoid relocations in AOT and save some space by using a
8503 * version of helper_ldstr specialized to mscorlib.
8505 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8506 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8507 } else {
8508 /* Avoid creating the string object */
8509 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8510 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8511 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8514 else
8515 if (cfg->compile_aot) {
8516 NEW_LDSTRCONST (cfg, ins, image, n);
8517 *sp = ins;
8518 MONO_ADD_INS (bblock, ins);
8520 else {
8521 NEW_PCONST (cfg, ins, NULL);
8522 ins->type = STACK_OBJ;
8523 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8524 if (!ins->inst_p0)
8525 OUT_OF_MEMORY_FAILURE;
8527 *sp = ins;
8528 MONO_ADD_INS (bblock, ins);
8533 sp++;
8534 ip += 5;
8535 break;
8536 case CEE_NEWOBJ: {
8537 MonoInst *iargs [2];
8538 MonoMethodSignature *fsig;
8539 MonoInst this_ins;
8540 MonoInst *alloc;
8541 MonoInst *vtable_arg = NULL;
8543 CHECK_OPSIZE (5);
8544 token = read32 (ip + 1);
8545 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8546 if (!cmethod || mono_loader_get_last_error ())
8547 LOAD_ERROR;
8548 fsig = mono_method_get_signature (cmethod, image, token);
8549 if (!fsig)
8550 LOAD_ERROR;
8552 mono_save_token_info (cfg, image, token, cmethod);
8554 if (!mono_class_init (cmethod->klass))
8555 TYPE_LOAD_ERROR (cmethod->klass);
8557 if (cfg->generic_sharing_context)
8558 context_used = mono_method_check_context_used (cmethod);
8560 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8561 if (check_linkdemand (cfg, method, cmethod))
8562 INLINE_FAILURE ("linkdemand");
8563 CHECK_CFG_EXCEPTION;
8564 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8565 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8568 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8569 emit_generic_class_init (cfg, cmethod->klass);
8570 CHECK_TYPELOAD (cmethod->klass);
8573 if (cmethod->klass->valuetype)
8574 GSHAREDVT_FAILURE (*ip);
8577 if (cfg->gsharedvt) {
8578 if (mini_is_gsharedvt_variable_signature (sig))
8579 GSHAREDVT_FAILURE (*ip);
8583 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8584 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8585 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8586 mono_class_vtable (cfg->domain, cmethod->klass);
8587 CHECK_TYPELOAD (cmethod->klass);
8589 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8590 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8591 } else {
8592 if (context_used) {
8593 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8594 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8595 } else {
8596 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8598 CHECK_TYPELOAD (cmethod->klass);
8599 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8604 n = fsig->param_count;
8605 CHECK_STACK (n);
8608 * Generate smaller code for the common newobj <exception> instruction in
8609 * argument checking code.
8611 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8612 is_exception_class (cmethod->klass) && n <= 2 &&
8613 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8614 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8615 MonoInst *iargs [3];
8617 g_assert (!vtable_arg);
8619 sp -= n;
8621 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8622 switch (n) {
8623 case 0:
8624 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8625 break;
8626 case 1:
8627 iargs [1] = sp [0];
8628 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8629 break;
8630 case 2:
8631 iargs [1] = sp [0];
8632 iargs [2] = sp [1];
8633 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8634 break;
8635 default:
8636 g_assert_not_reached ();
8639 ip += 5;
8640 inline_costs += 5;
8641 break;
8644 /* move the args to allow room for 'this' in the first position */
8645 while (n--) {
8646 --sp;
8647 sp [1] = sp [0];
8650 /* check_call_signature () requires sp[0] to be set */
8651 this_ins.type = STACK_OBJ;
8652 sp [0] = &this_ins;
8653 if (check_call_signature (cfg, fsig, sp))
8654 UNVERIFIED;
8656 iargs [0] = NULL;
8658 if (mini_class_is_system_array (cmethod->klass)) {
8659 g_assert (!vtable_arg);
8661 *sp = emit_get_rgctx_method (cfg, context_used,
8662 cmethod, MONO_RGCTX_INFO_METHOD);
8664 /* Avoid varargs in the common case */
8665 if (fsig->param_count == 1)
8666 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8667 else if (fsig->param_count == 2)
8668 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8669 else if (fsig->param_count == 3)
8670 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8671 else
8672 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8673 } else if (cmethod->string_ctor) {
8674 g_assert (!context_used);
8675 g_assert (!vtable_arg);
8676 /* we simply pass a null pointer */
8677 EMIT_NEW_PCONST (cfg, *sp, NULL);
8678 /* now call the string ctor */
8679 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8680 } else {
8681 MonoInst* callvirt_this_arg = NULL;
8683 if (cmethod->klass->valuetype) {
8684 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8685 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8686 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8688 alloc = NULL;
8691 * The code generated by mini_emit_virtual_call () expects
8692 * iargs [0] to be a boxed instance, but luckily the vcall
8693 * will be transformed into a normal call there.
8695 } else if (context_used) {
8696 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8697 *sp = alloc;
8698 } else {
8699 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8701 CHECK_TYPELOAD (cmethod->klass);
8704 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8705 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8706 * As a workaround, we call class cctors before allocating objects.
8708 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8709 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8710 if (cfg->verbose_level > 2)
8711 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8712 class_inits = g_slist_prepend (class_inits, vtable);
8715 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8716 *sp = alloc;
8718 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8720 if (alloc)
8721 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8723 /* Now call the actual ctor */
8724 /* Avoid virtual calls to ctors if possible */
8725 if (cmethod->klass->marshalbyref)
8726 callvirt_this_arg = sp [0];
8729 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8730 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8731 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8732 *sp = ins;
8733 sp++;
8736 CHECK_CFG_EXCEPTION;
8737 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8738 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8739 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8740 !g_list_find (dont_inline, cmethod)) {
8741 int costs;
8743 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8744 cfg->real_offset += 5;
8745 bblock = cfg->cbb;
8747 inline_costs += costs - 5;
8748 } else {
8749 INLINE_FAILURE ("inline failure");
8750 // FIXME-VT: Clean this up
8751 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8752 GSHAREDVT_FAILURE(*ip);
8753 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8755 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8756 MonoInst *addr;
8758 addr = emit_get_rgctx_method (cfg, context_used,
8759 cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8760 mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
8761 } else if (context_used &&
8762 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8763 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8764 MonoInst *cmethod_addr;
8766 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8767 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8769 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8770 } else {
8771 INLINE_FAILURE ("ctor call");
8772 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8773 callvirt_this_arg, NULL, vtable_arg);
8777 if (alloc == NULL) {
8778 /* Valuetype */
8779 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8780 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8781 *sp++= ins;
8783 else
8784 *sp++ = alloc;
8786 ip += 5;
8787 inline_costs += 5;
8788 break;
8790 case CEE_CASTCLASS:
8791 CHECK_STACK (1);
8792 --sp;
8793 CHECK_OPSIZE (5);
8794 token = read32 (ip + 1);
8795 klass = mini_get_class (method, token, generic_context);
8796 CHECK_TYPELOAD (klass);
8797 if (sp [0]->type != STACK_OBJ)
8798 UNVERIFIED;
8800 if (cfg->generic_sharing_context)
8801 context_used = mono_class_check_context_used (klass);
8803 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8804 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8805 MonoInst *args [3];
8807 /* obj */
8808 args [0] = *sp;
8810 /* klass */
8811 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8813 /* inline cache*/
8814 if (cfg->compile_aot)
8815 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8816 else
8817 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8819 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8820 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8821 ip += 5;
8822 inline_costs += 2;
8823 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8824 MonoMethod *mono_castclass;
8825 MonoInst *iargs [1];
8826 int costs;
8828 mono_castclass = mono_marshal_get_castclass (klass);
8829 iargs [0] = sp [0];
8831 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8832 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8833 CHECK_CFG_EXCEPTION;
8834 g_assert (costs > 0);
8836 ip += 5;
8837 cfg->real_offset += 5;
8838 bblock = cfg->cbb;
8840 *sp++ = iargs [0];
8842 inline_costs += costs;
8844 else {
8845 ins = handle_castclass (cfg, klass, *sp, context_used);
8846 CHECK_CFG_EXCEPTION;
8847 bblock = cfg->cbb;
8848 *sp ++ = ins;
8849 ip += 5;
8851 break;
8852 case CEE_ISINST: {
8853 CHECK_STACK (1);
8854 --sp;
8855 CHECK_OPSIZE (5);
8856 token = read32 (ip + 1);
8857 klass = mini_get_class (method, token, generic_context);
8858 CHECK_TYPELOAD (klass);
8859 if (sp [0]->type != STACK_OBJ)
8860 UNVERIFIED;
8862 if (cfg->generic_sharing_context)
8863 context_used = mono_class_check_context_used (klass);
8865 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8866 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8867 MonoInst *args [3];
8869 /* obj */
8870 args [0] = *sp;
8872 /* klass */
8873 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8875 /* inline cache*/
8876 if (cfg->compile_aot)
8877 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8878 else
8879 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8881 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8882 ip += 5;
8883 inline_costs += 2;
8884 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8885 MonoMethod *mono_isinst;
8886 MonoInst *iargs [1];
8887 int costs;
8889 mono_isinst = mono_marshal_get_isinst (klass);
8890 iargs [0] = sp [0];
8892 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8893 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8894 CHECK_CFG_EXCEPTION;
8895 g_assert (costs > 0);
8897 ip += 5;
8898 cfg->real_offset += 5;
8899 bblock = cfg->cbb;
8901 *sp++= iargs [0];
8903 inline_costs += costs;
8905 else {
8906 ins = handle_isinst (cfg, klass, *sp, context_used);
8907 CHECK_CFG_EXCEPTION;
8908 bblock = cfg->cbb;
8909 *sp ++ = ins;
8910 ip += 5;
8912 break;
8914 case CEE_UNBOX_ANY: {
8915 CHECK_STACK (1);
8916 --sp;
8917 CHECK_OPSIZE (5);
8918 token = read32 (ip + 1);
8919 klass = mini_get_class (method, token, generic_context);
8920 CHECK_TYPELOAD (klass);
8922 mono_save_token_info (cfg, image, token, klass);
8924 if (cfg->generic_sharing_context)
8925 context_used = mono_class_check_context_used (klass);
8927 if (mini_is_gsharedvt_klass (cfg, klass))
8928 /* Need to check for nullable types at runtime */
8929 GSHAREDVT_FAILURE (*ip);
8931 if (generic_class_is_reference_type (cfg, klass)) {
8932 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8933 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8934 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8935 MonoInst *args [3];
8937 /* obj */
8938 args [0] = *sp;
8940 /* klass */
8941 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8943 /* inline cache*/
8944 /*FIXME AOT support*/
8945 if (cfg->compile_aot)
8946 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8947 else
8948 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8950 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8951 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8952 ip += 5;
8953 inline_costs += 2;
8954 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8955 MonoMethod *mono_castclass;
8956 MonoInst *iargs [1];
8957 int costs;
8959 mono_castclass = mono_marshal_get_castclass (klass);
8960 iargs [0] = sp [0];
8962 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8963 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8964 CHECK_CFG_EXCEPTION;
8965 g_assert (costs > 0);
8967 ip += 5;
8968 cfg->real_offset += 5;
8969 bblock = cfg->cbb;
8971 *sp++ = iargs [0];
8972 inline_costs += costs;
8973 } else {
8974 ins = handle_castclass (cfg, klass, *sp, context_used);
8975 CHECK_CFG_EXCEPTION;
8976 bblock = cfg->cbb;
8977 *sp ++ = ins;
8978 ip += 5;
8980 break;
8983 if (mono_class_is_nullable (klass)) {
8984 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8985 *sp++= ins;
8986 ip += 5;
8987 break;
8990 /* UNBOX */
8991 ins = handle_unbox (cfg, klass, sp, context_used);
8992 *sp = ins;
8994 ip += 5;
8996 /* LDOBJ */
8997 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8998 *sp++ = ins;
9000 inline_costs += 2;
9001 break;
9003 case CEE_BOX: {
9004 MonoInst *val;
9006 CHECK_STACK (1);
9007 --sp;
9008 val = *sp;
9009 CHECK_OPSIZE (5);
9010 token = read32 (ip + 1);
9011 klass = mini_get_class (method, token, generic_context);
9012 CHECK_TYPELOAD (klass);
9014 mono_save_token_info (cfg, image, token, klass);
9016 if (cfg->generic_sharing_context)
9017 context_used = mono_class_check_context_used (klass);
9019 if (generic_class_is_reference_type (cfg, klass)) {
9020 *sp++ = val;
9021 ip += 5;
9022 break;
9025 if (klass == mono_defaults.void_class)
9026 UNVERIFIED;
9027 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9028 UNVERIFIED;
9029 /* frequent check in generic code: box (struct), brtrue */
9031 // FIXME: LLVM can't handle the inconsistent bb linking
9032 if (!mono_class_is_nullable (klass) &&
9033 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9034 (ip [5] == CEE_BRTRUE ||
9035 ip [5] == CEE_BRTRUE_S ||
9036 ip [5] == CEE_BRFALSE ||
9037 ip [5] == CEE_BRFALSE_S)) {
9038 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9039 int dreg;
9040 MonoBasicBlock *true_bb, *false_bb;
9042 ip += 5;
9044 if (cfg->verbose_level > 3) {
9045 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9046 printf ("<box+brtrue opt>\n");
9049 switch (*ip) {
9050 case CEE_BRTRUE_S:
9051 case CEE_BRFALSE_S:
9052 CHECK_OPSIZE (2);
9053 ip++;
9054 target = ip + 1 + (signed char)(*ip);
9055 ip++;
9056 break;
9057 case CEE_BRTRUE:
9058 case CEE_BRFALSE:
9059 CHECK_OPSIZE (5);
9060 ip++;
9061 target = ip + 4 + (gint)(read32 (ip));
9062 ip += 4;
9063 break;
9064 default:
9065 g_assert_not_reached ();
9069 * We need to link both bblocks, since it is needed for handling stack
9070 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9071 * Branching to only one of them would lead to inconsistencies, so
9072 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9074 GET_BBLOCK (cfg, true_bb, target);
9075 GET_BBLOCK (cfg, false_bb, ip);
9077 mono_link_bblock (cfg, cfg->cbb, true_bb);
9078 mono_link_bblock (cfg, cfg->cbb, false_bb);
9080 if (sp != stack_start) {
9081 handle_stack_args (cfg, stack_start, sp - stack_start);
9082 sp = stack_start;
9083 CHECK_UNVERIFIABLE (cfg);
9086 if (COMPILE_LLVM (cfg)) {
9087 dreg = alloc_ireg (cfg);
9088 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9091 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9092 } else {
9093 /* The JIT can't eliminate the iconst+compare */
9094 MONO_INST_NEW (cfg, ins, OP_BR);
9095 ins->inst_target_bb = is_true ? true_bb : false_bb;
9096 MONO_ADD_INS (cfg->cbb, ins);
9099 start_new_bblock = 1;
9100 break;
9103 *sp++ = handle_box (cfg, val, klass, context_used);
9105 CHECK_CFG_EXCEPTION;
9106 ip += 5;
9107 inline_costs += 1;
9108 break;
9110 case CEE_UNBOX: {
9111 CHECK_STACK (1);
9112 --sp;
9113 CHECK_OPSIZE (5);
9114 token = read32 (ip + 1);
9115 klass = mini_get_class (method, token, generic_context);
9116 CHECK_TYPELOAD (klass);
9118 mono_save_token_info (cfg, image, token, klass);
9120 if (cfg->generic_sharing_context)
9121 context_used = mono_class_check_context_used (klass);
9123 if (mono_class_is_nullable (klass)) {
9124 MonoInst *val;
9126 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9127 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9129 *sp++= ins;
9130 } else {
9131 ins = handle_unbox (cfg, klass, sp, context_used);
9132 *sp++ = ins;
9134 ip += 5;
9135 inline_costs += 2;
9136 break;
9138 case CEE_LDFLD:
9139 case CEE_LDFLDA:
9140 case CEE_STFLD:
9141 case CEE_LDSFLD:
9142 case CEE_LDSFLDA:
9143 case CEE_STSFLD: {
9144 MonoClassField *field;
9145 int costs;
9146 guint foffset;
9147 gboolean is_instance;
9148 int op;
9149 gpointer addr = NULL;
9150 gboolean is_special_static;
9151 MonoType *ftype;
9152 MonoInst *store_val = NULL;
9154 op = *ip;
9155 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9156 if (is_instance) {
9157 if (op == CEE_STFLD) {
9158 CHECK_STACK (2);
9159 sp -= 2;
9160 store_val = sp [1];
9161 } else {
9162 CHECK_STACK (1);
9163 --sp;
9165 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9166 UNVERIFIED;
9167 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9168 UNVERIFIED;
9169 } else {
9170 if (op == CEE_STSFLD) {
9171 CHECK_STACK (1);
9172 sp--;
9173 store_val = sp [0];
9177 CHECK_OPSIZE (5);
9178 token = read32 (ip + 1);
9179 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9180 field = mono_method_get_wrapper_data (method, token);
9181 klass = field->parent;
9183 else {
9184 field = mono_field_from_token (image, token, &klass, generic_context);
9186 if (!field)
9187 LOAD_ERROR;
9188 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9189 FIELD_ACCESS_FAILURE;
9190 mono_class_init (klass);
9192 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9193 UNVERIFIED;
9195 /* if the class is Critical then transparent code cannot access it's fields */
9196 if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
9197 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9199 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9200 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9201 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
9202 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9206 * LDFLD etc. is usable on static fields as well, so convert those cases to
9207 * the static case.
9209 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9210 switch (op) {
9211 case CEE_LDFLD:
9212 op = CEE_LDSFLD;
9213 break;
9214 case CEE_STFLD:
9215 op = CEE_STSFLD;
9216 break;
9217 case CEE_LDFLDA:
9218 op = CEE_LDSFLDA;
9219 break;
9220 default:
9221 g_assert_not_reached ();
9223 is_instance = FALSE;
9226 if (cfg->generic_sharing_context)
9227 context_used = mono_class_check_context_used (klass);
9229 /* INSTANCE CASE */
9231 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9232 if (op == CEE_STFLD) {
9233 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9234 UNVERIFIED;
9235 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
9236 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9237 MonoInst *iargs [5];
9239 GSHAREDVT_FAILURE (op);
9241 iargs [0] = sp [0];
9242 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9243 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9244 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9245 field->offset);
9246 iargs [4] = sp [1];
9248 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9249 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9250 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9251 CHECK_CFG_EXCEPTION;
9252 g_assert (costs > 0);
9254 cfg->real_offset += 5;
9255 bblock = cfg->cbb;
9257 inline_costs += costs;
9258 } else {
9259 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9261 } else {
9262 MonoInst *store;
9264 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9266 if (mini_is_gsharedvt_klass (cfg, klass)) {
9267 MonoInst *offset_ins;
9269 if (cfg->generic_sharing_context)
9270 context_used = mono_class_check_context_used (klass);
9272 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9273 dreg = alloc_ireg_mp (cfg);
9274 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9275 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9276 // FIXME-VT: wbarriers ?
9277 } else {
9278 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9280 if (sp [0]->opcode != OP_LDADDR)
9281 store->flags |= MONO_INST_FAULT;
9283 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9284 /* insert call to write barrier */
9285 MonoInst *ptr;
9286 int dreg;
9288 dreg = alloc_ireg_mp (cfg);
9289 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9290 emit_write_barrier (cfg, ptr, sp [1], -1);
9293 store->flags |= ins_flag;
9295 ins_flag = 0;
9296 ip += 5;
9297 break;
9300 if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
9301 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9302 MonoInst *iargs [4];
9304 GSHAREDVT_FAILURE (op);
9306 iargs [0] = sp [0];
9307 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9308 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9309 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9310 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9311 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9312 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9313 CHECK_CFG_EXCEPTION;
9314 bblock = cfg->cbb;
9315 g_assert (costs > 0);
9317 cfg->real_offset += 5;
9319 *sp++ = iargs [0];
9321 inline_costs += costs;
9322 } else {
9323 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9324 *sp++ = ins;
9326 } else if (is_instance) {
9327 if (sp [0]->type == STACK_VTYPE) {
9328 MonoInst *var;
9330 /* Have to compute the address of the variable */
9332 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9333 if (!var)
9334 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9335 else
9336 g_assert (var->klass == klass);
9338 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9339 sp [0] = ins;
9342 if (op == CEE_LDFLDA) {
9343 if (is_magic_tls_access (field)) {
9344 GSHAREDVT_FAILURE (*ip);
9345 ins = sp [0];
9346 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9347 } else {
9348 if (sp [0]->type == STACK_OBJ) {
9349 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9350 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9353 dreg = alloc_ireg_mp (cfg);
9355 if (mini_is_gsharedvt_klass (cfg, klass)) {
9356 MonoInst *offset_ins;
9358 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9359 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9360 } else {
9361 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9363 ins->klass = mono_class_from_mono_type (field->type);
9364 ins->type = STACK_MP;
9365 *sp++ = ins;
9367 } else {
9368 MonoInst *load;
9370 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9372 if (mini_is_gsharedvt_klass (cfg, klass)) {
9373 MonoInst *offset_ins;
9375 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9376 dreg = alloc_ireg_mp (cfg);
9377 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9378 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9379 } else {
9380 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9382 load->flags |= ins_flag;
9383 if (sp [0]->opcode != OP_LDADDR)
9384 load->flags |= MONO_INST_FAULT;
9385 *sp++ = load;
9389 if (is_instance) {
9390 ins_flag = 0;
9391 ip += 5;
9392 break;
9395 /* STATIC CASE */
9398 * We can only support shared generic static
9399 * field access on architectures where the
9400 * trampoline code has been extended to handle
9401 * the generic class init.
9403 #ifndef MONO_ARCH_VTABLE_REG
9404 GENERIC_SHARING_FAILURE (op);
9405 #endif
9407 if (cfg->generic_sharing_context)
9408 context_used = mono_class_check_context_used (klass);
9410 ftype = mono_field_get_type (field);
9412 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9413 UNVERIFIED;
9415 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9416 * to be called here.
9418 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9419 mono_class_vtable (cfg->domain, klass);
9420 CHECK_TYPELOAD (klass);
9422 mono_domain_lock (cfg->domain);
9423 if (cfg->domain->special_static_fields)
9424 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9425 mono_domain_unlock (cfg->domain);
9427 is_special_static = mono_class_field_is_special_static (field);
9429 /* Generate IR to compute the field address */
9430 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9432 * Fast access to TLS data
9433 * Inline version of get_thread_static_data () in
9434 * threads.c.
9436 guint32 offset;
9437 int idx, static_data_reg, array_reg, dreg;
9438 MonoInst *thread_ins;
9440 GSHAREDVT_FAILURE (op);
9442 // offset &= 0x7fffffff;
9443 // idx = (offset >> 24) - 1;
9444 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9446 thread_ins = mono_get_thread_intrinsic (cfg);
9447 MONO_ADD_INS (cfg->cbb, thread_ins);
9448 static_data_reg = alloc_ireg (cfg);
9449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
9451 if (cfg->compile_aot) {
9452 int offset_reg, offset2_reg, idx_reg;
9454 /* For TLS variables, this will return the TLS offset */
9455 EMIT_NEW_SFLDACONST (cfg, ins, field);
9456 offset_reg = ins->dreg;
9457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9458 idx_reg = alloc_ireg (cfg);
9459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9463 array_reg = alloc_ireg (cfg);
9464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9465 offset2_reg = alloc_ireg (cfg);
9466 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9467 dreg = alloc_ireg (cfg);
9468 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9469 } else {
9470 offset = (gsize)addr & 0x7fffffff;
9471 idx = (offset >> 24) - 1;
9473 array_reg = alloc_ireg (cfg);
9474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9475 dreg = alloc_ireg (cfg);
9476 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9478 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9479 (cfg->compile_aot && is_special_static) ||
9480 (context_used && is_special_static)) {
9481 MonoInst *iargs [2];
9483 g_assert (field->parent);
9484 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9485 if (context_used) {
9486 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9487 field, MONO_RGCTX_INFO_CLASS_FIELD);
9488 } else {
9489 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9491 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9492 } else if (context_used) {
9493 MonoInst *static_data;
9496 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9497 method->klass->name_space, method->klass->name, method->name,
9498 depth, field->offset);
9501 if (mono_class_needs_cctor_run (klass, method))
9502 emit_generic_class_init (cfg, klass);
9505 * The pointer we're computing here is
9507 * super_info.static_data + field->offset
9509 static_data = emit_get_rgctx_klass (cfg, context_used,
9510 klass, MONO_RGCTX_INFO_STATIC_DATA);
9512 if (mini_is_gsharedvt_klass (cfg, klass)) {
9513 MonoInst *offset_ins;
9515 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9516 dreg = alloc_ireg_mp (cfg);
9517 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9518 } else if (field->offset == 0) {
9519 ins = static_data;
9520 } else {
9521 int addr_reg = mono_alloc_preg (cfg);
9522 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9524 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9525 MonoInst *iargs [2];
9527 g_assert (field->parent);
9528 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9529 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9530 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9531 } else {
9532 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9534 CHECK_TYPELOAD (klass);
9535 if (!addr) {
9536 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9537 if (!(g_slist_find (class_inits, vtable))) {
9538 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9539 if (cfg->verbose_level > 2)
9540 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9541 class_inits = g_slist_prepend (class_inits, vtable);
9543 } else {
9544 if (cfg->run_cctors) {
9545 MonoException *ex;
9546 /* This makes so that inline cannot trigger */
9547 /* .cctors: too many apps depend on them */
9548 /* running with a specific order... */
9549 if (! vtable->initialized)
9550 INLINE_FAILURE ("class init");
9551 ex = mono_runtime_class_init_full (vtable, FALSE);
9552 if (ex) {
9553 set_exception_object (cfg, ex);
9554 goto exception_exit;
9558 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9560 if (cfg->compile_aot)
9561 EMIT_NEW_SFLDACONST (cfg, ins, field);
9562 else
9563 EMIT_NEW_PCONST (cfg, ins, addr);
9564 } else {
9565 MonoInst *iargs [1];
9566 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9567 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9571 /* Generate IR to do the actual load/store operation */
9573 if (op == CEE_LDSFLDA) {
9574 ins->klass = mono_class_from_mono_type (ftype);
9575 ins->type = STACK_PTR;
9576 *sp++ = ins;
9577 } else if (op == CEE_STSFLD) {
9578 MonoInst *store;
9580 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9581 store->flags |= ins_flag;
9582 } else {
9583 gboolean is_const = FALSE;
9584 MonoVTable *vtable = NULL;
9585 gpointer addr = NULL;
9587 if (!context_used) {
9588 vtable = mono_class_vtable (cfg->domain, klass);
9589 CHECK_TYPELOAD (klass);
9591 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9592 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9593 int ro_type = ftype->type;
9594 if (!addr)
9595 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9596 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9597 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9600 GSHAREDVT_FAILURE (op);
9602 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9603 is_const = TRUE;
9604 switch (ro_type) {
9605 case MONO_TYPE_BOOLEAN:
9606 case MONO_TYPE_U1:
9607 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9608 sp++;
9609 break;
9610 case MONO_TYPE_I1:
9611 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9612 sp++;
9613 break;
9614 case MONO_TYPE_CHAR:
9615 case MONO_TYPE_U2:
9616 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9617 sp++;
9618 break;
9619 case MONO_TYPE_I2:
9620 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9621 sp++;
9622 break;
9623 break;
9624 case MONO_TYPE_I4:
9625 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9626 sp++;
9627 break;
9628 case MONO_TYPE_U4:
9629 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9630 sp++;
9631 break;
9632 case MONO_TYPE_I:
9633 case MONO_TYPE_U:
9634 case MONO_TYPE_PTR:
9635 case MONO_TYPE_FNPTR:
9636 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9637 type_to_eval_stack_type ((cfg), field->type, *sp);
9638 sp++;
9639 break;
9640 case MONO_TYPE_STRING:
9641 case MONO_TYPE_OBJECT:
9642 case MONO_TYPE_CLASS:
9643 case MONO_TYPE_SZARRAY:
9644 case MONO_TYPE_ARRAY:
9645 if (!mono_gc_is_moving ()) {
9646 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9647 type_to_eval_stack_type ((cfg), field->type, *sp);
9648 sp++;
9649 } else {
9650 is_const = FALSE;
9652 break;
9653 case MONO_TYPE_I8:
9654 case MONO_TYPE_U8:
9655 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9656 sp++;
9657 break;
9658 case MONO_TYPE_R4:
9659 case MONO_TYPE_R8:
9660 case MONO_TYPE_VALUETYPE:
9661 default:
9662 is_const = FALSE;
9663 break;
9667 if (!is_const) {
9668 MonoInst *load;
9670 CHECK_STACK_OVF (1);
9672 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9673 load->flags |= ins_flag;
9674 ins_flag = 0;
9675 *sp++ = load;
9678 ins_flag = 0;
9679 ip += 5;
9680 break;
9682 case CEE_STOBJ:
9683 CHECK_STACK (2);
9684 sp -= 2;
9685 CHECK_OPSIZE (5);
9686 token = read32 (ip + 1);
9687 klass = mini_get_class (method, token, generic_context);
9688 CHECK_TYPELOAD (klass);
9689 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9690 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9691 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9692 generic_class_is_reference_type (cfg, klass)) {
9693 /* insert call to write barrier */
9694 emit_write_barrier (cfg, sp [0], sp [1], -1);
9696 ins_flag = 0;
9697 ip += 5;
9698 inline_costs += 1;
9699 break;
9702 * Array opcodes
9704 case CEE_NEWARR: {
9705 MonoInst *len_ins;
9706 const char *data_ptr;
9707 int data_size = 0;
9708 guint32 field_token;
9710 CHECK_STACK (1);
9711 --sp;
9713 CHECK_OPSIZE (5);
9714 token = read32 (ip + 1);
9716 klass = mini_get_class (method, token, generic_context);
9717 CHECK_TYPELOAD (klass);
9719 if (cfg->generic_sharing_context)
9720 context_used = mono_class_check_context_used (klass);
9722 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9723 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9724 ins->sreg1 = sp [0]->dreg;
9725 ins->type = STACK_I4;
9726 ins->dreg = alloc_ireg (cfg);
9727 MONO_ADD_INS (cfg->cbb, ins);
9728 *sp = mono_decompose_opcode (cfg, ins);
9731 if (context_used) {
9732 MonoInst *args [3];
9733 MonoClass *array_class = mono_array_class_get (klass, 1);
9734 /* FIXME: we cannot get a managed
9735 allocator because we can't get the
9736 open generic class's vtable. We
9737 have the same problem in
9738 handle_alloc(). This
9739 needs to be solved so that we can
9740 have managed allocs of shared
9741 generic classes. */
9743 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9744 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9746 MonoMethod *managed_alloc = NULL;
9748 /* FIXME: Decompose later to help abcrem */
9750 /* vtable */
9751 args [0] = emit_get_rgctx_klass (cfg, context_used,
9752 array_class, MONO_RGCTX_INFO_VTABLE);
9753 /* array len */
9754 args [1] = sp [0];
9756 if (managed_alloc)
9757 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9758 else
9759 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9760 } else {
9761 if (cfg->opt & MONO_OPT_SHARED) {
9762 /* Decompose now to avoid problems with references to the domainvar */
9763 MonoInst *iargs [3];
9765 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9766 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9767 iargs [2] = sp [0];
9769 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9770 } else {
9771 /* Decompose later since it is needed by abcrem */
9772 MonoClass *array_type = mono_array_class_get (klass, 1);
9773 mono_class_vtable (cfg->domain, array_type);
9774 CHECK_TYPELOAD (array_type);
9776 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9777 ins->dreg = alloc_ireg_ref (cfg);
9778 ins->sreg1 = sp [0]->dreg;
9779 ins->inst_newa_class = klass;
9780 ins->type = STACK_OBJ;
9781 ins->klass = array_type;
9782 MONO_ADD_INS (cfg->cbb, ins);
9783 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9784 cfg->cbb->has_array_access = TRUE;
9786 /* Needed so mono_emit_load_get_addr () gets called */
9787 mono_get_got_var (cfg);
9791 len_ins = sp [0];
9792 ip += 5;
9793 *sp++ = ins;
9794 inline_costs += 1;
9797 * we inline/optimize the initialization sequence if possible.
9798 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9799 * for small sizes open code the memcpy
9800 * ensure the rva field is big enough
9802 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9803 MonoMethod *memcpy_method = get_memcpy_method ();
9804 MonoInst *iargs [3];
9805 int add_reg = alloc_ireg_mp (cfg);
9807 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9808 if (cfg->compile_aot) {
9809 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9810 } else {
9811 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9813 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9814 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9815 ip += 11;
9818 break;
9820 case CEE_LDLEN:
9821 CHECK_STACK (1);
9822 --sp;
9823 if (sp [0]->type != STACK_OBJ)
9824 UNVERIFIED;
9826 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9827 ins->dreg = alloc_preg (cfg);
9828 ins->sreg1 = sp [0]->dreg;
9829 ins->type = STACK_I4;
9830 /* This flag will be inherited by the decomposition */
9831 ins->flags |= MONO_INST_FAULT;
9832 MONO_ADD_INS (cfg->cbb, ins);
9833 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9834 cfg->cbb->has_array_access = TRUE;
9835 ip ++;
9836 *sp++ = ins;
9837 break;
9838 case CEE_LDELEMA:
9839 CHECK_STACK (2);
9840 sp -= 2;
9841 CHECK_OPSIZE (5);
9842 if (sp [0]->type != STACK_OBJ)
9843 UNVERIFIED;
9845 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9847 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9848 CHECK_TYPELOAD (klass);
9849 /* we need to make sure that this array is exactly the type it needs
9850 * to be for correctness. the wrappers are lax with their usage
9851 * so we need to ignore them here
9853 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9854 MonoClass *array_class = mono_array_class_get (klass, 1);
9855 mini_emit_check_array_type (cfg, sp [0], array_class);
9856 CHECK_TYPELOAD (array_class);
9859 readonly = FALSE;
9860 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9861 *sp++ = ins;
9862 ip += 5;
9863 break;
9864 case CEE_LDELEM:
9865 case CEE_LDELEM_I1:
9866 case CEE_LDELEM_U1:
9867 case CEE_LDELEM_I2:
9868 case CEE_LDELEM_U2:
9869 case CEE_LDELEM_I4:
9870 case CEE_LDELEM_U4:
9871 case CEE_LDELEM_I8:
9872 case CEE_LDELEM_I:
9873 case CEE_LDELEM_R4:
9874 case CEE_LDELEM_R8:
9875 case CEE_LDELEM_REF: {
9876 MonoInst *addr;
9878 CHECK_STACK (2);
9879 sp -= 2;
9881 if (*ip == CEE_LDELEM) {
9882 CHECK_OPSIZE (5);
9883 token = read32 (ip + 1);
9884 klass = mini_get_class (method, token, generic_context);
9885 CHECK_TYPELOAD (klass);
9886 mono_class_init (klass);
9888 else
9889 klass = array_access_to_klass (*ip);
9891 if (sp [0]->type != STACK_OBJ)
9892 UNVERIFIED;
9894 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9896 if (mini_is_gsharedvt_klass (cfg, klass)) {
9897 // FIXME-VT: OP_ICONST optimization
9898 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9899 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9900 ins->opcode = OP_LOADV_MEMBASE;
9901 } else if (sp [1]->opcode == OP_ICONST) {
9902 int array_reg = sp [0]->dreg;
9903 int index_reg = sp [1]->dreg;
9904 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9906 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9907 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9908 } else {
9909 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9910 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9912 *sp++ = ins;
9913 if (*ip == CEE_LDELEM)
9914 ip += 5;
9915 else
9916 ++ip;
9917 break;
9919 case CEE_STELEM_I:
9920 case CEE_STELEM_I1:
9921 case CEE_STELEM_I2:
9922 case CEE_STELEM_I4:
9923 case CEE_STELEM_I8:
9924 case CEE_STELEM_R4:
9925 case CEE_STELEM_R8:
9926 case CEE_STELEM_REF:
9927 case CEE_STELEM: {
9928 CHECK_STACK (3);
9929 sp -= 3;
9931 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9933 if (*ip == CEE_STELEM) {
9934 CHECK_OPSIZE (5);
9935 token = read32 (ip + 1);
9936 klass = mini_get_class (method, token, generic_context);
9937 CHECK_TYPELOAD (klass);
9938 mono_class_init (klass);
9940 else
9941 klass = array_access_to_klass (*ip);
9943 if (sp [0]->type != STACK_OBJ)
9944 UNVERIFIED;
9946 emit_array_store (cfg, klass, sp, TRUE);
9948 if (*ip == CEE_STELEM)
9949 ip += 5;
9950 else
9951 ++ip;
9952 inline_costs += 1;
9953 break;
9955 case CEE_CKFINITE: {
9956 CHECK_STACK (1);
9957 --sp;
9959 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9960 ins->sreg1 = sp [0]->dreg;
9961 ins->dreg = alloc_freg (cfg);
9962 ins->type = STACK_R8;
9963 MONO_ADD_INS (bblock, ins);
9965 *sp++ = mono_decompose_opcode (cfg, ins);
9967 ++ip;
9968 break;
9970 case CEE_REFANYVAL: {
9971 MonoInst *src_var, *src;
9973 int klass_reg = alloc_preg (cfg);
9974 int dreg = alloc_preg (cfg);
9976 GSHAREDVT_FAILURE (*ip);
9978 CHECK_STACK (1);
9979 MONO_INST_NEW (cfg, ins, *ip);
9980 --sp;
9981 CHECK_OPSIZE (5);
9982 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9983 CHECK_TYPELOAD (klass);
9984 mono_class_init (klass);
9986 if (cfg->generic_sharing_context)
9987 context_used = mono_class_check_context_used (klass);
9989 // FIXME:
9990 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9991 if (!src_var)
9992 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9993 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9996 if (context_used) {
9997 MonoInst *klass_ins;
9999 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10000 klass, MONO_RGCTX_INFO_KLASS);
10002 // FIXME:
10003 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10004 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10005 } else {
10006 mini_emit_class_check (cfg, klass_reg, klass);
10008 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10009 ins->type = STACK_MP;
10010 *sp++ = ins;
10011 ip += 5;
10012 break;
10014 case CEE_MKREFANY: {
10015 MonoInst *loc, *addr;
10017 GSHAREDVT_FAILURE (*ip);
10019 CHECK_STACK (1);
10020 MONO_INST_NEW (cfg, ins, *ip);
10021 --sp;
10022 CHECK_OPSIZE (5);
10023 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10024 CHECK_TYPELOAD (klass);
10025 mono_class_init (klass);
10027 if (cfg->generic_sharing_context)
10028 context_used = mono_class_check_context_used (klass);
10030 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10031 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10033 if (context_used) {
10034 MonoInst *const_ins;
10035 int type_reg = alloc_preg (cfg);
10037 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10038 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10039 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10040 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10041 } else if (cfg->compile_aot) {
10042 int const_reg = alloc_preg (cfg);
10043 int type_reg = alloc_preg (cfg);
10045 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10046 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10047 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10048 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10049 } else {
10050 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10051 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10053 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10055 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10056 ins->type = STACK_VTYPE;
10057 ins->klass = mono_defaults.typed_reference_class;
10058 *sp++ = ins;
10059 ip += 5;
10060 break;
10062 case CEE_LDTOKEN: {
10063 gpointer handle;
10064 MonoClass *handle_class;
10066 CHECK_STACK_OVF (1);
10068 CHECK_OPSIZE (5);
10069 n = read32 (ip + 1);
10071 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10072 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10073 handle = mono_method_get_wrapper_data (method, n);
10074 handle_class = mono_method_get_wrapper_data (method, n + 1);
10075 if (handle_class == mono_defaults.typehandle_class)
10076 handle = &((MonoClass*)handle)->byval_arg;
10078 else {
10079 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10081 if (!handle)
10082 LOAD_ERROR;
10083 mono_class_init (handle_class);
10084 if (cfg->generic_sharing_context) {
10085 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10086 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10087 /* This case handles ldtoken
10088 of an open type, like for
10089 typeof(Gen<>). */
10090 context_used = 0;
10091 } else if (handle_class == mono_defaults.typehandle_class) {
10092 /* If we get a MONO_TYPE_CLASS
10093 then we need to provide the
10094 open type, not an
10095 instantiation of it. */
10096 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10097 context_used = 0;
10098 else
10099 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
10100 } else if (handle_class == mono_defaults.fieldhandle_class)
10101 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
10102 else if (handle_class == mono_defaults.methodhandle_class)
10103 context_used = mono_method_check_context_used (handle);
10104 else
10105 g_assert_not_reached ();
10108 if ((cfg->opt & MONO_OPT_SHARED) &&
10109 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10110 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10111 MonoInst *addr, *vtvar, *iargs [3];
10112 int method_context_used;
10114 if (cfg->generic_sharing_context)
10115 method_context_used = mono_method_check_context_used (method);
10116 else
10117 method_context_used = 0;
10119 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10121 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10122 EMIT_NEW_ICONST (cfg, iargs [1], n);
10123 if (method_context_used) {
10124 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10125 method, MONO_RGCTX_INFO_METHOD);
10126 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10127 } else {
10128 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10129 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10131 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10133 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10135 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10136 } else {
10137 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10138 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10139 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10140 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10141 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10142 MonoClass *tclass = mono_class_from_mono_type (handle);
10144 mono_class_init (tclass);
10145 if (context_used) {
10146 ins = emit_get_rgctx_klass (cfg, context_used,
10147 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10148 } else if (cfg->compile_aot) {
10149 if (method->wrapper_type) {
10150 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10151 /* Special case for static synchronized wrappers */
10152 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10153 } else {
10154 /* FIXME: n is not a normal token */
10155 cfg->disable_aot = TRUE;
10156 EMIT_NEW_PCONST (cfg, ins, NULL);
10158 } else {
10159 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10161 } else {
10162 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10164 ins->type = STACK_OBJ;
10165 ins->klass = cmethod->klass;
10166 ip += 5;
10167 } else {
10168 MonoInst *addr, *vtvar;
10170 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10172 if (context_used) {
10173 if (handle_class == mono_defaults.typehandle_class) {
10174 ins = emit_get_rgctx_klass (cfg, context_used,
10175 mono_class_from_mono_type (handle),
10176 MONO_RGCTX_INFO_TYPE);
10177 } else if (handle_class == mono_defaults.methodhandle_class) {
10178 ins = emit_get_rgctx_method (cfg, context_used,
10179 handle, MONO_RGCTX_INFO_METHOD);
10180 } else if (handle_class == mono_defaults.fieldhandle_class) {
10181 ins = emit_get_rgctx_field (cfg, context_used,
10182 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10183 } else {
10184 g_assert_not_reached ();
10186 } else if (cfg->compile_aot) {
10187 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
10188 } else {
10189 EMIT_NEW_PCONST (cfg, ins, handle);
10191 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10192 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10193 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10197 *sp++ = ins;
10198 ip += 5;
10199 break;
10201 case CEE_THROW:
10202 CHECK_STACK (1);
10203 MONO_INST_NEW (cfg, ins, OP_THROW);
10204 --sp;
10205 ins->sreg1 = sp [0]->dreg;
10206 ip++;
10207 bblock->out_of_line = TRUE;
10208 MONO_ADD_INS (bblock, ins);
10209 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10210 MONO_ADD_INS (bblock, ins);
10211 sp = stack_start;
10213 link_bblock (cfg, bblock, end_bblock);
10214 start_new_bblock = 1;
10215 break;
10216 case CEE_ENDFINALLY:
10217 /* mono_save_seq_point_info () depends on this */
10218 if (sp != stack_start)
10219 emit_seq_point (cfg, method, ip, FALSE);
10220 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10221 MONO_ADD_INS (bblock, ins);
10222 ip++;
10223 start_new_bblock = 1;
10226 * Control will leave the method so empty the stack, otherwise
10227 * the next basic block will start with a nonempty stack.
10229 while (sp != stack_start) {
10230 sp--;
10232 break;
10233 case CEE_LEAVE:
10234 case CEE_LEAVE_S: {
10235 GList *handlers;
10237 if (*ip == CEE_LEAVE) {
10238 CHECK_OPSIZE (5);
10239 target = ip + 5 + (gint32)read32(ip + 1);
10240 } else {
10241 CHECK_OPSIZE (2);
10242 target = ip + 2 + (signed char)(ip [1]);
10245 /* empty the stack */
10246 while (sp != stack_start) {
10247 sp--;
10251 * If this leave statement is in a catch block, check for a
10252 * pending exception, and rethrow it if necessary.
10253 * We avoid doing this in runtime invoke wrappers, since those are called
10254 * by native code which expects the wrapper to catch all exceptions.
10256 for (i = 0; i < header->num_clauses; ++i) {
10257 MonoExceptionClause *clause = &header->clauses [i];
10260 * Use <= in the final comparison to handle clauses with multiple
10261 * leave statements, like in bug #78024.
10262 * The ordering of the exception clauses guarantees that we find the
10263 * innermost clause.
10265 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10266 MonoInst *exc_ins;
10267 MonoBasicBlock *dont_throw;
10270 MonoInst *load;
10272 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10275 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10277 NEW_BBLOCK (cfg, dont_throw);
10280 * Currently, we always rethrow the abort exception, despite the
10281 * fact that this is not correct. See thread6.cs for an example.
10282 * But propagating the abort exception is more important than
10283 * getting the semantics right.
10285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10286 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10287 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10289 MONO_START_BB (cfg, dont_throw);
10290 bblock = cfg->cbb;
10294 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10295 GList *tmp;
10296 MonoExceptionClause *clause;
10298 for (tmp = handlers; tmp; tmp = tmp->next) {
10299 clause = tmp->data;
10300 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10301 g_assert (tblock);
10302 link_bblock (cfg, bblock, tblock);
10303 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10304 ins->inst_target_bb = tblock;
10305 ins->inst_eh_block = clause;
10306 MONO_ADD_INS (bblock, ins);
10307 bblock->has_call_handler = 1;
10308 if (COMPILE_LLVM (cfg)) {
10309 MonoBasicBlock *target_bb;
10312 * Link the finally bblock with the target, since it will
10313 * conceptually branch there.
10314 * FIXME: Have to link the bblock containing the endfinally.
10316 GET_BBLOCK (cfg, target_bb, target);
10317 link_bblock (cfg, tblock, target_bb);
10320 g_list_free (handlers);
10323 MONO_INST_NEW (cfg, ins, OP_BR);
10324 MONO_ADD_INS (bblock, ins);
10325 GET_BBLOCK (cfg, tblock, target);
10326 link_bblock (cfg, bblock, tblock);
10327 ins->inst_target_bb = tblock;
10328 start_new_bblock = 1;
10330 if (*ip == CEE_LEAVE)
10331 ip += 5;
10332 else
10333 ip += 2;
10335 break;
10339 * Mono specific opcodes
10341 case MONO_CUSTOM_PREFIX: {
10343 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10345 CHECK_OPSIZE (2);
10346 switch (ip [1]) {
10347 case CEE_MONO_ICALL: {
10348 gpointer func;
10349 MonoJitICallInfo *info;
10351 token = read32 (ip + 2);
10352 func = mono_method_get_wrapper_data (method, token);
10353 info = mono_find_jit_icall_by_addr (func);
10354 if (!info)
10355 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10356 g_assert (info);
10358 CHECK_STACK (info->sig->param_count);
10359 sp -= info->sig->param_count;
10361 ins = mono_emit_jit_icall (cfg, info->func, sp);
10362 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10363 *sp++ = ins;
10365 ip += 6;
10366 inline_costs += 10 * num_calls++;
10368 break;
10370 case CEE_MONO_LDPTR: {
10371 gpointer ptr;
10373 CHECK_STACK_OVF (1);
10374 CHECK_OPSIZE (6);
10375 token = read32 (ip + 2);
10377 ptr = mono_method_get_wrapper_data (method, token);
10378 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
10379 MonoJitICallInfo *callinfo;
10380 const char *icall_name;
10382 icall_name = method->name + strlen ("__icall_wrapper_");
10383 g_assert (icall_name);
10384 callinfo = mono_find_jit_icall_by_name (icall_name);
10385 g_assert (callinfo);
10387 if (ptr == callinfo->func) {
10388 /* Will be transformed into an AOTCONST later */
10389 EMIT_NEW_PCONST (cfg, ins, ptr);
10390 *sp++ = ins;
10391 ip += 6;
10392 break;
10395 /* FIXME: Generalize this */
10396 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10397 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10398 *sp++ = ins;
10399 ip += 6;
10400 break;
10402 EMIT_NEW_PCONST (cfg, ins, ptr);
10403 *sp++ = ins;
10404 ip += 6;
10405 inline_costs += 10 * num_calls++;
10406 /* Can't embed random pointers into AOT code */
10407 cfg->disable_aot = 1;
10408 break;
10410 case CEE_MONO_ICALL_ADDR: {
10411 MonoMethod *cmethod;
10412 gpointer ptr;
10414 CHECK_STACK_OVF (1);
10415 CHECK_OPSIZE (6);
10416 token = read32 (ip + 2);
10418 cmethod = mono_method_get_wrapper_data (method, token);
10420 if (cfg->compile_aot) {
10421 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10422 } else {
10423 ptr = mono_lookup_internal_call (cmethod);
10424 g_assert (ptr);
10425 EMIT_NEW_PCONST (cfg, ins, ptr);
10427 *sp++ = ins;
10428 ip += 6;
10429 break;
10431 case CEE_MONO_VTADDR: {
10432 MonoInst *src_var, *src;
10434 CHECK_STACK (1);
10435 --sp;
10437 // FIXME:
10438 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10439 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10440 *sp++ = src;
10441 ip += 2;
10442 break;
10444 case CEE_MONO_NEWOBJ: {
10445 MonoInst *iargs [2];
10447 CHECK_STACK_OVF (1);
10448 CHECK_OPSIZE (6);
10449 token = read32 (ip + 2);
10450 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10451 mono_class_init (klass);
10452 NEW_DOMAINCONST (cfg, iargs [0]);
10453 MONO_ADD_INS (cfg->cbb, iargs [0]);
10454 NEW_CLASSCONST (cfg, iargs [1], klass);
10455 MONO_ADD_INS (cfg->cbb, iargs [1]);
10456 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10457 ip += 6;
10458 inline_costs += 10 * num_calls++;
10459 break;
10461 case CEE_MONO_OBJADDR:
10462 CHECK_STACK (1);
10463 --sp;
10464 MONO_INST_NEW (cfg, ins, OP_MOVE);
10465 ins->dreg = alloc_ireg_mp (cfg);
10466 ins->sreg1 = sp [0]->dreg;
10467 ins->type = STACK_MP;
10468 MONO_ADD_INS (cfg->cbb, ins);
10469 *sp++ = ins;
10470 ip += 2;
10471 break;
10472 case CEE_MONO_LDNATIVEOBJ:
10474 * Similar to LDOBJ, but instead load the unmanaged
10475 * representation of the vtype to the stack.
10477 CHECK_STACK (1);
10478 CHECK_OPSIZE (6);
10479 --sp;
10480 token = read32 (ip + 2);
10481 klass = mono_method_get_wrapper_data (method, token);
10482 g_assert (klass->valuetype);
10483 mono_class_init (klass);
10486 MonoInst *src, *dest, *temp;
10488 src = sp [0];
10489 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10490 temp->backend.is_pinvoke = 1;
10491 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10492 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10494 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10495 dest->type = STACK_VTYPE;
10496 dest->klass = klass;
10498 *sp ++ = dest;
10499 ip += 6;
10501 break;
10502 case CEE_MONO_RETOBJ: {
10504 * Same as RET, but return the native representation of a vtype
10505 * to the caller.
10507 g_assert (cfg->ret);
10508 g_assert (mono_method_signature (method)->pinvoke);
10509 CHECK_STACK (1);
10510 --sp;
10512 CHECK_OPSIZE (6);
10513 token = read32 (ip + 2);
10514 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10516 if (!cfg->vret_addr) {
10517 g_assert (cfg->ret_var_is_local);
10519 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10520 } else {
10521 EMIT_NEW_RETLOADA (cfg, ins);
10523 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10525 if (sp != stack_start)
10526 UNVERIFIED;
10528 MONO_INST_NEW (cfg, ins, OP_BR);
10529 ins->inst_target_bb = end_bblock;
10530 MONO_ADD_INS (bblock, ins);
10531 link_bblock (cfg, bblock, end_bblock);
10532 start_new_bblock = 1;
10533 ip += 6;
10534 break;
10536 case CEE_MONO_CISINST:
10537 case CEE_MONO_CCASTCLASS: {
10538 int token;
10539 CHECK_STACK (1);
10540 --sp;
10541 CHECK_OPSIZE (6);
10542 token = read32 (ip + 2);
10543 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10544 if (ip [1] == CEE_MONO_CISINST)
10545 ins = handle_cisinst (cfg, klass, sp [0]);
10546 else
10547 ins = handle_ccastclass (cfg, klass, sp [0]);
10548 bblock = cfg->cbb;
10549 *sp++ = ins;
10550 ip += 6;
10551 break;
10553 case CEE_MONO_SAVE_LMF:
10554 case CEE_MONO_RESTORE_LMF:
10555 #ifdef MONO_ARCH_HAVE_LMF_OPS
10556 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10557 MONO_ADD_INS (bblock, ins);
10558 cfg->need_lmf_area = TRUE;
10559 #endif
10560 ip += 2;
10561 break;
10562 case CEE_MONO_CLASSCONST:
10563 CHECK_STACK_OVF (1);
10564 CHECK_OPSIZE (6);
10565 token = read32 (ip + 2);
10566 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10567 *sp++ = ins;
10568 ip += 6;
10569 inline_costs += 10 * num_calls++;
10570 break;
10571 case CEE_MONO_NOT_TAKEN:
10572 bblock->out_of_line = TRUE;
10573 ip += 2;
10574 break;
10575 case CEE_MONO_TLS:
10576 CHECK_STACK_OVF (1);
10577 CHECK_OPSIZE (6);
10578 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10579 ins->dreg = alloc_preg (cfg);
10580 ins->inst_offset = (gint32)read32 (ip + 2);
10581 ins->type = STACK_PTR;
10582 MONO_ADD_INS (bblock, ins);
10583 *sp++ = ins;
10584 ip += 6;
10585 break;
10586 case CEE_MONO_DYN_CALL: {
10587 MonoCallInst *call;
10589 /* It would be easier to call a trampoline, but that would put an
10590 * extra frame on the stack, confusing exception handling. So
10591 * implement it inline using an opcode for now.
10594 if (!cfg->dyn_call_var) {
10595 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10596 /* prevent it from being register allocated */
10597 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10600 /* Has to use a call inst since it local regalloc expects it */
10601 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10602 ins = (MonoInst*)call;
10603 sp -= 2;
10604 ins->sreg1 = sp [0]->dreg;
10605 ins->sreg2 = sp [1]->dreg;
10606 MONO_ADD_INS (bblock, ins);
10608 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10609 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10610 #endif
10612 ip += 2;
10613 inline_costs += 10 * num_calls++;
10615 break;
10617 case CEE_MONO_MEMORY_BARRIER: {
10618 CHECK_OPSIZE (5);
10619 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10620 ip += 5;
10621 break;
10623 case CEE_MONO_JIT_ATTACH: {
10624 MonoInst *args [16];
10625 MonoInst *ad_ins, *lmf_ins;
10626 MonoBasicBlock *next_bb = NULL;
10628 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10630 EMIT_NEW_PCONST (cfg, ins, NULL);
10631 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10633 #if TARGET_WIN32
10634 ad_ins = NULL;
10635 lmf_ins = NULL;
10636 #else
10637 ad_ins = mono_get_domain_intrinsic (cfg);
10638 lmf_ins = mono_get_lmf_intrinsic (cfg);
10639 #endif
10641 #ifdef MONO_ARCH_HAVE_TLS_GET
10642 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
10643 NEW_BBLOCK (cfg, next_bb);
10645 MONO_ADD_INS (cfg->cbb, ad_ins);
10646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
10647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10649 MONO_ADD_INS (cfg->cbb, lmf_ins);
10650 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
10651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10653 #endif
10655 if (cfg->compile_aot) {
10656 /* AOT code is only used in the root domain */
10657 EMIT_NEW_PCONST (cfg, args [0], NULL);
10658 } else {
10659 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
10661 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
10662 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10664 if (next_bb) {
10665 MONO_START_BB (cfg, next_bb);
10666 bblock = cfg->cbb;
10668 ip += 2;
10669 break;
10671 case CEE_MONO_JIT_DETACH: {
10672 MonoInst *args [16];
10674 /* Restore the original domain */
10675 dreg = alloc_ireg (cfg);
10676 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
10677 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
10678 ip += 2;
10679 break;
10681 default:
10682 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10683 break;
10685 break;
10688 case CEE_PREFIX1: {
10689 CHECK_OPSIZE (2);
10690 switch (ip [1]) {
10691 case CEE_ARGLIST: {
10692 /* somewhat similar to LDTOKEN */
10693 MonoInst *addr, *vtvar;
10694 CHECK_STACK_OVF (1);
10695 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10697 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10698 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10700 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10701 ins->type = STACK_VTYPE;
10702 ins->klass = mono_defaults.argumenthandle_class;
10703 *sp++ = ins;
10704 ip += 2;
10705 break;
10707 case CEE_CEQ:
10708 case CEE_CGT:
10709 case CEE_CGT_UN:
10710 case CEE_CLT:
10711 case CEE_CLT_UN: {
10712 MonoInst *cmp;
10713 CHECK_STACK (2);
10715 * The following transforms:
10716 * CEE_CEQ into OP_CEQ
10717 * CEE_CGT into OP_CGT
10718 * CEE_CGT_UN into OP_CGT_UN
10719 * CEE_CLT into OP_CLT
10720 * CEE_CLT_UN into OP_CLT_UN
10722 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10724 MONO_INST_NEW (cfg, ins, cmp->opcode);
10725 sp -= 2;
10726 cmp->sreg1 = sp [0]->dreg;
10727 cmp->sreg2 = sp [1]->dreg;
10728 type_from_op (cmp, sp [0], sp [1]);
10729 CHECK_TYPE (cmp);
10730 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10731 cmp->opcode = OP_LCOMPARE;
10732 else if (sp [0]->type == STACK_R8)
10733 cmp->opcode = OP_FCOMPARE;
10734 else
10735 cmp->opcode = OP_ICOMPARE;
10736 MONO_ADD_INS (bblock, cmp);
10737 ins->type = STACK_I4;
10738 ins->dreg = alloc_dreg (cfg, ins->type);
10739 type_from_op (ins, sp [0], sp [1]);
10741 if (cmp->opcode == OP_FCOMPARE) {
10743 * The backends expect the fceq opcodes to do the
10744 * comparison too.
10746 cmp->opcode = OP_NOP;
10747 ins->sreg1 = cmp->sreg1;
10748 ins->sreg2 = cmp->sreg2;
10750 MONO_ADD_INS (bblock, ins);
10751 *sp++ = ins;
10752 ip += 2;
10753 break;
10755 case CEE_LDFTN: {
10756 MonoInst *argconst;
10757 MonoMethod *cil_method;
10759 GSHAREDVT_FAILURE (*ip);
10761 CHECK_STACK_OVF (1);
10762 CHECK_OPSIZE (6);
10763 n = read32 (ip + 2);
10764 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10765 if (!cmethod || mono_loader_get_last_error ())
10766 LOAD_ERROR;
10767 mono_class_init (cmethod->klass);
10769 mono_save_token_info (cfg, image, n, cmethod);
10771 if (cfg->generic_sharing_context)
10772 context_used = mono_method_check_context_used (cmethod);
10774 cil_method = cmethod;
10775 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10776 METHOD_ACCESS_FAILURE;
10778 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10779 if (check_linkdemand (cfg, method, cmethod))
10780 INLINE_FAILURE ("linkdemand");
10781 CHECK_CFG_EXCEPTION;
10782 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10783 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10787 * Optimize the common case of ldftn+delegate creation
10789 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10790 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10791 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10792 MonoInst *target_ins;
10793 MonoMethod *invoke;
10794 int invoke_context_used = 0;
10796 invoke = mono_get_delegate_invoke (ctor_method->klass);
10797 if (!invoke || !mono_method_signature (invoke))
10798 LOAD_ERROR;
10800 if (cfg->generic_sharing_context)
10801 invoke_context_used = mono_method_check_context_used (invoke);
10803 target_ins = sp [-1];
10805 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10806 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10808 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10809 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10810 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10811 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10812 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10816 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10817 /* FIXME: SGEN support */
10818 if (invoke_context_used == 0) {
10819 ip += 6;
10820 if (cfg->verbose_level > 3)
10821 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10822 sp --;
10823 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10824 CHECK_CFG_EXCEPTION;
10825 ip += 5;
10826 sp ++;
10827 break;
10829 #endif
10833 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10834 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10835 *sp++ = ins;
10837 ip += 6;
10838 inline_costs += 10 * num_calls++;
10839 break;
10841 case CEE_LDVIRTFTN: {
10842 MonoInst *args [2];
10844 GSHAREDVT_FAILURE (*ip);
10846 CHECK_STACK (1);
10847 CHECK_OPSIZE (6);
10848 n = read32 (ip + 2);
10849 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10850 if (!cmethod || mono_loader_get_last_error ())
10851 LOAD_ERROR;
10852 mono_class_init (cmethod->klass);
10854 if (cfg->generic_sharing_context)
10855 context_used = mono_method_check_context_used (cmethod);
10857 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10858 if (check_linkdemand (cfg, method, cmethod))
10859 INLINE_FAILURE ("linkdemand");
10860 CHECK_CFG_EXCEPTION;
10861 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10862 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10865 --sp;
10866 args [0] = *sp;
10868 args [1] = emit_get_rgctx_method (cfg, context_used,
10869 cmethod, MONO_RGCTX_INFO_METHOD);
10871 if (context_used)
10872 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10873 else
10874 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10876 ip += 6;
10877 inline_costs += 10 * num_calls++;
10878 break;
10880 case CEE_LDARG:
10881 CHECK_STACK_OVF (1);
10882 CHECK_OPSIZE (4);
10883 n = read16 (ip + 2);
10884 CHECK_ARG (n);
10885 EMIT_NEW_ARGLOAD (cfg, ins, n);
10886 *sp++ = ins;
10887 ip += 4;
10888 break;
10889 case CEE_LDARGA:
10890 CHECK_STACK_OVF (1);
10891 CHECK_OPSIZE (4);
10892 n = read16 (ip + 2);
10893 CHECK_ARG (n);
10894 NEW_ARGLOADA (cfg, ins, n);
10895 MONO_ADD_INS (cfg->cbb, ins);
10896 *sp++ = ins;
10897 ip += 4;
10898 break;
10899 case CEE_STARG:
10900 CHECK_STACK (1);
10901 --sp;
10902 CHECK_OPSIZE (4);
10903 n = read16 (ip + 2);
10904 CHECK_ARG (n);
10905 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10906 UNVERIFIED;
10907 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10908 ip += 4;
10909 break;
10910 case CEE_LDLOC:
10911 CHECK_STACK_OVF (1);
10912 CHECK_OPSIZE (4);
10913 n = read16 (ip + 2);
10914 CHECK_LOCAL (n);
10915 EMIT_NEW_LOCLOAD (cfg, ins, n);
10916 *sp++ = ins;
10917 ip += 4;
10918 break;
10919 case CEE_LDLOCA: {
10920 unsigned char *tmp_ip;
10921 CHECK_STACK_OVF (1);
10922 CHECK_OPSIZE (4);
10923 n = read16 (ip + 2);
10924 CHECK_LOCAL (n);
10926 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10927 ip = tmp_ip;
10928 inline_costs += 1;
10929 break;
10932 EMIT_NEW_LOCLOADA (cfg, ins, n);
10933 *sp++ = ins;
10934 ip += 4;
10935 break;
10937 case CEE_STLOC:
10938 CHECK_STACK (1);
10939 --sp;
10940 CHECK_OPSIZE (4);
10941 n = read16 (ip + 2);
10942 CHECK_LOCAL (n);
10943 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10944 UNVERIFIED;
10945 emit_stloc_ir (cfg, sp, header, n);
10946 ip += 4;
10947 inline_costs += 1;
10948 break;
10949 case CEE_LOCALLOC:
10950 CHECK_STACK (1);
10951 --sp;
10952 if (sp != stack_start)
10953 UNVERIFIED;
10954 if (cfg->method != method)
10956 * Inlining this into a loop in a parent could lead to
10957 * stack overflows which is different behavior than the
10958 * non-inlined case, thus disable inlining in this case.
10960 goto inline_failure;
10962 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10963 ins->dreg = alloc_preg (cfg);
10964 ins->sreg1 = sp [0]->dreg;
10965 ins->type = STACK_PTR;
10966 MONO_ADD_INS (cfg->cbb, ins);
10968 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10969 if (init_locals)
10970 ins->flags |= MONO_INST_INIT;
10972 *sp++ = ins;
10973 ip += 2;
10974 break;
10975 case CEE_ENDFILTER: {
10976 MonoExceptionClause *clause, *nearest;
10977 int cc, nearest_num;
10979 CHECK_STACK (1);
10980 --sp;
10981 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10982 UNVERIFIED;
10983 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10984 ins->sreg1 = (*sp)->dreg;
10985 MONO_ADD_INS (bblock, ins);
10986 start_new_bblock = 1;
10987 ip += 2;
10989 nearest = NULL;
10990 nearest_num = 0;
10991 for (cc = 0; cc < header->num_clauses; ++cc) {
10992 clause = &header->clauses [cc];
10993 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10994 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10995 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10996 nearest = clause;
10997 nearest_num = cc;
11000 g_assert (nearest);
11001 if ((ip - header->code) != nearest->handler_offset)
11002 UNVERIFIED;
11004 break;
11006 case CEE_UNALIGNED_:
11007 ins_flag |= MONO_INST_UNALIGNED;
11008 /* FIXME: record alignment? we can assume 1 for now */
11009 CHECK_OPSIZE (3);
11010 ip += 3;
11011 break;
11012 case CEE_VOLATILE_:
11013 ins_flag |= MONO_INST_VOLATILE;
11014 ip += 2;
11015 break;
11016 case CEE_TAIL_:
11017 ins_flag |= MONO_INST_TAILCALL;
11018 cfg->flags |= MONO_CFG_HAS_TAIL;
11019 /* Can't inline tail calls at this time */
11020 inline_costs += 100000;
11021 ip += 2;
11022 break;
11023 case CEE_INITOBJ:
11024 CHECK_STACK (1);
11025 --sp;
11026 CHECK_OPSIZE (6);
11027 token = read32 (ip + 2);
11028 klass = mini_get_class (method, token, generic_context);
11029 CHECK_TYPELOAD (klass);
11030 if (generic_class_is_reference_type (cfg, klass))
11031 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11032 else
11033 mini_emit_initobj (cfg, *sp, NULL, klass);
11034 ip += 6;
11035 inline_costs += 1;
11036 break;
11037 case CEE_CONSTRAINED_:
11038 CHECK_OPSIZE (6);
11039 token = read32 (ip + 2);
11040 if (method->wrapper_type != MONO_WRAPPER_NONE)
11041 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
11042 else
11043 constrained_call = mono_class_get_full (image, token, generic_context);
11044 CHECK_TYPELOAD (constrained_call);
11045 ip += 6;
11046 break;
11047 case CEE_CPBLK:
11048 case CEE_INITBLK: {
11049 MonoInst *iargs [3];
11050 CHECK_STACK (3);
11051 sp -= 3;
11053 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11054 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11055 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11056 /* emit_memset only works when val == 0 */
11057 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11058 } else {
11059 iargs [0] = sp [0];
11060 iargs [1] = sp [1];
11061 iargs [2] = sp [2];
11062 if (ip [1] == CEE_CPBLK) {
11063 MonoMethod *memcpy_method = get_memcpy_method ();
11064 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11065 } else {
11066 MonoMethod *memset_method = get_memset_method ();
11067 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11070 ip += 2;
11071 inline_costs += 1;
11072 break;
11074 case CEE_NO_:
11075 CHECK_OPSIZE (3);
11076 if (ip [2] & 0x1)
11077 ins_flag |= MONO_INST_NOTYPECHECK;
11078 if (ip [2] & 0x2)
11079 ins_flag |= MONO_INST_NORANGECHECK;
11080 /* we ignore the no-nullcheck for now since we
11081 * really do it explicitly only when doing callvirt->call
11083 ip += 3;
11084 break;
11085 case CEE_RETHROW: {
11086 MonoInst *load;
11087 int handler_offset = -1;
11089 for (i = 0; i < header->num_clauses; ++i) {
11090 MonoExceptionClause *clause = &header->clauses [i];
11091 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11092 handler_offset = clause->handler_offset;
11093 break;
11097 bblock->flags |= BB_EXCEPTION_UNSAFE;
11099 g_assert (handler_offset != -1);
11101 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11102 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11103 ins->sreg1 = load->dreg;
11104 MONO_ADD_INS (bblock, ins);
11106 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11107 MONO_ADD_INS (bblock, ins);
11109 sp = stack_start;
11110 link_bblock (cfg, bblock, end_bblock);
11111 start_new_bblock = 1;
11112 ip += 2;
11113 break;
11115 case CEE_SIZEOF: {
11116 guint32 val;
11117 int ialign;
11119 GSHAREDVT_FAILURE (*ip);
11121 CHECK_STACK_OVF (1);
11122 CHECK_OPSIZE (6);
11123 token = read32 (ip + 2);
11124 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11125 MonoType *type = mono_type_create_from_typespec (image, token);
11126 val = mono_type_size (type, &ialign);
11127 } else {
11128 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11129 CHECK_TYPELOAD (klass);
11130 mono_class_init (klass);
11131 val = mono_type_size (&klass->byval_arg, &ialign);
11133 EMIT_NEW_ICONST (cfg, ins, val);
11134 *sp++= ins;
11135 ip += 6;
11136 break;
11138 case CEE_REFANYTYPE: {
11139 MonoInst *src_var, *src;
11141 GSHAREDVT_FAILURE (*ip);
11143 CHECK_STACK (1);
11144 --sp;
11146 // FIXME:
11147 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11148 if (!src_var)
11149 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11150 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11151 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11152 *sp++ = ins;
11153 ip += 2;
11154 break;
11156 case CEE_READONLY_:
11157 readonly = TRUE;
11158 ip += 2;
11159 break;
11161 case CEE_UNUSED56:
11162 case CEE_UNUSED57:
11163 case CEE_UNUSED70:
11164 case CEE_UNUSED:
11165 case CEE_UNUSED99:
11166 UNVERIFIED;
11168 default:
11169 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11170 UNVERIFIED;
11172 break;
11174 case CEE_UNUSED58:
11175 case CEE_UNUSED1:
11176 UNVERIFIED;
11178 default:
11179 g_warning ("opcode 0x%02x not handled", *ip);
11180 UNVERIFIED;
11183 if (start_new_bblock != 1)
11184 UNVERIFIED;
11186 bblock->cil_length = ip - bblock->cil_code;
11187 if (bblock->next_bb) {
11188 /* This could already be set because of inlining, #693905 */
11189 MonoBasicBlock *bb = bblock;
11191 while (bb->next_bb)
11192 bb = bb->next_bb;
11193 bb->next_bb = end_bblock;
11194 } else {
11195 bblock->next_bb = end_bblock;
11198 if (cfg->method == method && cfg->domainvar) {
11199 MonoInst *store;
11200 MonoInst *get_domain;
11202 cfg->cbb = init_localsbb;
11204 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11205 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11207 else {
11208 get_domain->dreg = alloc_preg (cfg);
11209 MONO_ADD_INS (cfg->cbb, get_domain);
11211 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11212 MONO_ADD_INS (cfg->cbb, store);
11215 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11216 if (cfg->compile_aot)
11217 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11218 mono_get_got_var (cfg);
11219 #endif
11221 if (cfg->method == method && cfg->got_var)
11222 mono_emit_load_got_addr (cfg);
11224 if (init_locals) {
11225 MonoInst *store;
11227 cfg->cbb = init_localsbb;
11228 cfg->ip = NULL;
11229 for (i = 0; i < header->num_locals; ++i) {
11230 MonoType *ptype = header->locals [i];
11231 int t = ptype->type;
11232 dreg = cfg->locals [i]->dreg;
11234 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11235 t = mono_class_enum_basetype (ptype->data.klass)->type;
11236 if (ptype->byref) {
11237 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11238 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11239 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11240 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11241 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11242 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11243 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11244 ins->type = STACK_R8;
11245 ins->inst_p0 = (void*)&r8_0;
11246 ins->dreg = alloc_dreg (cfg, STACK_R8);
11247 MONO_ADD_INS (init_localsbb, ins);
11248 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11249 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11250 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11251 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11252 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11253 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11254 } else {
11255 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11260 if (cfg->init_ref_vars && cfg->method == method) {
11261 /* Emit initialization for ref vars */
11262 // FIXME: Avoid duplication initialization for IL locals.
11263 for (i = 0; i < cfg->num_varinfo; ++i) {
11264 MonoInst *ins = cfg->varinfo [i];
11266 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11267 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11271 if (seq_points) {
11272 MonoBasicBlock *bb;
11275 * Make seq points at backward branch targets interruptable.
11277 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11278 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11279 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11282 /* Add a sequence point for method entry/exit events */
11283 if (seq_points) {
11284 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11285 MONO_ADD_INS (init_localsbb, ins);
11286 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11287 MONO_ADD_INS (cfg->bb_exit, ins);
11290 cfg->ip = NULL;
11292 if (cfg->method == method) {
11293 MonoBasicBlock *bb;
11294 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11295 bb->region = mono_find_block_region (cfg, bb->real_offset);
11296 if (cfg->spvars)
11297 mono_create_spvar_for_region (cfg, bb->region);
11298 if (cfg->verbose_level > 2)
11299 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11303 g_slist_free (class_inits);
11304 dont_inline = g_list_remove (dont_inline, method);
11306 if (inline_costs < 0) {
11307 char *mname;
11309 /* Method is too large */
11310 mname = mono_method_full_name (method, TRUE);
11311 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11312 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11313 g_free (mname);
11314 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11315 mono_basic_block_free (original_bb);
11316 return -1;
11319 if ((cfg->verbose_level > 2) && (cfg->method == method))
11320 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11322 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11323 mono_basic_block_free (original_bb);
11324 return inline_costs;
11326 exception_exit:
11327 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11328 goto cleanup;
11330 inline_failure:
11331 goto cleanup;
11333 load_error:
11334 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11335 goto cleanup;
11337 unverified:
11338 set_exception_type_from_invalid_il (cfg, method, ip);
11339 goto cleanup;
11341 cleanup:
11342 g_slist_free (class_inits);
11343 mono_basic_block_free (original_bb);
11344 dont_inline = g_list_remove (dont_inline, method);
11345 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11346 return -1;
11349 static int
11350 store_membase_reg_to_store_membase_imm (int opcode)
11352 switch (opcode) {
11353 case OP_STORE_MEMBASE_REG:
11354 return OP_STORE_MEMBASE_IMM;
11355 case OP_STOREI1_MEMBASE_REG:
11356 return OP_STOREI1_MEMBASE_IMM;
11357 case OP_STOREI2_MEMBASE_REG:
11358 return OP_STOREI2_MEMBASE_IMM;
11359 case OP_STOREI4_MEMBASE_REG:
11360 return OP_STOREI4_MEMBASE_IMM;
11361 case OP_STOREI8_MEMBASE_REG:
11362 return OP_STOREI8_MEMBASE_IMM;
11363 default:
11364 g_assert_not_reached ();
11367 return -1;
11370 #endif /* DISABLE_JIT */
11373 mono_op_to_op_imm (int opcode)
11375 switch (opcode) {
11376 case OP_IADD:
11377 return OP_IADD_IMM;
11378 case OP_ISUB:
11379 return OP_ISUB_IMM;
11380 case OP_IDIV:
11381 return OP_IDIV_IMM;
11382 case OP_IDIV_UN:
11383 return OP_IDIV_UN_IMM;
11384 case OP_IREM:
11385 return OP_IREM_IMM;
11386 case OP_IREM_UN:
11387 return OP_IREM_UN_IMM;
11388 case OP_IMUL:
11389 return OP_IMUL_IMM;
11390 case OP_IAND:
11391 return OP_IAND_IMM;
11392 case OP_IOR:
11393 return OP_IOR_IMM;
11394 case OP_IXOR:
11395 return OP_IXOR_IMM;
11396 case OP_ISHL:
11397 return OP_ISHL_IMM;
11398 case OP_ISHR:
11399 return OP_ISHR_IMM;
11400 case OP_ISHR_UN:
11401 return OP_ISHR_UN_IMM;
11403 case OP_LADD:
11404 return OP_LADD_IMM;
11405 case OP_LSUB:
11406 return OP_LSUB_IMM;
11407 case OP_LAND:
11408 return OP_LAND_IMM;
11409 case OP_LOR:
11410 return OP_LOR_IMM;
11411 case OP_LXOR:
11412 return OP_LXOR_IMM;
11413 case OP_LSHL:
11414 return OP_LSHL_IMM;
11415 case OP_LSHR:
11416 return OP_LSHR_IMM;
11417 case OP_LSHR_UN:
11418 return OP_LSHR_UN_IMM;
11420 case OP_COMPARE:
11421 return OP_COMPARE_IMM;
11422 case OP_ICOMPARE:
11423 return OP_ICOMPARE_IMM;
11424 case OP_LCOMPARE:
11425 return OP_LCOMPARE_IMM;
11427 case OP_STORE_MEMBASE_REG:
11428 return OP_STORE_MEMBASE_IMM;
11429 case OP_STOREI1_MEMBASE_REG:
11430 return OP_STOREI1_MEMBASE_IMM;
11431 case OP_STOREI2_MEMBASE_REG:
11432 return OP_STOREI2_MEMBASE_IMM;
11433 case OP_STOREI4_MEMBASE_REG:
11434 return OP_STOREI4_MEMBASE_IMM;
11436 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11437 case OP_X86_PUSH:
11438 return OP_X86_PUSH_IMM;
11439 case OP_X86_COMPARE_MEMBASE_REG:
11440 return OP_X86_COMPARE_MEMBASE_IMM;
11441 #endif
11442 #if defined(TARGET_AMD64)
11443 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11444 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11445 #endif
11446 case OP_VOIDCALL_REG:
11447 return OP_VOIDCALL;
11448 case OP_CALL_REG:
11449 return OP_CALL;
11450 case OP_LCALL_REG:
11451 return OP_LCALL;
11452 case OP_FCALL_REG:
11453 return OP_FCALL;
11454 case OP_LOCALLOC:
11455 return OP_LOCALLOC_IMM;
11458 return -1;
11461 static int
11462 ldind_to_load_membase (int opcode)
11464 switch (opcode) {
11465 case CEE_LDIND_I1:
11466 return OP_LOADI1_MEMBASE;
11467 case CEE_LDIND_U1:
11468 return OP_LOADU1_MEMBASE;
11469 case CEE_LDIND_I2:
11470 return OP_LOADI2_MEMBASE;
11471 case CEE_LDIND_U2:
11472 return OP_LOADU2_MEMBASE;
11473 case CEE_LDIND_I4:
11474 return OP_LOADI4_MEMBASE;
11475 case CEE_LDIND_U4:
11476 return OP_LOADU4_MEMBASE;
11477 case CEE_LDIND_I:
11478 return OP_LOAD_MEMBASE;
11479 case CEE_LDIND_REF:
11480 return OP_LOAD_MEMBASE;
11481 case CEE_LDIND_I8:
11482 return OP_LOADI8_MEMBASE;
11483 case CEE_LDIND_R4:
11484 return OP_LOADR4_MEMBASE;
11485 case CEE_LDIND_R8:
11486 return OP_LOADR8_MEMBASE;
11487 default:
11488 g_assert_not_reached ();
11491 return -1;
11494 static int
11495 stind_to_store_membase (int opcode)
11497 switch (opcode) {
11498 case CEE_STIND_I1:
11499 return OP_STOREI1_MEMBASE_REG;
11500 case CEE_STIND_I2:
11501 return OP_STOREI2_MEMBASE_REG;
11502 case CEE_STIND_I4:
11503 return OP_STOREI4_MEMBASE_REG;
11504 case CEE_STIND_I:
11505 case CEE_STIND_REF:
11506 return OP_STORE_MEMBASE_REG;
11507 case CEE_STIND_I8:
11508 return OP_STOREI8_MEMBASE_REG;
11509 case CEE_STIND_R4:
11510 return OP_STORER4_MEMBASE_REG;
11511 case CEE_STIND_R8:
11512 return OP_STORER8_MEMBASE_REG;
11513 default:
11514 g_assert_not_reached ();
11517 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the variant which loads from an
 * absolute address, on the architectures which support it; returns -1
 * when no such variant is available.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return the arch specific read-modify-write opcode corresponding to
 * OPCODE whose result is consumed by STORE_OPCODE, i.e. an opcode which
 * operates directly on its memory destination, or -1 if none exists.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only plain 32 bit / pointer sized stores can be folded */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* A move whose result is immediately stored becomes a no-op */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* A move whose result is immediately stored becomes a no-op */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return the arch specific opcode which computes OPCODE directly into its
 * memory destination when the result is consumed by STORE_OPCODE (only the
 * byte sized SETcc forms exist), or -1 if no such opcode exists.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/*
		 * Explicit break: the original code fell through into OP_CNE here.
		 * That was harmless only because both cases test the same store
		 * opcode, but it would silently return SETNE for an ICEQ if the
		 * guards ever diverged.
		 */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return the arch specific variant of OPCODE which reads its first source
 * operand directly from memory, given that the operand is produced by
 * LOAD_OPCODE, or -1 if folding is not possible.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return the arch specific variant of OPCODE which reads its second source
 * operand directly from memory, given that the operand is produced by
 * LOAD_OPCODE, or -1 if folding is not possible.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* Under ilp32, pointer sized loads are 32 bit, otherwise 64 bit */
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for the opcodes which are
 * emulated by a runtime helper on this architecture, since those have no
 * usable immediate variant.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
11827 #ifndef DISABLE_JIT
11830 * mono_handle_global_vregs:
11832 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11833 * for them.
11835 void
11836 mono_handle_global_vregs (MonoCompile *cfg)
11838 gint32 *vreg_to_bb;
11839 MonoBasicBlock *bb;
11840 int i, pos;
11842 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11844 #ifdef MONO_ARCH_SIMD_INTRINSICS
11845 if (cfg->uses_simd_intrinsics)
11846 mono_simd_simplify_indirection (cfg);
11847 #endif
11849 /* Find local vregs used in more than one bb */
11850 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11851 MonoInst *ins = bb->code;
11852 int block_num = bb->block_num;
11854 if (cfg->verbose_level > 2)
11855 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11857 cfg->cbb = bb;
11858 for (; ins; ins = ins->next) {
11859 const char *spec = INS_INFO (ins->opcode);
11860 int regtype = 0, regindex;
11861 gint32 prev_bb;
11863 if (G_UNLIKELY (cfg->verbose_level > 2))
11864 mono_print_ins (ins);
11866 g_assert (ins->opcode >= MONO_CEE_LAST);
11868 for (regindex = 0; regindex < 4; regindex ++) {
11869 int vreg = 0;
11871 if (regindex == 0) {
11872 regtype = spec [MONO_INST_DEST];
11873 if (regtype == ' ')
11874 continue;
11875 vreg = ins->dreg;
11876 } else if (regindex == 1) {
11877 regtype = spec [MONO_INST_SRC1];
11878 if (regtype == ' ')
11879 continue;
11880 vreg = ins->sreg1;
11881 } else if (regindex == 2) {
11882 regtype = spec [MONO_INST_SRC2];
11883 if (regtype == ' ')
11884 continue;
11885 vreg = ins->sreg2;
11886 } else if (regindex == 3) {
11887 regtype = spec [MONO_INST_SRC3];
11888 if (regtype == ' ')
11889 continue;
11890 vreg = ins->sreg3;
11893 #if SIZEOF_REGISTER == 4
11894 /* In the LLVM case, the long opcodes are not decomposed */
11895 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11897 * Since some instructions reference the original long vreg,
11898 * and some reference the two component vregs, it is quite hard
11899 * to determine when it needs to be global. So be conservative.
11901 if (!get_vreg_to_inst (cfg, vreg)) {
11902 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11904 if (cfg->verbose_level > 2)
11905 printf ("LONG VREG R%d made global.\n", vreg);
11909 * Make the component vregs volatile since the optimizations can
11910 * get confused otherwise.
11912 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11913 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11915 #endif
11917 g_assert (vreg != -1);
11919 prev_bb = vreg_to_bb [vreg];
11920 if (prev_bb == 0) {
11921 /* 0 is a valid block num */
11922 vreg_to_bb [vreg] = block_num + 1;
11923 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11924 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11925 continue;
11927 if (!get_vreg_to_inst (cfg, vreg)) {
11928 if (G_UNLIKELY (cfg->verbose_level > 2))
11929 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11931 switch (regtype) {
11932 case 'i':
11933 if (vreg_is_ref (cfg, vreg))
11934 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11935 else
11936 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11937 break;
11938 case 'l':
11939 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11940 break;
11941 case 'f':
11942 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11943 break;
11944 case 'v':
11945 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11946 break;
11947 default:
11948 g_assert_not_reached ();
11952 /* Flag as having been used in more than one bb */
11953 vreg_to_bb [vreg] = -1;
11959 /* If a variable is used in only one bblock, convert it into a local vreg */
11960 for (i = 0; i < cfg->num_varinfo; i++) {
11961 MonoInst *var = cfg->varinfo [i];
11962 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11964 switch (var->type) {
11965 case STACK_I4:
11966 case STACK_OBJ:
11967 case STACK_PTR:
11968 case STACK_MP:
11969 case STACK_VTYPE:
11970 #if SIZEOF_REGISTER == 8
11971 case STACK_I8:
11972 #endif
11973 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11974 /* Enabling this screws up the fp stack on x86 */
11975 case STACK_R8:
11976 #endif
11977 /* Arguments are implicitly global */
11978 /* Putting R4 vars into registers doesn't work currently */
11979 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11981 * Make that the variable's liveness interval doesn't contain a call, since
11982 * that would cause the lvreg to be spilled, making the whole optimization
11983 * useless.
11985 /* This is too slow for JIT compilation */
11986 #if 0
11987 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11988 MonoInst *ins;
11989 int def_index, call_index, ins_index;
11990 gboolean spilled = FALSE;
11992 def_index = -1;
11993 call_index = -1;
11994 ins_index = 0;
11995 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11996 const char *spec = INS_INFO (ins->opcode);
11998 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11999 def_index = ins_index;
12001 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12002 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12003 if (call_index > def_index) {
12004 spilled = TRUE;
12005 break;
12009 if (MONO_IS_CALL (ins))
12010 call_index = ins_index;
12012 ins_index ++;
12015 if (spilled)
12016 break;
12018 #endif
12020 if (G_UNLIKELY (cfg->verbose_level > 2))
12021 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
12022 var->flags |= MONO_INST_IS_DEAD;
12023 cfg->vreg_to_inst [var->dreg] = NULL;
12025 break;
12030 * Compress the varinfo and vars tables so the liveness computation is faster and
12031 * takes up less space.
12033 pos = 0;
12034 for (i = 0; i < cfg->num_varinfo; ++i) {
12035 MonoInst *var = cfg->varinfo [i];
12036 if (pos < i && cfg->locals_start == i)
12037 cfg->locals_start = pos;
12038 if (!(var->flags & MONO_INST_IS_DEAD)) {
12039 if (pos < i) {
12040 cfg->varinfo [pos] = cfg->varinfo [i];
12041 cfg->varinfo [pos]->inst_c0 = pos;
12042 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12043 cfg->vars [pos].idx = pos;
12044 #if SIZEOF_REGISTER == 4
12045 if (cfg->varinfo [pos]->type == STACK_I8) {
12046 /* Modify the two component vars too */
12047 MonoInst *var1;
12049 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12050 var1->inst_c0 = pos;
12051 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12052 var1->inst_c0 = pos;
12054 #endif
12056 pos ++;
12059 cfg->num_varinfo = pos;
12060 if (cfg->locals_start > cfg->num_varinfo)
12061 cfg->locals_start = cfg->num_varinfo;
12065 * mono_spill_global_vars:
12067 * Generate spill code for variables which are not allocated to registers,
12068 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12069 * code is generated which could be optimized by the local optimization passes.
12071 void
12072 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12074 MonoBasicBlock *bb;
12075 char spec2 [16];
12076 int orig_next_vreg;
12077 guint32 *vreg_to_lvreg;
12078 guint32 *lvregs;
12079 guint32 i, lvregs_len;
12080 gboolean dest_has_lvreg = FALSE;
12081 guint32 stacktypes [128];
12082 MonoInst **live_range_start, **live_range_end;
12083 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12085 *need_local_opts = FALSE;
12087 memset (spec2, 0, sizeof (spec2));
/* Map the ins spec regtype characters ('i'/'l'/'f'/'x') to stack types used by alloc_dreg (). */
12089 /* FIXME: Move this function to mini.c */
12090 stacktypes ['i'] = STACK_PTR;
12091 stacktypes ['l'] = STACK_I8;
12092 stacktypes ['f'] = STACK_R8;
12093 #ifdef MONO_ARCH_SIMD_INTRINSICS
12094 stacktypes ['x'] = STACK_VTYPE;
12095 #endif
12097 #if SIZEOF_REGISTER == 4
12098 /* Create MonoInsts for longs */
/*
 * On 32 bit targets a 64 bit variable is represented by two component vregs
 * (dreg + 1 = low word, dreg + 2 = high word); give each component an
 * OP_REGOFFSET referring to the corresponding half of the parent's stack slot.
 */
12099 for (i = 0; i < cfg->num_varinfo; i++) {
12100 MonoInst *ins = cfg->varinfo [i];
12102 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12103 switch (ins->type) {
12104 case STACK_R8:
12105 case STACK_I8: {
12106 MonoInst *tree;
/* R8 only needs the split when compiled as soft float (stored as two I4 words). */
12108 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12109 break;
12111 g_assert (ins->opcode == OP_REGOFFSET);
12113 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12114 g_assert (tree);
12115 tree->opcode = OP_REGOFFSET;
12116 tree->inst_basereg = ins->inst_basereg;
12117 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12119 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12120 g_assert (tree);
12121 tree->opcode = OP_REGOFFSET;
12122 tree->inst_basereg = ins->inst_basereg;
12123 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12124 break;
12126 default:
12127 break;
12131 #endif
12133 if (cfg->compute_gc_maps) {
12134 /* registers need liveness info even for !non refs */
12135 for (i = 0; i < cfg->num_varinfo; i++) {
12136 MonoInst *ins = cfg->varinfo [i];
12138 if (ins->opcode == OP_REGVAR)
12139 ins->flags |= MONO_INST_GC_TRACK;
12143 /* FIXME: widening and truncation */
12146 * As an optimization, when a variable allocated to the stack is first loaded into
12147 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12148 * the variable again.
/*
 * orig_next_vreg marks the boundary between the original vregs and the lvregs
 * allocated below: vregs < orig_next_vreg are original variables (used when
 * updating live ranges / GC liveness).
 */
12150 orig_next_vreg = cfg->next_vreg;
12151 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12152 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12153 lvregs_len = 0;
12156 * These arrays contain the first and last instructions accessing a given
12157 * variable.
12158 * Since we emit bblocks in the same order we process them here, and we
12159 * don't split live ranges, these will precisely describe the live range of
12160 * the variable, i.e. the instruction range where a valid value can be found
12161 * in the variables location.
12162 * The live range is computed using the liveness info computed by the liveness pass.
12163 * We can't use vmv->range, since that is an abstract live range, and we need
12164 * one which is instruction precise.
12165 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12167 /* FIXME: Only do this if debugging info is requested */
12168 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12169 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12170 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12171 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12173 /* Add spill loads/stores */
12174 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12175 MonoInst *ins;
12177 if (cfg->verbose_level > 2)
12178 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is per-bblock: values cached in a previous bblock are not valid here. */
12180 /* Clear vreg_to_lvreg array */
12181 for (i = 0; i < lvregs_len; i++)
12182 vreg_to_lvreg [lvregs [i]] = 0;
12183 lvregs_len = 0;
12185 cfg->cbb = bb;
12186 MONO_BB_FOR_EACH_INS (bb, ins) {
12187 const char *spec = INS_INFO (ins->opcode);
12188 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12189 gboolean store, no_lvreg;
12190 int sregs [MONO_MAX_SRC_REGS];
12192 if (G_UNLIKELY (cfg->verbose_level > 2))
12193 mono_print_ins (ins);
12195 if (ins->opcode == OP_NOP)
12196 continue;
12199 * We handle LDADDR here as well, since it can only be decomposed
12200 * when variable addresses are known.
12202 if (ins->opcode == OP_LDADDR) {
12203 MonoInst *var = ins->inst_p0;
12205 if (var->opcode == OP_VTARG_ADDR) {
12206 /* Happens on SPARC/S390 where vtypes are passed by reference */
12207 MonoInst *vtaddr = var->inst_left;
12208 if (vtaddr->opcode == OP_REGVAR) {
12209 ins->opcode = OP_MOVE;
12210 ins->sreg1 = vtaddr->dreg;
12212 else if (var->inst_left->opcode == OP_REGOFFSET) {
12213 ins->opcode = OP_LOAD_MEMBASE;
12214 ins->inst_basereg = vtaddr->inst_basereg;
12215 ins->inst_offset = vtaddr->inst_offset;
12216 } else
12217 NOT_IMPLEMENTED;
12218 } else {
12219 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack variable becomes basereg + offset. */
12221 ins->opcode = OP_ADD_IMM;
12222 ins->sreg1 = var->inst_basereg;
12223 ins->inst_imm = var->inst_offset;
12226 *need_local_opts = TRUE;
12227 spec = INS_INFO (ins->opcode);
/* All IL-level opcodes must already have been lowered to machine IR by this point. */
12230 if (ins->opcode < MONO_CEE_LAST) {
12231 mono_print_ins (ins);
12232 g_assert_not_reached ();
12236 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12237 * src register.
12238 * FIXME:
12240 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the generic dreg/sreg handling below treats the base register as a source; swapped back after processing (see 'if (store)'). */
12241 tmp_reg = ins->dreg;
12242 ins->dreg = ins->sreg2;
12243 ins->sreg2 = tmp_reg;
12244 store = TRUE;
12246 spec2 [MONO_INST_DEST] = ' ';
12247 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12248 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12249 spec2 [MONO_INST_SRC3] = ' ';
12250 spec = spec2;
12251 } else if (MONO_IS_STORE_MEMINDEX (ins))
12252 g_assert_not_reached ();
12253 else
12254 store = FALSE;
12255 no_lvreg = FALSE;
12257 if (G_UNLIKELY (cfg->verbose_level > 2)) {
12258 printf ("\t %.3s %d", spec, ins->dreg);
12259 num_sregs = mono_inst_get_src_registers (ins, sregs);
12260 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
12261 printf (" %d", sregs [srcindex]);
12262 printf ("\n");
12265 /***************/
12266 /* DREG */
12267 /***************/
12268 regtype = spec [MONO_INST_DEST];
12269 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
12270 prev_dreg = -1;
/* If the dreg is a global variable, rewrite it: either to the variable's hreg, or to a fresh lvreg plus a spill store after the instruction. */
12272 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
12273 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
12274 MonoInst *store_ins;
12275 int store_opcode;
12276 MonoInst *def_ins = ins;
12277 int dreg = ins->dreg; /* The original vreg */
12279 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12281 if (var->opcode == OP_REGVAR) {
12282 ins->dreg = var->dreg;
12283 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12285 * Instead of emitting a load+store, use a _membase opcode.
12287 g_assert (var->opcode == OP_REGOFFSET);
12288 if (ins->opcode == OP_MOVE) {
/* A move onto itself through memory is a no-op. */
12289 NULLIFY_INS (ins);
12290 def_ins = NULL;
12291 } else {
12292 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12293 ins->inst_basereg = var->inst_basereg;
12294 ins->inst_offset = var->inst_offset;
12295 ins->dreg = -1;
12297 spec = INS_INFO (ins->opcode);
12298 } else {
12299 guint32 lvreg;
12301 g_assert (var->opcode == OP_REGOFFSET);
12303 prev_dreg = ins->dreg;
12305 /* Invalidate any previous lvreg for this vreg */
12306 vreg_to_lvreg [ins->dreg] = 0;
12308 lvreg = 0;
/* Under soft float an R8 store is performed as a 64-bit integer store. */
12310 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12311 regtype = 'l';
12312 store_opcode = OP_STOREI8_MEMBASE_REG;
12315 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12317 if (regtype == 'l') {
/* 64-bit value on a 32-bit target: spill the two component words separately. */
12318 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12319 mono_bblock_insert_after_ins (bb, ins, store_ins);
12320 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12321 mono_bblock_insert_after_ins (bb, ins, store_ins);
12322 def_ins = store_ins;
12324 else {
12325 g_assert (store_opcode != OP_STOREV_MEMBASE);
12327 /* Try to fuse the store into the instruction itself */
12328 /* FIXME: Add more instructions */
12329 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* Constant definition: turn it into a store-immediate directly to the stack slot. */
12330 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12331 ins->inst_imm = ins->inst_c0;
12332 ins->inst_destbasereg = var->inst_basereg;
12333 ins->inst_offset = var->inst_offset;
12334 spec = INS_INFO (ins->opcode);
12335 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* Register-to-register move: turn it into a plain store of the source. */
12336 ins->opcode = store_opcode;
12337 ins->inst_destbasereg = var->inst_basereg;
12338 ins->inst_offset = var->inst_offset;
12340 no_lvreg = TRUE;
/* The instruction became a store_membase: apply the same dreg/sreg2 swap as above so the sreg pass sees the base register. */
12342 tmp_reg = ins->dreg;
12343 ins->dreg = ins->sreg2;
12344 ins->sreg2 = tmp_reg;
12345 store = TRUE;
12347 spec2 [MONO_INST_DEST] = ' ';
12348 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12349 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12350 spec2 [MONO_INST_SRC3] = ' ';
12351 spec = spec2;
12352 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12353 // FIXME: The backends expect the base reg to be in inst_basereg
12354 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12355 ins->dreg = -1;
12356 ins->inst_basereg = var->inst_basereg;
12357 ins->inst_offset = var->inst_offset;
12358 spec = INS_INFO (ins->opcode);
12359 } else {
12360 /* printf ("INS: "); mono_print_ins (ins); */
12361 /* Create a store instruction */
12362 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12364 /* Insert it after the instruction */
12365 mono_bblock_insert_after_ins (bb, ins, store_ins);
12367 def_ins = store_ins;
12370 * We can't assign ins->dreg to var->dreg here, since the
12371 * sregs could use it. So set a flag, and do it after
12372 * the sregs.
12374 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
12375 dest_has_lvreg = TRUE;
/* First definition seen for this vreg starts its precise live range. */
12380 if (def_ins && !live_range_start [dreg]) {
12381 live_range_start [dreg] = def_ins;
12382 live_range_start_bb [dreg] = bb;
12385 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
12386 MonoInst *tmp;
12388 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
12389 tmp->inst_c1 = dreg;
12390 mono_bblock_insert_after_ins (bb, def_ins, tmp);
12394 /************/
12395 /* SREGS */
12396 /************/
12397 num_sregs = mono_inst_get_src_registers (ins, sregs);
/* NOTE(review): iterates a fixed 3 slots rather than num_sregs; relies on spec
 * holding ' ' for unused slots and on sregs [] being consistent with it — confirm. */
12398 for (srcindex = 0; srcindex < 3; ++srcindex) {
12399 regtype = spec [MONO_INST_SRC1 + srcindex];
12400 sreg = sregs [srcindex];
12402 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
12403 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
12404 MonoInst *var = get_vreg_to_inst (cfg, sreg);
12405 MonoInst *use_ins = ins;
12406 MonoInst *load_ins;
12407 guint32 load_opcode;
12409 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hreg: just substitute it, no load needed. */
12410 sregs [srcindex] = var->dreg;
12411 //mono_inst_set_src_registers (ins, sregs);
12412 live_range_end [sreg] = use_ins;
12413 live_range_end_bb [sreg] = bb;
12415 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12416 MonoInst *tmp;
12418 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12419 /* var->dreg is a hreg */
12420 tmp->inst_c1 = sreg;
12421 mono_bblock_insert_after_ins (bb, ins, tmp);
12424 continue;
12427 g_assert (var->opcode == OP_REGOFFSET);
12429 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
12431 g_assert (load_opcode != OP_LOADV_MEMBASE);
12433 if (vreg_to_lvreg [sreg]) {
12434 g_assert (vreg_to_lvreg [sreg] != -1);
12436 /* The variable is already loaded to an lvreg */
12437 if (G_UNLIKELY (cfg->verbose_level > 2))
12438 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
12439 sregs [srcindex] = vreg_to_lvreg [sreg];
12440 //mono_inst_set_src_registers (ins, sregs);
12441 continue;
12444 /* Try to fuse the load into the instruction */
12445 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
12446 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
12447 sregs [0] = var->inst_basereg;
12448 //mono_inst_set_src_registers (ins, sregs);
12449 ins->inst_offset = var->inst_offset;
12450 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
12451 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
12452 sregs [1] = var->inst_basereg;
12453 //mono_inst_set_src_registers (ins, sregs);
12454 ins->inst_offset = var->inst_offset;
12455 } else {
12456 if (MONO_IS_REAL_MOVE (ins)) {
/* A move from the variable: load straight into the move's destination and drop the move. */
12457 ins->opcode = OP_NOP;
12458 sreg = ins->dreg;
12459 } else {
12460 //printf ("%d ", srcindex); mono_print_ins (ins);
12462 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the loaded value for later uses, unless the variable is volatile/indirect or caching was disabled for this ins. */
12464 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12465 if (var->dreg == prev_dreg) {
12467 * sreg refers to the value loaded by the load
12468 * emitted below, but we need to use ins->dreg
12469 * since it refers to the store emitted earlier.
12471 sreg = ins->dreg;
12473 g_assert (sreg != -1);
12474 vreg_to_lvreg [var->dreg] = sreg;
12475 g_assert (lvregs_len < 1024);
12476 lvregs [lvregs_len ++] = var->dreg;
12480 sregs [srcindex] = sreg;
12481 //mono_inst_set_src_registers (ins, sregs);
12483 if (regtype == 'l') {
/* 64-bit value on a 32-bit target: load the two component words separately. */
12484 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
12485 mono_bblock_insert_before_ins (bb, ins, load_ins);
12486 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
12487 mono_bblock_insert_before_ins (bb, ins, load_ins);
12488 use_ins = load_ins;
12490 else {
12491 #if SIZEOF_REGISTER == 4
12492 g_assert (load_opcode != OP_LOADI8_MEMBASE);
12493 #endif
12494 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
12495 mono_bblock_insert_before_ins (bb, ins, load_ins);
12496 use_ins = load_ins;
/* Only original variables (not freshly allocated lvregs) get live-range updates. */
12500 if (var->dreg < orig_next_vreg) {
12501 live_range_end [var->dreg] = use_ins;
12502 live_range_end_bb [var->dreg] = bb;
12505 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12506 MonoInst *tmp;
12508 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12509 tmp->inst_c1 = var->dreg;
12510 mono_bblock_insert_after_ins (bb, ins, tmp);
12514 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG pass (see comment there): only now that the sregs have been processed is it safe to record the dreg's lvreg. */
12516 if (dest_has_lvreg) {
12517 g_assert (ins->dreg != -1);
12518 vreg_to_lvreg [prev_dreg] = ins->dreg;
12519 g_assert (lvregs_len < 1024);
12520 lvregs [lvregs_len ++] = prev_dreg;
12521 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
12524 if (store) {
12525 tmp_reg = ins->dreg;
12526 ins->dreg = ins->sreg2;
12527 ins->sreg2 = tmp_reg;
12530 if (MONO_IS_CALL (ins)) {
/* The lvreg cache is reset at call boundaries — presumably because cached values may be clobbered across the call. */
12531 /* Clear vreg_to_lvreg array */
12532 for (i = 0; i < lvregs_len; i++)
12533 vreg_to_lvreg [lvregs [i]] = 0;
12534 lvregs_len = 0;
12535 } else if (ins->opcode == OP_NOP) {
12536 ins->dreg = -1;
12537 MONO_INST_NULLIFY_SREGS (ins);
12540 if (cfg->verbose_level > 2)
12541 mono_print_ins_index (1, ins);
12544 /* Extend the live range based on the liveness info */
12545 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
12546 for (i = 0; i < cfg->num_varinfo; i ++) {
12547 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
12549 if (vreg_is_volatile (cfg, vi->vreg))
12550 /* The liveness info is incomplete */
12551 continue;
12553 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
12554 /* Live from at least the first ins of this bb */
12555 live_range_start [vi->vreg] = bb->code;
12556 live_range_start_bb [vi->vreg] = bb;
12559 if (mono_bitset_test_fast (bb->live_out_set, i)) {
12560 /* Live at least until the last ins of this bb */
12561 live_range_end [vi->vreg] = bb->last_ins;
12562 live_range_end_bb [vi->vreg] = bb;
12568 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
12570 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
12571 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12573 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
12574 for (i = 0; i < cfg->num_varinfo; ++i) {
12575 int vreg = MONO_VARINFO (cfg, i)->vreg;
12576 MonoInst *ins;
12578 if (live_range_start [vreg]) {
12579 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
12580 ins->inst_c0 = i;
12581 ins->inst_c1 = vreg;
12582 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
12584 if (live_range_end [vreg]) {
12585 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
12586 ins->inst_c0 = i;
12587 ins->inst_c1 = vreg;
12588 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
12589 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
12590 else
12591 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
12595 #endif
12597 g_free (live_range_start);
12598 g_free (live_range_end);
12599 g_free (live_range_start_bb);
12600 g_free (live_range_end_bb);
12604 * FIXME:
12605 * - use 'iadd' instead of 'int_add'
12606 * - handling ovf opcodes: decompose in method_to_ir.
12607 * - unify iregs/fregs
12608 * -> partly done, the missing parts are:
12609 * - a more complete unification would involve unifying the hregs as well, so
12610 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12611 * would no longer map to the machine hregs, so the code generators would need to
12612 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12613 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12614 * fp/non-fp branches speeds it up by about 15%.
12615 * - use sext/zext opcodes instead of shifts
12616 * - add OP_ICALL
12617 * - get rid of TEMPLOADs if possible and use vregs instead
12618 * - clean up usage of OP_P/OP_ opcodes
12619 * - cleanup usage of DUMMY_USE
12620 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12621 * stack
12622 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12623 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12624 * - make sure handle_stack_args () is called before the branch is emitted
12625 * - when the new IR is done, get rid of all unused stuff
12626 * - COMPARE/BEQ as separate instructions or unify them ?
12627 * - keeping them separate allows specialized compare instructions like
12628 * compare_imm, compare_membase
12629 * - most back ends unify fp compare+branch, fp compare+ceq
12630 * - integrate mono_save_args into inline_method
12631 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
12632 * - handle long shift opts on 32 bit platforms somehow: they require
12633 * 3 sregs (2 for arg1 and 1 for arg2)
12634 * - make byref a 'normal' type.
12635 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12636 * variable if needed.
12637 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12638 * like inline_method.
12639 * - remove inlining restrictions
12640 * - fix LNEG and enable cfold of INEG
12641 * - generalize x86 optimizations like ldelema as a peephole optimization
12642 * - add store_mem_imm for amd64
12643 * - optimize the loading of the interruption flag in the managed->native wrappers
12644 * - avoid special handling of OP_NOP in passes
12645 * - move code inserting instructions into one function/macro.
12646 * - try a coalescing phase after liveness analysis
12647 * - add float -> vreg conversion + local optimizations on !x86
12648 * - figure out how to handle decomposed branches during optimizations, ie.
12649 * compare+branch, op_jump_table+op_br etc.
12650 * - promote RuntimeXHandles to vregs
12651 * - vtype cleanups:
12652 * - add a NEW_VARLOADA_VREG macro
12653 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12654 * accessing vtype fields.
12655 * - get rid of I8CONST on 64 bit platforms
12656 * - dealing with the increase in code size due to branches created during opcode
12657 * decomposition:
12658 * - use extended basic blocks
12659 * - all parts of the JIT
12660 * - handle_global_vregs () && local regalloc
12661 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12662 * - sources of increase in code size:
12663 * - vtypes
12664 * - long compares
12665 * - isinst and castclass
12666 * - lvregs not allocated to global registers even if used multiple times
12667 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12668 * meaningful.
12669 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12670 * - add all micro optimizations from the old JIT
12671 * - put tree optimizations into the deadce pass
12672 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12673 * specific function.
12674 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12675 * fcompare + branchCC.
12676 * - create a helper function for allocating a stack slot, taking into account
12677 * MONO_CFG_HAS_SPILLUP.
12678 * - merge r68207.
12679 * - merge the ia64 switch changes.
12680 * - optimize mono_regstate2_alloc_int/float.
12681 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12682 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12683 * parts of the tree could be separated by other instructions, killing the tree
12684 * arguments, or stores killing loads etc. Also, should we fold loads into other
12685 * instructions if the result of the load is used multiple times ?
12686 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12687 * - LAST MERGE: 108395.
12688 * - when returning vtypes in registers, generate IR and append it to the end of the
12689 * last bb instead of doing it in the epilog.
12690 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12695 NOTES
12696 -----
12698 - When to decompose opcodes:
12699 - earlier: this makes some optimizations hard to implement, since the low level IR
12700 no longer contains the necessary information. But it is easier to do.
12701 - later: harder to implement, enables more optimizations.
12702 - Branches inside bblocks:
12703 - created when decomposing complex opcodes.
12704 - branches to another bblock: harmless, but not tracked by the branch
12705 optimizations, so need to branch to a label at the start of the bblock.
12706 - branches to inside the same bblock: very problematic, trips up the local
12707 reg allocator. Can be fixed by splitting the current bblock, but that is a
12708 complex operation, since some local vregs can become global vregs etc.
12709 - Local/global vregs:
12710 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12711 local register allocator.
12712 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12713 structure, created by mono_create_var (). Assigned to hregs or the stack by
12714 the global register allocator.
12715 - When to do optimizations like alu->alu_imm:
12716 - earlier -> saves work later on since the IR will be smaller/simpler
12717 - later -> can work on more instructions
12718 - Handling of valuetypes:
12719 - When a vtype is pushed on the stack, a new temporary is created, an
12720 instruction computing its address (LDADDR) is emitted and pushed on
12721 the stack. Need to optimize cases when the vtype is used immediately as in
12722 argument passing, stloc etc.
12723 - Instead of the to_end stuff in the old JIT, simply call the function handling
12724 the values on the stack before emitting the last instruction of the bb.
12727 #endif /* DISABLE_JIT */